Column schema (name: type, observed range):
- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 2 to 616
- content_id: string, length 40
- detected_licenses: list, length 0 to 69
- license_type: string, 2 classes
- repo_name: string, length 5 to 118
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, length 4 to 63
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64, 2.91k to 686M, nullable
- star_events_count: int64, 0 to 209k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 23 classes
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string, 213 classes
- src_encoding: string, 30 classes
- language: string, 1 class
- is_vendor: bool
- is_generated: bool
- length_bytes: int64, 2 to 10.3M
- extension: string, 246 classes
- content: string, length 2 to 10.3M
- authors: list, length 1
- author_id: string, length 0 to 212

Each record below lists these 29 fields in this order, separated by "|", with the complete source file inlined in the content field.
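Because every record embeds a full source file, the table is easier to query programmatically than to read row by row. Below is a minimal sketch of such a query; it assumes the dump has been exported to a Parquet file, and both the filename `code_files.parquet` and the use of pandas are illustrative assumptions rather than part of the dataset itself.

```python
# Hypothetical sketch: load the dump (assumed to be exported as Parquet)
# and filter it on a few of the columns listed in the schema above.
import pandas as pd

df = pd.read_parquet("code_files.parquet")  # assumed filename

# Keep only permissively licensed, non-vendored, non-generated Python files.
subset = df[
    (df["license_type"] == "permissive")
    & (df["language"] == "Python")
    & ~df["is_vendor"]
    & ~df["is_generated"]
]

for _, row in subset.head(3).iterrows():
    print(row["repo_name"], row["path"], row["length_bytes"], "bytes")
```

The sample records that follow illustrate the kind of rows such a query returns.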
72ef04a7112fdfe6416035af77af107421f3f6c0
|
699a2d030163de1230a7b3d274e8913268526df9
|
/practice/22.py
|
d9a48cf29f89cc0e475f7d8607059f4e1b93ddc9
|
[] |
no_license
|
chaoyifei/python
|
b04bde2fcb28fb60777da40313299a7598595d47
|
3679f442ee8ab1f8b18a2989382ea48ea910ec8f
|
refs/heads/master
| 2020-06-25T04:21:53.218396
| 2018-06-11T16:15:50
| 2018-06-11T16:15:50
| 96,958,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 116
|
py
|
def fun(age,rank):
    if rank==1:
        return age
    else:
        return fun(age+2,rank-1)
print(fun(10,5))
|
[
"451089603@qq.com"
] |
451089603@qq.com
|
ae5d4af3010b6833426055e197315c7f3f116d38
|
a6c888da394e0e961cea3a2c025a686ea949b2b6
|
/richweb/node_modules/sails-mongo/node_modules/mongodb/node_modules/bson/build/config.gypi
|
43fececb4b8008073f10977309b9fc5b8316e52a
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
kyleotoole/RichWebGlenKyle
|
3f060993fbfa86d6a846ebc7bbaceb1951ad0ab7
|
ed98cf04c2a95bea0a720bfb1562738a7bd6834c
|
refs/heads/master
| 2021-01-01T05:49:01.115932
| 2013-12-10T13:06:58
| 2013-12-10T13:06:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,015
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 46,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "/usr",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"nodedir": "/home/kylerdp/.node-gyp/0.10.22",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"pre": "",
"sign_git_tag": "",
"always_auth": "",
"user_agent": "node/v0.10.22 linux x64",
"bin_links": "true",
"description": "true",
"fetch_retries": "2",
"init_version": "0.0.0",
"user": "1001",
"force": "",
"ignore": "",
"cache_min": "10",
"editor": "vi",
"rollback": "true",
"cache_max": "null",
"userconfig": "/home/kylerdp/.npmrc",
"coverage": "",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/home/kylerdp/tmp",
"userignorefile": "/home/kylerdp/.npmignore",
"yes": "",
"depth": "null",
"save_dev": "",
"usage": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr",
"registry": "https://registry.npmjs.org/",
"browser": "",
"cache_lock_wait": "10000",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/kylerdp/.npm",
"npaturl": "http://npat.npmjs.org/",
"searchsort": "name",
"version": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "18",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"global": "",
"link": "",
"save": "true",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "",
"node_version": "v0.10.22",
"tag": "latest",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"strict_ssl": "true",
"username": "",
"dev": "",
"globalconfig": "/usr/etc/npmrc",
"init_module": "/home/kylerdp/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/etc/npmignore",
"cache_lock_retries": "10",
"group": "1001",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
|
[
"kyle-360@hotmail.com"
] |
kyle-360@hotmail.com
|
64a6de3eb6458cc47ecc4139a3032c93beacdb52
|
b3d7f2c8b09a6fc4c1965b9d48baaf6927762e4f
|
/test/hummingbot/connector/exchange/lbank/test_lbank_exchange.py
|
3a70b0ae996f1fb9605e64a1de965701b6bd8e18
|
[
"Apache-2.0"
] |
permissive
|
SaucyOfficiale/hummingbot
|
29f836a66881ae9e0b2a416d2220dd9f5e70a883
|
23a9a90047384176483d427ddfc73c2e21ed4512
|
refs/heads/master
| 2023-01-07T16:36:41.192517
| 2022-12-21T11:52:41
| 2022-12-21T11:52:41
| 183,094,523
| 1
| 0
|
Apache-2.0
| 2019-04-23T20:54:05
| 2019-04-23T20:54:05
| null |
UTF-8
|
Python
| false
| false
| 24,671
|
py
|
import json
import re
from decimal import Decimal
from typing import Any, Callable, Dict, List, Optional, Tuple
from aioresponses import aioresponses
from aioresponses.core import RequestCall
from hummingbot.client.config.client_config_map import ClientConfigMap
from hummingbot.client.config.config_helpers import ClientConfigAdapter
from hummingbot.connector.exchange.lbank import lbank_constants as CONSTANTS, lbank_web_utils as web_utils
from hummingbot.connector.exchange.lbank.lbank_exchange import LbankExchange
from hummingbot.connector.test_support.exchange_connector_test import AbstractExchangeConnectorTests
from hummingbot.connector.trading_rule import TradingRule
from hummingbot.core.data_type.in_flight_order import InFlightOrder
from hummingbot.core.data_type.trade_fee import AddedToCostTradeFee, TokenAmount, TradeFeeBase
from hummingbot.core.event.events import OrderType
class LbankExchangeTests(AbstractExchangeConnectorTests.ExchangeConnectorTests):
def setUp(self) -> None:
self.api_key = "someKey"
self.api_secret_key = "someSecretKey"
self.auth_method = "HmacSHA256"
self.ex_trading_pair = self.exchange_symbol_for_tokens(self.base_asset, self.quote_asset)
super().setUp()
@property
def all_symbols_url(self):
url = web_utils.public_rest_url(path_url=CONSTANTS.LBANK_TRADING_PAIRS_PATH_URL)
return url
@property
def latest_prices_url(self):
url = web_utils.public_rest_url(path_url=CONSTANTS.LBANK_CURRENT_MARKET_DATA_PATH_URL)
url = f"{url}?symbol={self.base_asset.lower()}_{self.quote_asset.lower()}"
regex_url = re.compile(f"^{url}".replace(".", r"\.").replace("?", r"\?"))
return regex_url
@property
def network_status_url(self):
url = web_utils.public_rest_url(path_url=CONSTANTS.LBANK_GET_TIMESTAMP_PATH_URL)
return url
@property
def trading_rules_url(self):
return self.all_symbols_url
@property
def order_creation_url(self):
url = web_utils.private_rest_url(path_url=CONSTANTS.LBANK_CREATE_ORDER_PATH_URL)
return url
@property
def balance_url(self):
url = web_utils.private_rest_url(path_url=CONSTANTS.LBANK_USER_ASSET_PATH_URL)
return url
@property
def all_symbols_request_mock_response(self):
return {
"result": True,
"data": [
{
"symbol": f"{self.base_asset.lower()}_{self.quote_asset.lower()}",
"quantityAccuracy": "4",
"minTranQua": "0.0001",
"priceAccuracy": "2",
}
],
"error_code": 0,
"ts": 1655827232443,
}
@property
def all_symbols_including_invalid_pair_mock_response(self) -> Tuple[str, Any]:
response = {
"result": True,
"data": [{"unrecognized_data_field": "INVALID-PAIR"}],
"error_code": 0,
"ts": 1655827232443,
}
return "INVALID-PAIR", response
@property
def latest_prices_request_mock_response(self):
return {
"result": True,
"data": [
{
"symbol": f"{self.base_asset.lower()}_{self.quote_asset.lower()}",
"ticker": {
"high": 10000,
"vol": 33281.2314,
"low": 9998,
"change": 2,
"turnover": 688968000.92,
"latest": self.expected_latest_price,
},
"timestamp": 1655827019966,
}
],
"error_code": 0,
"ts": 1655827021679,
}
@property
def network_status_request_successful_mock_response(self):
return {"result": True, "data": 1655827102315, "error_code": 0, "ts": 1655827102315}
@property
def trading_rules_request_mock_response(self):
return self.all_symbols_request_mock_response
@property
def trading_rules_request_erroneous_mock_response(self):
# Missing data fields for Trading Rules
response = {
"result": True,
"data": [{"symbol": f"{self.base_asset.lower()}_{self.quote_asset.lower()}"}],
"error_code": 0,
"ts": 1655827232443,
}
return response
@property
def order_creation_request_successful_mock_response(self):
return {"result": True, "data": {"order_id": self.expected_exchange_order_id}}
@property
def balance_request_mock_response_for_base_and_quote(self):
return {
"result": True,
"data": {
"freeze": { # Locked Balance
self.base_asset.lower(): 5.0,
self.quote_asset.lower(): 0.0
},
"asset": { # Total Balance
self.base_asset.lower(): 15.0,
self.quote_asset.lower(): 2000.0
},
"free": { # Available Balance
self.base_asset.lower(): 10.0,
self.quote_asset.lower(): 2000.0
},
},
}
@property
def balance_request_mock_response_only_base(self):
return {
"result": True,
"data": {
"freeze": { # Locked Balance
self.base_asset.lower(): 5.0
},
"asset": { # Total Balance
self.base_asset.lower(): 15.0
},
"free": { # Available Balance
self.base_asset.lower(): 10.0
},
},
}
@property
def balance_event_websocket_update(self):
return {
"data": {
"asset": "15",
"assetCode": self.base_asset.lower(),
"free": "10",
"freeze": "5",
"time": 1655785565477,
"type": "ORDER_CREATE",
},
"SERVER": "V2",
"type": "assetUpdate",
"TS": "2022-06-21T12:26:05.478",
}
@property
def expected_latest_price(self):
return 9999.9
@property
def expected_supported_order_types(self):
return [OrderType.LIMIT, OrderType.MARKET]
@property
def expected_trading_rule(self):
return TradingRule(
trading_pair=self.trading_pair,
min_order_size=Decimal(self.trading_rules_request_mock_response["data"][0]["minTranQua"]),
min_base_amount_increment=Decimal(
f"1e-{self.trading_rules_request_mock_response['data'][0]['quantityAccuracy']}"
),
min_price_increment=Decimal(
f"1e-{self.trading_rules_request_mock_response['data'][0]['priceAccuracy']}"),
)
@property
def expected_logged_error_for_erroneous_trading_rule(self):
erroneous_rule = self.trading_rules_request_erroneous_mock_response["data"][0]
return f"Error parsing trading pair rule {erroneous_rule}. Skipping."
@property
def expected_exchange_order_id(self):
return "07b37475-6624-46ba-8668-09cc609e7032"
@property
def expected_partial_fill_price(self) -> Decimal:
return Decimal(10500)
@property
def expected_partial_fill_amount(self) -> Decimal:
return Decimal("0.5")
@property
def expected_fill_fee(self) -> TradeFeeBase:
return AddedToCostTradeFee(
flat_fees=[TokenAmount(token=self.base_asset, amount=Decimal("1e-6"))]
)
@property
def expected_fill_trade_id(self) -> str:
return "TrID1"
@property
def is_cancel_request_executed_synchronously_by_server(self) -> bool:
return False
@property
def is_order_fill_http_update_included_in_status_update(self) -> bool:
return True
@property
def is_order_fill_http_update_executed_during_websocket_order_event_processing(self) -> bool:
return True
def exchange_symbol_for_tokens(self, base_token: str, quote_token: str) -> str:
return f"{base_token.lower()}_{quote_token.lower()}"
def create_exchange_instance(self):
client_config_map = ClientConfigAdapter(ClientConfigMap())
return LbankExchange(
client_config_map=client_config_map,
lbank_api_key=self.api_key,
lbank_secret_key=self.api_secret_key,
lbank_auth_method=self.auth_method,
trading_pairs=[self.trading_pair])
def validate_auth_credentials_present(self, request_call: RequestCall):
request_headers = request_call.kwargs["headers"]
request_data = request_call.kwargs["data"]
self.assertIn("echostr", request_headers)
self.assertIn("signature_method", request_headers)
self.assertEqual(self.auth_method, request_headers["signature_method"])
self.assertIn("timestamp", request_headers)
self.assertIn("Content-Type", request_headers)
self.assertEqual("application/x-www-form-urlencoded", request_headers["Content-Type"])
self.assertIn("sign", request_data)
def validate_order_creation_request(self, order: InFlightOrder, request_call: RequestCall):
request_data = request_call.kwargs["data"]
self.assertEqual(self.exchange_symbol_for_tokens(self.base_asset, self.quote_asset), request_data["symbol"])
self.assertIn("sign", request_data)
self.assertIn("api_key", request_data)
self.assertEqual(self.api_key, request_data["api_key"])
self.assertIn("type", request_data)
self.assertEqual(order.trade_type.name.lower(), request_data["type"])
self.assertIn("price", request_data)
self.assertEqual(order.price, Decimal(request_data["price"]))
self.assertIn("amount", request_data)
self.assertEqual(order.amount, Decimal(request_data["amount"]))
self.assertIn("custom_id", request_data)
self.assertEqual(order.client_order_id, request_data["custom_id"])
def validate_order_cancelation_request(self, order: InFlightOrder, request_call: RequestCall):
request_data = request_call.kwargs["data"]
self.assertIn("sign", request_data)
self.assertIn("api_key", request_data)
self.assertEqual(self.api_key, request_data["api_key"])
self.assertIn("symbol", request_data)
self.assertEqual(self.exchange_symbol_for_tokens(self.base_asset, self.quote_asset), request_data["symbol"])
self.assertIn("customer_id", request_data)
self.assertEqual(order.client_order_id, request_data["customer_id"])
def validate_order_status_request(self, order: InFlightOrder, request_call: RequestCall):
request_data = request_call.kwargs["data"]
self.assertIn("sign", request_data)
self.assertIn("api_key", request_data)
self.assertEqual(self.api_key, request_data["api_key"])
self.assertIn("symbol", request_data)
self.assertEqual(self.exchange_symbol_for_tokens(self.base_asset, self.quote_asset), request_data["symbol"])
self.assertIn("order_id", request_data)
self.assertEqual(order.exchange_order_id, request_data["order_id"])
def validate_trades_request(self, order: InFlightOrder, request_call: RequestCall):
request_data = request_call.kwargs["data"]
self.assertIn("api_key", request_data)
self.assertEqual(self.api_key, request_data["api_key"])
self.assertIn("symbol", request_data)
self.assertEqual(self.exchange_symbol_for_tokens(self.base_asset, self.quote_asset), request_data["symbol"])
self.assertIn("order_id", request_data)
self.assertEqual(order.exchange_order_id, request_data["order_id"])
def configure_successful_cancelation_response(
self, order: InFlightOrder, mock_api: aioresponses, callback: Optional[Callable] = lambda *args, **kwargs: None
) -> str:
url = web_utils.private_rest_url(path_url=CONSTANTS.LBANK_CANCEL_ORDER_PATH_URL)
response = {"result": True, "data": {"customer_id": order.client_order_id}}
mock_api.post(url, body=json.dumps(response), callback=callback)
return url
def configure_erroneous_cancelation_response(
self, order: InFlightOrder, mock_api: aioresponses, callback: Optional[Callable] = lambda *args, **kwargs: None
) -> str:
url = web_utils.private_rest_url(path_url=CONSTANTS.LBANK_CANCEL_ORDER_PATH_URL)
response = {"result": False, "data": {"error": [order.client_order_id]}}
mock_api.post(url, body=json.dumps(response), callback=callback)
return url
def configure_one_successful_one_erroneous_cancel_all_response(
self, successful_order: InFlightOrder, erroneous_order: InFlightOrder, mock_api: aioresponses
) -> List[str]:
"""
:return: a list of all configured URLs for the cancelations
"""
all_urls = []
url = self.configure_successful_cancelation_response(order=successful_order, mock_api=mock_api)
all_urls.append(url)
url = self.configure_erroneous_cancelation_response(order=erroneous_order, mock_api=mock_api)
all_urls.append(url)
return all_urls
def configure_completely_filled_order_status_response(
self, order: InFlightOrder, mock_api: aioresponses, callback: Optional[Callable] = lambda *args, **kwargs: None
) -> str:
url = web_utils.private_rest_url(path_url=CONSTANTS.LBANK_ORDER_UPDATES_PATH_URL)
response = self._order_status_request_completely_filled_mock_response(order=order)
mock_api.post(url, body=json.dumps(response), callback=callback)
return url
def configure_canceled_order_status_response(
self, order: InFlightOrder, mock_api: aioresponses, callback: Optional[Callable] = lambda *args, **kwargs: None
) -> str:
url = web_utils.private_rest_url(path_url=CONSTANTS.LBANK_ORDER_UPDATES_PATH_URL)
response = self._order_status_request_canceled_mock_response(order=order)
mock_api.post(url, body=json.dumps(response), callback=callback)
return url
def configure_open_order_status_response(
self, order: InFlightOrder, mock_api: aioresponses, callback: Optional[Callable] = lambda *args, **kwargs: None
) -> str:
url = web_utils.private_rest_url(path_url=CONSTANTS.LBANK_ORDER_UPDATES_PATH_URL)
response = self._order_status_request_open_mock_response(order=order)
mock_api.post(url, body=json.dumps(response), callback=callback)
return url
def configure_http_error_order_status_response(
self, order: InFlightOrder, mock_api: aioresponses, callback: Optional[Callable] = lambda *args, **kwargs: None
) -> str:
url = web_utils.private_rest_url(path_url=CONSTANTS.LBANK_ORDER_UPDATES_PATH_URL)
mock_api.post(url, status=404, callback=callback)
return url
def configure_partially_filled_order_status_response(
self, order: InFlightOrder, mock_api: aioresponses, callback: Optional[Callable] = lambda *args, **kwargs: None
) -> str:
url = web_utils.private_rest_url(path_url=CONSTANTS.LBANK_ORDER_UPDATES_PATH_URL)
response = self._order_status_request_partially_filled_mock_response(order=order)
mock_api.post(url, body=json.dumps(response), callback=callback)
return url
def configure_partial_fill_trade_response(
self, order: InFlightOrder, mock_api: aioresponses, callback: Optional[Callable] = lambda *args, **kwargs: None
) -> str:
url = web_utils.private_rest_url(path_url=CONSTANTS.LBANK_TRADE_UPDATES_PATH_URL)
response = self._order_fills_request_partial_fill_mock_response(order=order)
mock_api.post(url, body=json.dumps(response), callback=callback)
return url
def configure_full_fill_trade_response(
self, order: InFlightOrder, mock_api: aioresponses, callback: Optional[Callable] = lambda *args, **kwargs: None
) -> str:
url = web_utils.private_rest_url(path_url=CONSTANTS.LBANK_TRADE_UPDATES_PATH_URL)
response = self._order_fills_request_full_fill_mock_response(order=order)
mock_api.post(url, body=json.dumps(response), callback=callback)
return url
def configure_erroneous_http_fill_trade_response(
self, order: InFlightOrder, mock_api: aioresponses, callback: Optional[Callable] = lambda *args, **kwargs: None
) -> str:
url = web_utils.private_rest_url(path_url=CONSTANTS.LBANK_TRADE_UPDATES_PATH_URL)
mock_api.post(url, status=400, callback=callback)
return url
def _configure_balance_response(
self, response: Dict[str, Any], mock_api: aioresponses, callback: Optional[Callable] = lambda *args, **kwargs: None
) -> str:
url = self.balance_url
mock_api.post(url, body=json.dumps(response))
def order_event_for_new_order_websocket_update(self, order: InFlightOrder):
return {
"SERVER": "V2",
"orderUpdate": {
"accAmt": "0",
"amount": "0",
"avgPrice": "0",
"customerID": order.client_order_id,
"orderAmt": str(order.amount),
"orderPrice": str(order.price),
"orderStatus": 0,
"price": "0",
"remainAmt": str(order.amount),
"role": "taker",
"symbol": self.ex_trading_pair,
"type": "buy",
"updateTime": 1655785565476,
"uuid": self.expected_exchange_order_id,
"volumePrice": "0",
},
"type": "orderUpdate",
"pair": self.ex_trading_pair,
"TS": "2022-06-21T12:26:05.479",
}
def order_event_for_canceled_order_websocket_update(self, order: InFlightOrder):
return {
"SERVER": "V2",
"orderUpdate": {
"accAmt": "0",
"amount": "0",
"avgPrice": "0",
"customerID": order.client_order_id,
"orderAmt": str(order.amount),
"orderPrice": str(order.price),
"orderStatus": -1,
"price": "0",
"remainAmt": "0",
"role": "taker",
"symbol": self.ex_trading_pair,
"type": "buy",
"updateTime": 1655785577941,
"uuid": self.expected_exchange_order_id,
"volumePrice": "0",
},
"type": "orderUpdate",
"pair": self.ex_trading_pair,
"TS": "2022-06-21T12:26:17.943",
}
def order_event_for_full_fill_websocket_update(self, order: InFlightOrder):
return {
"SERVER": "V2",
"orderUpdate": {
"accAmt": str(order.amount),
"amount": str(order.amount),
"avgPrice": str(order.price),
"customerID": order.client_order_id,
"orderAmt": str(order.amount),
"orderPrice": str(order.price),
"orderStatus": 2,
"price": str(order.price),
"remainAmt": "0",
"role": "taker",
"symbol": self.ex_trading_pair,
"type": "buy",
"updateTime": 1655785577941,
"uuid": self.expected_exchange_order_id,
"volumePrice": "0",
},
"type": "orderUpdate",
"pair": self.ex_trading_pair,
"TS": "2022-06-21T12:26:17.943",
}
def trade_event_for_full_fill_websocket_update(self, order: InFlightOrder):
# NOTE: LBank does not have any trade events over their private stream channels
return None
def _order_status_request_completely_filled_mock_response(self, order: InFlightOrder) -> Any:
return {
"result": True,
"data": [
{
"symbol": self.ex_trading_pair,
"amount": str(order.amount),
"create_time": int(order.creation_timestamp),
"price": str(order.price),
"avg_price": str(order.price),
"type": order.trade_type.name.lower(),
"order_id": order.exchange_order_id,
"deal_amount": 0,
"status": 2
}
],
"error_code": 0,
"ts": 1656406392759
}
def _order_status_request_canceled_mock_response(self, order: InFlightOrder) -> Any:
return {
"result": True,
"data": [
{
"symbol": self.ex_trading_pair,
"amount": str(order.amount),
"create_time": int(order.creation_timestamp),
"price": str(order.price),
"avg_price": 0,
"type": order.trade_type.name.lower(),
"order_id": order.exchange_order_id,
"deal_amount": 0,
"status": -1
}
],
"error_code": 0,
"ts": 1656406392759
}
def _order_status_request_open_mock_response(self, order: InFlightOrder) -> Any:
return {
"result": True,
"data": [
{
"symbol": self.ex_trading_pair,
"amount": str(order.amount),
"create_time": int(order.creation_timestamp),
"price": str(order.price),
"avg_price": 0,
"type": order.trade_type.name.lower(),
"order_id": order.exchange_order_id,
"deal_amount": 0,
"status": 0
}
],
"error_code": 0,
"ts": 1656406392759
}
def _order_status_request_partially_filled_mock_response(self, order: InFlightOrder) -> Any:
return {
"result": True,
"data": [
{
"symbol": self.ex_trading_pair,
"amount": str(order.amount),
"create_time": order.creation_timestamp,
"price": str(order.price),
"avg_price": str(self.expected_partial_fill_price),
"type": order.trade_type.name.lower(),
"order_id": order.exchange_order_id,
"deal_amount": str(self.expected_partial_fill_amount),
"status": 1,
"customer_id": order.client_order_id,
}
],
"error_code": 0,
"ts": 1656406392759
}
def _order_fills_request_partial_fill_mock_response(self, order: InFlightOrder):
return {
"result": True,
"data": [
{
"txUuid": self.expected_fill_trade_id,
"orderUuid": order.exchange_order_id,
"tradeType": order.trade_type.name.lower(),
"dealTime": 1562553793113,
"dealPrice": str(self.expected_partial_fill_price),
"dealQuantity": str(self.expected_partial_fill_amount),
"dealVolumePrice": str(self.expected_partial_fill_price * self.expected_partial_fill_amount),
"tradeFee": 0.0000010000,
"tradeFeeRate": 0.000001,
}
],
"error_code": 0,
"ts": 1656406325921
}
def _order_fills_request_full_fill_mock_response(self, order: InFlightOrder):
return {
"result": True,
"data": [
{
"txUuid": self.expected_fill_trade_id,
"orderUuid": order.exchange_order_id,
"tradeType": order.trade_type.name.lower(),
"dealTime": 1562553793113,
"dealPrice": str(order.price),
"dealQuantity": str(order.amount),
"dealVolumePrice": str(order.price * order.amount),
"tradeFee": 0.0000010000,
"tradeFeeRate": 0.000001,
}
],
"error_code": 0,
"ts": 1656406325921
}
|
[
"abel@hummingbot.io"
] |
abel@hummingbot.io
|
c529ecc46016f20514c8238e75e8b5c76ebdca4d
|
45a962c477826051726b5dcec088567623c5894c
|
/manage.py
|
62d32c4571a7034476d475e25659bee94ebe0ab7
|
[] |
no_license
|
Sks2000Sks/BASIC-BANKING-SYSTEM
|
a14c3aab37203b5678b9178acd5de5628e9bf21e
|
80172da18c131e31afd2aece15b98d35a4dedbce
|
refs/heads/main
| 2023-07-18T01:45:44.024448
| 2021-08-18T11:41:21
| 2021-08-18T11:41:21
| 397,579,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 682
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bank.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
|
[
"noreply@github.com"
] |
Sks2000Sks.noreply@github.com
|
53243d6c671c147e708797250ac2f0b78b3e21db
|
05a70c12df808455100598d8a6fdb5635c641ab8
|
/Ago-Dic-2018/ArmandoGarcia/practica2/usuarios.py
|
6eb4c7ba62b34fd889e8874f47b87f8d57ce400f
|
[
"MIT"
] |
permissive
|
Jonathan-aguilar/DAS_Sistemas
|
991edcc929c33ba9bb8bc84e741b55c10a8420a3
|
4d02efc64161871084df1bff258112351e5d1241
|
refs/heads/development
| 2023-07-24T12:26:54.698452
| 2021-09-02T20:52:26
| 2021-09-02T20:52:26
| 289,764,892
| 1
| 0
|
MIT
| 2021-09-02T20:52:27
| 2020-08-23T20:54:55
|
Python
|
UTF-8
|
Python
| false
| false
| 668
|
py
|
# Ejercicio 9-3
class usuario:
    def __init__(self, first_name, last_name, semestre, escuela):
        self.first_name = first_name
        self.last_name = last_name
        self.semestre = semestre
        self.escuela = escuela
    def __str__(self):
        return 'Nombre: {} \nApellido: {} \nSemestre: {} \nEscuela: {}'.format(self.first_name,self.last_name,self.semestre,self.escuela)
usuario_1 = usuario('Luis','Rodas','Sexto','TEC Monterry')
usuario_2 = usuario('Alfonso','Hinojosa','Primero','Cbtis 300')
usuario_3 = usuario('Alexander','Jimenez','Decimo','Escuela de la vida')
print(usuario_1)
print('\n')
print(usuario_2)
print('\n')
print(usuario_3)
|
[
"mastersky_96@hotmail.com"
] |
mastersky_96@hotmail.com
|
58b9e4472680e80a72d209324fc8d287a77f59e5
|
2c6f33443234c4b7732d7bf0aee51e5844021312
|
/assignment.py
|
b6e8bc282fce2654d016e9d5ac0795b4d201f337
|
[] |
no_license
|
afzalIbnSH/eagle-owl-assignment
|
5cd9a0dfa71cf973a13873c56e5b9c48e865e48c
|
6900eebcc5edade25fe7680054769de239d26aba
|
refs/heads/master
| 2022-12-05T06:02:50.489426
| 2020-08-21T11:16:24
| 2020-08-21T11:16:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,421
|
py
|
"""
customerdata.txt contains transactional data from a fictitious website.
Parse it and generate a report with the following information
1. How many orders did the site receive?
2. What was the total amount of the orders?
3. List the names of the customers who ordered once and did not order again.
4. Get a distribution of customers who ordered exactly once, exactly twice,
and so on up to 4 orders and group the rest as 5 orders and above
5. Generate this report as a simple HTML page with a table.
6. Add a bar graph for the information in Q4 in your HTML report.
"""
from os import getcwd
def structure_data():
    """
    Reads the customerdata.txt file and returns data structured as below
    {
        <phone_number>: {
            "name": <name>
            "orders": [
                {"date": <date>, "amount": <amount>},
                ..
                ..
            ]
        },
        ..
        ..
    }
    It also returns the total number and total amount of orders
    """
    with open("customerdata.txt") as f:
        f.readline()
        data = {}
        total_orders = 0
        total_amount = 0
        while True:
            try:
                date, phone_number, name, amount = f.readline().strip().split(", ")
                amount = int(amount)
                try:
                    # Neglect if there is a name mismatch for the same number
                    data[phone_number]["orders"].append(
                        {"date": date, "amount": amount}
                    )
                except KeyError:
                    data[phone_number] = {
                        "name": name,
                        "orders": [{"date": date, "amount": amount}],
                    }
                total_amount += amount
                total_orders += 1
            except ValueError:
                break
    return data, total_orders, total_amount
def group_customers(data):
    """
    Groups customers as per number of orders made
    Also collects names of customers who never reordered
    """
    groups = {"1": 0, "2": 0, "3": 0, "4": 0, "5+": 0}
    non_repeat_customers = []
    for value in data.values():
        orders_made = len(value["orders"])
        try:
            groups[str(orders_made)] += 1
        except KeyError:
            groups["5+"] += 1
        if orders_made == 1:
            non_repeat_customers.append(value["name"])
    return groups, non_repeat_customers
def generate_html(groups, distinct_customers_count):
    table = """
    <table width="100%" border=1>
        <tr>
            <th align="left">No of orders</th>
            <th align="left">Count of customers</th>
        </tr>
    """
    for number_of_orders, count in groups.items():
        table = (
            table
            + f"""
        <tr>
            <td>{number_of_orders}</td>
            <td>{count}</td>
        </tr>
        """
        )
    table = table + "</table>"
    bar_chart = '<table border="0" width="100%" cellpadding = "0" cellspacing="0">'
    for number_of_orders, count in groups.items():
        percentage = round((100 / distinct_customers_count) * count, 2)
        bar_chart = (
            bar_chart
            + f"""
        <tr>
            <td width="20%">{number_of_orders} order(s)</td>
            <td width="75%">
                <table border = "0" width = "100%" cellpadding = "1" cellspacing="1">
                    <tr>
                        <td align="left" bgcolor="blue" width="{percentage}%"> </td>
                        <td align="left">{percentage}%</td>
                    </tr>
                </table>
            </td>
        </tr>
        """
        )
    bar_chart = bar_chart + "</table>"
    return f"""
    <h1>Distribution of customers who ordered exactly once, exactly twice, and so on up to 4 orders and the rest as 5 orders and above</h1>
    <br>
    <h2>Table:</h2>
    {table}
    <br>
    <h2>Bar chart:</h2>
    {bar_chart}
    """
if __name__ == "__main__":
    data, total_orders, total_amount = structure_data()
    print(f"Total orders => {total_orders}")
    print(f"Total amount of orders => {total_amount}")
    groups, non_repeat_customers = group_customers(data)
    print(f"Customers who never reordered => {non_repeat_customers}")
    with open("customerdata.html", "w") as f:
        f.write(generate_html(groups, len(data)))
        print(f"HTML => Open file://{getcwd()}/{f.name}")
|
[
"aflibnush@gmail.com"
] |
aflibnush@gmail.com
|
78f27398c8a4dfc39b5112cc8e8da601f5e3041d
|
ffcc2b42850fdf8780064d22e2fabf2df894f51d
|
/converter/gameconstants.py
|
71f7c27a47e476df5079d2c00f0354e02590126b
|
[
"MIT"
] |
permissive
|
NNTin/Reply-LoL-Reddit
|
05a31a270136c2237798c46b9aca6e3e9c01a7b5
|
f971a8dfaafcd13912097018fbfb40ab72f09b1e
|
refs/heads/master
| 2020-04-12T05:37:24.746633
| 2017-04-02T12:47:29
| 2017-04-02T12:47:29
| 64,067,308
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,552
|
py
|
matchMode = {"CLASSIC": "Classic","ODIN": "Dominion/Crystal","ARAM": "ARAM","TUTORIAL": "Tutorial","ONEFORALL": "One","ASCENSION": "Ascension","FIRSTBLOOD": "Snowdown","KINGPORO": "King","SIEGE": "Nexus"}
#queueType = {'CUSTOM': 'CUSTOM', 'NORMAL_5x5_BLIND': 'NORMAL_5x5_BLIND', 'RANKED_SOLO_5x5': 'RANKED_SOLO_5x5', 'RANKED_PREMADE_5x5': 'RANKED_PREMADE_5x5', 'BOT_5x5': 'BOT_5x5', 'NORMAL_3x3': 'NORMAL_3x3', 'RANKED_PREMADE_3x3': 'RANKED_PREMADE_3x3', 'NORMAL_5x5_DRAFT': 'NORMAL_5x5_DRAFT', 'ODIN_5x5_BLIND': 'ODIN_5x5_BLIND', 'ODIN_5x5_DRAFT': 'ODIN_5x5_DRAFT', 'BOT_ODIN_5x5': 'BOT_ODIN_5x5', 'BOT_5x5_INTRO': 'BOT_5x5_INTRO', 'BOT_5x5_BEGINNER': 'BOT_5x5_BEGINNER', 'BOT_5x5_INTERMEDIATE': 'BOT_5x5_INTERMEDIATE', 'RANKED_TEAM_3x3': 'RANKED_TEAM_3x3', 'RANKED_TEAM_5x5': 'RANKED_TEAM_5x5', 'BOT_TT_3x3': 'BOT_TT_3x3', 'GROUP_FINDER_5x5': 'GROUP_FINDER_5x5', 'ARAM_5x5': 'ARAM_5x5', 'ONEFORALL_5x5': 'ONEFORALL_5x5', 'FIRSTBLOOD_1x1': 'FIRSTBLOOD_1x1', 'FIRSTBLOOD_2x2': 'FIRSTBLOOD_2x2', 'SR_6x6': 'SR_6x6', 'URF_5x5': 'URF_5x5', 'ONEFORALL_MIRRORMODE_5x5': 'ONEFORALL_MIRRORMODE_5x5', 'BOT_URF_5x5': 'BOT_URF_5x5', 'NIGHTMARE_BOT_5x5_RANK1': 'NIGHTMARE_BOT_5x5_RANK1', 'NIGHTMARE_BOT_5x5_RANK2': 'NIGHTMARE_BOT_5x5_RANK2', 'NIGHTMARE_BOT_5x5_RANK5': 'NIGHTMARE_BOT_5x5_RANK5', 'ASCENSION_5x5': 'ASCENSION_5x5', 'HEXAKILL': 'HEXAKILL', 'BILGEWATER_ARAM_5x5': 'BILGEWATER_ARAM_5x5', 'KING_PORO_5x5': 'KING_PORO_5x5', 'COUNTER_PICK': 'COUNTER_PICK', 'BILGEWATER_5x5': 'BILGEWATER_5x5', 'TEAM_BUILDER_DRAFT_UNRANKED_5x5': 'TEAM_BUILDER_DRAFT_UNRANKED_5x5', 'TEAM_BUILDER_DRAFT_RANKED_5x5': 'TEAM_BUILDER_DRAFT_RANKED_5x5'}
queueTypeByName = {"CUSTOM": {"id": 0, "name": "Custom"}, "NORMAL_3x3": {"id": 8, "name": "Normal 3v3"}, "NORMAL_5x5_BLIND": {"id": 2, "name": "Normal 5v5 Blind Pick"}, "NORMAL_5x5_DRAFT": {"id": 14, "name": "Normal 5v5 Draft Pick"}, "RANKED_SOLO_5x5": {"id": 4, "name": "Ranked Solo 5v5"}, "RANKED_PREMADE_5x5": {"id": 6, "name": "Ranked Premade 5v5"}, "RANKED_PREMADE_3x3": {"id": 9, "name": "Ranked Premade 3v3"}, "RANKED_TEAM_3x3": {"id": 41, "name": "Ranked Team 3v3"}, "RANKED_TEAM_5x5": {"id": 42, "name": "Ranked Team 5v5"}, "ODIN_5x5_BLIND": {"id": 16, "name": "Dominion 5v5 Blind Pick"}, "ODIN_5x5_DRAFT": {"id": 17, "name": "Dominion 5v5 Draft Pick"}, "BOT_5x5": {"id": 7, "name": "Historical Summoner's Rift Coop vs AI"}, "BOT_ODIN_5x5": {"id": 25, "name": "Dominion Coop vs AI"}, "BOT_5x5_INTRO": {"id": 31, "name": "Summoner's Rift Coop vs AI Intro Bot"}, "BOT_5x5_BEGINNER": {"id": 32, "name": "Summoner's Rift Coop vs AI Beginner Bot"}, "BOT_5x5_INTERMEDIATE": {"id": 33, "name": "Historical Summoner's Rift Coop vs AI Intermediate Bot"}, "BOT_TT_3x3": {"id": 52, "name": "Twisted Treeline Coop vs AI"}, "GROUP_FINDER_5x5": {"id": 61, "name": "Team Builder"}, "ARAM_5x5": {"id": 65, "name": "ARAM"}, "ONEFORALL_5x5": {"id": 70, "name": "One for All"}, "FIRSTBLOOD_1x1": {"id": 72, "name": "Snowdown Showdown 1v1"}, "FIRSTBLOOD_2x2": {"id": 73, "name": "Snowdown Showdown 2v2"}, "SR_6x6": {"id": 75, "name": "Summoner's Rift 6x6 Hexakill"}, "URF_5x5": {"id": 76, "name": "Ultra Rapid Fire"}, "ONEFORALL_MIRRORMODE_5x5": {"id": 78, "name": "One for All (Mirror mode)"}, "BOT_URF_5x5": {"id": 83, "name": "Ultra Rapid Fire played against AI"}, "NIGHTMARE_BOT_5x5_RANK1": {"id": 91, "name": "Doom Bots Rank 1"}, "NIGHTMARE_BOT_5x5_RANK2": {"id": 92, "name": "Doom Bots Rank 2"}, "NIGHTMARE_BOT_5x5_RANK5": {"id": 93, "name": "Doom Bots Rank 5"}, "ASCENSION_5x5": {"id": 96, "name": "Ascension"}, "EXAKILL": {"id": 98, "name": "Twisted Treeline 6x6 Hexakill"}, "BILGEWATER_ARAM_5x5": {"id": 100, "name": "Butcher's Bridge"}, "KING_PORO_5x5": {"id": 300, "name": "King Poro"}, "COUNTER_PICK": {"id": 310, "name": "Nemesis"}, "BILGEWATER_5x5": {"id": 313, "name": "Black Market Brawlers"}, "SIEGE": {"id": 315, "name": "Nexus Siege"}, "DEFINITELY_NOT_DOMINION_5x5": {"id": 317, "name": "Definitely Not Dominion"}, "TEAM_BUILDER_DRAFT_UNRANKED_5x5": {"id": 400, "name": "Normal 5v5 Draft Pick"}, "TEAM_BUILDER_DRAFT_RANKED_5x5": {"id": 410, "name": "Ranked 5v5 Draft Pick"}}
queueTypeById = {"0": {"nameId": "CUSTOM", "name": "Custom"},"8": {"nameId": "NORMAL_3x3", "name": "Normal 3v3"},"2": {"nameId": "NORMAL_5x5_BLIND", "name": "Normal 5v5 Blind Pick"},"14": {"nameId": "NORMAL_5x5_DRAFT", "name": "Normal 5v5 Draft Pick"},"4": {"nameId": "RANKED_SOLO_5x5", "name": "Ranked Solo 5v5"},"6": {"nameId": "RANKED_PREMADE_5x5", "name": "Ranked Premade 5v5"},"9": {"nameId": "RANKED_PREMADE_3x3", "name": "Ranked Premade 3v3"},"41": {"nameId": "RANKED_TEAM_3x3", "name": "Ranked Team 3v3"},"42": {"nameId": "RANKED_TEAM_5x5", "name": "Ranked Team 5v5"},"16": {"nameId": "ODIN_5x5_BLIND", "name": "Dominion 5v5 Blind Pick"},"17": {"nameId": "ODIN_5x5_DRAFT", "name": "Dominion 5v5 Draft Pick"},"7": {"nameId": "BOT_5x5", "name": "Historical Summoner's Rift Coop vs AI"},"25": {"nameId": "BOT_ODIN_5x5", "name": "Dominion Coop vs AI"},"31": {"nameId": "BOT_5x5_INTRO", "name": "Summoner's Rift Coop vs AI Intro Bot"},"32": {"nameId": "BOT_5x5_BEGINNER", "name": "Summoner's Rift Coop vs AI Beginner Bot"},"33": {"nameId": "BOT_5x5_INTERMEDIATE", "name": "Historical Summoner's Rift Coop vs AI Intermediate Bot"},"52": {"nameId": "BOT_TT_3x3", "name": "Twisted Treeline Coop vs AI"},"61": {"nameId": "GROUP_FINDER_5x5", "name": "Team Builder"},"65": {"nameId": "ARAM_5x5", "name": "ARAM"},"70": {"nameId": "ONEFORALL_5x5", "name": "One for All"},"72": {"nameId": "FIRSTBLOOD_1x1", "name": "Snowdown Showdown 1v1"},"73": {"nameId": "FIRSTBLOOD_2x2", "name": "Snowdown Showdown 2v2"},"75": {"nameId": "SR_6x6", "name": "Summoner's Rift 6x6 Hexakill"},"76": {"nameId": "URF_5x5", "name": "Ultra Rapid Fire"},"78": {"nameId": "ONEFORALL_MIRRORMODE_5x5", "name": "One for All (Mirror mode)"},"83": {"nameId": "BOT_URF_5x5", "name": "Ultra Rapid Fire played against AI"},"91": {"nameId": "NIGHTMARE_BOT_5x5_RANK1", "name": "Doom Bots Rank 1"},"92": {"nameId": "NIGHTMARE_BOT_5x5_RANK2", "name": "Doom Bots Rank 2"},"93": {"nameId": "NIGHTMARE_BOT_5x5_RANK5", "name": "Doom Bots Rank 5"},"96": {"nameId": "ASCENSION_5x5", "name": "Ascension"},"98": {"nameId": "EXAKILL", "name": "Twisted Treeline 6x6 Hexakill"},"100": {"nameId": "BILGEWATER_ARAM_5x5", "name": "Butcher's Bridge"},"300": {"nameId": "KING_PORO_5x5", "name": "King Poro"},"310": {"nameId": "COUNTER_PICK", "name": "Nemesis"},"313": {"nameId": "BILGEWATER_5x5", "name": "Black Market Brawlers"},"315": {"nameId": "IEGE", "name": "Nexus Siege"},"317": {"nameId": "DEFINITELY_NOT_DOMINION_5x5", "name": "Definitely Not Dominion"},"400": {"nameId": "TEAM_BUILDER_DRAFT_UNRANKED_5x5", "name": "Normal 5v5 Draft Pick"},"410": {"nameId": "TEAM_BUILDER_DRAFT_RANKED_5x5", "name": "Ranked 5v5 Draft Pick"}}
matchType = {'CUSTOM_GAME': 'Custom', 'MATCHED_GAME': 'Matched', 'TUTORIAL_GAME': 'Tutorial'}
subType = {"NONE": "Custom","NORMAL": "Summoner's","NORMAL_3x3": "Twisted","ODIN_UNRANKED": "Dominion/Crystal","ARAM_UNRANKED_5x5": "ARAM","BOT": "Summoner's","BOT_3x3": "Twisted","ANKED_SOLO_5x5": "Summoner's","RANKED_TEAM_3x3": "Twisted","RANKED_TEAM_5x5": "Summoner's","ONEFORALL_5x5": "One","FIRSTBLOOD_1x1": "Snowdown","FIRSTBLOOD_2x2": "Snowdown","SR_6x6": "Summoner's","CAP_5x5": "Team","URF": "Ultra","URF_BOT": "Ultra","NIGHTMARE_BOT": "Summoner's","ASCENSION": "Ascension","HEXAKILL": "Twisted","KING_PORO": "King","COUNTER_PICK": "Nemesis","BILGEWATER": "Black","SIEGE": "Nexus"}
playerStatSummaryType = {"Unranked": "Summoner's","Unranked3x3": "Twisted","OdinUnranked": "Dominion/Crystal","AramUnranked5x5": "ARAM","CoopVsAI": "Summoner's","CoopVsAI3x3": "Twisted","RankedSolo5x5": "Summoner's","RankedTeam3x3": "Twisted","RankedTeam5x5": "Summoner's","OneForAll5x5": "One","FirstBlood1x1": "Snowdown","FirstBlood2x2": "Snowdown","SummonersRift6x6": "Summoner's","CAP5x5": "Team","URF": "Ultra","URFBots": "Ultra","NightmareBot": "Summoner's","Ascension": "Ascension","Hexakill": "Twisted","KingPoro": "King","CounterPick": "Nemesis","Bilgewater": "Black","Siege": "Nexus"}
season = {'PRESEASON3,': 'PRESEASON3,', 'SEASON3,': 'SEASON3,', 'PRESEASON2014,': 'PRESEASON2014,', 'SEASON2014,': 'SEASON2014,', 'PRESEASON2015,': 'PRESEASON2015,', 'SEASON2015,': 'SEASON2015,', 'PRESEASON2016,': 'PRESEASON2016,', 'SEASON2016': 'SEASON2016'}
highestAchievedSeasonTier = {'CHALLENGER,': 'CHALLENGER,', 'MASTER,': 'MASTER,', 'DIAMOND,': 'DIAMOND,', 'PLATINUM,': 'PLATINUM,', 'GOLD,': 'GOLD,', 'SILVER,': 'SILVER,', 'BRONZE,': 'BRONZE,', 'UNRANKED': 'UNRANKED'}
|
[
"ngoctinnguyen1993@gmail.com"
] |
ngoctinnguyen1993@gmail.com
|
addc8cb7baab1254b5c892410c0387f5eea9293f
|
c3d5add5ab632f7b456f89ff5ed392ae0221c0e7
|
/system/init/configuration.py
|
b2099905dcfa97573b595008ca228bba0b70c87b
|
[] |
no_license
|
readysetrock/Sugr_Daddy
|
6596b25ce3ea5cc82c0ddba04881f85b58a7b39a
|
09e6f7e8220a5de104778ddb479c6cad9ae45554
|
refs/heads/master
| 2021-01-15T09:38:08.995910
| 2016-02-21T23:52:28
| 2016-02-21T23:52:28
| 43,457,156
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
"""
Configuration initialization file
This file initializes all of the base configurations defined in the config.base file
"""
from app.config import base
import os
def _get_config(env):
    return {
        'DEVELOPMENT': base.DevelopmentConfig,
        'STAGING': base.StagingConfig,
        'PRODUCTION': base.ProductionConfig,
    }.get(env, base.DevelopmentConfig)
def initialize_config(app):
    env = os.getenv('PYLOT_ENV', 'DEVELOPMENT')
    app.config.from_object(_get_config(env))
|
[
"tytowry@gmail.com"
] |
tytowry@gmail.com
|
9f54a3b4dafb6a44a94fe17ce7837f9d314d62cc
|
bfcd398b8045d7b542db9b5c3b54130f3ac7ad66
|
/userdashboard/apps/userd/migrations/0004_remove_message_msguser.py
|
6fa6d3d5dd79ef6ec50579a41530fada9d7f47e9
|
[] |
no_license
|
acn545/Django
|
32b8b216fe1e73885e3bd583e98627b938f79866
|
d2f52da3599e930ef1728fc5f6a6ef7150c413cd
|
refs/heads/master
| 2020-03-18T15:26:14.692243
| 2018-05-25T21:34:50
| 2018-05-25T21:34:50
| 134,907,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-23 21:43
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    dependencies = [
        ('userd', '0003_message_msguser'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='message',
            name='msgUser',
        ),
    ]
|
[
"anthonycnoble@live.com"
] |
anthonycnoble@live.com
|
265522a7deada1360fac4df736f45501ac5024dc
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_285/ch5_2019_06_03_01_06_29_598637.py
|
b4bfd46781ff35c49180a25a814cd9a7dfae311a
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
def verifica_primo(n):
    if n<0:
        return -1
    num=3
    while num<n:
        if n%2==0 or n%num==0:
            return False
        num+=2
    if n==0 or n==1:
        return False
    else:
        return True
def maior_primo_menor_que(n):
    if verifica_primo(n)== True:
        return n
    elif verifica_primo(n)== False:
        lista=[]
        for e in range(n):
            if verifica_primo(n-e)==True:
                return n-e
    else:
        return -1
|
[
"you@example.com"
] |
you@example.com
|
88ba592a61b2d4e2fa4ac35b512f13126a93e8f8
|
985ce7108f56d8f7acd9ac35cdd87f08db5caac4
|
/database.py
|
f0e07da64471c0f83486fd85dbaffc5144f9d23e
|
[] |
no_license
|
ariaramin/login-signin-system
|
ce7d102d38f140ea2be2f6b652f2a2e30d440e15
|
b12c0eb945aca6c44ada375f8efa84ee55df16e5
|
refs/heads/main
| 2023-03-03T03:07:01.603808
| 2021-02-16T15:28:52
| 2021-02-16T15:28:52
| 339,441,150
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,637
|
py
|
from pymysql import connect
#connect to DB
class connector:
    def connection(self):
        try:
            self.con = connect('localhost','root', '', 'users')
            self.cursor = self.con.cursor()
        except:
            print('error')
#insert a data
class signin(connector):
    def __init__(self, username, email, password):
        connector.connection(self)
        data = (username, email, password)
        query = 'INSERT INTO users_info(username, email, password) VALUES (%s,%s,%s)'
        self.cursor.execute(query, data)
        self.con.commit()
#get username and password
class login(connector):
    def __init__(self, username, password):
        connector.connection(self)
        data = (username, password)
        query = 'SELECT * FROM users_info WHERE username LIKE %s AND password LIKE %s'
        self.cursor.execute(query, data)
        self.values = self.cursor.fetchall()
    def get(self):
        return self.values
#get email
class getpass(connector):
    def __init__(self, email):
        connector.connection(self)
        data = (email)
        query = 'SELECT * FROM users_info WHERE email LIKE %s'
        self.cursor.execute(query, data)
        self.result = self.cursor.fetchall()
    def get(self):
        return self.result
#get username
class account(connector):
    def __init__(self, username):
        connector.connection(self)
        data = (username)
        query = 'SELECT * FROM users_info WHERE username LIKE %s'
        self.cursor.execute(query, data)
        self.result = self.cursor.fetchall()
    def get(self):
        return self.result
|
[
"ariaramin24@gmail.com"
] |
ariaramin24@gmail.com
|
277b12248e81a6e903b561cf68a167cc5ac531a0
|
e0c2a8a544be42490ad86e232b26156317c9fb7f
|
/Encapsulation/Client.py
|
deb75779fb6c2ce70274ac2796081022f4fb4588
|
[] |
no_license
|
hhitman47/oop-python
|
8133cebcdbac5b5f24756772ab20357c4a4aa8af
|
0d44dbaf0213b314a1b55a24b3524ea89554bc56
|
refs/heads/master
| 2022-04-20T22:59:38.345445
| 2020-04-22T17:28:37
| 2020-04-22T17:28:37
| 257,969,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 599
|
py
|
from Encapsulation.Bank import Bank
class Clinet(Bank):
    pass
client_1 = Bank()
#print(client_1.Bank__Client_info())
print(client_1._Bank__bank_name)
while True:
    print("1. Check Account Balance ")
    print("2. Withdraw Funds ")
    menu_option = int(input())
    if menu_option == 1:
        #client_1._Bank__Client_info()
        print("Your Current Balance is : ",client_1.Balance_getter())
    elif menu_option == 2:
        value = int(input("How much would you like to withdreaw : "))
        client_1.Balance_Setter(value)
    else:
        print("Wrong menu choice ....")
|
[
"h.hitman47@gmail.com"
] |
h.hitman47@gmail.com
|
9902776c082c92c16c89cd39d922e4b461482b88
|
bad62c2b0dfad33197db55b44efeec0bab405634
|
/sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2018_05_01_preview/models/__init__.py
|
b555500e3ca24979e6c5c02f7be553bf65fd61c7
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
test-repo-billy/azure-sdk-for-python
|
20c5a2486456e02456de17515704cb064ff19833
|
cece86a8548cb5f575e5419864d631673be0a244
|
refs/heads/master
| 2022-10-25T02:28:39.022559
| 2022-10-18T06:05:46
| 2022-10-18T06:05:46
| 182,325,031
| 0
| 0
|
MIT
| 2019-07-25T22:28:52
| 2019-04-19T20:59:15
|
Python
|
UTF-8
|
Python
| false
| false
| 3,815
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._models_py3 import AccessReviewDecision
from ._models_py3 import AccessReviewDecisionListResult
from ._models_py3 import AccessReviewDecisionProperties
from ._models_py3 import AccessReviewDecisionTarget
from ._models_py3 import AccessReviewDefaultSettings
from ._models_py3 import AccessReviewInstance
from ._models_py3 import AccessReviewInstanceListResult
from ._models_py3 import AccessReviewReviewer
from ._models_py3 import AccessReviewScheduleDefinition
from ._models_py3 import AccessReviewScheduleDefinitionListResult
from ._models_py3 import AccessReviewScheduleDefinitionProperties
from ._models_py3 import AccessReviewScheduleSettings
from ._models_py3 import ErrorDefinition
from ._models_py3 import ErrorDefinitionProperties
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import ServicePrincipalDecisionTarget
from ._models_py3 import UserDecisionTarget
from ._authorization_management_client_enums import AccessRecommendationType
from ._authorization_management_client_enums import AccessReviewActorIdentityType
from ._authorization_management_client_enums import AccessReviewApplyResult
from ._authorization_management_client_enums import AccessReviewInstanceStatus
from ._authorization_management_client_enums import AccessReviewRecurrencePatternType
from ._authorization_management_client_enums import AccessReviewRecurrenceRangeType
from ._authorization_management_client_enums import AccessReviewResult
from ._authorization_management_client_enums import AccessReviewReviewerType
from ._authorization_management_client_enums import AccessReviewScheduleDefinitionReviewersType
from ._authorization_management_client_enums import AccessReviewScheduleDefinitionStatus
from ._authorization_management_client_enums import AccessReviewScopePrincipalType
from ._authorization_management_client_enums import DecisionTargetType
from ._authorization_management_client_enums import DefaultDecisionType
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"AccessReviewDecision",
"AccessReviewDecisionListResult",
"AccessReviewDecisionProperties",
"AccessReviewDecisionTarget",
"AccessReviewDefaultSettings",
"AccessReviewInstance",
"AccessReviewInstanceListResult",
"AccessReviewReviewer",
"AccessReviewScheduleDefinition",
"AccessReviewScheduleDefinitionListResult",
"AccessReviewScheduleDefinitionProperties",
"AccessReviewScheduleSettings",
"ErrorDefinition",
"ErrorDefinitionProperties",
"Operation",
"OperationDisplay",
"OperationListResult",
"ServicePrincipalDecisionTarget",
"UserDecisionTarget",
"AccessRecommendationType",
"AccessReviewActorIdentityType",
"AccessReviewApplyResult",
"AccessReviewInstanceStatus",
"AccessReviewRecurrencePatternType",
"AccessReviewRecurrenceRangeType",
"AccessReviewResult",
"AccessReviewReviewerType",
"AccessReviewScheduleDefinitionReviewersType",
"AccessReviewScheduleDefinitionStatus",
"AccessReviewScopePrincipalType",
"DecisionTargetType",
"DefaultDecisionType",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
|
[
"noreply@github.com"
] |
test-repo-billy.noreply@github.com
|
67beea05ca779525eb2784422ef64d3f81fc9c53
|
23fbb376cbe173d0dd2cdf28b478abce7c9a42e3
|
/Maturka/trojkat.py
|
aeba49521908c67d3bc5445e3e8047eb7f67c257
|
[] |
no_license
|
PanOscar/MaturaInf
|
cfe025b97565f07938b808727616bb52f7cef1d2
|
f2c98a4dc336e72556b95c70f0af7faf6eb7873f
|
refs/heads/master
| 2023-08-13T15:56:51.057069
| 2021-10-05T20:28:21
| 2021-10-05T20:28:21
| 234,733,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
from turtle import *
color("blue","white")
lt(180)
for n in range(3):
for i in range(5):
begin_fill()
for j in range(3):
fd(50*(i+1))
rt(120)
fd(250)
rt(120)
end_fill()
done()
|
[
"osi23.78@gmail.com"
] |
osi23.78@gmail.com
|
ecd1fe8a8b5678366ade3ae81684187a171f55f5
|
4c601eaa346e660c296e270cc2d79aea9a3721fe
|
/homeassistant/components/atag/__init__.py
|
237a82f207a51306dfec01869827bd135973d15b
|
[
"Apache-2.0"
] |
permissive
|
basnijholt/home-assistant
|
f55110af9ff602274c0a929c7298ef97a0ef282f
|
ba55b4b8338a2dc0ba3f1d750efea49d86571291
|
refs/heads/dev
| 2023-01-21T11:53:52.621353
| 2020-08-08T15:03:06
| 2020-08-08T15:03:06
| 220,313,680
| 5
| 1
|
Apache-2.0
| 2023-01-13T06:04:49
| 2019-11-07T19:29:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,345
|
py
|
"""The ATAG Integration."""
from datetime import timedelta
import logging
import async_timeout
from pyatag import AtagException, AtagOne
from homeassistant.components.climate import DOMAIN as CLIMATE
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.water_heater import DOMAIN as WATER_HEATER
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, asyncio
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
_LOGGER = logging.getLogger(__name__)
DOMAIN = "atag"
PLATFORMS = [CLIMATE, WATER_HEATER, SENSOR]
async def async_setup(hass: HomeAssistant, config):
    """Set up the Atag component."""
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up Atag integration from a config entry."""
    session = async_get_clientsession(hass)
    coordinator = AtagDataUpdateCoordinator(hass, session, entry)
    await coordinator.async_refresh()
    if not coordinator.last_update_success:
        raise ConfigEntryNotReady
    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN][entry.entry_id] = coordinator
    if entry.unique_id is None:
        hass.config_entries.async_update_entry(entry, unique_id=coordinator.atag.id)
    for platform in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, platform)
        )
    return True
class AtagDataUpdateCoordinator(DataUpdateCoordinator):
    """Define an object to hold Atag data."""
    def __init__(self, hass, session, entry):
        """Initialize."""
        self.atag = AtagOne(session=session, **entry.data)
        super().__init__(
            hass, _LOGGER, name=DOMAIN, update_interval=timedelta(seconds=30)
        )
    async def _async_update_data(self):
        """Update data via library."""
        with async_timeout.timeout(20):
            try:
                if not await self.atag.update():
                    raise UpdateFailed("No data received")
            except AtagException as error:
                raise UpdateFailed(error)
        return self.atag.report
async def async_unload_entry(hass, entry):
    """Unload Atag config entry."""
    unload_ok = all(
        await asyncio.gather(
            *[
                hass.config_entries.async_forward_entry_unload(entry, component)
                for component in PLATFORMS
            ]
        )
    )
    if unload_ok:
        hass.data[DOMAIN].pop(entry.entry_id)
    return unload_ok
class AtagEntity(Entity):
    """Defines a base Atag entity."""
    def __init__(self, coordinator: AtagDataUpdateCoordinator, atag_id: str) -> None:
        """Initialize the Atag entity."""
        self.coordinator = coordinator
        self._id = atag_id
        self._name = DOMAIN.title()
    @property
    def device_info(self) -> dict:
        """Return info for device registry."""
        device = self.coordinator.atag.id
        version = self.coordinator.atag.apiversion
        return {
            "identifiers": {(DOMAIN, device)},
            "name": "Atag Thermostat",
            "model": "Atag One",
            "sw_version": version,
            "manufacturer": "Atag",
        }
    @property
    def name(self) -> str:
        """Return the name of the entity."""
        return self._name
    @property
    def should_poll(self) -> bool:
        """Return the polling requirement of the entity."""
        return False
    @property
    def available(self):
        """Return True if entity is available."""
        return self.coordinator.last_update_success
    @property
    def unique_id(self):
        """Return a unique ID to use for this entity."""
        return f"{self.coordinator.atag.id}-{self._id}"
    async def async_added_to_hass(self):
        """Connect to dispatcher listening for entity data notifications."""
        self.async_on_remove(
            self.coordinator.async_add_listener(self.async_write_ha_state)
        )
    async def async_update(self):
        """Update Atag entity."""
        await self.coordinator.async_request_refresh()
|
[
"noreply@github.com"
] |
basnijholt.noreply@github.com
|
40aab536212b385feeecf7caa7c71a167555dbc5
|
4e3da889cc9ebf460db841665e3559efbc96b5fb
|
/regions_extractor.py
|
b0500b570b209b637388af4cca3dc48acb2a8732
|
[] |
no_license
|
Jellycious/morph-detection
|
5fdd02de355a8beb407264e4d32b9b9388a35b59
|
9230c1f674ee9028d134b35cc3e15d0fdcd77206
|
refs/heads/main
| 2023-01-30T18:39:47.074924
| 2020-12-08T15:19:02
| 2020-12-08T15:19:02
| 319,674,730
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,597
|
py
|
#!/home/jelle/.virtualenvs/face-morphing/bin/python
# This makes sure that it runs the virtual env
# third party libraries
import dlib
import cv2
import numpy as np
from sklearn.model_selection import train_test_split
# standard libraries
import os
import sys
import pickle
# our libraries
import utils
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('./predictors/shape_predictor_68_face_landmarks.dat')
win = dlib.image_window()
def main():
for (trusted, questioned) in utils.pairs_iter():
show_extracted_regions(trusted)
def save_fft_example_image():
for (trusted, questioned) in utils.pairs_iter():
dirn = os.path.dirname(trusted)
for (prefix, trusted, questioned) in utils.get_regions(dirn):
region_img = cv2.imread(trusted, 0)
region_fft = np.fft.fftshift(np.fft.fft2(region_img))
region_fft_img = 20 * np.log(np.abs(region_fft))
print(trusted)
cv2.imwrite('fft_region_example.png', region_fft_img)
return
def check_bona_fide_of_directory(dirn):
if os.path.split(dirn)[1][0] == 'M':
bona_fide = False
return bona_fide
elif os.path.split(dirn)[1][0] == 'B':
bona_fide = True
return bona_fide
else:
raise Exception("Could not determine class of: " + dirn)
def create_frequency_training_data():
frequency_data_list = list()
for (trusted, questioned) in utils.pairs_iter():
# dirn = os.path.join(os.path.dirname(trusted), '33x33')
dirn = os.path.dirname(trusted)
region_dict = dict()
for (prefix, trusted, questioned) in utils.get_regions(dirn):
try:
trusted_img = cv2.imread(trusted, 0)
trusted_fft = np.fft.fft2(trusted_img)
trusted_fft = np.fft.fftshift(trusted_fft)
questioned_img = cv2.imread(questioned, 0)
questioned_fft = np.fft.fft2(questioned_img)
questioned_fft = np.fft.fftshift(questioned_fft)
region_dict[prefix] = (trusted_fft, questioned_fft)
except Exception as e:
print(e)
print(trusted)
bona_fide = True
if os.path.split(dirn)[1][0] == 'M':
bona_fide = False
elif os.path.split(dirn)[1][0] == 'B':
bona_fide = True
else:
raise Exception("Could not determine class of: " + dirn)
frequency_data_list.append((region_dict, bona_fide, os.path.split(dirn)[1]))
spliced = train_test_split(frequency_data_list, train_size=0.8)
train_test_frequency_data = {'training': spliced[0], 'test': spliced[1]}
with open("train_test_frequency_data_RAW.pk", "wb") as output_file:
pickle.dump(train_test_frequency_data, output_file)
def create_spectral_training_data():
spectral_data_list = list()
for (trusted, questioned) in utils.pairs_iter():
# dirn = os.path.join(os.path.dirname(trusted), '33x33')
dirn = os.path.dirname(trusted)
region_dict = dict()
for (prefix, trusted, questioned) in utils.get_regions(dirn):
try:
trusted_img = cv2.imread(trusted, 0)
trusted_spec = get_spectrum_img_32(trusted_img, replace_negatives=False, log=False)
questioned_img = cv2.imread(questioned, 0)
questioned_spec = get_spectrum_img_32(questioned_img, replace_negatives=False, log=False)
region_dict[prefix] = (trusted_spec, questioned_spec)
except Exception as e:
print(e)
print(trusted)
bona_fide = True
if os.path.split(dirn)[1][0] == 'M':
bona_fide = False
elif os.path.split(dirn)[1][0] == 'B':
bona_fide = True
else:
raise Exception("Could not determine class of: " + dirn)
spectral_data_list.append((region_dict, bona_fide, os.path.split(dirn)[1]))
spliced = train_test_split(spectral_data_list, train_size=0.8)
train_test_spectral_data = {'training': spliced[0], 'test': spliced[1]}
with open("train_test_spectral_data_RAW_nolog.pk", "wb") as output_file:
pickle.dump(train_test_spectral_data, output_file)
def get_spectrum_img_32(img, replace_negatives=True, log=True):
'''
EXPECTS AN IMAGE TO BE 32x32 in size
'''
fft = np.fft.fft2(img)
fft = np.fft.fftshift(fft)
magnitude = np.abs(fft)
if log:
magnitude = 20 * np.log(magnitude)
if replace_negatives:
magnitude[magnitude < 0] = 0
CENTER = (16, 16) # since image is 32x32
MAX_RADIUS = 16
warped = cv2.warpPolar(magnitude, (0, 0), CENTER, MAX_RADIUS, cv2.WARP_POLAR_LINEAR + cv2.WARP_FILL_OUTLIERS)
warped_transposed = np.transpose(warped)
spectrum = list()
for n in warped_transposed:
avg = np.average(n)
spectrum.append(avg)
return spectrum
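# Minimal usage sketch for get_spectrum_img_32 (the random 32x32 uint8 array below is
# a hypothetical stand-in for a real grayscale region image such as 'NO_T.png'):
def _spectrum_usage_example():
    region = np.random.randint(0, 256, (32, 32), dtype=np.uint8)
    spectrum = get_spectrum_img_32(region, replace_negatives=True, log=True)
    # one averaged magnitude per radial bin of the shifted FFT, low radii first
    print(len(spectrum), spectrum[:3])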
def increase_image_size(img, scaling):
height = len(img)
width = len(img[0])
return cv2.resize(img, (int(width * scaling), int(height * scaling)), interpolation=cv2.INTER_NEAREST)
def complex_to_polar2(carr):
'''
Converts complex numbers into polar coordinates.
Since complex numbers are a single datatype this will increase the dimension of the matrix by 1.
The output of a complex number c will be the tuple (distance, angle)
'''
# probably this function is useless
distance = np.abs(carr)
angle = np.angle(carr)
return np.dstack((distance, angle))
def frequency_domain_to_polar_frequency_domain(parr):
'''
converts frequency domain represented by (hF, vF) to (D, angle)
:param parr: polar coordinate matrix.
'''
return
def extract_regions(imgpath):
'''
Extracts the different regions and stores them in the directory.
The T/Q prefix of the file name determines whether the image is stored as trusted or questioned.
'''
# create file path
basename = os.path.basename(imgpath)
dirn = os.path.dirname(imgpath)
# Check whether image is trusted or questioned
if basename.startswith('Q'):
imgcat = 'Q'
elif basename.startswith('T'):
imgcat = 'T'
else:
raise Exception("Could not determine img state")
# load image in BGR format
img = cv2.imread(imgpath)
first_face_bounding_box = detector(img, 0)[0]
shape = predictor(img, first_face_bounding_box)
# get all regions
nose_region = create_rectangle(shape.parts()[29], 32)
nose_region_img = extract_rectangle_from_img(img, nose_region)
cv2.imwrite(os.path.join(dirn, 'NO_' + imgcat + '.png'), nose_region_img)
right_cheekbone_region = create_rectangle(right_cheekbone_point(shape), 32)
right_cheekbone_region_img = extract_rectangle_from_img(img, right_cheekbone_region)
cv2.imwrite(os.path.join(dirn, 'RB_' + imgcat + '.png'), right_cheekbone_region_img)
left_cheekbone_region = create_rectangle(left_cheekbone_point(shape), 32)
left_cheekbone_region_img = extract_rectangle_from_img(img, left_cheekbone_region)
cv2.imwrite(os.path.join(dirn, 'LB_' + imgcat + '.png'), left_cheekbone_region_img)
right_cheek_region = create_rectangle(right_cheek_point(shape), 32)
right_cheek_region_img = extract_rectangle_from_img(img, right_cheek_region)
cv2.imwrite(os.path.join(dirn, 'RC_' + imgcat + '.png'), right_cheek_region_img)
left_cheek_region = create_rectangle(left_cheek_point(shape), 32)
left_cheek_region_img = extract_rectangle_from_img(img, left_cheek_region)
cv2.imwrite(os.path.join(dirn, 'LC_' + imgcat + '.png'), left_cheek_region_img)
forehead_region = create_rectangle(forehead_point(shape), 32)
forehead_region_img = extract_rectangle_from_img(img, forehead_region)
cv2.imwrite(os.path.join(dirn, 'FH_' + imgcat + '.png'), forehead_region_img)
chin_region = create_rectangle(chin_point(shape), 32)
chin_region_img = extract_rectangle_from_img(img, chin_region)
cv2.imwrite(os.path.join(dirn, 'CH_' + imgcat + '.png'), chin_region_img)
return
# NOTE: this definition replaces the fixed-size (32 px) extract_regions above,
# since Python keeps only the last function bound to a given name.
def extract_regions(imgpath, size):
'''
Extracts regions with dimensions: size x size.
The results are stored in a directory named "'size'x'size'"
'''
# create file path
basename = os.path.basename(imgpath)
dirn = os.path.dirname(imgpath)
# Check whether image is trusted or questioned
if basename.startswith('Q'):
imgcat = 'Q'
elif basename.startswith('T'):
imgcat = 'T'
else:
raise Exception("Could not determine img state")
# create directory
new_dir = os.path.join(dirn, '{}x{}'.format(size, size))
try:
os.mkdir(new_dir)
except FileExistsError:
pass
except Exception as e:
print(e)
# load image in BGR format
img = cv2.imread(imgpath)
first_face_bounding_box = detector(img, 0)[0]
shape = predictor(img, first_face_bounding_box)
# get all regions
nose_region = create_rectangle(shape.parts()[29], size)
nose_region_img = extract_rectangle_from_img(img, nose_region)
cv2.imwrite(os.path.join(new_dir, 'NO_' + imgcat + '.png'), nose_region_img)
right_cheekbone_region = create_rectangle(right_cheekbone_point(shape), size)
right_cheekbone_region_img = extract_rectangle_from_img(img, right_cheekbone_region)
cv2.imwrite(os.path.join(new_dir, 'RB_' + imgcat + '.png'), right_cheekbone_region_img)
left_cheekbone_region = create_rectangle(left_cheekbone_point(shape), size)
left_cheekbone_region_img = extract_rectangle_from_img(img, left_cheekbone_region)
cv2.imwrite(os.path.join(new_dir, 'LB_' + imgcat + '.png'), left_cheekbone_region_img)
right_cheek_region = create_rectangle(right_cheek_point(shape), size)
right_cheek_region_img = extract_rectangle_from_img(img, right_cheek_region)
cv2.imwrite(os.path.join(new_dir, 'RC_' + imgcat + '.png'), right_cheek_region_img)
left_cheek_region = create_rectangle(left_cheek_point(shape), size)
left_cheek_region_img = extract_rectangle_from_img(img, left_cheek_region)
cv2.imwrite(os.path.join(new_dir, 'LC_' + imgcat + '.png'), left_cheek_region_img)
forehead_region = create_rectangle(forehead_point(shape), size)
forehead_region_img = extract_rectangle_from_img(img, forehead_region)
cv2.imwrite(os.path.join(new_dir, 'FH_' + imgcat + '.png'), forehead_region_img)
chin_region = create_rectangle(chin_point(shape), size)
chin_region_img = extract_rectangle_from_img(img, chin_region)
cv2.imwrite(os.path.join(new_dir, 'CH_' + imgcat + '.png'), chin_region_img)
return
def show_extracted_regions(imgpath):
win = dlib.image_window()
# load grayscale img
img = cv2.imread(imgpath)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
win.clear_overlay()
win.set_image(img)
# detect face
first_face_bounding_box = detector(img, 0)[0]
shape = predictor(img, first_face_bounding_box)
# nose region
#win.add_overlay(create_rectangle(shape.parts()[29], 32), dlib.rgb_pixel(255, 0, 0))
#right_cheekbone_center = right_cheekbone_point(shape)
#win.add_overlay(create_rectangle(right_cheekbone_center, 32), dlib.rgb_pixel(0, 255, 0))
#left_cheekbone_center = left_cheekbone_point(shape)
#win.add_overlay(create_rectangle(left_cheekbone_center, 32), dlib.rgb_pixel(0, 0, 255))
#right_cheek_center = right_cheek_point(shape)
#win.add_overlay(create_rectangle(right_cheek_center, 32), dlib.rgb_pixel(255, 255, 0))
#left_cheek_center = left_cheek_point(shape)
#win.add_overlay(create_rectangle(left_cheek_center, 32), dlib.rgb_pixel(255, 0, 255))
#forehead = forehead_point(shape)
#win.add_overlay(create_rectangle(forehead, 32), dlib.rgb_pixel(0, 255, 255))
#chin = chin_point(shape)
#win.add_overlay(create_rectangle(chin, 32), dlib.rgb_pixel(255, 255, 255))
draw_shape_points(shape, win)
#wait_for_next_input(win)
#extr_img = extract_rectangle_from_img(img, create_rectangle(shape.parts()[29], 32))
#win.clear_overlay()
#win.set_image(extr_img)
wait_for_next_input(win)
win.clear_overlay()
def extract_rectangle_from_img(img, rectangle):
'''
Returns new img with content of the rectangle on the image.
'''
tl = rectangle.tl_corner()
tr = rectangle.tr_corner()
bl = rectangle.bl_corner()
return img[tl.y:bl.y, tl.x:tr.x]
def right_cheekbone_point(shape):
'''
Gets the right cheekbone point.
Shape needs to be shape from dlib 68 landmark predictor.
'''
points = shape.parts()
# cheekbone points
middle_cheekbone_point = points[29]
right_cheekbone_upper = points[15]
right_cheekbone_lower = points[14]
x = int(0.33 * middle_cheekbone_point.x + 0.33 * right_cheekbone_upper.x + 0.33 * right_cheekbone_lower.x)
y = int(0.33 * middle_cheekbone_point.y + 0.33 * right_cheekbone_upper.y + 0.33 * right_cheekbone_lower.y)
right_cheekbone_center = dlib.point(x, y)
return right_cheekbone_center
def left_cheekbone_point(shape):
'''
Gets the left cheekbone point.
Shape needs to be shape from dlib 68 landmark predictor.
'''
# cheekbone points
points = shape.parts()
middle_cheekbone_point = points[29]
left_cheekbone_upper = points[1]
left_cheekbone_lower = points[2]
x = int(0.33 * middle_cheekbone_point.x + 0.33 * left_cheekbone_upper.x + 0.33 * left_cheekbone_lower.x)
y = int(0.33 * middle_cheekbone_point.y + 0.33 * left_cheekbone_upper.y + 0.33 * left_cheekbone_lower.y)
left_cheekbone_center = dlib.point(x, y)
return left_cheekbone_center
def left_cheek_point(shape):
'''
Left point of cheek. Close to the corner of the mouth.
Shape needs to be from dlib 68 landmark predictor.
'''
points = shape.parts()
left_mouth_point = points[48]
left_cheek_upper = points[3]
left_cheek_lower = points[4]
x = int(0.5 * left_mouth_point.x + 0.25 * left_cheek_upper.x + 0.25 * left_cheek_lower.x)
y = int(0.5 * left_mouth_point.y + 0.25 * left_cheek_upper.y + 0.25 * left_cheek_lower.y)
left_cheek_center = dlib.point(x, y)
return left_cheek_center
def right_cheek_point(shape):
'''
Right point of cheek. Close to the corner of the mouth.
Shape needs to be from dlib 68 landmark predictor.
'''
points = shape.parts()
right_mouth_point = points[54]
right_cheek_upper = points[13]
right_cheek_lower = points[12]
# right cheek region
x = int(0.5 * right_mouth_point.x + 0.25 * right_cheek_upper.x + 0.25 * right_cheek_lower.x)
y = int(0.5 * right_mouth_point.y + 0.25 * right_cheek_upper.y + 0.25 * right_cheek_lower.y)
right_cheek_center = dlib.point(x, y)
return right_cheek_center
def forehead_point(shape):
'''
Forehead point.
'''
points = shape.parts()
left = points[19]
right = points[24]
nose_upper = points[27]
x = int(0.5 * left.x + 0.5 * right.x)
y = int(0.5 * left.y + 0.5 * right.y)
y_change = int(0.5 * (nose_upper.y - y))
forehead_point = dlib.point(x, y - y_change)
return forehead_point
def draw_shape_points(shape, win):
for point in shape.parts():
win.add_overlay_circle(point, 2, dlib.rgb_pixel(0, 255, 0))
def chin_point(shape):
'''
Chin point
'''
points = shape.parts()
bottom_mouth = points[57]
right_chin = points[9]
left_chin = points[7]
x = int(0.5 * bottom_mouth.x + 0.25 * right_chin.x + 0.25 * left_chin.x)
y = int(0.5 * bottom_mouth.y + 0.25 * right_chin.y + 0.25 * left_chin.y)
chin_point = dlib.point(x, y)
return chin_point
def create_rectangle(center, size):
if (size % 2) == 0:
s = size / 2
x1 = int(center.x - s)
x2 = int(center.x + s)
y1 = int(center.y - s)
y2 = int(center.y + s)
else:
# since we are dealing with a discrete space.
# the solution is to offset the region to the right and top with 1 pixel.
# not ideal, but this should not matter, since this is done for all images.
s = int(size / 2)
x1 = int(center.x - s) - 1
x2 = int(center.x + s)
y1 = int(center.y - s) - 1
y2 = int(center.y + s)
return dlib.rectangle(x1, y1, x2, y2)
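# Example of the sizing rule above: with centre (100, 100), size=32 gives s=16 and a
# rectangle spanning x 84..116 / y 84..116, while size=33 gives s=16 plus the one-pixel
# offset to the left/top, i.e. x 83..116 / y 83..116.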
def wait_for_next_input(win):
'''
Wait until the user presses a key.
If the key is 'x' exit.
'''
keypress = win.get_next_keypress()
if (keypress) == 'x':
sys.exit(0)
if __name__ == '__main__':
main()
|
[
"jellejanwil@gmail.com"
] |
jellejanwil@gmail.com
|
06fdcad4c808d645f460770dc5cda45ab67815ba
|
a4b83f339b9e0bab850e9e2dbd2640c7a032b1ff
|
/dependencies_analysis/src/main.py
|
405318cd5ff8ea64af598d5cc2413eec808b65d2
|
[
"BSD-3-Clause"
] |
permissive
|
afdaniele/director
|
75e9a34393b3dcb6c50de32b6c5739b04f64f23a
|
845ba027f9009803fcf77f44874f2ab9d7ab72e3
|
refs/heads/master
| 2020-04-18T21:31:49.734108
| 2019-05-01T04:42:02
| 2019-05-01T04:42:02
| 167,767,383
| 0
| 0
|
BSD-3-Clause
| 2019-01-27T03:59:31
| 2019-01-27T03:59:31
| null |
UTF-8
|
Python
| false
| false
| 4,374
|
py
|
from os.path import join
import networkx as nx
from utils import ProgressBar, FileReader
ENVIRONMENT = "director"
DATA_DIR = "/code/dependencies_analysis/data"
ALLOWED_SOURCE_PKG = None
ALLOWED_TARGET_PKG = [
"/code/director/src/python",
"/usr/lib/python2.7",
"/usr/lib/python2.7/dist-packages",
"/usr/local/lib/python2.7/dist-packages"
"/usr/local/lib/python2.7/site-packages"
]
ENTRYPOINT_PKG = "/code/director_entrypoint"
PLUGINS_PKG = "/code/director_plugins"
def file_path(file_id):
return join(DATA_DIR, ENVIRONMENT, file_id)
def cut_zero_in_degree_nodes(G, packages):
cut = []
for n in G.in_degree_iter():
if n[1] == 0:
parts = n[0].split('/')
pack_id = int(parts[0][1:])
if pack_id in [packages[ENTRYPOINT_PKG], packages[PLUGINS_PKG]]: continue
cut.append(n[0])
G.remove_node(n[0])
return cut
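# cut_zero_in_degree_nodes() peels off modules that nothing imports (in-degree 0),
# keeping the entrypoint and plugins packages; main() below calls it repeatedly until
# no more nodes can be removed, which is what drives the "Trimming nodes" loop.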
def main():
all, dependencies, errors = [], [], []
for section in ["director", "entrypoint", "plugins"]:
files_to_load = ['all_imports.list', 'dependencies.list', 'import_errors.list']
data = []
for file in files_to_load:
fname = '_'.join([section, file])
fr = FileReader(file_path(fname))
print "Loading file '%s'... " % fname,
data.append(fr.lines())
print "Done!"
# unpack data
sec_all, sec_dependencies, sec_errors = data
# extend data
all.extend(sec_all)
dependencies.extend(sec_dependencies)
errors.extend(sec_errors)
# filter dependencies
packages = {
ENTRYPOINT_PKG : 0,
PLUGINS_PKG : 1
}
pbar = ProgressBar(len(dependencies))
G = nx.DiGraph()
excluded_source = set()
excluded_target = set()
print "Processing dependencies... ",
for dep in dependencies:
# convert string -> tuple
dep = eval(dep)
source, target = dep
pbar.next()
# ignore package if not allowed
if ALLOWED_SOURCE_PKG and source[0] not in ALLOWED_SOURCE_PKG:
excluded_source.add(source[0])
continue
if ALLOWED_TARGET_PKG and target[0] not in ALLOWED_TARGET_PKG:
excluded_target.add(target[0])
continue
# get source package id
source_pkg_id = len(packages)
if source[0] in packages:
source_pkg_id = packages[source[0]]
else:
packages[source[0]] = source_pkg_id
# get target package id
target_pkg_id = len(packages)
if target[0] in packages:
target_pkg_id = packages[target[0]]
else:
packages[target[0]] = target_pkg_id
# create unique ID for file
source = 'P%d/%s' % (source_pkg_id, source[1])
target = 'P%d/%s' % (target_pkg_id, target[1])
# add dependency edge to the graph
G.add_edge(source, target)
# print excluded
print "Excluded sources: %d" % len(excluded_source)
for p in excluded_source:
print "\t%s" % p
print ""
print "Excluded targets: %d" % len(excluded_target)
for p in excluded_target:
print "\t%s" % p
print ""
# print packages
print "Packages:"
for p,i in packages.items():
print "\tP%d : %s" % (i,p)
print ""
# print stats
print nx.info(G)
print "Nodes with self-loops:"
for n in G.nodes_with_selfloops():
print "\t"+str(n)
print ""
print "Trimming nodes:"
step = 1
files_to_remove = []
cut = cut_zero_in_degree_nodes(G, packages)
while len(cut) > 0:
files_to_remove.extend(cut)
print "Step %d. Pruned %d nodes." % (step, len(cut))
print "Modules pruned:"
print "\t" + "\n\t".join(cut)
print "Graph stats:"
print nx.info(G)
step += 1
cut = cut_zero_in_degree_nodes(G, packages)
# print stats
print "Graph pruned!"
print nx.info(G)
dot = nx.nx_pydot.to_pydot(G)
# print dir(dot)
# pos = nx.nx_pydot.graphviz_layout(G) #nx.kamada_kawai_layout(G)
# dot.set_layout(pos)
# with open(join(DATA_DIR, 'graph.dot'), "wb") as fo:
# # fo.write(dot)
# fo.write(pos)
# dot.set_size(2000, 2000)
# dot.set_nodesep(20)
# # dot.set_ranksep(20)
#
# dot.write_pdf( join(DATA_DIR, 'graph.pdf'), prog='neato' )
nx.drawing.nx_pydot.write_dot(G, join(DATA_DIR, 'graph.dot'))
print "Files we can remove:"
pkg_id_to_pkg_path = { v : k for k,v in packages.items() }
for f in files_to_remove:
parts = f.split('/')
pack_id = int(parts[0][1:])
pack_path = pkg_id_to_pkg_path[pack_id]
print "\t%s" % join(pack_path, "/".join(parts[1:]))
if __name__ == '__main__':
main()
|
[
"cha@drive.ai"
] |
cha@drive.ai
|
ef4e45fe551d5134930a7c4748f00fe58bf60c90
|
23ec2d36bc6f3706483b4d3c863ffd11b8247839
|
/fundclear/models.py
|
f62dc5d7515d8b25124e2059be0c2a4056f9e151
|
[] |
no_license
|
slee124565/philcop
|
4d6f536d2d0536d2c9110f4ea2d5b88995322cba
|
592ce889f7eb59f4cf3c02a8ffa85cb505ee7add
|
refs/heads/master
| 2021-01-11T03:58:55.028400
| 2018-01-06T15:30:45
| 2018-01-06T15:30:45
| 71,266,174
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,265
|
py
|
import calendar
import logging
from google.appengine.ext import db
from lxml.html import document_fromstring
from lxml import etree
from datetime import date
from dateutil import parser
from dateutil.relativedelta import relativedelta
from urlsrc.models import WebContentModel
import phicops.utils
URL_TEMPLATE = 'http://announce.fundclear.com.tw/MOPSFundWeb/D02_02P.jsp?fundId={fund_id}&beginDate={begin_date}&endDate={end_date}'
DATE_INDEX = 0
VALUE_INDEX = 1
class dFundCodeModel(db.Model):
content = db.BlobProperty()
class FundClearModel(WebContentModel):
fund_name = db.StringProperty()
@classmethod
def get_fund(cls, p_fund_id, p_months=37):
if (p_fund_id == ''):
logging.warning('Invalid Parameter Value p_fund_id ' + p_fund_id)
return None
if p_months < 0:
logging.warning('Invalid Parameter Value p_months ' + str(p_months))
return None
end_date = date.today() - relativedelta(days=+1)
begin_date = end_date - relativedelta(months=p_months)
t_url = URL_TEMPLATE.format(fund_id=p_fund_id,begin_date=begin_date.strftime("%Y/%m/%d"),end_date=end_date.strftime("%Y/%m/%d"))
fund_model = FundClearModel.get_or_insert_webcontent(p_fund_id,t_url,date.today(),p_months)
if fund_model == None:
logging.warning('FundClearModel get_fund fail')
return fund_model
def get_sample_value_list(self, p_date_list):
'''
return a list of [date, nav] according to date list p_date_list
'''
nav_list = self.get_value_list()
return self.sample_value_list(p_date_list, nav_list)
def get_nav_by_date(self, p_date):
nav_list = self.get_value_list()
t_count = 0
if (nav_list[0][DATE_INDEX]).date() <= p_date:
for t_entry in nav_list:
if p_date == t_entry[DATE_INDEX].date():
logging.debug(__name__ + ': get_nav_by_date: matched entry ' + str(nav_list[t_count]))
return nav_list[t_count-1][VALUE_INDEX]
t_count += 1
logging.warning(__name__ + ': get_nav_by_date: no matching date value exists!')
return False
def get_single_html_table(self):
stk_data = str(self.content).decode('big5')
t_page = document_fromstring(stk_data)
t_tables = t_page.xpath("//table")
t_total = len(t_tables)
#logging.info('total table count: ' + str(t_total))
if t_total < 4:
#return 'TABLE ERROR'
logging.warning('Source HTML TABLE Count ERROR')
return None
html = '<table width="100%" border="1" cellpadding="1" cellspacing="1">'
html += etree.tostring(t_tables[4][0])
for t_table in t_tables[5:-1]:
for t_child in t_table:
if (t_child.tag == 'tr'):
if t_child != None:
html += etree.tostring(t_child)
html += '</table>'
return html
def get_discrete_value_list(self, p_select_day=phicops.utils.MONTH_DAY_END):
value_list = self.get_value_list()
return phicops.utils.get_discrete_date_data_list(value_list, p_select_day)
def get_value_list(self):
'''
return list of [datetime.datetime, float]
'''
dataset = []
stk_data = str(self.content).decode('big5')
t_page = document_fromstring(stk_data)
t_tables = t_page.xpath("//table")
t_total = len(t_tables)
logging.debug(__name__ + ', get_value_list: total table count: ' + str(t_total))
if t_total < 4:
logging.warning(__name__ + 'Source HTML TABLE ERROR, count ' + str(t_total))
return None
#logging.info('fund title:' + etree.tostring(t_tables[4][0][0]))
t_fund_name = t_tables[4][0][0].text
t_fund_name = t_fund_name.replace('\r\n','')
#t_fund_name = str(t_fund_name).encode('big5').splitlines()
#t_fund_name = t_fund_name[0]
logging.debug(__name__ + ', get_value_list: fund_name: ' + t_fund_name)
if (self.fund_name == None) or (self.fund_name != t_fund_name):
self.fund_name = t_fund_name
self.put()
t_count = 5
while (t_count <= (t_total-1)):
#logging.info('table child len ' + str(len(t_tables[t_count])))
if len(t_tables[t_count]) == 2:
t_date_list = [t_child.text for t_child in t_tables[t_count][0]]
t_value_list = [t_child.text for t_child in t_tables[t_count][1]]
#logging.info(t_date_list)
#logging.info(t_value_list)
#logging.info('t_count:' + str(t_count) + '/' + str(t_total))
for i in range(0,len(t_date_list)):
#logging.info('t_count:' + str(i) + '/' + str(len(t_date_list)))
#logging.info([t_date_list[i],t_value_list[i]])
if i != 0:
if (t_date_list[i].strip() != ''):
#logging.info([t_date_list[i],t_value_list[i]])
#dataset.append([calendar.timegm((parser.parse(t_date_list[i])).timetuple()) * 1000,t_value_list[i]])
dataset.append([t_date_list[i],str(t_value_list[i]).strip()])
#else:
# logging.debug('remove element ('+ str(t_count) + '#' + str(i) + '): ' + str([t_date_list[i],t_value_list[i]]))
else:
#logging.debug('skip table:\n' + etree.tostring(t_tables[t_count]))
logging.debug(__name__ + ', get_value_list: skip table ' + str(t_count))
t_count += 1
#break
t_count = 0
while (t_count < len(dataset)):
#logging.info('t_count ' + str(t_count))
(t_date,t_value) = dataset[t_count]
if (t_value == '--') or (t_value == 'N/A'):
if (t_count ==0):
#logging.debug(__name__ + ', get_value_list: removing dataset element ' + str(dataset[t_count]))
del dataset[t_count]
continue
else:
#logging.debug('replace value with previous one, date ' + str(dataset[t_count]))
dataset[t_count][1] = dataset[t_count-1][1]
#if (t_count > 192):
# logging.info('DEBUG:' + str([t_date,t_value]))
dataset[t_count][0] = parser.parse(t_date).date()
dataset[t_count][1] = float(dataset[t_count][1])
t_count += 1
#logging.debug(dataset)
return dataset
def get_dateset(self):
'''
return Flow diagram data list
'''
dataset = []
stk_data = str(self.content).decode('big5')
t_page = document_fromstring(stk_data)
t_tables = t_page.xpath("//table")
t_total = len(t_tables)
logging.info('total table count: ' + str(t_total))
if t_total < 4:
logging.warning('Source HTML TABLE Count ERROR')
return None
#logging.info('fund title:' + etree.tostring(t_tables[4][0][0]))
t_fund_name = t_tables[4][0][0].text
t_fund_name = t_fund_name.replace('\r\n','')
#t_fund_name = str(t_fund_name).encode('big5').splitlines()
#t_fund_name = t_fund_name[0]
logging.info('fund_name: ' + t_fund_name)
if self.fund_name == None:
self.fund_name = t_fund_name
self.put()
t_count = 5
while (t_count <= (t_total-1)):
#logging.info('table child len ' + str(len(t_tables[t_count])))
if len(t_tables[t_count]) == 2:
t_date_list = [t_child.text for t_child in t_tables[t_count][0]]
t_value_list = [t_child.text for t_child in t_tables[t_count][1]]
#logging.info(t_date_list)
#logging.info(t_value_list)
#logging.info('t_count:' + str(t_count) + '/' + str(t_total))
for i in range(0,len(t_date_list)):
#logging.info('t_count:' + str(i) + '/' + str(len(t_date_list)))
#logging.info([t_date_list[i],t_value_list[i]])
if i != 0:
if (t_date_list[i].strip() != ''):
#logging.info([t_date_list[i],t_value_list[i]])
#dataset.append([calendar.timegm((parser.parse(t_date_list[i])).timetuple()) * 1000,t_value_list[i]])
dataset.append([t_date_list[i],t_value_list[i]])
else:
logging.info('remove element ('+ str(t_count) + '#' + str(i) + '): ' + str([t_date_list[i],t_value_list[i]]))
else:
logging.info('skip table:\n' + etree.tostring(t_tables[t_count]))
t_count += 1
#break
t_count = 0
while (t_count < len(dataset)):
#logging.info('t_count ' + str(t_count))
(t_date,t_value) = dataset[t_count]
if (t_value == '--') or (t_value == 'N/A'):
if (t_count ==0):
logging.warning('removing dataset element ' + str(dataset[t_count]))
del dataset[t_count]
continue
else:
logging.warning('replace value with previous one, date ' + str(dataset[t_count]))
dataset[t_count][1] = dataset[t_count-1][1]
#if (t_count > 192):
# logging.info('DEBUG:' + str([t_date,t_value]))
dataset[t_count][0] = calendar.timegm((parser.parse(t_date)).timetuple()) * 1000
dataset[t_count][1] = float(dataset[t_count][1])
t_count += 1
logging.info(dataset)
return dataset
|
[
"lee.shiueh@gmail.com"
] |
lee.shiueh@gmail.com
|
0f4f37974c4711d3de9916cb7c8e4431e2521018
|
0034018a0fd230a16203509b89652fa570fba1a8
|
/resources/thread.py
|
c936d114ba401ce43ee6886f16f257535e490a53
|
[] |
no_license
|
miguelreisa/engsoftwarebackend
|
7f3a0898b676df2957f06e8d012746c8a50706ce
|
a913cedb236e8da9563e5bd483bbcee56a43ca02
|
refs/heads/master
| 2020-03-18T06:25:04.391236
| 2018-05-22T10:20:31
| 2018-05-22T10:20:31
| 134,394,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,732
|
py
|
import sqlite3
from flask_restful import Resource, reqparse
from models.conference import ConferenceModel
from models.paper import PaperModel
from models.user import UserModel
from models.token import TokenModel
from models.thread import ThreadModel
from models.threadresponse import ThreadResponseModel
class ThreadResponses(Resource):
parser = reqparse.RequestParser()
parser.add_argument('tokenId',
type = str,
required = True,
help = "This field cannot be left blank!"
)
parser.add_argument('messageAuthor',
type = str,
required = True,
help = "This field cannot be left blank!"
)
parser.add_argument('messageContent',
type = str,
required = True,
help = "This field cannot be left blank!"
)
def get(self, threadId):
thread = ThreadModel.find_by_id(threadId)
if thread:
return thread.json2()
return {'message' : 'Thread not found'}, 404
def post(self, threadId):
data = ThreadResponses.parser.parse_args()
user = UserModel.find_by_username(data['messageAuthor'])
if user:
userToken = TokenModel.find_by_username(data['messageAuthor'])
if userToken:
if userToken.tokenId == data['tokenId']:
#if PaperModel.find_by_name(data['paperName']):
#return {"message" : "Paper with that name already exists."}, 400
thread_selected = ThreadModel.find_by_id(threadId)
if thread_selected is None:
return {"message" : "Thread selected doesn't exist."}, 400
thread_response = ThreadResponseModel(data['messageAuthor'], data['messageContent'], threadId)
thread_response.save_to_db()
return {'message' : 'Thread response accepted and created!'}, 201
#return {"message":"An error occurred, if this continues please contact us."}, 500
return {'message' : 'Invalid input.'}, 403
class Thread(Resource):
parser = reqparse.RequestParser()
parser.add_argument('tokenId',
type = str,
required = True,
help = "This field cannot be left blank!"
)
parser.add_argument('threadAuthor',
type = str,
required = True,
help = "This field cannot be left blank!"
)
parser.add_argument('topic',
type = str,
required = True,
help = "This field cannot be left blank!"
)
parser.add_argument('conferenceId',
type = int,
required = True,
help = "This field cannot be left blank!"
)
def post(self):
data = Thread.parser.parse_args()
user = UserModel.find_by_username(data['threadAuthor'])
if user:
userToken = TokenModel.find_by_username(data['threadAuthor'])
if userToken:
if userToken.tokenId == data['tokenId']:
#if PaperModel.find_by_name(data['paperName']):
#return {"message" : "Paper with that name already exists."}, 400
conference_selected = ConferenceModel.find_by_id(data['conferenceId'])
if conference_selected is None:
return {"message" : "Conference selected doesn't exist."}, 400
thread = ThreadModel(data['threadAuthor'], data['topic'], data['conferenceId'])
thread.save_to_db()
return {'message' : 'Thread accepted and created!'}, 201
#return {"message":"An error occurred, if this continues please contact us."}, 500
return {'message' : 'Invalid input.'}, 403
|
[
"migpe.reis@gmail.com"
] |
migpe.reis@gmail.com
|
6edfde8225367d10e96adf64cbd1036b97fdc7b0
|
df3217c8747d36f4beb3e7543228967fb078cd8b
|
/data_analysis/create_sample.py
|
96d5cb81a805586a29655147f9f11d9b22fd7658
|
[] |
no_license
|
Timthony/dogs_vs_cats
|
acaad004f443d3b338abf74812bf5fef077bfa66
|
25e7e4b530c823a4c8428651c084ba9be3a833e4
|
refs/heads/master
| 2020-03-19T07:40:41.339341
| 2018-05-08T20:14:53
| 2018-05-08T20:14:53
| 136,138,485
| 1
| 1
| null | 2018-06-05T07:35:48
| 2018-06-05T07:35:47
| null |
UTF-8
|
Python
| false
| false
| 1,522
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 12 05:11:38 2018
@author: liguo
Select a small sample of images from the training and test sets for feature inspection.
"""
import os
import shutil
import random
TRAIN_PATH = "../train/"
TEST_PATH = "../test/"
SAMPLE_PATH = "sample/"
TRAIN_SAMPLE_PATH = SAMPLE_PATH + "train/"
TRAIN_CAT_SAMPLE_PATH = TRAIN_SAMPLE_PATH + "cat/"
TRAIN_DOG_SAMPLE_PATH = TRAIN_SAMPLE_PATH + "dog/"
TEST_SAMPLE_PATH = SAMPLE_PATH + "test/"
SAMPLE_SIZE = 50
def checkDir(dir_path):
if not os.path.exists(dir_path):
os.makedirs(dir_path)
checkDir(SAMPLE_PATH)
checkDir(TRAIN_SAMPLE_PATH)
checkDir(TRAIN_CAT_SAMPLE_PATH)
checkDir(TRAIN_DOG_SAMPLE_PATH)
checkDir(TEST_SAMPLE_PATH)
# create training-set samples
files = os.listdir(TRAIN_PATH)
random.shuffle(files)
train_cat_sample_num = 0
train_dog_sample_num = 0
i = 0
while train_cat_sample_num < SAMPLE_SIZE or train_dog_sample_num < SAMPLE_SIZE:
if 'cat' in files[i] and train_cat_sample_num < SAMPLE_SIZE:
shutil.copyfile(TRAIN_PATH + files[i], TRAIN_CAT_SAMPLE_PATH + files[i])
train_cat_sample_num += 1
if 'dog' in files[i] and train_dog_sample_num < SAMPLE_SIZE:
shutil.copyfile(TRAIN_PATH + files[i], TRAIN_DOG_SAMPLE_PATH + files[i])
train_dog_sample_num += 1
i += 1
# create test-set samples
files = os.listdir(TEST_PATH)
random.shuffle(files)
for i in range(SAMPLE_SIZE*2):
shutil.copyfile(TEST_PATH + files[i], TEST_SAMPLE_PATH + files[i])
|
[
"liguo@fenbi.com"
] |
liguo@fenbi.com
|
e050beb4b72499f095479534ef15503012f24674
|
8eb8f8d88994b22db7e661f0cdf6b0ed34a172bf
|
/sap/cli/object.py
|
595a1294a6cb05cc0b517617033f29ad4a79cc2e
|
[
"Apache-2.0"
] |
permissive
|
kksat/sapcli
|
f85d3376979157145436f5b87ee67ecf423b3ab4
|
8a21809d5a636f126a12d2da4af864b01867e490
|
refs/heads/master
| 2023-04-20T14:59:31.410367
| 2021-05-05T12:54:15
| 2021-05-05T12:54:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,255
|
py
|
"""ADT Object CLI templates"""
import sys
import os
import collections
import sap.cli.core
from sap.cli.core import InvalidCommandLineError, printout
import sap.errors
import sap.adt
import sap.adt.wb
import sap.cli.wb
_NAME_INDEX = 0
_SUFFIX_INDEX = 1
def object_name_from_source_file(filesystem_path):
"""Splits the given file system path into object name and suffix.
It is expected that the object name makes the file name prefix up to
the first dot.
Example:
./src/object.abap
^^^^--- suffix (the 2nd return value)
^^^^^^-------- object name (the 1st return value)
"""
basename = os.path.basename(filesystem_path)
parts = basename.split('.', 1)
if len(parts) <= 1 or not parts[_NAME_INDEX] or not parts[_SUFFIX_INDEX]:
raise InvalidCommandLineError(f'"{basename}" does not match the pattern NAME.SUFFIX')
return parts
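# For example, object_name_from_source_file('./src/zreport.prog.abap') would return
# ['zreport', 'prog.abap']: the object name is everything before the first dot.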
def write_args_to_objects(command, connection, args, metadata=None):
"""Converts parameters of the action 'write object' into a iteration of
objects with the text lines content
"""
name = args.name
text_lines = None
if name == '-':
for filepath in args.source:
if filepath == '-':
raise InvalidCommandLineError('Source file cannot be - when Object name is - too')
obj = command.instance_from_file_path(connection, filepath, args, metadata=metadata)
with open(filepath, 'r') as filesrc:
text_lines = filesrc.readlines()
yield (obj, text_lines)
elif len(args.source) == 1:
if args.source[0] == '-':
text_lines = sys.stdin.readlines()
else:
with open(args.source[0], 'r') as filesrc:
text_lines = filesrc.readlines()
yield (command.instance(connection, args.name, args, metadata=metadata), text_lines)
else:
raise InvalidCommandLineError('Source file can be a list only when Object name is -')
def printout_activation_stats(stats):
"""Prints out activation statistics"""
printout('Warnings:', stats.warnings)
printout('Errors:', stats.errors)
def printout_adt_object(prefix, obj):
"""Prints out ADT object in identifiable way"""
printout(f'{prefix}{obj.objtype.code} {obj.name}')
def activate_object_list(activator, object_enumerable, count):
"""Starts object activation and handles results"""
try:
stats = activator.activate_sequentially(object_enumerable, count)
except sap.cli.wb.StopObjectActivation as ex:
printout('Activation has stopped')
printout_activation_stats(ex.stats)
if ex.stats.active_objects:
printout('Active objects:')
for obj in ex.stats.active_objects:
printout_adt_object(' ', obj)
return 1
else:
printout('Activation has finished')
printout_activation_stats(stats)
if stats.inactive_objects:
printout('Inactive objects:')
for obj in stats.inactive_objects:
printout_adt_object(' ', obj)
return 1
return 1 if stats.errors > 0 else 0
class CommandGroupObjectTemplate(sap.cli.core.CommandGroup):
"""Template Class converting command line parameters to ADT Object methods
calls.
"""
def instance(self, connection, name, args, metadata=None):
"""Returns new instance of the ADT Object proxy class"""
raise NotImplementedError()
def instance_from_file_path(self, connection, filepath, args, metadata=None):
"""Returns new instance of the ADT Object proxy class
where the object name should be deduced from
the given file path.
"""
name, _ = object_name_from_source_file(filepath)
return self.instance(connection, name, args, metadata=metadata)
def build_new_metadata(self, connection, args):
"""Creates an instance of the ADT Object Metadata class for a new object"""
raise NotImplementedError()
def define_create(self, commands):
"""Declares the Create command with its parameters and returns
the definition.
Notice, that this command does not declare the parameter package
which should be create by descendants if necessary.
"""
create_cmd = commands.add_command(self.create_object, name='create')
create_cmd.append_argument('name')
create_cmd.append_argument('description')
create_cmd.declare_corrnr()
return create_cmd
def define_read(self, commands):
"""Declares the Read command with its parameters and returns
the definition
"""
read_cmd = commands.add_command(self.read_object_text, name='read')
read_cmd.append_argument('name')
return read_cmd
def define_write(self, commands):
"""Declares the Write command with its parameters and returns
the definition.
"""
write_cmd = commands.add_command(self.write_object_text, name='write')
write_cmd.append_argument('name',
help='an object name or - for getting it from the source file name')
write_cmd.append_argument('source', nargs='+',
help='a path or - for reading stdin; multiple allowed only when name is -')
write_cmd.append_argument('-a', '--activate', action='store_true',
default=False, help='activate after write')
write_cmd.append_argument('--ignore-errors', action='store_true',
default=False, help='Do not stop activation in case of errors')
write_cmd.append_argument('--warning-errors', action='store_true',
default=False, help='Treat Activation warnings as errors')
write_cmd.declare_corrnr()
return write_cmd
def define_activate(self, commands):
"""Declares the Activate command with its parameters and returns the
definition.
Notice that it allows multiple names on input.
"""
activate_cmd = commands.add_command(self.activate_objects, name='activate')
activate_cmd.append_argument('name', nargs='+')
activate_cmd.append_argument('--ignore-errors', action='store_true',
default=False, help='Do not stop activation in case of errors')
activate_cmd.append_argument('--warning-errors', action='store_true',
default=False, help='Treat Activation warnings as errors')
return activate_cmd
def define(self):
"""Defines the commands Create, Read, Write, and Activate and returns
the command list
"""
cls = self.__class__
if hasattr(cls, '_instance'):
return None
# pylint: disable=protected-access
cls._instance = self
commands = cls.get_commands()
self.define_create(commands)
self.define_read(commands)
self.define_write(commands)
self.define_activate(commands)
return commands
def build_new_object(self, connection, args, metadata):
"""Creates an instance of the ADT Object proxy class for a new object"""
return self.instance(connection, args.name, args, metadata=metadata)
def create_object(self, connection, args):
"""Creates the given object."""
metadata = self.build_new_metadata(connection, args)
obj = self.build_new_object(connection, args, metadata)
obj.description = args.description
obj.create(corrnr=args.corrnr)
def read_object_text(self, connection, args):
"""Retrieves the request command prints it out based on command line
configuration.
"""
obj = self.instance(connection, args.name, args)
print(obj.text)
# pylint: disable=no-self-use
def build_activator(self, args):
"""For children to customize"""
activator = sap.cli.wb.ObjectActivationWorker()
activator.continue_on_errors = args.ignore_errors
activator.warnings_as_errors = args.warning_errors
return activator
def write_object_text(self, connection, args):
"""Changes source code of the given program include"""
toactivate = collections.OrderedDict()
printout('Writing:')
for obj, text in write_args_to_objects(self, connection, args):
printout('*', str(obj))
with obj.open_editor(corrnr=args.corrnr) as editor:
editor.write(''.join(text))
toactivate[obj.name] = obj
if not args.activate:
return 0
activated_items = toactivate.items()
return activate_object_list(self.build_activator(args), activated_items, count=len(activated_items))
def activate_objects(self, connection, args):
"""Actives the given object."""
activated_items = ((name, self.instance(connection, name, args)) for name in args.name)
return activate_object_list(self.build_activator(args), activated_items, count=len(args.name))
# pylint: disable=abstract-method
class CommandGroupObjectMaster(CommandGroupObjectTemplate):
"""Commands for objects that belongs to a package.
The class CommandGroupObjectTemplate defines the command create without the
parameter packages because there are objects that belongs to a container
object (i.e. Function Module).
"""
def build_new_metadata(self, connection, args):
"""Creates an instance of the ADT Object Metadata class for a new object"""
return sap.adt.ADTCoreData(language='EN', master_language='EN',
package=args.package.upper(), responsible=connection.user.upper())
def define_create(self, commands):
"""Calls the super's define_create and inserts the parameter package
right behind the parameter description
"""
create_cmd = super().define_create(commands)
create_cmd.insert_argument(2, 'package')
return create_cmd
|
[
"jakub@thefilaks.net"
] |
jakub@thefilaks.net
|
dfa25b44c043edce1aa14686208aa2b37f404f0e
|
5a2579dfae9ccd1b9c8c0471f07a6de5eb4eac15
|
/_Turtle2/11.py
|
d5032d7e319ff7deaa1201a416094bf7c9e59d36
|
[] |
no_license
|
finchnest/Python-prac-files
|
0983d4645992f4ad26603a4f259cc563604c52e2
|
61da3c9240b3022d59ef2ed7cc90fb598045e470
|
refs/heads/master
| 2022-02-28T19:32:44.515138
| 2019-10-08T18:07:37
| 2019-10-08T18:07:37
| 203,052,507
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
from turtle import *
def rec():
pendown()
forward(200)
right(90)
forward(200)
right(90)
forward(200)
right(90)
forward(200)
right(90)
forward(100)
right(90)
forward(200)
right(90)
forward(100)
right(90)
forward(100)
right(90)
forward(200)
rec()
|
[
"noreply@github.com"
] |
finchnest.noreply@github.com
|
90a3e8ebed6129293171a632099674f3a120fbc8
|
33f0e02c56f2d49b629557e4d5bad5df848974d1
|
/agri/dynamic.sh
|
fe8778fe5aeca6ed3563349ba635367903eec17d
|
[] |
no_license
|
wuyohee2004/learn
|
b7126e1471d8dcea3f60d9af17a199d62f4c4aec
|
b6c0302c541a26f296693438841869f1b366e355
|
refs/heads/master
| 2016-09-05T17:13:50.940534
| 2014-06-22T11:08:49
| 2014-06-22T11:08:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
sh
|
#!/usr/bin/python
def dynamic(arr):
max = arr[0]
sum = arr[0]
for i in range(1,len(arr)):
print ("the %d st loop" %i)
if sum >= 0:
sum += arr[i]
else:
sum = arr[i]
if sum > max:
max = sum
return max
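# dynamic() above is Kadane's maximum-subarray algorithm: `sum` holds the best sum of
# a subarray ending at the current index (restarted at the current element once the
# running sum has gone negative) and `max` keeps the best sum seen overall; the names
# shadow the built-ins max/sum, which is harmless here. For the test array below the
# result is 42, from the slice [10, 20, 12].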
a=[1,-9,12,-33,1,5,6,-99,10,20,12]
print dynamic(a)
|
[
"wuyohee2004@hotmail.com"
] |
wuyohee2004@hotmail.com
|
21d409ea07621875c07d01754c51b28d26439459
|
545caa38cd01d618f4b27ea41928656a2a4071fe
|
/Linked list/Q2_7.py
|
aaeabc70548be4f6f1e1f5c891183e63714a4869
|
[] |
no_license
|
tusharkailash/CTCI
|
e4dcd6e7709a3dd3887c8efa7e52000688bd83d6
|
ed71f602a32c7f11212617168c28d20c833940a5
|
refs/heads/master
| 2020-08-22T20:21:15.822941
| 2019-10-21T03:37:38
| 2019-10-21T03:37:38
| 216,471,158
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
# Implement a function to check if a linked list is a palindrome.
class ListNode:
def __init__(self,x):
self.val = x
self.next = None
class Solution(object):
def palindrome(self,head):
slow = fast = head
while fast and fast.next:
fast = fast.next.next
slow = slow.next #mid-point formed here
#reverse the second half
prev = None
while slow:
next = slow.next
slow.next = prev
prev = slow
slow = next
#check the first half with the second half
while prev:
if head.val != prev.val:
return False
prev = prev.next
head = head.next
return True
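# The check above runs in O(n) time and O(1) extra space: the fast/slow pointers leave
# `slow` at the middle, the second half is reversed in place, and the two halves are
# compared node by node. For odd lengths the middle node falls into the reversed half
# and is compared against itself, which is harmless. The list built below
# (1->2->1->2->1) is a palindrome, so this prints True.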
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(1)
head.next.next.next = ListNode(2)
head.next.next.next.next = ListNode(1)
print Solution().palindrome(head)
|
[
"tusharkailash29@gmail.com"
] |
tusharkailash29@gmail.com
|
971927d242b085015088addb9a51eb5f257f38f0
|
0fb0ed44dded93eab10b7ed4dfc916c3875eb3e3
|
/Musicapp/migrations/0023_alter_post_options.py
|
57df348fb12b375b8ca1e91d3a494f5d90a38c5a
|
[] |
no_license
|
Darkbeast-glitch/Realmusic
|
ceb87ed88eb70ab47979e1657d389e7dfbce988c
|
23a6b956e322f8557210a90d55171e72aa8734e4
|
refs/heads/main
| 2023-08-27T19:30:18.643534
| 2021-10-21T03:21:31
| 2021-10-21T03:21:31
| 417,349,671
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
# Generated by Django 3.2.8 on 2021-10-21 02:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Musicapp', '0022_auto_20211021_0226'),
]
operations = [
migrations.AlterModelOptions(
name='post',
options={},
),
]
|
[
"bbjulius900@gmail.com"
] |
bbjulius900@gmail.com
|
519493ac81fc5d1155c473041a6e5c07b171f538
|
a8ee0d102b54edb6316f50405353d53cc8a38553
|
/app.py
|
2625086947a20c7221b803db8673f8a12c29108e
|
[] |
no_license
|
ved08/Github-API
|
f1e1a6606ddf3040e71a3f6d980d161622e2f44a
|
b06c4525c6df65f925be7bfc6bd6b68023902f0b
|
refs/heads/master
| 2022-12-04T05:25:10.365899
| 2020-08-26T19:25:00
| 2020-08-26T19:25:00
| 290,744,160
| 1
| 0
| null | 2020-08-27T10:19:23
| 2020-08-27T10:19:22
| null |
UTF-8
|
Python
| false
| false
| 1,887
|
py
|
from flask import Flask
from flask import jsonify
from flask_cors import CORS
import requests
from bs4 import BeautifulSoup
import html5lib
app=Flask(__name__)
cors = CORS(app, resources={r"/*": {"origins": "*"}})
@app.route("/")
def hello():
return "hello world"
@app.route("/repo/<id>")
def hh(id):
urll="https://github.com/"+id+"?tab=repositories"
r=requests.get(urll)
soup=BeautifulSoup(r.content,'html5lib')
a=soup.find(id="user-repositories-list")
z=a.find_all('li')
yo={}
er=[]
for i in range(0,len(z)):
s=z[i]
try:
y=s.find('a')
x=y['href']
try:
ur=[]
sds=s.find('a',class_="muted-link mr-3").getText()
j=s.find('p').getText()
name=y.getText()
url="https://github.com"+x
k={'name':name.strip(),'description':j.strip(),'url':url,"stars":sds.strip()}
er=er+[k]
except:
ur=[]
h=y.getText()
y="https://github.com"+x
k={'name': h.strip(),'url':y,"stars":sds.strip()}
er=er+[k]
except:
print("none")
return jsonify(er)
@app.route('/user/<id>')
def st(id):
urll="https://github.com/"+id
r=requests.get(urll)
soup=BeautifulSoup(r.content,'html5lib')
asd=soup.find(class_="flex-order-1 flex-md-order-none mt-2 mt-md-0")
dd=asd.find(class_="mb-3")
followers=dd.find_all('a')[0].find('span').getText().strip(' ')
following=dd.find_all('a')[1].find('span').getText().strip(' ')
totstars=dd.find_all('a')[2].find('span').getText().strip(' ')
las={'Followers':followers,'Following':following,'Total_stars':totstars}
return(las)
if __name__ == "__main__":
app.run()
|
[
"noreply@github.com"
] |
ved08.noreply@github.com
|
155abd7d13912aa0987c80a0c964ad7d4fc7990e
|
09b22d1bd1263e4082e6bba7afa2f2b7a66afd4a
|
/FaceDetection/Smile Detector.py
|
5dfcb45bf4e354761a24d3842578b36decee18d7
|
[] |
no_license
|
yogeshkushwahait/Machine-Learning-Using-Python
|
b70bc5334c4178fecc175451b8b7e04e50a60917
|
8102ce7b0cba5d48e923f979ae0a8e71c25857b1
|
refs/heads/master
| 2022-03-28T05:21:24.332537
| 2019-11-05T06:34:00
| 2020-01-09T16:06:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,172
|
py
|
# coding: utf-8
# In[2]:
import cv2
# In[3]:
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml');
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml');
smile_cascade = cv2.CascadeClassifier('haarcascade_smile.xml');
# In[4]:
def detect(gray,frame):
faces = face_cascade.detectMultiScale(gray, 1.3,5)
for (x,y,w,h) in faces:
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h,x:x+w]
roi_color = frame[y:y+h,x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray, 1.1,22)
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
smiles = smile_cascade.detectMultiScale(roi_gray, 1.7,22)
for (sx,sy,sw,sh) in smiles:
cv2.rectangle(roi_color,(sx,sy),(sx+sw,sy+sh),(0,0,255),2)
return frame
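# detectMultiScale(img, scaleFactor, minNeighbors): scaleFactor controls how much the
# image is scaled down between pyramid levels (1.3 is roughly 30% per step), and
# minNeighbors is how many overlapping detections a candidate needs before it is kept,
# so the value of 22 used for eyes and smiles suppresses false positives inside the
# face ROI.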
# In[5]:
video_capture = cv2.VideoCapture(0)
while True:
_, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
canvas = detect(gray, frame)
cv2.imshow('Video', canvas)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
|
[
"rishav.jnit@gmail.com"
] |
rishav.jnit@gmail.com
|
ae037578c4aae2b761816ff3ddd5b625e2564255
|
9375d105fd18d5d7af31b7350b56d82894fabb17
|
/03_Lists_Tuples/01_lists.py
|
4cb9046835ab6fcf2957d5cc75b3124818aa2459
|
[] |
no_license
|
shas-hank7/python
|
597ebcd31e8855b88e7c8cfb1f15b709dbb73b4f
|
9ef2a80f087936eccb36ec51a3758be981dc80f4
|
refs/heads/master
| 2023-08-02T11:29:16.172087
| 2021-09-14T19:54:08
| 2021-09-14T19:54:08
| 406,503,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
a=[1,2,3,4, 'Hello']
print(a)
#List Slicing
""" friends=["Shashank", "Binod", "Parmod", "Vinod", 45, "Raje"]
print(friends[-5:-1])
print(friends[-5:])
print(friends[-5::2]) """
a=[3,4,2,1]
#a.sort()
print(a.sort())  # list.sort() sorts in place and returns None, so this prints None
|
[
"shashank770@gmail.com"
] |
shashank770@gmail.com
|
cc08681f4493d7f771380a720f36835b308b17ae
|
a48fbf8dd09b6f5f3c879495905a04187758ac19
|
/TrafficSignDetection/PreprocessCode/mser_red.py
|
51767f9a65789a939e6fb71e8cc184e585adcf58
|
[] |
no_license
|
davidwang318/ComputerVision-Classical
|
b2c751678cc472677384af0d0ba4cc44bef31ef8
|
6763dc02ee872d44c99e5273b6cc64c90dfa7824
|
refs/heads/master
| 2021-07-07T19:17:22.376654
| 2021-04-21T02:23:04
| 2021-04-21T02:23:04
| 238,592,340
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,273
|
py
|
import cv2
import numpy as np
import os
index = 2600
while True:
# Input image
#img_b = cv2.imread("frame_normalize_blue_0/frame%d.jpg" %index, -1)
img_r = cv2.imread("frame_normalize_red_0/frame%d.jpg" %index, -1)
img_rgb = cv2.imread("Loose_Red_result/%d.jpg" %index)
img_tem = cv2.imread("frame_denoise/frame%d.jpg" %index)
images = [img_r]  # img_b is commented out above, so use the red-normalized frame
mask = np.zeros(img_r.shape).astype(np.uint8)
for img in images:
# Region proposals based on MSER feature
mser = cv2.MSER_create(_min_area=300, _max_area=10000, _max_variation=0.5, _min_diversity=0.2, _delta=2)
regions = np.array(mser.detectRegions(img)[0])
hulls = [cv2.convexHull(p) for p in regions]
for p in hulls:
epsilon = 0.008 * cv2.arcLength(p, True)
approx = cv2.approxPolyDP(p, epsilon, True)
x, y, w, h = cv2.boundingRect(p)
if 0.6 <= w/h <= 1.4 and 200 < w*h < 12000 and y < 700:
cv2.rectangle(mask, (x, y), (x+w,y+h), 1, -1)
img_rgb = cv2.bitwise_and(img_rgb, img_rgb, mask=mask)
cv2.imwrite("blue_hsv_mser/frame%d.jpg" %index, img_rgb)
cv2.imshow('img', img_rgb)
cv2.imshow('template', img_tem)
index += 1
cv2.waitKey(1)
cv2.destroyAllWindows()
|
[
"davidwang106@gmail.com"
] |
davidwang106@gmail.com
|
aad1a694a84413a0ae76f87d45095993d9197327
|
e28ea03cf2a1082c57d49cc0da97b2115a8af823
|
/cmsplugin_wordpress_api/cms_plugins.py
|
0e912853fa81407ad0aa06004385fa0789768d19
|
[
"MIT"
] |
permissive
|
ImaginaryLandscape/django-wordpress-api
|
a5c0ecfacef2ea999b439012d7b9f84192038d11
|
1dd205d095b6f637779cc437ffa8b6004e40075a
|
refs/heads/master
| 2023-04-24T14:43:03.089617
| 2021-02-02T19:27:52
| 2021-02-02T19:27:52
| 269,103,150
| 1
| 0
|
MIT
| 2020-06-03T14:00:15
| 2020-06-03T14:00:14
| null |
UTF-8
|
Python
| false
| false
| 1,716
|
py
|
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django import forms
from .models import LatestBlogs, BlogsByAuthor, BlogsByCategory, BlogsByTag
from .utils import get_authors, get_categories, get_tags
class AuthorForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AuthorForm, self).__init__(*args, **kwargs)
self.fields['authors'] = forms.ChoiceField(choices=get_authors())
class CategoryForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(CategoryForm, self).__init__(*args, **kwargs)
self.fields['categories'] = forms.ChoiceField(choices=get_categories())
class TagForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(TagForm, self).__init__(*args, **kwargs)
self.fields['tags'] = forms.ChoiceField(choices=get_tags())
@plugin_pool.register_plugin
class LatestBlogsPlugin(CMSPluginBase):
module = "Wordpress Blogs"
model = LatestBlogs
render_template = "cmsplugin_wordpress_api/latest_blogs.html"
@plugin_pool.register_plugin
class BlogsByAuthorPlugin(CMSPluginBase):
form = AuthorForm
module = "Wordpress Blogs"
model = BlogsByAuthor
render_template = "cmsplugin_wordpress_api/blogs_by_author.html"
@plugin_pool.register_plugin
class BlogsByCategoryPlugin(CMSPluginBase):
form = CategoryForm
module = "Wordpress Blogs"
model = BlogsByCategory
render_template = "cmsplugin_wordpress_api/blogs_by_category.html"
@plugin_pool.register_plugin
class BlogsByTagPlugin(CMSPluginBase):
form = TagForm
module = "Wordpress Blogs"
model = BlogsByTag
render_template = "cmsplugin_wordpress_api/blogs_by_tag.html"
|
[
"dbertrand@imagescape.com"
] |
dbertrand@imagescape.com
|
ab7b1c1bf14852dceacb9b0d162e57497089bcec
|
d828cbdacfce323f7014e232cff8961ae4219918
|
/apps/api/migrations/0031_auto_20210811_1621.py
|
c061d671a2054a0c6c00818ba8fd002cd74b2a0a
|
[] |
no_license
|
MK-Achmerouanine/ML_Perception
|
a4e1347970f0c2d920cf96c80aa252d415d3e16d
|
c6b08e6f7a939b3a80913a033bbc007809ffacfc
|
refs/heads/main
| 2023-07-15T02:28:26.520143
| 2021-08-29T00:09:34
| 2021-08-29T00:09:34
| 394,370,442
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
# Generated by Django 3.2.6 on 2021-08-11 16:21
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('api', '0030_auto_20210811_1614'),
]
operations = [
migrations.AlterField(
model_name='endpoint',
name='slug',
field=models.SlugField(blank=True, default=uuid.UUID('287ef094-8fba-4221-b865-36b8ef49eb65'), null=True, verbose_name='Slug'),
),
migrations.AlterField(
model_name='imagetotranslate',
name='slug',
field=models.SlugField(default=uuid.UUID('748ef6c8-9c32-455b-963d-7f35777accc7'), verbose_name='Slug'),
),
migrations.AlterField(
model_name='texttoaudio',
name='slug',
field=models.SlugField(default=uuid.UUID('56abffb0-2777-4bf3-b0c2-ea4f3e758477'), verbose_name='Slug'),
),
]
|
[
"baouhamismail98@gmail.com"
] |
baouhamismail98@gmail.com
|
ff20d14ea17953aab8f491c608bffae140cdf61b
|
2b5d21e22604aaefe2d03efc79d55983800416a7
|
/Programming/Practice/03/Python/arithmetic_for_different_types.py
|
e57cb25dccdfd9fe1af481a966df7facce0920a0
|
[] |
no_license
|
VladSmiyan/prt
|
bdc6fa03b60c540f60b6f3f6d9eb08ec00020fb5
|
d7403bcaea6236f7e67ef3deb8d1da21ed2b0222
|
refs/heads/main
| 2023-01-02T12:27:49.413441
| 2020-10-30T19:11:54
| 2020-10-30T19:11:54
| 308,720,800
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
#!/usr/bin/python3
# Task 3 - Arithmetic for different types
# Python's (//) operator corresponds to the C++ (/) operator
# (in C++ we use ints)
print('3. Арифметика для разных типов: ')
a, b = map(int, input('Введите a и b: ').split(' '))
print((a + b), (a - b), (a * b), (a / b))
|
[
"noreply@github.com"
] |
VladSmiyan.noreply@github.com
|
4402400f9646593187e43c7982a4e61d0d01b033
|
786de89be635eb21295070a6a3452f3a7fe6712c
|
/pandas/tags/V00-00-02/SConscript
|
b42efd570012fd5fb90cb1ffb13d916266eb5ce8
|
[] |
no_license
|
connectthefuture/psdmrepo
|
85267cfe8d54564f99e17035efe931077c8f7a37
|
f32870a987a7493e7bf0f0a5c1712a5a030ef199
|
refs/heads/master
| 2021-01-13T03:26:35.494026
| 2015-09-03T22:22:11
| 2015-09-03T22:22:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 769
|
#------------------------------------------------------------------------
# File and Version Information:
# $Id: SConscript 4651 2012-10-26 16:55:30Z salnikov@SLAC.STANFORD.EDU $
#
# Description:
# SConscript file for package pandas
#------------------------------------------------------------------------
# Do not delete following line, it must be present in
# SConscript file for any SIT project
Import('*')
from os.path import join as pjoin
from SConsTools.standardExternalPackage import standardExternalPackage
pkg = "pandas"
pkg_ver = "0.13.1b"
PREFIX = pjoin('$SIT_EXTERNAL_SW', pkg, pkg_ver)
PYDIR = pjoin('$PYTHON_LIBDIRNAME', '$PYTHON', "site-packages")
PKGINFO = (pkg, pkg_ver, '$PYTHON', '$SIT_ARCH.found')
standardExternalPackage(pkg, **locals())
|
[
"gapon@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] |
gapon@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7
|
|
7242a46a79f26f1224fb7ddb1288eee1b2dcbd7a
|
7f24ccbb1f054f9caa5c5ddb7ff486622030df3e
|
/configs/iou-aware_rcnn_final/iou_aware_rcnn_r50_baseline2_2.py
|
9628e160b4739e9c28d0ab06dc624ff173dc176f
|
[
"Apache-2.0"
] |
permissive
|
shouwangzhe134/IoU-Aware-R-CNN
|
674808e158ae85bf2b621002cfea7827104e94ce
|
262f038ff401da53511f60c29bd4c2957befbb0c
|
refs/heads/main
| 2023-07-31T07:10:53.510129
| 2021-09-20T03:00:40
| 2021-09-20T03:00:40
| 408,291,765
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,065
|
py
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
work_dir='./work_dirs/iou-aware_rcnn_final/'
# model settings
model = dict(
type='FasterRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='IoUAwareRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
dict(
type='IoUAwareBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
reg_class_agnostic=True,
loss_iou=dict(type='L1Loss', loss_weight=1.0))
]))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False
),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,# the initial proposals without gt bboxes
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='IoUSampler',
num=32),
pos_weight=-1,
debug=False
),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,# the regressed proposals from the initial proposals
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='IoUSampler',
num=32),
pos_weight=-1,
debug=False
)
])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.5),
        # nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.001),
max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
)
|
[
"2014210059@tju.edu.cn"
] |
2014210059@tju.edu.cn
|
8eafbee307efaea9b8d218e26c0dfc888e0b2028
|
418c3f3ca224cad9f7c538ab6f01c7675d8885bf
|
/our_normals/source/points_to_surf_model.py
|
cd4ac12aff7da4b83ee72401eaf6cb4e6baeff1e
|
[
"MIT"
] |
permissive
|
joycewangsy/pcpnet_normal
|
9df0d1aa4f6911d8c006afd4c5cc3efcc54d4a78
|
b4c8b5b5bf52481b49cf1c3cf14f7c84eb04d0fa
|
refs/heads/main
| 2023-01-29T06:26:46.510436
| 2020-12-02T05:46:24
| 2020-12-02T05:46:24
| 317,741,889
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,527
|
py
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.nn.functional as F
import source.network as sn
from source.base import utils
input_dims_per_point = 3
class STN(nn.Module):
def __init__(self, net_size_max=1024, num_scales=1, num_points=500, dim=3, sym_op='max'):
super(STN, self).__init__()
self.net_size_max = net_size_max
self.dim = dim
self.sym_op = sym_op
self.num_scales = num_scales
self.num_points = num_points
self.conv1 = torch.nn.Conv1d(self.dim, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, self.net_size_max, 1)
self.mp1 = torch.nn.MaxPool1d(num_points)
self.fc1 = nn.Linear(self.net_size_max, int(self.net_size_max / 2))
self.fc2 = nn.Linear(int(self.net_size_max / 2), int(self.net_size_max / 4))
self.fc3 = nn.Linear(int(self.net_size_max / 4), self.dim*self.dim)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(self.net_size_max)
self.bn4 = nn.BatchNorm1d(int(self.net_size_max / 2))
self.bn5 = nn.BatchNorm1d(int(self.net_size_max / 4))
if self.num_scales > 1:
self.fc0 = nn.Linear(self.net_size_max * self.num_scales, self.net_size_max)
self.bn0 = nn.BatchNorm1d(self.net_size_max)
def forward(self, x):
batch_size = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
# symmetric operation over all points
if self.num_scales == 1:
x = self.mp1(x)
else:
x_scales = x.new_empty(x.size(0), self.net_size_max * self.num_scales, 1)
for s in range(self.num_scales):
x_scales[:, s*self.net_size_max:(s+1)*self.net_size_max, :] = \
self.mp1(x[:, :, s*self.num_points:(s+1)*self.num_points])
x = x_scales
x = x.view(-1, self.net_size_max*self.num_scales)
if self.num_scales > 1:
x = F.relu(self.bn0(self.fc0(x)))
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = torch.eye(self.dim, dtype=x.dtype, device=x.device).view(1, self.dim*self.dim).repeat(batch_size, 1)
x = x + iden
x = x.view(-1, self.dim, self.dim)
return x
class QSTN(nn.Module):
def __init__(self, net_size_max=1024, num_scales=1, num_points=500, dim=3, sym_op='max'):
super(QSTN, self).__init__()
self.net_size_max = net_size_max
self.dim = dim
self.sym_op = sym_op
self.num_scales = num_scales
self.num_points = num_points
self.conv1 = torch.nn.Conv1d(self.dim, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, self.net_size_max, 1)
self.mp1 = torch.nn.MaxPool1d(num_points)
self.fc1 = nn.Linear(self.net_size_max, int(self.net_size_max / 2))
self.fc2 = nn.Linear(int(self.net_size_max / 2), int(self.net_size_max / 4))
self.fc3 = nn.Linear(int(self.net_size_max / 4), 4)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(self.net_size_max)
self.bn4 = nn.BatchNorm1d(int(self.net_size_max / 2))
self.bn5 = nn.BatchNorm1d(int(self.net_size_max / 4))
if self.num_scales > 1:
self.fc0 = nn.Linear(self.net_size_max*self.num_scales, self.net_size_max)
self.bn0 = nn.BatchNorm1d(self.net_size_max)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
# symmetric operation over all points
if self.num_scales == 1:
x = self.mp1(x)
else:
x_scales = x.new_empty(x.size(0), self.net_size_max*self.num_scales, 1)
for s in range(self.num_scales):
x_scales[:, s*self.net_size_max:(s+1)*self.net_size_max, :] = \
self.mp1(x[:, :, s*self.num_points:(s+1)*self.num_points])
x = x_scales
x = x.view(-1, self.net_size_max*self.num_scales)
if self.num_scales > 1:
x = F.relu(self.bn0(self.fc0(x)))
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
# add identity quaternion (so the network can output 0 to leave the point cloud identical)
iden = x.new_tensor([1, 0, 0, 0])
x_quat = x + iden
# convert quaternion to rotation matrix
x = utils.batch_quat_to_rotmat(x_quat)
return x, x_quat
class PointNetfeat(nn.Module):
def __init__(self, net_size_max=1024, num_scales=1, num_points=500, use_point_stn=True, use_feat_stn=True,
output_size=100, sym_op='max'):
super(PointNetfeat, self).__init__()
self.net_size_max = net_size_max
self.num_points = num_points
self.num_scales = num_scales
self.use_point_stn = use_point_stn
self.use_feat_stn = use_feat_stn
self.sym_op = sym_op
self.output_size = output_size
if self.use_point_stn:
self.stn1 = QSTN(net_size_max=net_size_max, num_scales=self.num_scales,
num_points=num_points, dim=3, sym_op=self.sym_op)
if self.use_feat_stn:
self.stn2 = STN(net_size_max=net_size_max, num_scales=self.num_scales,
num_points=num_points, dim=64, sym_op=self.sym_op)
self.conv0a = torch.nn.Conv1d(input_dims_per_point, 64, 1)
self.conv0b = torch.nn.Conv1d(64, 64, 1)
self.bn0a = nn.BatchNorm1d(64)
self.bn0b = nn.BatchNorm1d(64)
self.conv1 = torch.nn.Conv1d(64, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, output_size, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(output_size)
if self.num_scales > 1:
self.conv4 = torch.nn.Conv1d(output_size, output_size*self.num_scales, 1)
self.bn4 = nn.BatchNorm1d(output_size*self.num_scales)
if self.sym_op == 'max':
self.mp1 = torch.nn.MaxPool1d(num_points)
elif self.sym_op == 'sum':
self.mp1 = None
else:
raise ValueError('Unsupported symmetric operation: %s' % self.sym_op)
def forward(self, x):
# input transform
if self.use_point_stn:
trans, trans_quat = self.stn1(x[:, :3, :]) # transform only point data
# an error here can mean that your input size is wrong (e.g. added normals in the point cloud files)
x_transformed = torch.bmm(trans, x[:, :3, :]) # transform only point data
x = torch.cat((x_transformed, x[:, 3:, :]), dim=1)
else:
trans = None
trans_quat = None
# mlp (64,64)
x = F.relu(self.bn0a(self.conv0a(x)))
x = F.relu(self.bn0b(self.conv0b(x)))
# feature transform
if self.use_feat_stn:
trans2 = self.stn2(x)
x = torch.bmm(trans2, x)
else:
trans2 = None
# mlp (64,128,output_size)
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = self.bn3(self.conv3(x))
# mlp (output_size,output_size*num_scales)
if self.num_scales > 1:
x = self.bn4(self.conv4(F.relu(x)))
# symmetric max operation over all points
if self.num_scales == 1:
if self.sym_op == 'max':
x = self.mp1(x)
elif self.sym_op == 'sum':
x = torch.sum(x, 2, keepdim=True)
else:
raise ValueError('Unsupported symmetric operation: %s' % self.sym_op)
else:
x_scales = x.new_empty(x.size(0), self.output_size*self.num_scales**2, 1)
if self.sym_op == 'max':
for s in range(self.num_scales):
x_scales[:, s*self.num_scales*self.output_size:(s+1)*self.num_scales*self.output_size, :] = \
self.mp1(x[:, :, s*self.num_points:(s+1)*self.num_points])
elif self.sym_op == 'sum':
for s in range(self.num_scales):
x_scales[:, s*self.num_scales*self.output_size:(s+1)*self.num_scales*self.output_size, :] = \
torch.sum(x[:, :, s*self.num_points:(s+1)*self.num_points], 2, keepdim=True)
else:
raise ValueError('Unsupported symmetric operation: %s' % self.sym_op)
x = x_scales
x = x.view(-1, self.output_size * self.num_scales ** 2)
return x, trans, trans_quat, trans2
class PointSIFTfeat(nn.Module):
def __init__(self,net_size_max=1024, num_scales=1, num_points=500, use_point_stn=True, use_feat_stn=True,
output_size=100, sym_op='max'):
super(PointSIFTfeat, self).__init__()
self.net_size_max = net_size_max
self.num_points = num_points
self.num_scales = num_scales
self.use_point_stn = use_point_stn
self.use_feat_stn = use_feat_stn
self.sym_op = sym_op
self.output_size = output_size
if self.use_point_stn:
self.stn1 = QSTN(net_size_max=net_size_max, num_scales=self.num_scales,
num_points=num_points, dim=3, sym_op=self.sym_op)
self.pointsift_res_m1 = sn.PointSIFT_res_module(radius=0.1, output_channel=64, merge='concat')
self.pointnet_sa_m1 = sn.Pointnet_SA_module(npoint=1024, radius=0.1, nsample=32, in_channel=64, mlp=[64, 128],
group_all=False)
self.pointsift_res_m2 = sn.PointSIFT_res_module(radius=0.25, output_channel=128, extra_input_channel=128)
self.pointnet_sa_m2 = sn.Pointnet_SA_module(npoint=256, radius=0.2, nsample=32, in_channel=128, mlp=[128, 256],
group_all=False)
self.pointsift_res_m3_1 = sn.PointSIFT_res_module(radius=0.5, output_channel=256, extra_input_channel=256)
self.pointsift_res_m3_2 = sn.PointSIFT_res_module(radius=0.5, output_channel=512, extra_input_channel=256,
same_dim=True)
self.pointnet_sa_m3 = sn.Pointnet_SA_module(npoint=64, radius=0.4, nsample=32, in_channel=512, mlp=[512, self.output_size],
group_all=True)
self.convt = nn.Sequential(
nn.Conv1d(512 + 256, 512, 1),
nn.BatchNorm1d(512),
nn.ReLU()
)
def forward(self, xyz, points=None):
"""
Input:
            xyz: the raw point cloud (B * N * 3)
Return:
"""
B = xyz.size()[0]
if self.use_point_stn:
trans, trans_quat = self.stn1(xyz[:, :3, :]) # transform only point data
# an error here can mean that your input size is wrong (e.g. added normals in the point cloud files)
x_transformed = torch.bmm(trans, xyz[:, :3, :]) # transform only point data
xyz = torch.cat((x_transformed, xyz[:, 3:, :]), dim=1)
else:
trans = None
trans_quat = None
# ---- c1 ---- #
l1_xyz, l1_points = self.pointsift_res_m1(xyz.transpose(2,1), points)
# print('l1_xyz:{}, l1_points:{}'.format(l1_xyz.shape, l1_points.shape))
c1_xyz, c1_points = self.pointnet_sa_m1(l1_xyz, l1_points)
# print('c1_xyz:{}, c1_points:{}'.format(c1_xyz.shape, c1_points.shape))
# ---- c2 ---- #
l2_xyz, l2_points = self.pointsift_res_m2(c1_xyz, c1_points)
# print('l2_xyz:{}, l2_points:{}'.format(l2_xyz.shape, l2_points.shape))
c2_xyz, c2_points = self.pointnet_sa_m2(l2_xyz, l2_points)
# print('c2_xyz:{}, c2_points:{}'.format(c2_xyz.shape, c2_points.shape))
# ---- c3 ---- #
l3_1_xyz, l3_1_points = self.pointsift_res_m3_1(c2_xyz, c2_points)
# print('l3_1_xyz:{}, l3_1_points:{}'.format(l3_1_xyz.shape, l3_1_points.shape))
l3_2_xyz, l3_2_points = self.pointsift_res_m3_2(l3_1_xyz, l3_1_points)
# print('l3_2_xyz:{}, l3_2_points:{}'.format(l3_2_xyz.shape, l3_2_points.shape))
l3_points = torch.cat([l3_1_points, l3_2_points], dim=-1)
l3_points = l3_points.permute(0, 2, 1).contiguous()
l3_points = self.convt(l3_points)
l3_points = l3_points.permute(0, 2, 1).contiguous()
l3_xyz = l3_2_xyz
# print('l3_xyz:{}, l3_points:{}'.format(l3_xyz.shape, l3_points.shape))
# ---- c4 ---- #
c4_xyz, c4_points = self.pointnet_sa_m3(l3_xyz, l3_points)
# print(c4_points.shape)
trans2=None
x = c4_points.view(B, 1024)
return x, trans, trans_quat, trans2
class PointsToSurfModel(nn.Module):  # based on PointNetDenseCls
def __init__(self, net_size_max=1024, num_points=500, output_dim=3, use_point_stn=True, use_feat_stn=True,
sym_op='max', use_query_point=False,
sub_sample_size=500, do_augmentation=True, single_transformer=False, shared_transformation=False):
super(PointsToSurfModel, self).__init__()
self.net_size_max = net_size_max
self.num_points = num_points
self.use_query_point = use_query_point
self.use_point_stn = use_point_stn
self.sub_sample_size = sub_sample_size
self.num_query_points = int(self.use_query_point)
self.do_augmentation = do_augmentation
self.single_transformer = bool(single_transformer)
self.shared_transformation = shared_transformation
if self.single_transformer:
self.feat_local_global = PointNetfeat(
net_size_max=net_size_max,
num_points=self.num_points + self.sub_sample_size,
num_scales=1,
use_point_stn=use_point_stn,
use_feat_stn=use_feat_stn,
output_size=self.net_size_max,
sym_op=sym_op)
self.fc1_local_global = nn.Linear(int(self.net_size_max), int(self.net_size_max))
self.bn1_local_global = nn.BatchNorm1d(int(self.net_size_max))
else:
if self.use_point_stn and self.shared_transformation:
self.point_stn = QSTN(net_size_max=net_size_max, num_scales=1,
num_points=self.num_points+self.sub_sample_size, dim=3, sym_op=sym_op)
self.feat_local = PointNetfeat(
net_size_max=net_size_max,
num_points=self.num_points,
num_scales=1,
use_point_stn=False,
use_feat_stn=use_feat_stn,
output_size=self.net_size_max,
sym_op=sym_op)
self.feat_global = PointNetfeat(
net_size_max=net_size_max,
num_points=self.sub_sample_size,
num_scales=1,
use_point_stn=use_point_stn and not self.shared_transformation,
use_feat_stn=use_feat_stn,
output_size=self.net_size_max,
sym_op=sym_op)
self.fc1_local = nn.Linear(int(self.net_size_max), int(self.net_size_max / 2))
self.fc1_global = nn.Linear(int(self.net_size_max), int(self.net_size_max / 2))
self.bn1_local = nn.BatchNorm1d(int(self.net_size_max / 2))
self.bn1_global = nn.BatchNorm1d(int(self.net_size_max / 2))
self.fc2 = nn.Linear(int(self.net_size_max / 2) * 2, int(self.net_size_max / 4))
self.fc3 = nn.Linear(int(self.net_size_max / 4), int(self.net_size_max / 8))
self.fc4 = nn.Linear(int(self.net_size_max / 8), output_dim)
self.bn2 = nn.BatchNorm1d(int(self.net_size_max / 4))
self.bn3 = nn.BatchNorm1d(int(self.net_size_max / 8))
def forward(self, x):
patch_features = x['patch_pts_ps'].transpose(1, 2)
shape_features = x['pts_sub_sample_ms'].transpose(1, 2)
shape_query_point = x['imp_surf_query_point_ms'].unsqueeze(2)
# move global points to query point so that both local and global information are centered at the query point
shape_features -= shape_query_point.expand(shape_features.shape)
# # debug output for a single patch with its sub-sample
# if True:
# from source import utils_eval
# out_file = 'debug/train_sample.ply'
# evaluation.visualize_patch(
# patch_pts_ps=patch_features[0].detach().cpu().numpy().transpose(),
# patch_pts_ms=None,
# #query_point_ps=patch_query_point[0].detach().cpu().numpy().transpose().squeeze(),
# query_point_ps=torch.zeros(3),
# pts_sub_sample_ms=shape_features[0].detach().cpu().numpy().transpose(),
# #query_point_ms=shape_query_point[0].detach().cpu().numpy().transpose().squeeze(),
# query_point_ms=torch.zeros(3),
# file_path=out_file)
# print('wrote training sample to {}'.format(out_file))
        # ensure the transforms returned below are defined in every branch
        trans = None
        trans_global_pts = None
        if self.single_transformer:
local_global_features = torch.cat((patch_features, shape_features), dim=2)
local_global_features_transformed, _, _, _ = self.feat_local_global(local_global_features)
patch_features = F.relu(self.bn1_local_global(self.fc1_local_global(local_global_features_transformed)))
else:
if self.use_point_stn and self.shared_transformation:
feats = torch.cat((patch_features, shape_features), dim=2)
trans, trans_quat = self.point_stn(feats[:, :3, :])
shape_features_transformed = torch.bmm(trans, shape_features[:, :3, :])
patch_features_transformed = torch.bmm(trans, patch_features[:, :3, :])
shape_features = torch.cat([shape_features_transformed, shape_features[:, 3:, :]], dim=1)
patch_features = torch.cat([patch_features_transformed, patch_features[:, 3:, :]], dim=1)
shape_features, trans_global_pts, _, _ = \
self.feat_global(shape_features)
shape_features = F.relu(self.bn1_global(self.fc1_global(shape_features)))
if self.use_point_stn and not self.shared_transformation:
# rotate patch-space points like the subsample
patch_features = torch.bmm(trans_global_pts, patch_features)
patch_features, _, _, _ = \
self.feat_local(patch_features)
patch_features = F.relu(self.bn1_local(self.fc1_local(patch_features)))
# rotate query points like the patch
patch_features = torch.cat((patch_features, shape_features), dim=1)
patch_features = F.relu(self.bn2(self.fc2(patch_features)))
patch_features = F.relu(self.bn3(self.fc3(patch_features)))
patch_features = self.fc4(patch_features)
return patch_features,trans,trans_global_pts
class siftPointsToSurfModel(nn.Module):  # based on PointNetDenseCls
def __init__(self, net_size_max=1024, num_points=500, output_dim=3, use_point_stn=True, use_feat_stn=True,
sym_op='max', use_query_point=False,
sub_sample_size=500, do_augmentation=True, single_transformer=False, shared_transformation=False):
super(siftPointsToSurfModel, self).__init__()
self.net_size_max = net_size_max
self.num_points = num_points
self.use_query_point = use_query_point
self.use_point_stn = use_point_stn
self.sub_sample_size = sub_sample_size
self.num_query_points = int(self.use_query_point)
self.do_augmentation = do_augmentation
self.single_transformer = bool(single_transformer)
self.shared_transformation = shared_transformation
if self.single_transformer:
self.feat_local_global = PointNetfeat(
net_size_max=net_size_max,
num_points=self.num_points + self.sub_sample_size,
num_scales=1,
use_point_stn=use_point_stn,
use_feat_stn=use_feat_stn,
output_size=self.net_size_max,
sym_op=sym_op)
self.fc1_local_global = nn.Linear(int(self.net_size_max), int(self.net_size_max))
self.bn1_local_global = nn.BatchNorm1d(int(self.net_size_max))
else:
if self.use_point_stn and self.shared_transformation:
self.point_stn = QSTN(net_size_max=net_size_max, num_scales=1,
num_points=self.num_points+self.sub_sample_size, dim=3, sym_op=sym_op)
self.feat_local = PointNetfeat(
net_size_max=net_size_max,
num_points=self.num_points,
num_scales=1,
use_point_stn=False,
use_feat_stn=use_feat_stn,
output_size=self.net_size_max,
sym_op=sym_op)
self.feat_global = PointSIFTfeat(
net_size_max=net_size_max,
num_points=self.sub_sample_size,
num_scales=1,
use_point_stn=use_point_stn and not self.shared_transformation,
use_feat_stn=use_feat_stn,
output_size=self.net_size_max,
sym_op=sym_op)
self.fc1_local = nn.Linear(int(self.net_size_max), int(self.net_size_max / 2))
self.fc1_global = nn.Linear(int(self.net_size_max), int(self.net_size_max / 2))
self.bn1_local = nn.BatchNorm1d(int(self.net_size_max / 2))
self.bn1_global = nn.BatchNorm1d(int(self.net_size_max / 2))
self.fc2 = nn.Linear(int(self.net_size_max / 2) * 2, int(self.net_size_max / 4))
self.fc3 = nn.Linear(int(self.net_size_max / 4), int(self.net_size_max / 8))
self.fc4 = nn.Linear(int(self.net_size_max / 8), output_dim)
self.bn2 = nn.BatchNorm1d(int(self.net_size_max / 4))
self.bn3 = nn.BatchNorm1d(int(self.net_size_max / 8))
def forward(self, x):
patch_features = x['patch_pts_ps'].transpose(1, 2)
shape_features = x['pts_sub_sample_ms'].transpose(1, 2)
shape_query_point = x['imp_surf_query_point_ms'].unsqueeze(2)
# move global points to query point so that both local and global information are centered at the query point
shape_features -= shape_query_point.expand(shape_features.shape)
# # debug output for a single patch with its sub-sample
# if True:
# from source import utils_eval
# out_file = 'debug/train_sample.ply'
# evaluation.visualize_patch(
# patch_pts_ps=patch_features[0].detach().cpu().numpy().transpose(),
# patch_pts_ms=None,
# #query_point_ps=patch_query_point[0].detach().cpu().numpy().transpose().squeeze(),
# query_point_ps=torch.zeros(3),
# pts_sub_sample_ms=shape_features[0].detach().cpu().numpy().transpose(),
# #query_point_ms=shape_query_point[0].detach().cpu().numpy().transpose().squeeze(),
# query_point_ms=torch.zeros(3),
# file_path=out_file)
# print('wrote training sample to {}'.format(out_file))
        # ensure the transforms returned below are defined in every branch
        trans = None
        trans_global_pts = None
        if self.single_transformer:
local_global_features = torch.cat((patch_features, shape_features), dim=2)
local_global_features_transformed, _, _, _ = self.feat_local_global(local_global_features)
patch_features = F.relu(self.bn1_local_global(self.fc1_local_global(local_global_features_transformed)))
else:
if self.use_point_stn and self.shared_transformation:
feats = torch.cat((patch_features, shape_features), dim=2)
trans, trans_quat = self.point_stn(feats[:, :3, :])
shape_features_transformed = torch.bmm(trans, shape_features[:, :3, :])
patch_features_transformed = torch.bmm(trans, patch_features[:, :3, :])
shape_features = torch.cat([shape_features_transformed, shape_features[:, 3:, :]], dim=1)
patch_features = torch.cat([patch_features_transformed, patch_features[:, 3:, :]], dim=1)
shape_features, trans_global_pts, _, _ = \
self.feat_global(shape_features)
shape_features = F.relu(self.bn1_global(self.fc1_global(shape_features)))
if self.use_point_stn and not self.shared_transformation:
# rotate patch-space points like the subsample
patch_features = torch.bmm(trans_global_pts, patch_features)
patch_features, _, _, _ = \
self.feat_local(patch_features)
patch_features = F.relu(self.bn1_local(self.fc1_local(patch_features)))
# rotate query points like the patch
patch_features = torch.cat((patch_features, shape_features), dim=1)
patch_features = F.relu(self.bn2(self.fc2(patch_features)))
patch_features = F.relu(self.bn3(self.fc3(patch_features)))
patch_features = self.fc4(patch_features)
return patch_features,trans,trans_global_pts
|
[
"923751881@qq.com"
] |
923751881@qq.com
|
7db428084f3310782f3c761b98d335ad28b9dfd0
|
bd2622778606ba60d863ad9695724dcbf52eb247
|
/Generator/MethodGenerator/__init__.py
|
7fce20c4976e2a5f0445612790a503aa47abd4cd
|
[] |
no_license
|
Danielhhs/UITestingCommandGenerator
|
7bebbbbdafe7578586df5c30cd2db03bb4bf415e
|
a106f47fdf89f9a00a6765237906349db11a65c7
|
refs/heads/master
| 2021-01-16T21:41:06.489612
| 2016-05-30T08:11:30
| 2016-05-30T08:11:30
| 59,992,680
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28
|
py
|
__author__ = 'huanghongsen'
|
[
"hhuang@microstrategy.com"
] |
hhuang@microstrategy.com
|
6ab209a70725243dab54bf6c366a0983bfb3f00d
|
826ea45107bde5d09b4d4e53b5de5d499990c5a2
|
/myproject/venv/bin/wheel
|
67a2bbb1d1c12be05a98613692de3809f7dc9b97
|
[] |
no_license
|
xiaoqian123/wechat_lawinfo_back
|
26010aa781c043cb87f1ffc5503a8769e7c07bba
|
f15ceab89e28a0b3b7a933ebcec62a31801da2a8
|
refs/heads/master
| 2020-03-24T20:58:07.215532
| 2018-07-31T11:52:28
| 2018-07-31T11:52:28
| 143,006,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 227
|
#!/tmp/myproject/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"luhongxin@weinventing.com"
] |
luhongxin@weinventing.com
|
|
007f483db4f55e9196bfd7c756d6ec6a1d47b18a
|
977a0fe8964570de1f89671bbc7021b5b89c4a79
|
/ml_models/outlier_detect/__init__.py
|
768ea86efa3a4a5a13992e7a8f7e0517304e5c39
|
[] |
no_license
|
SatanAngelos/ML
|
d46ed6a7b191ea091da29b035e81434affbb379c
|
61be5dc5e4214a6ecebed0b814832885a1f26697
|
refs/heads/main
| 2023-08-28T19:32:13.956085
| 2021-10-22T01:22:07
| 2021-10-22T01:22:07
| 419,658,400
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 102
|
py
|
from .hbos import *
from .iforest import *
from .knn import *
from .phbos import *
from .lof import *
|
[
"xuxl@t.edu.vn"
] |
xuxl@t.edu.vn
|
69053ce7572bab092815a048e7c63b0f8cd43adf
|
b52e5ed4c912e9d68ad033812a88158ec1c30ca7
|
/charts/samples/updating_chart_properties.py
|
cf4d5f32986114c4c57e87387512251949cd059f
|
[] |
no_license
|
catocodex/charts
|
edb4a0ab2edd0be81c33fa683820e310575ee3cc
|
b8c47c56c5715dbe4bf95de075065ce605f7a009
|
refs/heads/master
| 2022-12-15T11:20:37.133725
| 2020-09-15T04:40:38
| 2020-09-15T04:40:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,045
|
py
|
from django.shortcuts import render
# Include the `fusioncharts.py` file which has required functions to embed the charts in html page
from charts.fusioncharts import FusionCharts
from django.views.decorators.clickjacking import xframe_options_exempt
@xframe_options_exempt
# Loading Data from a Static JSON String
# This is an example showing a Column 2D chart where the data is passed in JSON string format.
# The `chart` method is defined to load chart data from a JSON string.
def chart(request):
# Create an object for the column2d chart using the FusionCharts class constructor
column2d = FusionCharts("column2d", "ex1", 600, 400, "chart-1", "json",
# The chart data is passed as a string to the `dataSource` parameter.
"""{
"chart":
{
"caption": "Countries With Most Oil Reserves [2017-18]",
"subcaption": "In MMbbl = One Million barrels",
"xaxisname": "Country",
"yaxisname": "Reserves (MMbbl)",
"numbersuffix": "K",
"theme": "fusion"
},
"data": [{
"label": "Venezuela",
"value": "290"
}, {
"label": "Saudi",
"value": "260"
}, {
"label": "Canada",
"value": "180"
}, {
"label": "Iran",
"value": "140"
}, {
"label": "Russia",
"value": "115"
}, {
"label": "UAE",
"value": "100"
}, {
"label": "US",
"value": "30"
}, {
"label": "China",
"value": "30"
}]
}""")
# returning complete JavaScript and HTML code, which is used to generate chart in the browsers.
return render(request, 'chart-product-level-api.html', {'output' : column2d.render(), 'chartTitle': 'Updating chart properties at runtime'})
|
[
"carlos.tovar@ariwonto.com"
] |
carlos.tovar@ariwonto.com
|
dc008fa679bb4de8e9a90b20ea024a0de74ae5bb
|
9b5fe9ec46b6715e118a003da92989688e0429f3
|
/Udacity/Tournament Results and Catalog/Restaurant Menu and Catalog/Oauth/project.py
|
e834ca5214b59cbc939fa0c47c8f3ae1b03222f0
|
[] |
no_license
|
dreedgibson/Projects
|
a8057828594b93e8ff8ad39d5363bf331096ae07
|
07b932b8b5ca4d5b3f3fbfddb63bedb523ce59f8
|
refs/heads/master
| 2021-01-11T22:00:33.854325
| 2017-03-27T22:24:51
| 2017-03-27T22:24:51
| 78,891,653
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,190
|
py
|
from flask import Flask, render_template, request, redirect,jsonify, url_for, flash
app = Flask(__name__)
from sqlalchemy import create_engine, asc
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Restaurant, MenuItem, User
from flask import session as login_session
import random, string
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
from flask import make_response
import requests
CLIENT_ID = json.loads(
open('client_secrets.json','r').read())['web']['client_id']
#Connect to Database and create database session
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Create a state token to prevent request forgery
# Store in session for later validation
@app.route('/login')
def showLogin():
    state = ''.join(random.choice(string.ascii_uppercase + string.digits)
                    for x in xrange(32))
login_session['state'] = state
return render_template('login.html', STATE = state)
@app.route('/login/gconnect', methods=['POST'])
def gconnect():
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state'), 401)
response.headers['Content-Type'] = 'application/json'
return response
code = request.data
try:
oauth_flow = flow_from_clientsecrets('client_secrets.json',
scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(json.dumps('Failed to upgrade the authorization code'), 401)
response.headers['Content-Type'] = 'application/json'
return response
    access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s' %access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
    if result.get('error') is not None:
        response = make_response(json.dumps(result.get('error')), 500)
        response.headers['Content-Type'] = 'application/json'
        return response
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(json.dumps("Tokens user ID doesn't match given user ID"), 401)
response.headers['Content-Type'] = 'application/json'
return response
if result['issued_to'] != CLIENT_ID:
response = make_response(json.dumps("Tokens client ID doesn't match app's"), 401)
response.headers['Content-Type'] = 'application/json'
return response
stored_credentials = login_session.get('credentials')
stored_gplus_id = login_session.get('gplus_id')
    if stored_credentials is not None and gplus_id == stored_gplus_id:
        response = make_response(json.dumps('Current user is already connected'), 200)
        response.headers['Content-Type'] = 'application/json'
        return response
login_session['provider'] = 'google'
login_session['credentials'] = credentials
login_session['gplus_id'] = gplus_id
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params = params)
data = json.loads(answer.text)
login_session['username'] = data["name"]
login_session['picture'] = data["picture"]
login_session['email'] = data["email"]
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 300px; height: 300px;border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
flash("you are now logged in as %s" % login_session['username'])
print "done!"
return output
@app.route('/gdisconnect')
def gdisconnect():
credentials = login_session['credentials']
if credentials is None:
response = make_response(json.dumps('Current user is not connected'), 401)
response.headers['Content-Type'] = 'application/json'
return response
access_token = credentials.access_token
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token
h = httplib2.Http()
result = h.request(url, 'GET')[0]
if result['status'] == '200':
del login_session['credentials']
del login_session['gplus_id']
del login_session['username']
del login_session['email']
del login_session['picture']
response = make_response(json.dumps('Successfully disconnected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
else:
        response = make_response(json.dumps('Failed to revoke token for given user.'), 400)
response.headers['Content-Type'] = 'application/json'
return response
# facebook login
@app.route('/fbconnect', methods=['POST'])
def fbconnect():
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state'), 401)
response.headers['Content-Type'] = 'application/json'
return response
access_token = request.data
# exchange client token for server side token
app_id = json.loads(open('fb_client_secrets.json','r').read())['web']['app_id']
app_secret = json.loads(open('fb_client_secrets.json','r').read())['web']['app_secret']
url = 'https://graph.facebook.com/oauth/access_token?grant_type=fb_exchange_token&client_id=%s&client_secret=%s&fb_exchange_token=%s' % (app_id,app_secret,access_token)
h = httplib2.Http()
result = h.request(url, 'GET')[1]
userinfo_url = "https://graph.facebook.com/v2.2/me"
token = result.split("&")[0]
url = 'https://graph.facebook.com/v2.4/me?%s&fields=name,id,email' % token
h = httplib2.Http()
result = h.request(url, 'GET')[1]
data = json.loads(result)
login_session['provider'] = 'facebook'
login_session['username'] = data["name"]
login_session['email'] = data["email"]
login_session['facebook_id'] = data['id']
url = "https://graph.facebook.com/v2.2/me/picture?%s&redirect=0&height=200&width=200" % token
h = httplib2.Http()
result = h.request(url, 'GET')[1]
data = json.loads(result)
login_session['picture'] = data["data"]["url"]
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 300px; height: 300px;border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
flash("you are now logged in as %s" % login_session['username'])
print "done!"
return output
@app.route('/fbdisconnect')
def fbdisconnect():
facebook_id = login_session['facebook_id']
url ='https://graph.facebook.com/%s/permissions' % facebook_id
h= httplib2.Http()
result = h.request(url, 'DELETE')[1]
del login_session['username']
del login_session['email']
del login_session['picture']
del login_session['user_id']
del login_session['facebook_id']
return "you have been logged out"
@app.route('/disconnect')
def disconnect():
if 'provider' in login_session:
if login_session['provider'] == 'google':
gdisconnect()
del login_session['gplus_id']
del login_session['credentials']
if login_session['provider'] == 'facebook':
fbdisconnect()
del login_session['facebook_id']
del login_session['username']
del login_session['email']
del login_session['picture']
del login_session['user_id']
del login_session['provider']
flash("You have successfully logged out")
return redirect(url_for('showRestaurants'))
else:
flash("you aren't logged in")
return redirect(url_for('showRestaurants'))
#JSON APIs to view Restaurant Information
@app.route('/restaurant/<int:restaurant_id>/menu/JSON')
def restaurantMenuJSON(restaurant_id):
restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()
items = session.query(MenuItem).filter_by(restaurant_id = restaurant_id).all()
return jsonify(MenuItems=[i.serialize for i in items])
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/JSON')
def menuItemJSON(restaurant_id, menu_id):
Menu_Item = session.query(MenuItem).filter_by(id = menu_id).one()
return jsonify(Menu_Item = Menu_Item.serialize)
@app.route('/restaurant/JSON')
def restaurantsJSON():
restaurants = session.query(Restaurant).all()
return jsonify(restaurants= [r.serialize for r in restaurants])
#Show all restaurants
@app.route('/')
@app.route('/restaurant/')
def showRestaurants():
restaurants = session.query(Restaurant).order_by(asc(Restaurant.name))
if 'username' not in login_session:
return render_template('publicrestaurants.html', restaurants = restaurants)
else:
return render_template('restaurants.html', restaurants = restaurants)
#Create a new restaurant
@app.route('/restaurant/new/', methods=['GET','POST'])
def newRestaurant():
if 'username' not in login_session:
return redirect('/login')
if request.method == 'POST':
newRestaurant = Restaurant(name = request.form['name'],
user_id = login_session['user_id'])
session.add(newRestaurant)
flash('New Restaurant %s Successfully Created' % newRestaurant.name)
session.commit()
return redirect(url_for('showRestaurants'))
else:
return render_template('newRestaurant.html')
#Edit a restaurant
@app.route('/restaurant/<int:restaurant_id>/edit/', methods = ['GET', 'POST'])
def editRestaurant(restaurant_id):
if 'username' not in login_session:
return redirect('/login')
editedRestaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()
if request.method == 'POST':
if request.form['name']:
editedRestaurant.name = request.form['name']
flash('Restaurant Successfully Edited %s' % editedRestaurant.name)
return redirect(url_for('showRestaurants'))
else:
return render_template('editRestaurant.html', restaurant = editedRestaurant)
#Delete a restaurant
@app.route('/restaurant/<int:restaurant_id>/delete/', methods = ['GET','POST'])
def deleteRestaurant(restaurant_id):
if 'username' not in login_session:
return redirect('/login')
restaurantToDelete = session.query(Restaurant).filter_by(id = restaurant_id).one()
if restaurantToDelete.user_id != login_session['user_id']:
return "<script>function myFunction() {alert('You are not authorized to delete this restaurant.');}</script><body onload='myFunction()'>"
if request.method == 'POST':
session.delete(restaurantToDelete)
flash('%s Successfully Deleted' % restaurantToDelete.name)
session.commit()
return redirect(url_for('showRestaurants', restaurant_id = restaurant_id))
else:
return render_template('deleteRestaurant.html',restaurant = restaurantToDelete)
#Show a restaurant menu
@app.route('/restaurant/<int:restaurant_id>/')
@app.route('/restaurant/<int:restaurant_id>/menu/')
def showMenu(restaurant_id):
restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()
creator = getUserInfo(restaurant.user_id)
items = session.query(MenuItem).filter_by(restaurant_id = restaurant_id).all()
if 'username' not in login_session or creator.id != login_session['user_id']:
return render_template('publicmenu.html', items = items,
restaurant = restaurant, creator = creator)
else:
return render_template('menu.html', items = items, restaurant = restaurant,
creator = creator)
#Create a new menu item
@app.route('/restaurant/<int:restaurant_id>/menu/new/',methods=['GET','POST'])
def newMenuItem(restaurant_id):
if 'username' not in login_session:
return redirect('/login')
restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()
if request.method == 'POST':
newItem = MenuItem(name = request.form['name'], description = request.form['description'], price = request.form['price'], course = request.form['course'], restaurant_id = restaurant_id, user_id = restaurant.user_id)
session.add(newItem)
session.commit()
flash('New Menu %s Item Successfully Created' % (newItem.name))
return redirect(url_for('showMenu', restaurant_id = restaurant_id))
else:
return render_template('newmenuitem.html', restaurant_id = restaurant_id)
#Edit a menu item
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/edit', methods=['GET','POST'])
def editMenuItem(restaurant_id, menu_id):
if 'username' not in login_session:
return redirect('/login')
editedItem = session.query(MenuItem).filter_by(id = menu_id).one()
restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()
if request.method == 'POST':
if request.form['name']:
editedItem.name = request.form['name']
if request.form['description']:
editedItem.description = request.form['description']
if request.form['price']:
editedItem.price = request.form['price']
if request.form['course']:
editedItem.course = request.form['course']
session.add(editedItem)
session.commit()
flash('Menu Item Successfully Edited')
return redirect(url_for('showMenu', restaurant_id = restaurant_id))
else:
return render_template('editmenuitem.html', restaurant_id = restaurant_id, menu_id = menu_id, item = editedItem)
#Delete a menu item
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/delete', methods = ['GET','POST'])
def deleteMenuItem(restaurant_id,menu_id):
if 'username' not in login_session:
return redirect('/login')
restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()
itemToDelete = session.query(MenuItem).filter_by(id = menu_id).one()
if itemToDelete.user_id != login_session['user_id']:
return "<script>function myFunction() {alert('You are not authorized to delete this item.');}</script><body onload='myFunction()'>"
if request.method == 'POST':
session.delete(itemToDelete)
session.commit()
flash('Menu Item Successfully Deleted')
return redirect(url_for('showMenu', restaurant_id = restaurant_id))
else:
return render_template('deleteMenuItem.html', item = itemToDelete)
def getUserID(email):
try:
user = session.query(User).filter_by(email = email).one()
return user.id
except:
return None
def getUserInfo(user_id):
user = session.query(User).filter_by(id = user_id).one()
return user
def createUser(login_session):
newUser = User(name=login_session['username'], email =
login_session['email'], picture = login_session['picture'])
session.add(newUser)
session.commit()
user = session.query(User).filter_by(email =
login_session['email']).one()
return user.id
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host = '0.0.0.0', port = 5000)
|
[
"dennis.reed.gibson@gmail.com"
] |
dennis.reed.gibson@gmail.com
|
d6daf791ae48ee6f94e3de12b0ee88e5643381eb
|
4c8eb3d5a63ac2a8ddbb7f997b27f08b8019d5f7
|
/models/polyencoder/config_polyencoder.py
|
3feea746ce3222911402f234fed24a7eb875a35c
|
[] |
no_license
|
RaphaelOlivier/collision-bert
|
94974ed8c94295b9d7ed760cb0ad79f2856d34aa
|
43eda087bf6d632bdb150d98e934206327f8d082
|
refs/heads/master
| 2023-01-20T08:09:47.449597
| 2020-11-21T20:01:43
| 2020-11-21T20:01:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,392
|
py
|
from parlai.core.opt import load_opt_file
from constant import PARLAI_DIR
PARLAI_MODEL_DIR = PARLAI_DIR + '/models/'
POLYENC_MODEL_DIR = PARLAI_MODEL_DIR + 'model_poly/'
POLYENC_OPT_FILE = POLYENC_MODEL_DIR + 'model.opt'
BI_MODEL_DIR = PARLAI_MODEL_DIR + 'model_bi/'
BIENC_OPT_FILE = BI_MODEL_DIR + 'model.opt'
PRETRAINED_BI_MODEL_DIR = PARLAI_MODEL_DIR + 'bi_model_huge_reddit/'
PRETRAINED_BIENC_OPT_FILE = PRETRAINED_BI_MODEL_DIR + 'model.opt'
PRETRAINED_POLY_MODEL_DIR = PARLAI_MODEL_DIR + 'poly_model_huge_reddit/'
PRETRAINED_POLYENC_OPT_FILE = PRETRAINED_POLY_MODEL_DIR + 'model.opt'
def load_poly_encoder_opt():
opt = load_opt_file(POLYENC_OPT_FILE)
if isinstance(opt['fixed_candidates_path'], str):
opt['fixed_candidates_path'] = PARLAI_DIR + opt['fixed_candidates_path']
opt['data_path'] = PARLAI_DIR + 'data'
opt['datapath'] = PARLAI_DIR + 'data'
opt['model_file'] = POLYENC_MODEL_DIR + 'model'
opt['dict_file'] = POLYENC_MODEL_DIR + 'model.dict'
opt['encode_candidate_vecs'] = False
return opt
def load_bi_encoder_opt():
opt = load_opt_file(BIENC_OPT_FILE)
if isinstance(opt['fixed_candidates_path'], str):
opt['fixed_candidates_path'] = PARLAI_DIR + opt['fixed_candidates_path']
opt['data_path'] = PARLAI_DIR + 'data'
opt['datapath'] = PARLAI_DIR + 'data'
opt['model_file'] = BI_MODEL_DIR + 'model'
opt['dict_file'] = BI_MODEL_DIR + 'model.dict'
opt['encode_candidate_vecs'] = False
return opt
def load_pretrained_poly_encoder_opt():
opt = load_opt_file(PRETRAINED_POLYENC_OPT_FILE)
if isinstance(opt['fixed_candidates_path'], str):
opt['fixed_candidates_path'] = PARLAI_DIR + opt['fixed_candidates_path']
opt['data_path'] = PARLAI_DIR + 'data'
opt['datapath'] = PARLAI_DIR + 'data'
opt['model_file'] = PRETRAINED_POLY_MODEL_DIR + 'model'
opt['dict_file'] = PRETRAINED_POLY_MODEL_DIR + 'model.dict'
opt['encode_candidate_vecs'] = False
return opt
def load_pretrained_bi_encoder_opt():
opt = load_opt_file(PRETRAINED_BIENC_OPT_FILE)
if isinstance(opt['fixed_candidates_path'], str):
opt['fixed_candidates_path'] = PARLAI_DIR + opt['fixed_candidates_path']
opt['data_path'] = PARLAI_DIR + 'data'
opt['datapath'] = PARLAI_DIR + 'data'
opt['model_file'] = PRETRAINED_BI_MODEL_DIR + 'model'
opt['dict_file'] = PRETRAINED_BI_MODEL_DIR + 'model.dict'
opt['encode_candidate_vecs'] = False
return opt
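# Possible consolidation (illustrative sketch, not part of the original module): the four
# loaders above differ only in which .opt file and model directory they point at, so a
# single helper could replace them. `_load_encoder_opt` is a hypothetical name.
def _load_encoder_opt(opt_file, model_dir):
    opt = load_opt_file(opt_file)
    if isinstance(opt['fixed_candidates_path'], str):
        opt['fixed_candidates_path'] = PARLAI_DIR + opt['fixed_candidates_path']
    opt['data_path'] = PARLAI_DIR + 'data'
    opt['datapath'] = PARLAI_DIR + 'data'
    opt['model_file'] = model_dir + 'model'
    opt['dict_file'] = model_dir + 'model.dict'
    opt['encode_candidate_vecs'] = False
    return opt
# Usage: load_poly_encoder_opt() would be equivalent to _load_encoder_opt(POLYENC_OPT_FILE, POLYENC_MODEL_DIR)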
|
[
"csong27@outlook.com"
] |
csong27@outlook.com
|
7c6e1b6c33ea05d437ed23695e22c0b12681b0c2
|
6a10d4d17bef4918e8f188d98ada1d599578ca62
|
/An3/Sem1/Python/Lab4/s4.py
|
a61acbc85fd7dbd9b457dcb6312cfbfc002c5148
|
[] |
no_license
|
iceeye7gabi/FII
|
c9870203b25606464e4f5613e0c7eabad946c85f
|
d1a0726f687290c517a47bb96478010403354436
|
refs/heads/master
| 2022-11-19T09:37:47.740963
| 2020-07-20T09:27:03
| 2020-07-20T09:27:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 585
|
py
|
def problema4(my_list):
result = []
for elem in my_list:
if isinstance(elem, int):
result.append(elem)
return sorted(result, reverse=True)
my_list = [404, 2, 'trei', 4, 5.0, [5, 6]]
print(problema4(my_list))
def checkPrime(n):
if n <= 1:
return False
for i in range(2, n):
if n % i == 0:
return False
return True
def problema3(m):
result = []
for i in range(2, m):
if checkPrime(i) is True:
result.append(i)
return result
print(problema3(14))
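# Optional optimization sketch (added, not in the original file): trial division only needs
# to test divisors up to sqrt(n); this gives the same results as checkPrime above with far
# fewer iterations for large n. Requires Python 3.8+ for math.isqrt.
import math

def check_prime_sqrt(n):
    if n <= 1:
        return False
    for i in range(2, math.isqrt(n) + 1):
        if n % i == 0:
            return False
    return True

print([i for i in range(2, 14) if check_prime_sqrt(i)])  # same output as problema3(14)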
|
[
"noreply@github.com"
] |
iceeye7gabi.noreply@github.com
|
92237f74bc30ca5a47e2331be90e813865a10295
|
ce0e38abc34bfda2ad29f66fdb1cde80393a3c68
|
/panorama.py
|
9cc654f8ccd74b5fe5382ec64e9132d5e25e0f8d
|
[] |
no_license
|
tshr-d-dragon/Panorama_Stitching
|
28d1b2c70de9eeb01f8a698e468fcc80fb1bdecc
|
8c48b719ecbe6c37414371e6290457a78292bedd
|
refs/heads/main
| 2023-05-07T05:48:35.624435
| 2021-05-18T16:01:49
| 2021-05-18T16:01:49
| 368,261,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
import cv2
import os
stitcher = cv2.Stitcher_create()
path = 'cv2/panorama'
images = []
for i in os.listdir(path):
a = cv2.imread(f'{path}/{i}')
images.append(a)
status, panorama = stitcher.stitch(images)
if status == 0:  # 0 corresponds to cv2.Stitcher_OK
    print('Panorama Operation Successful')
    cv2.imwrite(f'{path}/Panorama.jpg', panorama)
else:
    print('Panorama Operation Unsuccessful')
|
[
"noreply@github.com"
] |
tshr-d-dragon.noreply@github.com
|
d66a5b4ced8bf2e594226a025143dab3d66cedfd
|
712dad88ba912bcb10ca491e4d8abe1c40e24581
|
/part_3/part3.2.py
|
66b3bcf6df05dc068f333ae42ffb5b0409222c30
|
[] |
no_license
|
kartikeya-badola/Visual-Sudoku-Solver
|
e3d4fe7cbb849f46b7a78e52488fd58435412110
|
19885388edca46d254883f2f25c03a3b0628fb2b
|
refs/heads/master
| 2023-07-05T23:49:09.785432
| 2021-08-17T06:17:41
| 2021-08-17T06:17:41
| 397,127,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,350
|
py
|
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import torch.nn.functional as F
import copy
from sklearn.utils import shuffle
import torch.utils.data as data
from torch.utils.data import random_split
from tqdm import tqdm
from PIL import Image
import argparse
import random
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser(description='COL 870 Assignment 2.3')
parser.add_argument('--train_path', default='/content/Assignment 2/visual_sudoku/train', type=str, help='path to train images')
parser.add_argument('--test_query', default = '/content/Assignment 2/visual_sudoku/train/query/', type=str, help='path to test_query')
parser.add_argument('--sample_images', default = '/content/Assignment 2/sample_images.npy', type=str, help='path to sample images.npy')
args = parser.parse_args()
img_list = []
for l in tqdm(sorted(os.listdir(args.train_path+'/target'))):
pil_img = Image.open(args.train_path+'/target/'+l)
tensor = transforms.ToTensor()(pil_img)
img_list.append(tensor.unsqueeze(0))
imgs = torch.cat(img_list,dim=0)
patches = imgs.unfold(2, 28, 28).unfold(3, 28, 28)
patches = patches.reshape(10000,1,64,28,28).permute(0,2,1,3,4)
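# (added note) assuming 224x224 target images, the two unfold calls cut each sudoku image
# into an 8x8 grid of 28x28 digit crops, so `patches` has shape (10000, 64, 1, 28, 28)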
class TripletDataset(data.Dataset):
def __init__(self):
self.target = patches.reshape(640000,28,28)
self.augment = transforms.Compose([transforms.ToPILImage(),
transforms.RandomAffine(degrees=15, translate=(0.1,0.1), scale=(1.2,1.2)),
transforms.ToTensor()
])
def __len__(self):
return 14 * self.target.shape[0]
def __getitem__(self, S):
i, j = S // 14, S % 14
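        # (added comments) each anchor crop i is paired with 14 negatives, indexed by j:
        #   j < 7  -> a different cell from the same row of the same 8x8 puzzle grid
        #   j >= 7 -> a different cell from the same column of the same puzzle grid
        # so anchor and negative always come from the same puzzle but never the same cell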
image = self.target[i]
if j < 7:
idx = 8 * (i // 8) + (i + j + 1) % 8
else:
idx = 64 * (i // 64) + (i + 8 * (j - 6)) % 64
negative = self.target[idx]
return image.unsqueeze(0), self.augment(image), negative.unsqueeze(0)
trainset = TripletDataset()
trainloader = torch.utils.data.DataLoader(trainset, batch_size=256,
shuffle=True, num_workers=4)
class residual_block(nn.Module):
def __init__(self,in_channels: int, downsample: bool,normalization):
super(residual_block,self).__init__()
self.downsample=downsample
self.perform_norm=not (normalization is None)
print('using norm?',self.perform_norm)
if downsample:
self.identity_downsample = nn.Conv2d(in_channels=in_channels,out_channels=in_channels*2,kernel_size=1,stride=2,padding=0)
self.layer1 = nn.Conv2d(in_channels=in_channels,out_channels=in_channels*2,kernel_size=3,stride=2,padding=1)
self.layer2 = nn.Conv2d(in_channels=in_channels*2,out_channels=in_channels*2,kernel_size=3,stride=1,padding=1)
if self.perform_norm:
self.norm1 = normalization(in_channels*2)
self.norm2 = normalization(in_channels*2)
else:
self.layer1 = nn.Conv2d(in_channels=in_channels,out_channels=in_channels,kernel_size=3,stride=1,padding=1)
self.layer2 = nn.Conv2d(in_channels=in_channels,out_channels=in_channels,kernel_size=3,stride=1,padding=1)
if self.perform_norm:
self.norm1 = normalization(in_channels)
self.norm2 = normalization(in_channels)
def forward(self,x):
residual=x
if self.downsample:
residual=self.identity_downsample(x)
if self.perform_norm:
x = self.norm2(self.layer2(nn.ReLU()(self.norm1(self.layer1(x)))))
else:
x = self.layer2(nn.ReLU()(self.layer1(x)))
x = nn.ReLU()(residual+x)
return x
class block_of_residual_blocks(nn.Module):
def __init__(self,num_blocks: int, in_channels: int ,firstblock: bool,normalization):
super(block_of_residual_blocks,self).__init__()
self.blocks = []
if firstblock:
self.blocks.append(residual_block(in_channels=in_channels,downsample=False,normalization=normalization))
for i in range(num_blocks-1):
self.blocks.append(residual_block(in_channels=in_channels,downsample=False,normalization=normalization))
else:
self.blocks.append(residual_block(in_channels=in_channels,downsample=True,normalization=normalization))
for i in range(num_blocks-1):
self.blocks.append(residual_block(in_channels=2*in_channels,downsample=False,normalization=normalization))
self.blocks = nn.ModuleList(self.blocks)
def forward(self,x):
for block in self.blocks:
x=block(x)
return x
class my_resnet(nn.Module):
def __init__(self,num_blocks: int,normalization):
super(my_resnet,self).__init__()
self.input_layer = nn.Conv2d(in_channels=1,out_channels=16,kernel_size=3,stride=1,padding=1)
self.perform_norm = not (normalization is None)
print('using input norm?',self.perform_norm)
if self.perform_norm:
self.input_norm = normalization(16)
self.block1 = block_of_residual_blocks(num_blocks=num_blocks,in_channels=16,firstblock=True,normalization=normalization)
self.block2 = block_of_residual_blocks(num_blocks=num_blocks,in_channels=16,firstblock=False,normalization=normalization)
self.block3 = block_of_residual_blocks(num_blocks=num_blocks,in_channels=32,firstblock=False,normalization=normalization)
# self.fc = nn.Linear(8*8,num_classes)
self.pool = nn.AdaptiveAvgPool2d((1, 1))
def forward(self,x):
x= self.input_layer(x)
if self.perform_norm:
x = self.block3(self.block2(self.block1(nn.ReLU()(self.input_norm(x)))))
else:
x = self.block3(self.block2(self.block1(nn.ReLU()(x))))
x = self.pool(x)
x = torch.flatten(x, 1)
        return torch.sigmoid(x)  # torch.sigmoid avoids the deprecated F.sigmoid
model = my_resnet(4,nn.BatchNorm2d).to(device)
loss = nn.TripletMarginLoss(margin=2.0)
optimizer = optim.Adam(model.parameters())
for epoch in range(1):
print('Epoch {}/{}'.format(epoch, 9))
print('-' * 10)
# Each epoch has a training and validation phase
model.train()
running_loss = 0.0
running_corrects = 0
running_total = 0
num_batches=0
# predlist=torch.zeros(0,dtype=torch.long, device='cpu')
# labellist=torch.zeros(0,dtype=torch.long, device='cpu')
# Iterate over data.
# for inputs, labels in dataloaders[phase]:
for i,(a,p,n) in tqdm(enumerate(trainloader)):
if i==5000:
break
# im = im.to(device)
# labels = labels.to(device)
emb_a = model(a.to(device))
emb_p = model(p.to(device))
emb_n = model(n.to(device))
loss_value = loss(emb_a,emb_p,emb_n)
optimizer.zero_grad()
loss_value.backward()
optimizer.step()
# statistics
running_loss += loss_value.item()
running_total += len(a)
if i%1000==1:
print('loss:',running_loss/i)
torch.save(model,'triplet_loss.pth')
# if phase == 'train':
# scheduler.step()
# epoch_loss = running_loss/i
# epoch_acc = running_corrects/running_total
# print('{} Loss: {:.4f} acc: {:.4f}'.format(epoch_loss, epoch_acc))
# model = torch.load('triplet_loss.pth').to(device)
torch.save(model,'triplet_loss.pth')
imgs = np.load(args.sample_images)
classes = torch.Tensor(imgs[1:-1]/255.0)
model.eval()
gt_emb = model(classes.unsqueeze(1).to(device))
gt_emb = gt_emb.unsqueeze(0).repeat(64,1,1)
sudoku=[]
for p in tqdm(patches):
p_emb = model(p.to(device))
p_emb = p_emb.unsqueeze(1).repeat(1,8,1)
# print(p_emb.shape,gt_emb.shape)
x = torch.norm(gt_emb-p_emb,dim=2)
# print(x)
_,pred = x.min(dim=1)
# assert 1==2
sudoku.append(pred.cpu().reshape(8,8))
for i,s in enumerate(sudoku):
sudoku[i]=s.unsqueeze(0)
for i,s in enumerate(sudoku):
sudoku[i]=s+1
sudoku_dataset = torch.cat(sudoku,dim=0)
torch.save(sudoku_dataset,'target_sudoku.pth')
for i in range(1,9):
    os.makedirs('gan_Dataset/'+str(i), exist_ok=True)  # makedirs: parent 'gan_Dataset' may not exist yet
for i in tqdm(range(len(sudoku))):
s= sudoku[i].reshape(64)
imgs = patches[i]
for j in range(64):
torchvision.utils.save_image(1-imgs[j],'gan_Dataset/'+str(s[j].item())+'/'+str(i)+'_'+str(j)+'.png')
img_list = []
for l in tqdm(sorted(os.listdir(args.test_query))):
pil_img = Image.open(args.test_query+l)
tensor = transforms.ToTensor()(pil_img)
img_list.append(tensor.unsqueeze(0))
imgs = torch.cat(img_list,dim=0)
patches = imgs.unfold(2, 28, 28).unfold(3, 28, 28)
patches = patches.reshape(10000,1,64,28,28).permute(0,2,1,3,4)
sudoku=[]
for p in tqdm(patches):
p_emb = model(p.to(device))
p_emb = p_emb.unsqueeze(1).repeat(1,8,1)
# print(p_emb.shape,gt_emb.shape)
x = torch.norm(gt_emb-p_emb,dim=2)
# print(x)
_,pred = x.min(dim=1)
# assert 1==2
sudoku.append(pred.cpu().reshape(8,8))
for i,s in enumerate(sudoku):
sudoku[i]=s.unsqueeze(0)
for i,s in enumerate(sudoku):
sudoku[i]=s+1
sudoku_dataset = torch.cat(sudoku,dim=0)
torch.save(sudoku_dataset,'test_query_sudoku.pth')
|
[
"kartikeya.badola@gmail.com"
] |
kartikeya.badola@gmail.com
|
9fda23a05e1bd04a774047aebab26396f716dc34
|
f3333e23a0f86d917a15271c0c7a458d50b55779
|
/flask/app.py
|
b447205ef9736c026201b9ec8d7ee62dfe9f2b8e
|
[] |
no_license
|
johnwashburne/hackSC
|
e7c7fd26658875f3f14851a28db779016b3386a8
|
ba684ff292a89380446abc31141d1f2c5b976180
|
refs/heads/master
| 2020-12-26T16:36:50.992505
| 2020-02-04T01:53:30
| 2020-02-04T01:53:30
| 237,566,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,138
|
py
|
from flask import Flask, render_template, make_response, jsonify, request, session, send_from_directory, flash, redirect, url_for
from translator import translate_data, translate_questions, translate_phrase, translate_dict
import html
from PIL import Image, ImageDraw, ImageFont
from classifier import Classifier
import json
import os
app = Flask(__name__)
app.secret_key = 'super secret key'
app.config['UPLOAD_FOLDER'] = "images"
lower = 'abcdefghijklmno'
# fixes issue with cached images being displayed
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate, public, max-age=0"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
@app.route('/getQuestions/', methods=['GET'])
def get_questions():
lang = request.args.get("lang")
document = request.args.get("document")
# classify uploaded image
if document == 'upload':
c = Classifier()
questions = c.process('images/upload.png')
with open('upload.json', 'w') as outfile:
json.dump(questions, outfile)
else:
questions = json.load(open('{}.json'.format(document), 'r'))
if request.args.get('type') == 'json':
return jsonify(questions)
questions = translate_questions(questions, lang)
placeholder = translate_phrase('Answer here', lang)
a = ""
for q in questions:
if q['qtype'] == 'text' and len(q['children']) == 0:
a += '''
<div class="form-group">
<label for="{}">{}</label>
<input type="name" name="{}" class="form-control" placeholder="{}"></div>
'''.format(q['main']['text'].split('.')[0], q['main']['text'], q['main']['text'].split('.')[0], placeholder)
elif q['qtype'] == 'text':
a += '''
<div class="form-group">
<label for="{}">{}</label><br>
'''.format(q['main']['text'].split('.')[0], q['main']['text'])
count = 0
for s in q['children']:
a += '''
<div class="form-group">
<label for="{}">{}</label>
<input type="name" class="form-control" id="{}" placeholder="{}" name="{}"></div>
'''.format(q['main']['text'].split('.')[0] + lower[count] + '. ', lower[count] + '. ' + s['text'], q['main']['text'].split('.')[0] + lower[count] + '. ', placeholder, q['main']['text'].split('.')[0] + lower[count])
count += 1
a += "</div>"
elif q['qtype'] == 'radio':
a += '''
<label for="{}">{}</label>
<div class="form-group form-inline" id ="{}">
'''.format(q['main']['text'].split('.')[0], q['main']['text'], q['main']['text'].split('.')[0])
count = 0
for s in q['children']:
a += '''
<div class="form-check form-check-inline">
<input class="form-check-input" type="radio" name="{}" id="Yes" value="{}" checked>
<label class="form-check-label" for="Yes">{}</label>
</div>
'''.format(q['main']['text'].split('.')[0], count, s['text'])
count += 1
a += "</div>"
else:
a += '''
<p>{}</p>
'''.format(q['main']['text'])
for s in q['children']:
a += '''
<input type="checkbox" name="vehicle1" value="Bike">{}<br>
'''.format(s['text'])
a += """
<input class="btn btn-primary" type="submit" value="Submit">
"""
return a
@app.route('/formSubmit', methods=['GET', 'POST'])
def form_submit():
if request.method == 'POST':
answers = request.form.to_dict()
answers = translate_dict(answers, 'en')
questions = json.load(open('{}.json'.format(session.get('document')), 'r'))
for key in answers:
if key[-1] not in lower:
questions[int(key)-1]['answer'] = answers[key]
else:
questions[int(key[:-1])-1]['children'][lower.index(key[-1])]['answer'] = answers[key]
im = Image.open('{}/{}.png'.format(app.config['UPLOAD_FOLDER'], session.get('document')))
im = im.convert('RGBA')
coordinates = questions[0]['main']['box']
height = int(im.height*coordinates['Height'])
text_layer = Image.new('RGBA', im.size, (255,255,255,0))
fnt = ImageFont.truetype('arial.ttf', height)
d = ImageDraw.Draw(text_layer)
for question in questions:
# textbox inputs no subfields
if question['qtype'] == 'text' and len(question['children']) == 0:
coordinates = question['main']['box']
d.text((im.width*coordinates['Left'], im.height*coordinates['Top']+(height*1.4)), question['answer'], font=fnt, fill=(0,0,0,255))
        # textbox input w/ subfields
elif question['qtype'] == 'text':
for i in question['children']:
coordinates = i['box']
d.text((im.width*coordinates['Left'], im.height*coordinates['Top']+(height*1.4)), i['answer'], font=fnt, fill=(0,0,0,255))
# radio button input
elif question['qtype'] == 'radio':
answer = question['answer']
coordinates = question['children'][int(answer)]['box']
d.text((im.width*coordinates['Left']-im.width*.025, im.height*coordinates['Top']),"X", font=fnt, fill=(0,0,0,255))
out = Image.alpha_composite(im, text_layer)
document = session.get('document')
out.save('{}/{}1.png'.format(app.config['UPLOAD_FOLDER'], document))
out = out.convert('RGB')
out.save('{}/{}.pdf'.format(app.config['UPLOAD_FOLDER'], document))
session.clear()
return render_template('result.html', fileName='{}1.png'.format(document), dlFileName='{}.pdf'.format(document))
return 'you are a failure'
@app.route('/document/<lang>')
def document_select(lang):
session['lang'] = lang
data = [{'title':'I-589', 'body':'Complete the Application for Asylum and for Withholding of Removal', 'upload': 'Go'},
{'title':'I-485', 'body':'Complete the Application to Register Permanent Residence or Adjust Status.', 'upload': 'Go'},
{'title':'Other', 'body':'Complete a form different from the ones presented.', 'upload':'Upload'}]
data = translate_data(data, lang)
data[0]['url'] = '/i589/{}'.format(lang)
data[1]['url'] = '/i485/{}'.format(lang)
data[2]['url'] = '/upload/{}'.format(lang)
for d in data:
d['body'] = html.unescape(d['body'])
return render_template("document.html", data=data)
@app.route('/forms/<filename>', methods=['GET'])
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
@app.route('/i589/<lang>')
def i589(lang):
session['document'] = 'i589'
return render_template('form.html', lang=lang, document='i589')
@app.route('/i485/<lang>')
def i485(lang):
session['document'] = 'i485'
return render_template('form.html', lang=lang, document='i485')
@app.route('/uploadForm/<lang>')
def upload_form(lang):
session['document'] = 'upload'
return render_template('form.html', lang=lang, document='upload')
@app.route('/upload/<lang>', methods=['GET', 'POST'])
def upload(lang):
if request.method == 'POST':
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file:
file.save(os.path.join(app.config['UPLOAD_FOLDER'], 'upload.png'))
return redirect(url_for('upload_form', lang=lang))
return render_template('upload.html', lang=lang)
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/')
def index():
return render_template('index.html')
if __name__ == "__main__":
app.run()
|
[
"johnwashburne@outlook.com"
] |
johnwashburne@outlook.com
|
b9f2e13111c68a6e023b3157402e3b2464bf08a3
|
e11e1cf6d7916009543bbb54100882b8989d09c5
|
/docs/conf.py
|
b582669c7b1543755c6730e8330bf81a82a83265
|
[
"MIT"
] |
permissive
|
sgmagar/django-circleci-example
|
fac705d5a6b700c64a81c19a5c47acd24b3b8c60
|
7352f0bc9a1b10deb2d9be592c89f73ca455b883
|
refs/heads/master
| 2020-12-14T15:50:03.686396
| 2019-05-02T13:07:43
| 2019-05-02T13:07:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,064
|
py
|
# Django CircleCI example documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Django CircleCI example"
copyright = """2019, Timmy O'Mahony"""
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "django_circleci_exampledoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"django_circleci_example.tex",
"Django CircleCI example Documentation",
"""Timmy O'Mahony""",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"django_circleci_example",
"Django CircleCI example Documentation",
["""Timmy O'Mahony"""],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"django_circleci_example",
"Django CircleCI example Documentation",
"""Timmy O'Mahony""",
"Django CircleCI example",
"""A CircleCI Django example project""",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
[
"hey@timmyomahony.com"
] |
hey@timmyomahony.com
|
2ad628a29a18f6ef3219307d5349625a027c37a8
|
16856157985fcc8c06973c3fc552f05ec7b5c6e7
|
/core/migrations/0004_wordnormal_record.py
|
cc26f2bb72f36d2d785f686c6690578862b617fb
|
[] |
no_license
|
glader/ZeonSearch
|
c592e024540d153e12bcf1de1f73fe73bc8c179c
|
8132f1b9978d0af2e8b703ddd9b394d72a5424ed
|
refs/heads/master
| 2021-01-23T21:18:29.046316
| 2017-10-11T07:20:22
| 2017-10-11T07:20:22
| 102,892,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-08 16:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20170908_2134'),
]
operations = [
migrations.AddField(
model_name='wordnormal',
name='record',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='core.Record', verbose_name='\u0417\u0430\u043f\u0438\u0441\u044c'),
preserve_default=False,
),
]
|
[
"glader@yandex-team.ru"
] |
glader@yandex-team.ru
|
cd6ca5bdf2986ee0c718790d29620cf74d6edd35
|
2e0f9c2c31fb2d4a9b56ab841a3bba0f1a5b5b17
|
/trainval_net.py
|
15ca80be407d2c1be8d2ec669bdfc4d118f3a93b
|
[] |
no_license
|
LanyiWanggg/domain_adaption_caption
|
f78376bd2c09e50e111b9e8b0bfadf82a4795a36
|
98a7bd3cc82df0d1b70ddfc659fdd74ed625dde0
|
refs/heads/master
| 2023-05-30T22:38:51.703735
| 2021-06-08T20:15:29
| 2021-06-08T20:15:29
| 375,111,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,213
|
py
|
# --------------------------------------------------------
# Pytorch multi-GPU Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
from processCaption import Vocabulary
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data.sampler import Sampler
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.utils.net_utils import weights_normal_init, save_net, load_net, \
adjust_learning_rate, save_checkpoint, clip_gradient, get_lr_at_iter
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--dataset',
dest='dataset',
help='training dataset',
default='cityscape_multi',
type=str)
parser.add_argument('--net',
dest='net',
help='vgg16, res101',
default='vgg16',
type=str)
parser.add_argument('--start_epoch',
dest='start_epoch',
help='starting epoch',
default=1,
type=int)
parser.add_argument('--epochs',
dest='max_epochs',
help='number of epochs to train',
default=20,
type=int)
parser.add_argument('--disp_interval',
dest='disp_interval',
help='number of iterations to display',
default=100,
type=int)
parser.add_argument('--checkpoint_interval',
dest='checkpoint_interval',
help='number of iterations to display',
default=10000,
type=int)
parser.add_argument('--save_dir',
dest='save_dir',
help='directory to save models',
default="results",
type=str)
parser.add_argument('--nw',
dest='num_workers',
help='number of worker to load data',
default=0,
type=int)
parser.add_argument('--cuda',
dest='cuda',
help='whether use CUDA',
action='store_true',
default=True)
parser.add_argument('--ls',
dest='large_scale',
help='whether use large imag scale',
action='store_true')
parser.add_argument('--mGPUs',
dest='mGPUs',
help='whether use multiple GPUs',
action='store_true')
parser.add_argument('--bs',
dest='batch_size',
help='batch_size',
default=1,
type=int)
parser.add_argument('--cag',
dest='class_agnostic',
help='whether perform class_agnostic bbox regression',
action='store_true')
# config optimization
parser.add_argument('--o',
dest='optimizer',
help='training optimizer',
default="sgd",
type=str)
parser.add_argument('--lr',
dest='lr',
help='starting learning rate',
default=0.001,
type=float)
parser.add_argument('--lr_decay_step',
dest='lr_decay_step',
help='step to do learning rate decay, unit is epoch',
default=5,
type=int)
parser.add_argument('--lr_decay_gamma',
dest='lr_decay_gamma',
help='learning rate decay ratio',
default=0.1,
type=float)
# set training session
parser.add_argument('--s',
dest='session',
help='training session',
default=1,
type=int)
# resume trained model
parser.add_argument('--r',
dest='resume',
help='resume checkpoint or not',
default=False,
type=bool)
parser.add_argument('--checksession',
dest='checksession',
help='checksession to load model',
default=1,
type=int)
parser.add_argument('--checkepoch',
dest='checkepoch',
help='checkepoch to load model',
default=1,
type=int)
parser.add_argument('--checkpoint',
dest='checkpoint',
help='checkpoint to load model',
default=0,
type=int)
# log and diaplay
parser.add_argument('--use_tfb',
dest='use_tfboard',
help='whether use tensorboard',
action='store_true')
parser.add_argument('--warm_up',
dest='warm_up',
help='warm_up iters',
default=200,
type=int)
parser.add_argument('--save_model_dir',
dest='save_model_dir',
help='save_model_dir',
default="cityscape_multi",
type=str)
args = parser.parse_args()
return args
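# Illustrative invocation (not from the original repo docs), using the flags defined above:
#   python trainval_net.py --dataset cityscape_multi --net vgg16 --bs 1 --lr 0.001 --cuda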
class sampler(Sampler):
def __init__(self, train_size, batch_size):
self.num_data = train_size
self.num_per_batch = int(train_size / batch_size)
self.batch_size = batch_size
self.range = torch.arange(0, batch_size).view(1, batch_size).long()
self.leftover_flag = False
if train_size % batch_size:
self.leftover = torch.arange(self.num_per_batch * batch_size,
train_size).long()
self.leftover_flag = True
def __iter__(self):
rand_num = torch.randperm(self.num_per_batch).view(-1,
1) * self.batch_size
self.rand_num = rand_num.expand(self.num_per_batch,
self.batch_size) + self.range
self.rand_num_view = self.rand_num.view(-1)
if self.leftover_flag:
self.rand_num_view = torch.cat((self.rand_num_view, self.leftover),
0)
return iter(self.rand_num_view)
def __len__(self):
return self.num_data
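# Illustrative example of the index stream this sampler yields (train_size=10, batch_size=4):
# num_per_batch=2, so a permutation of the batch offsets {0, 4} plus the in-batch range gives
# e.g. [4, 5, 6, 7, 0, 1, 2, 3], and the leftover indices [8, 9] are appended so every sample
# is visited once per epoch while each batch stays a contiguous block.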
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.dataset == "pascal_voc":
args.imdb_name = "voc_2007_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = [
'ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]',
'MAX_NUM_GT_BOXES', '20'
]
elif args.dataset == "coco":
args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
args.imdbval_name = "coco_2014_minival"
args.set_cfgs = [
'ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]',
'MAX_NUM_GT_BOXES', '50'
]
elif args.dataset == "cityscape_multi":
args.imdb_name = "cityscape_multi_train"
args.imdbval_name = "cityscape_multi_val"
args.set_cfgs = [
'ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]',
'MAX_NUM_GT_BOXES', '20'
]
elif args.dataset == "foggy_cityscape_multi":
args.imdb_name = "foggy_cityscape_multi_train"
args.imdbval_name = "foggy_cityscape_multi_val"
args.set_cfgs = [
'ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]',
'MAX_NUM_GT_BOXES', '20'
]
args.cfg_file = "cfgs/{}_ls.yml".format(
args.net) if args.large_scale else "cfgs/{}.yml".format(args.net)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
np.random.seed(cfg.RNG_SEED)
#torch.backends.cudnn.benchmark = True
if torch.cuda.is_available() and not args.cuda:
print(
"WARNING: You have a CUDA device, so you should probably run with --cuda"
)
# train set
# -- Note: Use validation set and disable the flipped to enable faster loading.
cfg.TRAIN.USE_FLIPPED = True
cfg.USE_GPU_NMS = args.cuda
imdb, roidb, ratio_list, ratio_index, _ = combined_roidb(args.imdb_name)
train_size = len(roidb)
print('{:d} roidb entries'.format(len(roidb)))
output_dir = args.save_dir + "/" + args.net + "/" + args.save_model_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
sampler_batch = sampler(train_size, args.batch_size)
dataset = roibatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
imdb.num_classes, training=True)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=args.batch_size,
sampler=sampler_batch,
num_workers=args.num_workers)
# initilize the tensor holder here.
im_data = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
num_boxes = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
# ship to cuda
if args.cuda:
im_data = im_data.cuda()
im_info = im_info.cuda()
num_boxes = num_boxes.cuda()
gt_boxes = gt_boxes.cuda()
# make variable
im_data = Variable(im_data)
im_info = Variable(im_info)
num_boxes = Variable(num_boxes)
gt_boxes = Variable(gt_boxes)
if args.cuda:
cfg.CUDA = True
# initilize the network here.
if args.net == 'vgg16':
fasterRCNN = vgg16(imdb.classes,
pretrained=True,
class_agnostic=args.class_agnostic)
elif args.net == 'res101':
fasterRCNN = resnet(imdb.classes,
101,
pretrained=True,
class_agnostic=args.class_agnostic)
elif args.net == 'res50':
fasterRCNN = resnet(imdb.classes,
50,
pretrained=True,
class_agnostic=args.class_agnostic)
elif args.net == 'res152':
fasterRCNN = resnet(imdb.classes,
152,
pretrained=True,
class_agnostic=args.class_agnostic)
else:
print("network is not defined")
pdb.set_trace()
fasterRCNN.create_architecture()
lr = cfg.TRAIN.LEARNING_RATE
lr = args.lr
#tr_momentum = cfg.TRAIN.MOMENTUM
#tr_momentum = args.momentum
params = []
for key, value in dict(fasterRCNN.named_parameters()).items():
if value.requires_grad:
if 'bias' in key:
params += [{'params':[value],'lr':lr*(cfg.TRAIN.DOUBLE_BIAS + 1), \
'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
else:
params += [{
'params': [value],
'lr': lr,
'weight_decay': cfg.TRAIN.WEIGHT_DECAY
}]
if args.cuda:
fasterRCNN.cuda()
if args.optimizer == "adam":
lr = lr * 0.1
optimizer = torch.optim.Adam(params)
elif args.optimizer == "sgd":
optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
if args.resume:
load_name = os.path.join(
output_dir,
'faster_rcnn_{}_{}_{}.pth'.format(args.checksession,
args.checkepoch,
args.checkpoint))
print("loading checkpoint %s" % (load_name))
checkpoint = torch.load(load_name)
args.session = checkpoint['session']
args.start_epoch = checkpoint['epoch']
fasterRCNN.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr = optimizer.param_groups[0]['lr']
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print("loaded checkpoint %s" % (load_name))
if args.mGPUs:
fasterRCNN = nn.DataParallel(fasterRCNN)
iters_per_epoch = int(train_size / args.batch_size)
if args.use_tfboard:
from tensorboardX import SummaryWriter
logger = SummaryWriter("logs")
for epoch in range(args.start_epoch, args.max_epochs + 1):
# setting to train mode
fasterRCNN.train()
loss_temp = 0
start = time.time()
if epoch % (args.lr_decay_step + 1) == 0:
adjust_learning_rate(optimizer, args.lr_decay_gamma)
lr *= args.lr_decay_gamma
data_iter = iter(dataloader)
base_lr = lr
for step in range(iters_per_epoch):
if epoch == 1 and step <= args.warm_up:
lr = base_lr * get_lr_at_iter(step / args.warm_up)
else:
lr = base_lr
data = next(data_iter)
with torch.no_grad():
im_data.resize_(data[0].size()).copy_(data[0])
im_info.resize_(data[1].size()).copy_(data[1])
gt_boxes.resize_(data[2].size()).copy_(data[2])
num_boxes.resize_(data[3].size()).copy_(data[3])
fasterRCNN.zero_grad()
rois, cls_prob, bbox_pred, \
rpn_loss_cls, rpn_loss_box, \
RCNN_loss_cls, RCNN_loss_bbox, \
rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)
loss = rpn_loss_cls.mean() + rpn_loss_box.mean() \
+ RCNN_loss_cls.mean() + RCNN_loss_bbox.mean()
loss_temp += loss.item()
# backward
optimizer.zero_grad()
loss.backward()
if args.net == "vgg16":
clip_gradient(fasterRCNN, 10.)
optimizer.step()
if step % args.disp_interval == 0:
end = time.time()
if step > 0:
loss_temp /= (args.disp_interval + 1)
if args.mGPUs:
loss_rpn_cls = rpn_loss_cls.mean().item()
loss_rpn_box = rpn_loss_box.mean().item()
loss_rcnn_cls = RCNN_loss_cls.mean().item()
loss_rcnn_box = RCNN_loss_bbox.mean().item()
fg_cnt = torch.sum(rois_label.data.ne(0))
bg_cnt = rois_label.data.numel() - fg_cnt
else:
loss_rpn_cls = rpn_loss_cls.item()
loss_rpn_box = rpn_loss_box.item()
loss_rcnn_cls = RCNN_loss_cls.item()
loss_rcnn_box = RCNN_loss_bbox.item()
fg_cnt = torch.sum(rois_label.data.ne(0))
bg_cnt = rois_label.data.numel() - fg_cnt
print("[session %d][epoch %2d][iter %4d/%4d] loss: %.4f, lr: %.2e" \
% (args.session, epoch, step, iters_per_epoch, loss_temp, lr))
print("\t\t\tfg/bg=(%d/%d), time cost: %f" %
(fg_cnt, bg_cnt, end - start))
print("\t\t\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box %.4f" \
% (loss_rpn_cls, loss_rpn_box, loss_rcnn_cls, loss_rcnn_box))
if args.use_tfboard:
info = {
'loss': loss_temp,
'loss_rpn_cls': loss_rpn_cls,
'loss_rpn_box': loss_rpn_box,
'loss_rcnn_cls': loss_rcnn_cls,
'loss_rcnn_box': loss_rcnn_box
}
logger.add_scalars("logs_s_{}/losses".format(args.session),
info,
(epoch - 1) * iters_per_epoch + step)
loss_temp = 0
start = time.time()
save_name = os.path.join(
output_dir,
'faster_rcnn_{}_{}_{}.pth'.format(args.session, epoch, step))
save_checkpoint(
{
'session':
args.session,
'epoch':
epoch + 1,
'model':
fasterRCNN.module.state_dict()
if args.mGPUs else fasterRCNN.state_dict(),
'optimizer':
optimizer.state_dict(),
'pooling_mode':
cfg.POOLING_MODE,
'class_agnostic':
args.class_agnostic,
}, save_name)
print('save model: {}'.format(save_name))
if args.use_tfboard:
logger.close()
|
[
"law190@pitt.edu"
] |
law190@pitt.edu
|
e51fbe1d83fc18c0809ef65f62e0f7a148dfdd77
|
a672f984782a1fa328069363671c328da3e4d8bd
|
/weatherdaily/views.py
|
40d1077ff5095487d6bbd95bd1a79c2aceed8864
|
[] |
no_license
|
avs8/yourweather
|
396a0b093cbc9fc9b501eb979418e10eecfadf2b
|
2415769dad416c9fcf99d57cba93b455d30447fc
|
refs/heads/master
| 2021-08-29T08:11:16.664340
| 2017-12-13T14:15:44
| 2017-12-13T14:15:44
| 112,365,929
| 0
| 0
| null | 2017-12-11T14:35:38
| 2017-11-28T17:20:17
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 558
|
py
|
from django.shortcuts import render, render_to_response
from .forms import WeatherForm
from django.http import HttpResponse
from django.template import RequestContext
from .models import *
def index(request):
args = {}
if request.POST:
form = WeatherForm(request.POST)
if form.is_valid():
form.save()
return HttpResponse("Thanks for submitting your information!!")
else:
form = WeatherForm()
args = {}
args['form'] = form
return render(request, 'weatherdaily/index.html', args)
|
[
"ajitavsingh_8@yahoo.com"
] |
ajitavsingh_8@yahoo.com
|
22026862c4779187068f89cb47fe0e6b11a7c0f0
|
18a6b272d4c55b24d9c179ae1e58959674e53afe
|
/tf_rl/test/CartPole/CartPole_recording_test.py
|
b8d3760e15ebd4d0ca5d66f39bae6090d71f9a17
|
[
"MIT"
] |
permissive
|
Rowing0914/TF2_RL
|
6cce916f409b3d4ef2a5a40a0611908f20d08b2c
|
c1b7f9b376cbecf01deb17f76f8e761035ed336a
|
refs/heads/master
| 2022-12-10T09:58:57.456415
| 2021-05-23T02:43:21
| 2021-05-23T02:43:21
| 233,476,950
| 9
| 1
|
MIT
| 2022-12-08T07:02:42
| 2020-01-12T23:53:48
|
Python
|
UTF-8
|
Python
| false
| false
| 503
|
py
|
import gym
env = gym.make('CartPole-v0')
# Keep the Monitor wrapper; if its return value is discarded, no episodes are recorded.
env = gym.wrappers.Monitor(env, './tmp/cartpole-experiment-1', force=True, video_callable=lambda episode_id: True)
for i_episode in range(20):
observation = env.reset()
for t in range(100):
env.render()
print(observation)
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if done:
print("Episode finished after {} timesteps".format(t + 1))
break
env.close()
|
[
"kosakaboat@gmail.com"
] |
kosakaboat@gmail.com
|
bdb8cf15fdb8a61b89ec89976790c6752abf3dcf
|
2f78d6d5c92449acb0ade6f8e89ef40acc205f85
|
/DAA/knapsack.py
|
c0383d90a963ecd08b05bd356dc2cb0502308d1c
|
[] |
no_license
|
bhagyeshdholaria/mit_codes
|
e80a767de7841cdb1ca1f7487a69dc5b17236824
|
a1894b93bc749a43eaa8d7e49791346c033da2f8
|
refs/heads/master
| 2023-03-06T17:19:48.664264
| 2021-02-14T11:21:10
| 2021-02-14T11:21:10
| 231,590,299
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 972
|
py
|
def ksp(warr, parr, m):
    pw = []
    final = [0] * len(warr)
    p = 0
    for i in range(len(warr)):  # n
        pw.append(parr[i] / warr[i])  # profit/weight ratio, n
    while m != 0:  # n
        k = pw.index(max(pw))
        if pw[k] < 0:
            break  # every item is already taken; stop to avoid double-counting
        if warr[k] <= m:
            m -= warr[k]
            final[k] = 1
            p += parr[k]
            pw[k] = -1
        else:
            final[k] = m / warr[k]
            p += m * pw[k]
            m = 0
            pw[k] = -1
    print("\nmax profit:", p)
    print("Elements Selection:", final)
while 1:
try:
warr = list(map(int,input("Enter weights seperated by a space:").split()))
parr = list(map(int,input("Enter Profits seperated by a space:").split()))
if len(warr) != len(parr):
print("\n***Enter equal no of profits and weights***\n")
continue
m= int(input("Enter knapsack capacity:"))
except:
print("\n***Enter valid values***\n")
continue
ksp(warr,parr,m)
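# Worked example (illustrative): weights "10 20 30", profits "60 100 120", capacity 50
# -> ratios [6, 5, 4]; items 1 and 2 are taken whole, then 20/30 of item 3,
#    giving max profit 60 + 100 + 20*4 = 240 and selection [1, 1, 0.666...].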
|
[
"bhagyeshdholaria@gmail.com"
] |
bhagyeshdholaria@gmail.com
|
011ccef6c00ec639d7eee1281266fbff05e9f6b7
|
c5b72e3ea097c8647f8e393b35aa8df721c0cba0
|
/notification.py
|
6053c0a0a5f6e5cc8a81f99910a15670bf8e4e2c
|
[] |
no_license
|
CBEB/jd
|
6f88c9dc1d5b98ef3f3650adf410a638b5d1e6f4
|
99afaae5ca3010a53d38f8c84ec827dec0c24a26
|
refs/heads/main
| 2023-06-23T19:38:05.154281
| 2021-07-13T06:29:28
| 2021-07-13T06:29:28
| 345,933,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,438
|
py
|
import requests
import os
import smtplib
from email.mime.text import MIMEText
import time
#######################
# Notification service
#######################
# [0,1,2,3] 0: no notification  1: ServerChan (Server酱)  2: SMTP email  3: Bark
needYou2Know = 0
SCKEY = ''  # SCKEY for ServerChan
email_dict = {
    "sender": '',  # (1) sender: the address the mail is sent from
    "passWord": '',  # (2) passWord: the SMTP server authorization code
    "mail_host": 'smtp.qq.com',  # (3) mail_host: SMTP server address (QQ SMTP server here)
    "port": 465,  # (4) the QQ SMTP server port is 465 or 587
    "receiver": ''  # (5) receiver: the address the mail is sent to
}
BARK = ''  # Bark service token (see the Bark documentation)
##################################################################
def n0(a, b):
    """No-op, i.e. the notification service is disabled."""
    print(">>>>Notification service is not enabled")
    return
def send_email(subject, msg_content):
    """SMTP email service; reading from Secrets is not supported yet."""
    if not email_dict["sender"]:
        print("SMTP service is not configured!!\nPush cancelled")
        return
    print("SMTP service started")
    mail_host = email_dict["mail_host"]
    mail_user = email_dict["sender"]
    mail_pass = email_dict["passWord"]
    sender = mail_user
    receiver = email_dict["receiver"]
    message = MIMEText(msg_content, 'plain', 'utf-8')
    message['From'] = sender
    message['To'] = receiver
    message['Subject'] = subject
    try:
        smtpObj = smtplib.SMTP_SSL(mail_host, email_dict["port"])  # SSL-encrypted
        # smtpObj = smtplib.SMTP(mail_host, 25)  # plain-text send
        smtpObj.login(mail_user, mail_pass)
        print("Email login succeeded")
        smtpObj.sendmail(sender, receiver, message.as_string())
        smtpObj.quit()
        print("Email sent successfully")
    except smtplib.SMTPException as e:
        print("Error: unable to send email", e)
def serverJ(title, content):
    """ServerChan (Server酱) service"""
    sckey = SCKEY
    if "SCKEY" in os.environ:
        """
        Detect whether we are running from a GitHub Action; "SCKEY" must match
        the name of the secret configured in the repo's Secrets.
        """
        sckey = os.environ["SCKEY"]
    if not sckey:
        print("SCKEY for the ServerChan service is not set!!\nPush cancelled")
        return
    print("serverJ service started")
    data = {
        "text": title,
        "desp": content
    }
    response = requests.post(f"https://sc.ftqq.com/{sckey}.send", data=data)
    print(response.text)
def bark(title, content):
    """Bark service"""
    bark_token = BARK
    title = content.replace("#", "")
    content = content.replace("#", "")
    if "BARK" in os.environ:
        """
        Detect whether we are running from a GitHub Action; "BARK" must match
        the name of the secret configured in the repo's Secrets.
        """
        bark_token = os.environ["BARK"]
    if not bark_token:
        print("bark_token for the Bark service is not set!!\nPush cancelled")
        return
    response = requests.get(
        f"""https://api.day.app/{bark_token}/{title}/{content}""")
    print(response.text)
notify = [n0, serverJ, send_email, bark][needYou2Know]
if __name__ == "__main__":
    print("Notification service test")
    start = time.time()
    notify("JD_tools script notification service", "needYou2Know\nnotification service test")
    print("Elapsed: ", time.time()-start, "s")
|
[
"ggw@a.com"
] |
ggw@a.com
|
2cf4a770d57e959caa25911922f9826db78558dd
|
c309fa04fe4f3803a92c98cd7297ea98a8c47d71
|
/tencent/tencent/pipelines.py
|
52ce22659d23ff8c389648ae51f03824657e20e9
|
[] |
no_license
|
OoGod/tencentRecuitmentSpider
|
538b9544103692d6f33544c4e029065b53daa9f0
|
de8c086d9cca0a6aa8bd8e238ee0067b8333b930
|
refs/heads/master
| 2020-04-28T21:42:45.114679
| 2019-03-16T14:53:57
| 2019-03-16T14:53:57
| 175,591,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
class TencentPipeline(object):
def __init__(self):
self.filename = open("tencent.json", "w")
def process_item(self, item, spider):
text = json.dumps(dict(item), ensure_ascii = False) + ",\n"
self.filename.write(text)
return item
def close_spider(self, spider):
self.filename.close()
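# Illustrative activation (module path assumed from this project's layout), in settings.py:
# ITEM_PIPELINES = {'tencent.pipelines.TencentPipeline': 300}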
|
[
"1617605689@qq.com"
] |
1617605689@qq.com
|
2dae2c56c83c3067962ddff4ccc886190c2d0810
|
a0ccbe5aa927c05e2ac468963d133a2649431515
|
/realtors/admin.py
|
b6ff1fa78697c74f1120772e62461f381dfbd6ff
|
[] |
no_license
|
Salman85365/btre-RealEstate
|
f2e636562c80c619ebd1db47b963c62852185a7e
|
2a4bc163d01ea3927d7d40af3828fc59f36a0251
|
refs/heads/master
| 2023-04-14T09:01:33.306540
| 2021-04-28T13:24:28
| 2021-04-28T13:24:28
| 362,467,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
from django.contrib import admin
from .models import Realtor
class RealtorAdmin(admin.ModelAdmin):
list_display =('name','hire_date','email','phone')
list_display_links=('name','email','phone')
search_fields=('name','email')
admin.site.register(Realtor, RealtorAdmin)
# Register your models here.
|
[
"salman8585365@gmail.com"
] |
salman8585365@gmail.com
|
a6e7c6a73dfe263727c51c21d89a1afe7a338a7e
|
6071669daca2b70f0bd6067e4a54c2558ccafd0d
|
/project/site/__init__.py
|
c76991b8a23ee6169cacae9380ce599e16a53728
|
[] |
no_license
|
nymann/Accountant
|
3d4ee2a85daa2879a7505727c7a7a76eb2df2446
|
6d2c7260388575883e02c3f2a426536861b860c5
|
refs/heads/master
| 2021-10-09T21:48:16.315340
| 2019-01-03T19:36:08
| 2019-01-03T19:36:08
| 132,523,024
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
from flask import Blueprint
site = Blueprint(
'site',
__name__,
template_folder='templates',
static_folder='static',
)
from . import views
|
[
"s144833@student.dtu.dk"
] |
s144833@student.dtu.dk
|
06f6d45f1419ff49988efce532928e427ada4086
|
0607c5e5edf323c35fca70e9dd857cf17722b8e2
|
/rw_visual.py
|
75249fac44872174314f6da61fa0c0cc509aaaf3
|
[] |
no_license
|
paszo/data_visualization
|
68b31b07f9d2dad1213065c937d53b9b30756911
|
d3d19670585583ad4b918f047987597301efda3c
|
refs/heads/master
| 2020-05-05T09:52:06.942624
| 2019-04-19T17:45:35
| 2019-04-19T17:45:35
| 179,920,750
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
import matplotlib.pyplot as plt
from random_walk import RandomWalk
# Make a random walk and plot the points.
rw = RandomWalk(50000)
rw.fill_walk()
# Set the size of the plotting window.
plt.figure(dpi=128, figsize=(10, 6))
point_numbers = list(range(rw.num_points))
plt.scatter(rw.x_values, rw.y_values, c=point_numbers, cmap=plt.cm.Blues, edgecolor='none', s=1)
# Emphasize the first and last points.
plt.scatter(0, 0, c='green', edgecolors='none', s=100)
plt.scatter(rw.x_values[-1], rw.y_values[-1], c='red', edgecolors='none', s=100)
# Remove the axes (use the current axes; repeated plt.axes() calls create new axes).
ax = plt.gca()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
|
[
"paszo2008@gmail.com"
] |
paszo2008@gmail.com
|
093bd3bfd9cfc28adf2c304250b74955b9339704
|
5c68f2e003f371317c72d19aaaeec1ad41888d00
|
/blog/urls.py
|
30ae7f985a8772c3196b0419760bc14d8affcae7
|
[] |
no_license
|
kutritee/my-first-blog
|
75faf850e2abfac135fff0740e96d4145a08bc19
|
01b813282faf3e963dbb65f84e5b44da1590016f
|
refs/heads/master
| 2021-07-25T19:20:51.710020
| 2017-11-04T15:02:37
| 2017-11-04T15:02:37
| 109,483,886
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
from django.conf.urls import url
from . import views
from django.shortcuts import render, get_object_or_404
from .models import Post  # needed by the post_detail helper below
urlpatterns = [
url(r'^$', views.post_list, name='post_list'),
url(r'^post/(?P<pk>\d+)/$', views.post_detail, name='post_detail'),
]
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
return render(request, 'blog/post_detail.html', {'post': post})
|
[
"katri.raffi@gmail.com"
] |
katri.raffi@gmail.com
|
21fe54b94c5e5b3cd05505d1e5b489b734e9a820
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/setuptools/setuptools/_distutils/archive_util.pyi
|
38458fc0e00349e77ebf105fdf5a52e850bc9e25
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 548
|
pyi
|
def make_archive(
base_name: str,
format: str,
root_dir: str | None = ...,
base_dir: str | None = ...,
verbose: int = ...,
dry_run: int = ...,
owner: str | None = ...,
group: str | None = ...,
) -> str: ...
def make_tarball(
base_name: str,
base_dir: str,
compress: str | None = ...,
verbose: int = ...,
dry_run: int = ...,
owner: str | None = ...,
group: str | None = ...,
) -> str: ...
def make_zipfile(base_name: str, base_dir: str, verbose: int = ..., dry_run: int = ...) -> str: ...
|
[
"intellij-monorepo-bot-no-reply@jetbrains.com"
] |
intellij-monorepo-bot-no-reply@jetbrains.com
|
d8c5df127d6718475f170e98770c24e5d59e6fe1
|
08eead02cfa5bdfbd5d7733573c253254f64e1b2
|
/Collection/ms/01 Arrays/02_valid_alphaNumericPalindrome.py
|
019b3d18366840f6d8c52276c3adb4b09facc4c4
|
[
"MIT"
] |
permissive
|
vicchu/leetcode-101
|
4cd33d8658375889b831e1fa1ce7062fa5b322e6
|
8a9db22d98692d634a497ba76c7e9f792bb1f1bc
|
refs/heads/main
| 2023-06-20T04:39:48.666282
| 2021-07-15T16:03:34
| 2021-07-15T16:03:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
class Solution:
def checkRange(self, x):
if (x.lower() >= 'a' and x.lower() <= 'z') or (x.lower() >= '0' and x.lower() <= '9'):
return True
return False
def isPalindrome(self, s: str) -> bool:
# Make everything Lower & Filter non-alphaNumeric
alphaNumericStr = [x.lower() for x in s if self.checkRange(x)]
return alphaNumericStr == alphaNumericStr[::-1]
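# Illustrative usage:
#   Solution().isPalindrome("A man, a plan, a canal: Panama")  # -> True
#   Solution().isPalindrome("race a car")                      # -> False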
|
[
"kmanadkat@gmail.com"
] |
kmanadkat@gmail.com
|
150bdc2e80df90fdee9674431ee71b1d81186d77
|
d2be5705c55199f3d55c1c07b06dbe8fea26461e
|
/ATC/ATC001-C.py
|
c59cbf7cfe1e58c14aa342219b54f140c211f042
|
[] |
no_license
|
hsuetsugu/atc
|
6ad1a2f06e528c4f96bf7ebd2eb9ab33c49eeea7
|
ad2a4dcf392f66ad8bf0349c0bf8b8a3413567aa
|
refs/heads/master
| 2021-05-19T09:08:06.143811
| 2020-06-28T23:51:50
| 2020-06-28T23:51:50
| 251,620,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
# -*- coding: utf-8 -*-
# C - FFT
# https://atcoder.jp/contests/atc001/tasks/fft_c
'''
4
1 1
2 2
3 4
4 8
'''
N = int(input())
a = [0] * 2 * N
b = [0] * 2 * N
for i in range(N):
a[i], b[i] = map(int, input().split())
for k in range(1, 2*N+1):
sum_ = 0
if k == 1:
print(sum_)
continue
for idx_k in range(1, N+1):
sum_ = sum_ + a[idx_k - 1] * b[k - idx_k - 1]
print(sum_)
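# Note: despite the FFT title, this is the naive O(N^2) convolution c_k = sum_i a_i * b_(k-i).
# For the sample input above it should print, one value per line: 0 1 4 11 26 36 40 32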
|
[
"hsuetsugu@sglab.co.jp"
] |
hsuetsugu@sglab.co.jp
|
2699cb945ccb2374364ae003531ffc3b5fb90aa7
|
e7436666fd09e85a088d27ae741739d4c2425259
|
/mir/test_essentia.py
|
fa8f40d1ca219bfc96a3581dbf0e98198942999a
|
[
"CC0-1.0"
] |
permissive
|
faroit/midihack
|
ea647c3850c69012719b8f5486cf2a9613ea24ce
|
5ce927576c4967499e164d95262a8391173c23ab
|
refs/heads/master
| 2021-01-22T13:02:58.790799
| 2014-06-01T21:12:56
| 2014-06-01T21:12:56
| 19,882,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,122
|
py
|
from __future__ import division
#import music21
from music21 import *
import essentia.standard
import numpy as np
from scipy import interpolate  # used by stretch()
def stretch(data, bins, interpolation_type='linear'):
ip1d = interpolate.interp1d(np.arange(data.shape[0]),
data,
kind=interpolation_type)
return ip1d(np.linspace(0, data.shape[0] - 1, bins))
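# Illustrative example: stretch(np.array([0., 1., 2.]), 5) -> array([0., 0.5, 1., 1.5, 2.])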
def onsets2BPM(stream21):
    onsets = []
    for i in stream21.notes:
        onsets.append(i.offset)
    onsets = np.array(onsets)
    onsets = stretch(onsets, 44100)
    onsets = essentia.array(onsets)
    getTempo = essentia.standard.TempoTapDegara()
    return
def corrSequence( seq ):
myCorr = np.zeros([len(seq),1]);
for i in range(0,len(seq)):
myCorr[i] = np.sum(seq == np.roll(seq, i))
return myCorr/len(seq)
def findRepeat( seq ):
if len( seq ) == 1:
return -1
else:
# compute "string correlation"
corrSeqBool = corrSequence( seq );
# return index of loop start
loopStartIdx = corrSeqBool[1:].argmax() + 1;
# interpret "correlation" as score value
loopScore = corrSeqBool[loopStartIdx];
##print loopScore
thresh = 1;
if loopScore >= thresh:
loop = loopStartIdx;
else:
loop = -1
return loop
def analyzeStream( seq ):
""
pass
#s = converter.parse('./data/6.mid')
#s.plot('pianoroll')
s = converter.parse('./data/lessSimple2bars.mid')
#s = converter.parse('./data/verySimple2bars.mid')
chordList = [];
sChords = s.chordify()
sChordsFlat = sChords.flat
for myChord in sChordsFlat:
if "Chord" in myChord.classes:
if 2 < len( myChord ) < 6: #and ( myChord.isTriad() ):
chordList.append(myChord.pitchedCommonName)
numpyArray = np.array(chordList)
print findRepeat( numpyArray )
# print xChord.pitchClasses
# print xChord.intervalVector
#cc.show('text')
#s.show('text')
#print harmony.chordSymbolFigureFromChord(chord.Chord(['C3','E3','G3'])) #standard example
#print findRepeat()
|
[
"fabian-robert.stoeter@audiolabs-erlangen.de"
] |
fabian-robert.stoeter@audiolabs-erlangen.de
|
cece48f8cb5615e6431c63285a03c652e7424655
|
24e932fa3b14c8779dd36ba900c9344046e9ed46
|
/main.py
|
a27940edb010dbb11286ecbd1fa3ac3441918bb2
|
[] |
no_license
|
RavenCheng1120/VFX
|
c9a86b8d88b602bed45787d2414ebac888913e85
|
d08310599931a8803b575898698b43c732d863a9
|
refs/heads/main
| 2023-03-29T09:52:55.602270
| 2021-04-01T06:43:11
| 2021-04-01T06:43:11
| 350,090,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 858
|
py
|
import cv2
import numpy as np
import glob
from Robertson import Robertson
from PIL import Image
def readImages(folder):
imageFormat = ['png', 'JPG']
files = []
[files.extend(glob.glob(folder + '*.' + e)) for e in imageFormat]
imageList = [cv2.imread(file) for file in sorted(files)]
exposureTimeList = []
images = glob.glob(folder+"*.JPG")
for image in sorted(images):
with open(image, 'rb') as file:
tempImage = Image.open(file)
exifdata = tempImage.getexif()
# 0x829A: "ExposureTime"
data = exifdata.get(0x829A)
if isinstance(data, bytes):
data = data.decode()
dataValue = data[0] / data[1]
exposureTimeList.append(dataValue)
return imageList, exposureTimeList
if __name__ == '__main__':
sourceImages, ExposureTimes = readImages('SocialScienceLibrary/')
myhdrPic = Robertson().process(sourceImages, ExposureTimes)
|
[
"ravencheng66@gmail.com"
] |
ravencheng66@gmail.com
|
418b0f88b0eab98e3c05436c6fea4a9e02badb90
|
2d2a0651507d90b1e8e672b3f4074e7118c4d945
|
/AutoLiker/Trial.py
|
6e6635805eb2aa01b1c11607d7b336f583de74de
|
[] |
no_license
|
j105sahil/Youtube-Auto-Liker
|
42053b079a3546b7da81148c1eaaad8532d58af9
|
f410ce5bdf90af9b793e238e9380524abdb8741b
|
refs/heads/master
| 2020-03-22T21:43:50.213692
| 2018-09-06T07:24:03
| 2018-09-06T07:24:03
| 140,710,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 839
|
py
|
import openpyxl
import webbrowser,sys
import pyautogui
from time import sleep
import random
import subprocess
chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
workbook = openpyxl.load_workbook('trial.xlsx')
sheet = workbook.get_sheet_by_name('Sheet1')
i=1
while True:
if i%10==0:
sleep(10)
subprocess.call("taskkill /IM chrome.exe")
if sheet.cell(row = i , column = 1).value!=None:
a=random.randint(5,500)
b=random.randint(5,500)
webbrowser.get(chrome_path).open(sheet.cell(row = i , column = 1).value)
pyautogui.moveTo(a,b,duration=3)
#postion = pyautogui.locateCenterOnScreen('Capture.PNG')
#pyautogui.click(postion)
else:
break
sleep(5)
i +=1
subprocess.call("taskkill /IM chrome.exe")
|
[
"noreply@github.com"
] |
j105sahil.noreply@github.com
|
26556f32fd856b8732227ea6ddcc48bd711e6877
|
4412fd856cfbdfab98122b11ea01e447a76851b3
|
/rodentdb/querysets.py
|
063568ed8e0276d9a070a898fd61d93c93f1a13b
|
[] |
no_license
|
fchampalimaud/rodentdb
|
d8e8c0c7552de638d3a2fd57de287401997fdf3c
|
4a970c09da78f22a8c57d8ea98d29a569f531613
|
refs/heads/master
| 2021-06-18T02:05:19.200858
| 2019-09-17T18:09:57
| 2019-09-17T18:09:57
| 185,334,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
from django.db import models
from users.mixins import PyformsPermissionsMixin
# FIXME import this when users model is not present
# try:
# from users.mixins import PyformsPermissionsMixin
# except ImportError:
# PyformsPermissionsMixin = None
# # PyformsPermissionsMixin = object
class RodentQuerySet(PyformsPermissionsMixin, models.QuerySet):
...
|
[
"hugo.cachitas@research.fchampalimaud.org"
] |
hugo.cachitas@research.fchampalimaud.org
|
7b227f8b4fb8363cfc5ac52f09135c06f74aed51
|
0b5c517c147df3dc4d60a21277c3f29daa3fbfbb
|
/temp_20170919.py
|
1c4fe34099c009937a8c80764709d18bda0024bb
|
[] |
no_license
|
acercyc/V_REP_armDemo_stage_1
|
c91682010fd9d5017ccab1ee1f8c1f4b0090d9d8
|
f7b3b69f7375e5f7184c8d233f87c0cb0568d4f6
|
refs/heads/master
| 2021-05-16T09:32:00.452940
| 2017-09-22T05:48:01
| 2017-09-22T05:48:01
| 104,433,694
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 707
|
py
|
from acerlib.RemoteSession import findX11DisplayPort
findX11DisplayPort()
import importlib
from datetime import datetime
import os
import matplotlib.pyplot as plt
import numpy as np
from numpy import array as npa
from scipy import interpolate
from VrepPythonApi import vrep
from VrepPythonApi import vrep_ext
from armDemo import ArmDemo, waitSecs, ProxSensor_top
from armDemo_stage1_loadData import imgPreprocess
from acerlib import logging_ext, keras_ext
from keras.models import load_model
arm = ArmDemo('192.168.1.170', commThreadCycleInMs=1)
arm.restartSim()
for i in range(20):
arm.moveTo([0.8, 0.3, 0.25])
arm.waitToDistination()
arm.resetArmPosition()
arm.waitToDistination()
|
[
"acercyc@gmail.com"
] |
acercyc@gmail.com
|
59b0ce8038051b923da1779ad300057f96e36409
|
d90724db44810e51a0586ab6e379afb4f41ca9c9
|
/guess_number.py
|
40affe78bc1220a38afd81e50ae91064bd98ac9a
|
[] |
no_license
|
Rutvi-Patel/cecs-174
|
e31cbd31a261677f3f3125d6aaa55aa92d5f125e
|
6011871cede8eec64ef3f1dc442387f08d7a937f
|
refs/heads/master
| 2020-05-01T06:14:28.009489
| 2019-09-27T22:03:53
| 2019-09-27T22:03:53
| 177,325,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 950
|
py
|
import random
print("I am thinking of a number between 1 and 10.")
ask = "yes"
while ask == 'yes':
num = random.randint(1, 10)
for x in range (5):
use =input("Take a guess :")
while not use.isdigit() or not 0<int(use)<11:
print("Wrong value, re-enter:")
use =input("Take a guess :")
user = int(use)
while user>num or user<num:
if x == 4 and user != num:
print("You guessed wrong, the number I was thinking of was",num)
ask = str(input("Enter yes to try again else enter no to stop:"))
break
if user>num:
print("Your guess is too high.")
break
if user<num:
print("Your guess is too low.")
break
else:
print("Good job, you got it with",x+1,"guesses")
ask = "no"
break
|
[
"noreply@github.com"
] |
Rutvi-Patel.noreply@github.com
|
b75806926dcb3bbbf4251fca79a3bd28f9300dab
|
71b86fc54e811c3a06d6d2db32a65a212f642bac
|
/scripts/create_prod_optfreq_jobs.py
|
298897a5ec19bd62538151015a4d4e84b1ae396f
|
[
"MIT"
] |
permissive
|
yunsiechung/ard_gsm
|
c773f4454836b54f6bb788c8d038d60d628ffcf2
|
82826011b0edf7122e16063094b04ecd16bf1cf5
|
refs/heads/master
| 2021-05-20T12:01:32.981441
| 2020-01-20T17:08:31
| 2020-01-20T17:08:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,879
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import argparse
import glob
import os
import re
from ard_gsm.qchem import QChem
from ard_gsm.util import iter_sub_dirs, read_xyz_file
def main():
args = parse_args()
num_regex = re.compile(r'\d+')
maxnum = float('inf') if args.maxnum is None else args.maxnum
for gsm_sub_dir in iter_sub_dirs(args.gsm_dir, pattern=r'gsm\d+'):
gsm_num = int(num_regex.search(os.path.basename(gsm_sub_dir)).group(0))
if gsm_num > maxnum:
continue
out_dir = os.path.join(args.out_dir, os.path.basename(gsm_sub_dir))
if not os.path.exists(out_dir):
os.makedirs(out_dir)
elif not args.overwrite:
continue
qstart_file = os.path.join(gsm_sub_dir, 'qstart')
qtmp = QChem(logfile=qstart_file)
charge, multiplicity = qtmp.get_charge(), qtmp.get_multiplicity()
print(f'Extracting from {gsm_sub_dir}...')
for gsm_log in glob.iglob(os.path.join(gsm_sub_dir, 'gsm*.out')):
num = int(num_regex.search(os.path.basename(gsm_log)).group(0))
string_file = os.path.join(gsm_sub_dir, f'stringfile.xyz{num:04}')
if not (os.path.isfile(string_file) and os.path.getsize(string_file) > 0):
continue
if args.ignore_errors and has_error(gsm_log):
continue
if args.ignore_errors or is_successful(gsm_log):
# Optimize van-der-Waals wells instead of separated products
# Also check if product optimization during GSM failed
xyzs = read_xyz_file(string_file, with_energy=True)
last_energy = xyzs[-1][-1]
second_to_last_energy = xyzs[-2][-1]
if last_energy > second_to_last_energy: # Something went wrong in product optimization
continue
path = os.path.join(out_dir, f'prod_optfreq{num:04}.in')
q = QChem(config_file=args.config)
q.make_input_from_coords(path, *xyzs[-1][:-1], charge=charge, multiplicity=multiplicity, mem=args.mem)
def is_successful(gsm_log):
"""
Success is defined as having converged to a transition state.
"""
with open(gsm_log) as f:
for line in reversed(f.readlines()):
if '-XTS-' in line or '-TS-' in line:
return True
return False
def has_error(gsm_log):
"""
Check if last node is high in energy or if the path is dissociative.
"""
with open(gsm_log) as f:
for line in reversed(f.readlines()):
if 'high energy' in line and '-exit early-' in line:
return True
if 'terminating due to dissociation' in line:
return True
return False
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('gsm_dir', metavar='GSMDIR', help='Path to directory containing GSM folders')
parser.add_argument('out_dir', metavar='ODIR', help='Path to output directory')
parser.add_argument('--mem', type=int, metavar='MEM', help='Q-Chem memory')
parser.add_argument('--overwrite', action='store_true', help='Overwrite input files in existing directories')
parser.add_argument('--maxnum', type=int, metavar='NUM', help='Only make jobs from GSM folders up to this number')
parser.add_argument('--ignore_errors', action='store_true',
help='Extract from all GSM calculations ignoring (most) errors')
parser.add_argument(
'--config', metavar='FILE',
default=os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, 'config', 'qchem.opt_freq'),
help='Configuration file for product optfreq jobs in Q-Chem'
)
return parser.parse_args()
if __name__ == '__main__':
main()
|
[
"cgrambow@mit.edu"
] |
cgrambow@mit.edu
|
53932616f06d5bdf5c3873b0eccb0779b2212eb4
|
15229da763b74cccf53a62fac0e7f9282eef309e
|
/hardware/mdp/ltl-MDP-face.py
|
0f65a0f046a947469acd6901fb43fd82f4acf8ed
|
[] |
no_license
|
InspectorDidi/Neuromorphic_Hardware_learns_to_learn
|
214162bb8be5ac3cb590c57e3c06e1340453ada4
|
920858c172cdd85521646d0ab8ee077002aa1b17
|
refs/heads/master
| 2020-08-03T10:44:36.026398
| 2019-04-05T04:40:50
| 2019-04-05T04:40:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,258
|
py
|
import logging.config
import os
from pypet import Environment
from pypet import pypetconstants
import sys
sys.path.append('../LTL')
from ltl.optimizers.crossentropy.distribution import Gaussian
from ltl.optimizers.face.optimizer import FACEOptimizer, FACEParameters
from ltl.paths import Paths
from ltl.recorder import Recorder
import numpy as np
from ltl.logging_tools import create_shared_logger_data, configure_loggers
from NetworkMDP_Optimizee import DLSMDPOptimizee
logger = logging.getLogger('bin.ltl-fun-face')
def main():
name = 'LTL-MDP-FACE'
try:
with open('path.conf') as f:
root_dir_path = f.read().strip()
except FileNotFoundError:
raise FileNotFoundError(
"You have not set the root path to store your results."
" Write the path to a path.conf text file in the bin directory"
" before running the simulation"
)
paths = Paths(name, dict(run_no='test'), root_dir_path=root_dir_path)
print("All output logs can be found in directory ", paths.logs_path)
traj_file = os.path.join(paths.output_dir_path, 'data.h5')
# Create an environment that handles running our simulation
# This initializes a PyPet environment
env = Environment(trajectory=name, filename=traj_file, file_title=u'{} data'.format(name),
comment=u'{} data'.format(name),
add_time=True,
# freeze_input=True,
# multiproc=True,
# use_scoop=True,
wrap_mode=pypetconstants.WRAP_MODE_LOCK,
automatic_storing=True,
log_stdout=False, # Sends stdout to logs
log_folder=os.path.join(paths.output_dir_path, 'logs')
)
create_shared_logger_data(logger_names=['bin', 'optimizers'],
log_levels=['INFO', 'INFO'],
log_to_consoles=[True, True],
sim_name=name,
log_directory=paths.logs_path)
configure_loggers()
# Get the trajectory from the environment
traj = env.trajectory
optimizee = DLSMDPOptimizee(traj)
# NOTE: Outerloop optimizer initialization
# TODO: Change the optimizer to the appropriate Optimizer class
parameters = FACEParameters(min_pop_size=25, max_pop_size=25, n_elite=10, smoothing=0.2, temp_decay=0,
n_iteration=100,
distribution=Gaussian(), n_expand=5, stop_criterion=np.inf, seed=109)
optimizer = FACEOptimizer(traj, optimizee_create_individual=optimizee.create_individual,
                              optimizee_fitness_weights=(-1.,),  # a one-element tuple, not a bare float
parameters=parameters,
optimizee_bounding_func=optimizee.bounding_func)
# Add post processing
env.add_postprocessing(optimizer.post_process)
# Run the simulation with all parameter combinations
env.run(optimizee.simulate)
## Outerloop optimizer end
optimizer.end(traj)
# Finally disable logging and close all log-files
env.disable_logging()
if __name__ == '__main__':
main()
|
[
"franz.scherr@tugraz.at"
] |
franz.scherr@tugraz.at
|
e9077fa3fbda560e0da3e80119eca9188995a279
|
11abaae6662dfbbe5d323459fa93c505112a0739
|
/import_tiger_lm.py
|
d013b1ffeeffff1638eaf618c70fc5297e1fee47
|
[] |
no_license
|
Web5design/simplegeo-bulk-import
|
c6d71ff372ac7d1f1201308440b1d081053616d7
|
a514f5b36ad095a90f8266d2df3f11461851566c
|
refs/heads/master
| 2021-01-18T17:34:02.486434
| 2010-09-01T21:21:57
| 2010-09-01T21:21:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
from bulk_import import create_client, add_records
import sys
def skip_unnamed_landmarks(id, point, attrs):
if not attrs["fullname"]: return None
return attrs["pointid"], point, attrs
client = create_client()
for input_file in sys.argv[1:]:
add_records(client, "net.nocat.tigerlm", input_file, skip_unnamed_landmarks)
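# Filtering sketch (attribute values below are made up for illustration):
# records without a "fullname" are dropped, everything else is re-keyed by
# "pointid" before being handed to add_records, e.g.
#   skip_unnamed_landmarks("1", (-122.4, 37.8), {"fullname": "", "pointid": "P1"})
#       -> None (record skipped)
#   skip_unnamed_landmarks("2", (-122.4, 37.8), {"fullname": "Pier 39", "pointid": "P2"})
#       -> ("P2", (-122.4, 37.8), {"fullname": "Pier 39", "pointid": "P2"})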
|
[
"schuyler@simplegeo.com"
] |
schuyler@simplegeo.com
|
5a850bb6a63234b950eedb091013eaf3870c052c
|
88237dd1932c346acade7001f22d0fc7190da55c
|
/torch/testing/_internal/distributed/rpc/rpc_test.py
|
a149c541a090e422aa02b0beb16eeb237c85ba35
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
kkleidal/pytorch
|
4d7e95d0035bb03509dce6624e857c118279d08d
|
815d38395a5081adef6ecec3372474f6f41b2a7c
|
refs/heads/master
| 2023-02-06T11:00:27.985596
| 2020-12-17T17:30:02
| 2020-12-17T17:32:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178,866
|
py
|
import concurrent.futures
import contextlib
import json
import logging
import os
import sys
from threading import Lock
import time
import unittest
from collections import namedtuple
from functools import partial
from unittest import mock
import torch
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torch.distributed.autograd as dist_autograd
from torch.distributed.rpc import RRef, _get_debug_info, _rref_context_get_debug_info
from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs, _use_rpc_pickler, _thread_local_var, _wait_all
from torch.distributed.rpc.internal import (
PythonUDF,
RPCExecMode,
_internal_rpc_pickler,
_build_rpc_profiling_key,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu, captured_output
from torch.testing._internal.common_utils import IS_MACOS, load_tests
from torch.testing._internal.dist_utils import (
dist_init,
get_function_event,
initialize_pg,
wait_until_node_failure,
wait_until_pending_futures_and_users_flushed,
wait_until_owners_and_forks_on_rank,
worker_name,
single_threaded_process_group_agent,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_utils import TemporaryFileName
def foo_add():
return torch.add(torch.ones(1), torch.ones(1))
def udf_with_torch_ops(device=-1, use_record_function=False):
device_ctx = contextlib.suppress() if device == -1 else torch.cuda.device(device)
record_function_ctx = (
torch.autograd.profiler.record_function("##forward##")
if use_record_function
else contextlib.suppress()
)
with device_ctx, record_function_ctx:
t1, t2 = torch.ones(1), torch.ones(1)
t = torch.add(t1, t2)
t = torch.mul(t, t)
t = t.relu()
t = t.sigmoid()
# Events (operator invocations) that are expected to run as part of the above
# function.
EXPECTED_REMOTE_EVENTS = [
"aten::ones",
"aten::ones",
"aten::add",
"aten::mul",
"aten::relu",
"aten::threshold",
"aten::sigmoid",
]
# Remote operations are prefixed with the following string for RPC profiling.
REMOTE_OP_STR = "#remote_op: "
VALUE_FUTURE = concurrent.futures.Future()
DONE_FUTURE = concurrent.futures.Future()
class StubRpcAgent:
def __init__(self, world_size):
self.world_size = world_size
def get_worker_infos(self):
return {
rpc.WorkerInfo(name=worker_name(rank), id=rank)
for rank in range(self.world_size)
}
def _stub_construct_rpc_backend_options_handler(**kwargs):
return mock.Mock() # RpcBackendOptions.
def _stub_init_rpc_backend_handler(store, name, rank, world_size, rpc_backend_options):
return StubRpcAgent(world_size=world_size)
def set_value(value):
VALUE_FUTURE.set_result(value)
def wait_for_value_future():
return VALUE_FUTURE.result()
def set_and_check_done(value):
VALUE_FUTURE.set_result(value)
return DONE_FUTURE.result()
# The classes and functions below are used to test Python user-defined
# functions, classes, and methods over RPC.
TensorClass = namedtuple("TensorClass", ["tensors"])
class MyPickleClass:
def __init__(self):
self.t = None
def __getstate__(self):
(pickled_python_udf, tensors) = _internal_rpc_pickler.serialize(
PythonUDF(my_tensor_function, (torch.ones(2, 2), torch.ones(2, 2)), None)
)
return (pickled_python_udf, tensors)
def __setstate__(self, obj):
python_udf = _internal_rpc_pickler.deserialize(obj[0], obj[1])
result = python_udf.func(python_udf.args[0], python_udf.args[1])
self.t = result
def set(self, val):
self.t = val
class SlowPickleClass:
def __init__(self, t):
self.t = t
def __getstate__(self):
time.sleep(self.t)
return (self.t, )
def __setstate__(self, obj):
self.t = obj[0]
time.sleep(self.t)
class MyClass:
def __init__(self, a):
self.a = a
def my_instance_method(self, b):
return self.a + b
@classmethod
def my_class_method(cls, d, e):
return d + e
@staticmethod
def my_static_method(f):
return f > 10
def increment_value(self, increment):
self.a += increment
def get_value(self):
return self.a
def _call_method_on_rref(method, rref, *args, **kwargs):
return method(rref.local_value(), *args, **kwargs)
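# Usage sketch for the helper above (worker name and arguments are illustrative
# only): run the unbound method on the RRef's owner so local_value() is valid.
#   rref = rpc.remote("worker1", MyClass, args=(0,))
#   rpc.rpc_sync(rref.owner(), _call_method_on_rref,
#                args=(MyClass.increment_value, rref, 1))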
def get_rref_list(values):
return [RRef(MyClass(a)) for a in values]
def add_rref_to_value(rref, value):
return rref.to_here() + value
def run_nested_pickle(pickle_cls_instance, tensor):
return pickle_cls_instance.t + tensor
def build_complex_tensors():
a = torch.ones(3, 3)
b = [a, a]
c = [b, b]
d = [a, b]
e = {a: d}
return [a, b, c, d, e]
def non_cont_test(t_view, t_cont):
if t_view.is_contiguous():
raise Exception('t_view is contiguous!')
if not t_cont.is_contiguous():
raise Exception('t_cont is not contiguous!')
if not torch.equal(t_view, t_cont):
raise Exception('t_view is not equal to t_cont!')
return t_view
def my_function(a, b, c):
return a + b + c
def my_tensor_function(a, b):
return a + b
def my_sleep_func(seconds=1):
time.sleep(seconds)
return torch.mul(torch.tensor(1), torch.tensor(1))
def my_complex_tensor_function(list_input, tensor_class_input, dict_input):
res = list_input[0]
for t in list_input:
res += t
for k, v in dict_input.items():
res += v
complex_tensors = tensor_class_input.tensors
return (res, complex_tensors[0], complex_tensors[1], complex_tensors[2])
def my_rref_function(rref_a, rref_b):
return rref_a.to_here() + rref_b.to_here()
def delayed_add(a, b, seconds=0.05):
time.sleep(seconds)
return a + b
def no_result():
print("do nothing")
def raise_or_inc(value):
if value.numel() == 2:
raise ValueError("Expected error")
return value + 1
def nested_rpc(dst):
return rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
def multi_layer_nested_async_rpc(dst, world_size, ttl):
# this method returns immediately without blocking the callee, but will
# generate additional requests.
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
rpc.rpc_async(
current_dst,
multi_layer_nested_async_rpc,
args=(next_dst, world_size, ttl - 1),
)
return 0
def nested_rref(dst):
return (
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)),
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)),
)
def nested_remote(dst):
rref = rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 3))
return rref.to_here()
def rref_forward_chain(dst, world_size, rref, ttl):
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
ret_rref = rpc.remote(
current_dst, rref_forward_chain, args=(next_dst, world_size, rref, ttl - 1)
)
return [ret_rref]
else:
return rref.to_here()
def rpc_return_rref(dst):
return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
def light_rpc():
return 0
def heavy_rpc(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
@torch.jit.script
def heavy_rpc_torchscript(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
@torch.jit.script
def my_script_func(tensor):
return torch.add(tensor, tensor)
expected_err = "Expected error"
def raise_func():
raise ValueError(expected_err)
expected_err_escape = "\nFirst line of error \n next line of error \n last line of error"
def raise_func_escape():
raise ValueError(expected_err_escape)
global_rref = None
def set_global_rref(rref):
global global_rref
global_rref = rref
def clear_global_rref():
global global_rref
global_rref = None
def check_rref_confirmed(rref):
return rref.confirmed_by_owner()
def get_rref_debug_info():
return _rref_context_get_debug_info()
def add_use_future_cb(to, x, y, z):
out = concurrent.futures.Future()
def callback(fut):
out.set_result(fut.wait() + z)
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(callback)
return out.result()
def get_events_from_profile(profile_rref):
return profile_rref.local_value().process_global_function_events
def add_use_future_set_result(to, x, y, z):
out = torch.futures.Future()
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(lambda fut : out.set_result(fut.wait() + z))
return out.wait()
def add_use_future_nested_cb(to, x, y, z):
out = torch.futures.Future()
def callback(fut1):
fut2 = rpc.rpc_async(to, torch.add, args=(fut1.wait(), z))
fut2.then(lambda fut2 : out.set_result(fut2.wait()))
fut1 = rpc.rpc_async(to, torch.add, args=(x, y))
fut1.then(callback)
return out.wait()
def fail_on_fut(fut):
pass
@rpc.functions.async_execution
def async_raise_func():
raise RuntimeError("Expected error")
@rpc.functions.async_execution
def async_wrong_type():
return torch.zeros(2, 2)
@rpc.functions.async_execution
def async_add(to, x, y):
return rpc.rpc_async(to, torch.add, args=(x, y))
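# Note on @rpc.functions.async_execution (summary, not additional test logic):
# the decorated function returns a torch.futures.Future, and the remote caller
# receives that future's eventual value as the RPC result instead of the
# Future object itself; async_add above relies on exactly that behavior.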
def slow_add(x, y, device="cpu"):
time.sleep(1)
x = x.to(device)
y = y.to(device)
return torch.add(x, y).cpu()
@rpc.functions.async_execution
def slow_async_add(to, x, y, device="cpu"):
return rpc.rpc_async(to, slow_add, args=(x, y, device))
@rpc.functions.async_execution
def async_add_with_future_ctor(to, x, y, z):
fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut1: fut.set_result(fut1.wait() + z)
)
return fut
@rpc.functions.async_execution
def async_add_chained(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@rpc.functions.async_execution
def async_add_chained_multi(to, x, num, step):
fut = rpc.rpc_async(to, torch.add, args=(x, 0))
for _ in range(num):
fut = fut.then(lambda fut: fut.wait() + step)
return fut
@rpc.functions.async_execution
def async_add_nested(to, x, y, z):
return rpc.rpc_async(to, async_add, args=(to, x, y)).then(
lambda fut: fut.wait() + z
)
@rpc.functions.async_execution
def async_add_multi_fanout(to, x, num, step):
futs = []
for i in range(num):
if i == 0:
futs.append(rpc.rpc_async(to, torch.add, args=(x, step)))
else:
futs.append(rpc.rpc_async(to, torch.add, args=(0, step)))
# TODO: use torch.futures.collect_all (see the sketch after this function)
lock = Lock()
state = {"cnt": 0, "ret": torch.zeros_like(x)}
ret_future = torch.futures.Future()
def inc_and_set(fut):
with lock:
state["cnt"] += 1
state["ret"] += fut.wait()
if state["cnt"] >= len(futs):
ret_future.set_result(state["ret"])
for fut in futs:
fut.then(inc_and_set)
return ret_future
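# A collect_all-based variant of the fan-out above (a sketch of the TODO, with
# semantics assumed equivalent to the manual lock-and-counter bookkeeping):
#   return torch.futures.collect_all(futs).then(
#       lambda all_done: sum(f.wait() for f in all_done.wait())
#   )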
class AsyncExecutionClass:
@staticmethod
@rpc.functions.async_execution
def static_async_add(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@classmethod
@rpc.functions.async_execution
def class_async_add(cls, to, x, y, z):
ret_fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: ret_fut.set_result(fut.wait() + z)
)
return ret_fut
@rpc.functions.async_execution
def bound_async_add(self, to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
def return_future():
return torch.futures.Future()
class FooBackendOptions(rpc.RpcBackendOptions):
def __init__(self, init_method):
# Must call the __init__ of the superclass (and do so directly,
# without using super()) because... pybind.
rpc.RpcBackendOptions.__init__(self)
self.init_method = init_method
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
class RpcTest(RpcAgentTestFixture):
@dist_init
def test_worker_id(self):
n = self.rank + 1
peer_rank = n % self.world_size
self_worker_info = rpc.get_worker_info()
peer_worker_info = rpc.get_worker_info(worker_name(peer_rank))
self.assertEqual(self_worker_info.name, worker_name(self.rank))
self.assertEqual(peer_worker_info.name, worker_name(peer_rank))
with self.assertRaisesRegex(RuntimeError, "Unknown destination worker"):
unknown_worker_id = rpc.get_worker_info("WorkerUnknown")
@dist_init
def test_get_worker_infos(self):
worker_infos = rpc.api._get_current_rpc_agent().get_worker_infos()
worker_names = {worker_info.name for worker_info in worker_infos}
expected_worker_names = {
worker_name(rank) for rank in range(self.world_size)
}
self.assertEqual(worker_names, expected_worker_names)
worker_ids = {worker_info.id for worker_info in worker_infos}
expected_worker_ids = set(range(self.world_size))
self.assertEqual(worker_ids, expected_worker_ids)
@dist_init
def test_self_add(self):
self_worker_info = rpc.get_worker_info()
self_worker_name = worker_name(self.rank)
fut = rpc.rpc_async(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
ret = rpc.rpc_sync(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertEqual(ret, torch.ones(2, 2) + 1)
@dist_init
def test_send_to_rank(self):
dst_rank = (self.rank + 1) % self.world_size
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test invalid ranks
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(self.world_size + 1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(-1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank + 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank - 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
@dist_init
def test_self_py_udf_remote(self):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1 + 3)
def _test_self_remote_rref_as_rpc_arg(self, dst):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
fut = rpc.rpc_async(dst, add_rref_to_value, args=(rref, torch.ones(2, 2)))
ret = rpc.rpc_sync(dst, add_rref_to_value, args=(rref, torch.ones(2, 2) + 1))
self.assertEqual(ret, torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2) + 1)
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2))
@dist_init
def test_self_remote_rref_as_rpc_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_self_remote_rref_as_rpc_arg(dst)
@dist_init
def test_self_remote_rref_as_self_rpc_arg(self):
self._test_self_remote_rref_as_rpc_arg(rpc.get_worker_info())
def _test_self_remote_rref_as_remote_arg(self, dst):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
ret_rref = rpc.remote(dst, add_rref_to_value, args=(rref, torch.ones(2, 2)))
self.assertEqual(
ret_rref.to_here(), torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2)
)
@dist_init
def test_self_remote_rref_as_remote_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_self_remote_rref_as_remote_arg(dst)
@dist_init
def test_rref_proxy_non_exist(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
msg = "has no attribute \'non_exist\'"
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_sync().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_async().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.remote().non_exist()
def _test_rref_proxy_tensor(self, dst):
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
expected = torch.ones(2, 2) + 1 + 3
self.assertEqual(expected.size(), rref.rpc_sync().size())
self.assertEqual(expected + 1, rref.rpc_async().add(1).wait())
self.assertEqual(expected.view(1, 4), rref.remote().view(1, 4).to_here())
@dist_init
def test_rref_proxy_tensor(self):
self._test_rref_proxy_tensor(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_tensor_self(self):
self._test_rref_proxy_tensor(rpc.get_worker_info())
@dist_init
def test_rref_proxy_reuse(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
my_function,
args=(torch.ones(2, 2), 1, 3)
)
expected = torch.ones(2, 2) + 1 + 3
proxy_rpc_sync = rref.rpc_sync()
proxy_rpc_async = rref.rpc_async()
proxy_remote = rref.remote()
self.assertEqual(expected.size(), proxy_rpc_sync.size())
self.assertEqual(expected + 1, proxy_rpc_sync.add(1))
self.assertEqual(expected.view(1, 4), proxy_rpc_sync.view(1, 4))
self.assertEqual(expected.size(), proxy_rpc_async.size().wait())
self.assertEqual(expected + 3, proxy_rpc_async.add(3).wait())
self.assertEqual(expected.view(4, 1), proxy_rpc_async.view(4, 1).wait())
self.assertEqual(expected.size(), proxy_remote.size().to_here())
self.assertEqual(expected + 5, proxy_remote.add(5).to_here())
self.assertEqual(expected.view(-1), proxy_remote.view(-1).to_here())
def _test_rref_proxy_class(self, dst):
rref = rpc.remote(dst, MyClass, args=(7,))
expected = MyClass(7)
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
expected.increment_value(3)
self.assertEqual(None, rref.rpc_sync().increment_value(1))
self.assertEqual(None, rref.rpc_async().increment_value(1).wait())
self.assertEqual(None, rref.remote().increment_value(1).to_here())
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
self.assertEqual(
expected.my_instance_method(2),
rref.rpc_sync().my_instance_method(2)
)
self.assertEqual(
expected.my_instance_method(3),
rref.rpc_async().my_instance_method(3).wait()
)
self.assertEqual(
expected.my_instance_method(4),
rref.remote().my_instance_method(4).to_here()
)
self.assertEqual(
expected.my_static_method(9),
rref.rpc_sync().my_static_method(9)
)
self.assertEqual(
expected.my_static_method(10),
rref.rpc_async().my_static_method(10).wait()
)
self.assertEqual(
expected.my_static_method(11),
rref.remote().my_static_method(11).to_here()
)
self.assertEqual(
expected.my_class_method(2, torch.zeros(2, 2)),
rref.rpc_sync().my_class_method(2, torch.zeros(2, 2))
)
self.assertEqual(
expected.my_class_method(2, torch.ones(3, 3)),
rref.rpc_async().my_class_method(2, torch.ones(3, 3)).wait()
)
self.assertEqual(
expected.my_class_method(2, torch.ones(4, 4)),
rref.remote().my_class_method(2, torch.ones(4, 4)).to_here()
)
@dist_init
def test_rref_proxy_class(self):
self._test_rref_proxy_class(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_class_self(self):
self._test_rref_proxy_class(rpc.get_worker_info())
@dist_init
def test_self_remote_rref_as_self_remote_arg(self):
self._test_self_remote_rref_as_remote_arg(rpc.get_worker_info())
@mock.patch.object(torch.distributed.autograd, "_init")
@mock.patch.object(torch.distributed.rpc.api, "_set_and_start_rpc_agent")
@dist_init(setup_rpc=False)
def test_register_rpc_backend_and_set_and_start_rpc_backend(
self, mock_rpc_agent, mock_dist_autograd_init
):
backend_name = "stub_backend"
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
with self.assertRaisesRegex(
RuntimeError, "^RPC backend .+: already registered$"
):
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
rpc.init_rpc(
name="worker1",
backend=backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
store, _, _ = next(
torch.distributed.rendezvous(
self.init_method, rank=self.rank, world_size=self.world_size
)
)
rpc._init_rpc_backend(
backend=self.rpc_backend,
store=store,
name="duplicate_name",
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_reinit(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
# TODO: with TCP init, rank 0 raises Address already in use because
# rank 0 is the start daemon and the store is created before checking if
# RPC is already initialized in init_rpc.
if os.environ.get("RPC_INIT_WITH_TCP", None) == "1" and self.rank == 0:
expected_reinit_err = "Address already in use"
else:
expected_reinit_err = "is already initialized"
with self.assertRaisesRegex(RuntimeError, expected_reinit_err):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
def test_world_size_one(self):
if self.rank == 0:
rpc.init_rpc(
name="me",
backend=self.rpc_backend,
rank=0,
world_size=1,
rpc_backend_options=self.rpc_backend_options,
)
expect = torch.ones(2, 2) * 2
result = rpc.rpc_sync(
"me",
my_tensor_function,
args=(torch.ones(2, 2), torch.ones(2, 2))
)
self.assertEqual(expect, result)
expect = torch.ones(3, 3) * 2
result = rpc.rpc_async(
"me",
my_tensor_function,
args=(torch.ones(3, 3), torch.ones(3, 3))
).wait()
self.assertEqual(expect, result)
expect = torch.ones(4, 4) * 2
result = rpc.remote(
"me",
my_tensor_function,
args=(torch.ones(4, 4), torch.ones(4, 4))
).to_here()
self.assertEqual(expect, result)
rpc.shutdown()
@dist_init(setup_rpc=False)
def test_invalid_names(self):
from torch.distributed.rpc import WorkerInfo
worker_id = 0
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo("abc*", worker_id)
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo(" ", worker_id)
with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
info = WorkerInfo("", worker_id)
# If the number in the message does not match, it is likely that the
# value of MAX_NAME_LEN in RPC WorkerInfo has changed.
with self.assertRaisesRegex(RuntimeError, "shorter than 128"):
info = WorkerInfo("".join(["a" for i in range(500)]), worker_id)
@dist_init
def test_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_add_with_id(self):
n = self.rank + 1
dst_rank = n % self.world_size
worker_info = rpc.get_worker_info(worker_name(dst_rank))
ret = rpc.rpc_sync(
worker_info, torch.add, args=(torch.ones(n, n), torch.ones(n, n))
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_scalar_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), n)
)
self.assertEqual(ret, (torch.ones(n, n) + n))
@dist_init
def test_async_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_nonzero(self):
n = self.rank + 1
dst_rank = n % self.world_size
x = torch.ones(self.world_size, self.world_size)
x[self.rank][self.rank] = 0
ret = rpc.rpc_sync(worker_name(dst_rank), torch.nonzero, args=(x,))
self.assertEqual(ret, x.nonzero())
@dist_init
def test_multi_rpc(self):
dst_rank = (self.rank + 1) % self.world_size
for i in range(20):
n = i + self.rank + 1
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
def _run_uneven_workload(self, num_repeat=30):
# worker0 drives and waits for worker1 and worker2
# throughout the test.
if self.rank == 0:
self.assertTrue(self.world_size >= 3)
# Phase 1: Only worker1 has workload.
dst = "worker1"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, heavy_rpc, args=(torch.ones(100, 100),))
futs.append(fut)
for fut in torch.futures.collect_all(futs).wait():
self.assertEqual(fut.wait(), 0)
# Phase 2: Only worker2 has workload.
# If join is not correctly implemented,
# worker2 should be closed by now.
dst = "worker2"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, heavy_rpc, args=(torch.ones(100, 100),))
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
def test_wait_all_workers(self):
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload()
# worker0 calls this at the end after waiting for RPC responses.
# worker1/2 calls this immediately and has some work after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
rpc.shutdown(graceful=False)
def test_wait_all_workers_twice(self):
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload()
# worker0 calls this at the end after waiting for RPC responses.
# worker1/2 calls this immediately and has some work after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
rpc.api._wait_all_workers()
rpc.shutdown(graceful=False)
@dist_init
def test_all_gather(self):
info = rpc.get_worker_info()
results = rpc.api._all_gather(info.id)
expected = {}
for info in rpc._get_current_rpc_agent().get_worker_infos():
expected[info.name] = info.id
self.assertEqual(expected, results)
@dist_init
def test_all_gather_timeout(self):
rpc._set_rpc_timeout(0.1)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError,
"timed out in _all_gather after 0\\.10 seconds"
):
rpc.api._all_gather(SlowPickleClass(0.5))
else:
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.api._all_gather(SlowPickleClass(0.5))
@dist_init
def test_graceful_shutdown_with_uneven_workload(self):
"""Test graceful termination."""
self._run_uneven_workload()
@dist_init(setup_rpc=False)
def test_shutdown_followed_by_rpc(self):
# Initialize RPC.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
rpc.shutdown()
with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"):
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
@dist_init
def test_expected_src(self):
dst_rank = (self.rank + 1) % self.world_size
expected_src_rank = (self.rank - 1) % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,))
value = VALUE_FUTURE.result()
self.assertEqual(value, expected_src_rank)
@dist_init
def test_py_built_in(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), min, args=(n, n + 1, n + 2))
self.assertEqual(ret, min(n, n + 1, n + 2))
@dist_init
def test_py_user_defined(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(ret, my_function(n, n + 1, n + 2))
def test_build_rpc_profiling_key(self):
# Tests that the name that shows up as an Event in profiling RPCs has all
# the necessary information.
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
rpc_profiling_key = _build_rpc_profiling_key(
exec_mode, "foo", "worker0", "worker1"
)
self.assertIn(exec_mode.value, rpc_profiling_key)
self.assertIn("foo", rpc_profiling_key)
self.assertIn("worker0", rpc_profiling_key)
self.assertIn("worker1", rpc_profiling_key)
def check_profiling_info(self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode):
self.assertTrue(self_worker_name in rpc_event.name)
self.assertTrue(dst_worker_name in rpc_event.name)
if isinstance(func, torch.jit.ScriptFunction):
self.assertTrue(torch._jit_internal._qualified_name(func) in rpc_event.name)
else:
self.assertTrue(func.__name__ in rpc_event.name)
self.assertTrue(rpc_exec_mode.value in rpc_event.name)
self.assertEqual(rpc_event.count, 1)
@dist_init
def test_profiler_rpc_record_shapes(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
t1, t2 = torch.ones(100), torch.ones(100)
with torch.autograd.profiler.profile(record_shapes=True) as prof:
rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2))
function_events = prof.function_events
remote_events = [event for event in function_events if event.is_remote]
remote_add_event = [
event for event in remote_events if "aten::add" in event.name
][0]
remote_add_input_shapes = remote_add_event.input_shapes
# Run profiler on equivalent local op and validate shapes are the same.
with torch.autograd.profiler.profile(record_shapes=True) as prof:
torch.add(t1, t2)
local_function_events = prof.function_events
local_add_event = [
event for event in local_function_events if "aten::add" in event.name
][0]
local_add_input_shapes = local_add_event.input_shapes
self.assertEqual(remote_add_input_shapes, local_add_input_shapes)
@dist_init
def test_profiler_rpc_memory(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with torch.autograd.profiler.profile(profile_memory=True) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
# if cpu_memory_usage was not propagated over the wire, this set would
# only contain 0 (indicates no memory being profiled)
self.assertNotEqual({0}, event_cpu_mem_usages)
# No memory profiled if profile_memory=False
with torch.autograd.profiler.profile(profile_memory=False) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
self.assertEqual({0}, event_cpu_mem_usages)
@skip_if_lt_x_gpu(2)
@dist_init
def test_profiler_remote_cuda(self):
if self.rank != 1:
return
dst_cuda_0 = (self.rank + 1) % self.world_size
dst_cuda_1 = (self.rank + 2) % self.world_size
dst_worker_cuda_0 = worker_name(dst_cuda_0)
dst_worker_cuda_1 = worker_name(dst_cuda_1)
with torch.autograd.profiler.profile(use_cuda=True) as p:
fut1 = rpc.rpc_async(dst_worker_cuda_0, udf_with_torch_ops, args=(0, ))
fut2 = rpc.rpc_async(dst_worker_cuda_1, udf_with_torch_ops, args=(1, ))
fut1.wait()
fut2.wait()
def get_name(event):
return event.name[event.name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR):]
function_events = p.function_events
for event in function_events:
if event.is_async:
self.assertEqual(0, event.cuda_time_total)
self.assertEqual([], event.kernels)
self.assertEqual(0, event.cuda_time)
else:
if event.node_id == 1:
continue
self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1])
if get_name(event) in EXPECTED_REMOTE_EVENTS:
self.assertGreater(event.cuda_time_total, 0)
self.assertEqual(1, len(event.kernels))
kernel = event.kernels[0]
if event.node_id == dst_cuda_0:
self.assertEqual(kernel.device, 0)
if event.node_id == dst_cuda_1:
self.assertEqual(kernel.device, 1)
self.assertGreater(event.cuda_time, 0)
# Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled
# events.
remote_events = [event for event in function_events if event.is_remote]
remote_event_names = [get_name(event) for event in remote_events if get_name(event) in EXPECTED_REMOTE_EVENTS]
self.assertEqual(set(remote_event_names), set(EXPECTED_REMOTE_EVENTS))
@dist_init
def test_profiler_export_trace(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with torch.autograd.profiler.profile() as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
events = p.function_events
with TemporaryFileName() as fname:
path = fname
p.export_chrome_trace(path)
with open(path) as f:
trace = json.load(f)
event_names = [event['name'] for event in trace]
for expected_event_name in EXPECTED_REMOTE_EVENTS + [RPCExecMode.ASYNC.value]:
event_exists = any([expected_event_name in event_name for event_name in event_names])
self.assertTrue(event_exists)
@dist_init
def test_profiler_rpc_key_names(self):
# tests that remote events are properly prefixed with the RPC profiling key.
if self.rank != 1:
return
# Spawn multiple threads that send RPCs to ensure keys are correctly
# prefixed when there are multiple RPCs being created/in flight at the
# same time.
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
def rpc_with_profiling(dst_worker):
with torch.autograd.profiler.profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
fut.wait()
events = prof.function_events
remote_event_names = {
event.name: event for event in events if event.is_remote
}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
dst_worker,
)
remote_event_name_set = set(EXPECTED_REMOTE_EVENTS)
for name, event in remote_event_names.items():
# Ensure that we have the expected key as part of the remote
# event.
self.assertTrue(name.startswith(rpc_profiling_key))
self.assertTrue(event.is_remote)
self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id)
# Ensure that the remote event name also contains the operator.
operator_name_substr = name[len(rpc_profiling_key) :]
# Note: we don't assert that every remote event needs to be
# in the above set; the set is just a representative set of
# what we expect to see. The profiler can change and add more
# events, but we should always expect to see this representative
# set.
matching_event = {
remote_event_name
for remote_event_name in remote_event_name_set
if remote_event_name in operator_name_substr
}
remote_event_name_set -= matching_event
# The set should be empty, otherwise its contained elements did
# not show up in the remote profiler output.
self.assertTrue(
remote_event_name_set == set(),
f"Expected {remote_event_name_set} to be included in remote profiler output.",
)
for dst in dst_ranks:
dst_worker = worker_name(dst)
num_parallel_rpcs = 2
with concurrent.futures.ThreadPoolExecutor(
max_workers=num_parallel_rpcs
) as executor:
futs = [
executor.submit(rpc_with_profiling, dst_worker)
for _ in range(num_parallel_rpcs)
]
# Wait for workers to finish test
for fut in futs:
fut.result()
def _run_test_profiler_remote_events_profiled(self):
# Tests that we can successfully invoke the profiler on a remote node,
# and collect the remote events back in the local profiler.
if self.rank != 1:
return
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
for dst in dst_ranks:
dst_worker = worker_name(dst)
with torch.autograd.profiler.profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
ret = fut.wait()
events = prof.function_events
rpc_event = get_function_event(events, RPCExecMode.ASYNC.value)
self.check_profiling_info(
worker_name(self.rank),
dst_worker,
udf_with_torch_ops,
rpc_event,
RPCExecMode.ASYNC,
)
remote_events = {event.name: event for event in events if event.is_remote}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
worker_name(dst),
)
for expected_remote_event_name in EXPECTED_REMOTE_EVENTS:
expected_key = rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name
self.assertTrue(expected_key in remote_events)
remote_event = remote_events[expected_key]
# Remote event should have a node ID corresponding to the worker
# it ran on.
self.assertEqual(remote_event.node_id, dst)
# Validate order remote events show up in profiling output.
def convert_remote_to_local(event_name):
remote_op_key = rpc_profiling_key + REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key)
+ len(remote_op_key) :
]
remote_events_list = [
convert_remote_to_local(event.name)
for event in events
if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS
]
self.assertEqual(remote_events_list, EXPECTED_REMOTE_EVENTS)
@dist_init
def test_profiler_remote_events_profiled(self):
self._run_test_profiler_remote_events_profiled()
@single_threaded_process_group_agent
@dist_init
def test_profiler_remote_events_profiled_single_threaded(self):
self._run_test_profiler_remote_events_profiled()
def run_profiling_workload(self, dst):
fut = rpc.rpc_async(
worker_name(dst),
torch.mul,
args=(
torch.tensor(1.0, requires_grad=True),
torch.tensor(1.0, requires_grad=True),
),
)
fut.wait()
def _run_rpc_profiling_async_function(self, device="cpu"):
if self.rank != 1:
return
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
with torch.autograd.profiler.profile() as prof:
ret = rpc.rpc_async(
dst1, slow_async_add, args=(dst2, x, y, device), timeout=20
)
out = ret.wait()
function_events = prof.function_events
# slow_async_add resulted in an RPC from dst1 -> dst2, so this should be
# recorded.
key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_async_add.__qualname__, worker_name(self.rank), dst1
)
nested_rpc_key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_add.__qualname__, dst1, dst2
)
expected_key = key_prefix + REMOTE_OP_STR + nested_rpc_key_prefix
remote_events = [event for event in function_events if event.is_remote]
rpc_remote_event = [
event for event in remote_events if event.name == expected_key
]
self.assertEqual(1, len(rpc_remote_event))
rpc_remote_event = rpc_remote_event[0]
self.assertEqual(rpc_remote_event.node_id, (self.rank + 1) % self.world_size)
# slow_async_add's RPC does an add on dst2, which should be reflected as well.
remote_add_key = (
expected_key + REMOTE_OP_STR + torch.jit._builtins._find_builtin(torch.add)
)
remote_add_event = [
event for event in remote_events if event.name == remote_add_key
]
self.assertEqual(1, len(remote_add_event))
remote_add_event = remote_add_event[0]
# Validate that node_id is dst2.
self.assertEqual(remote_add_event.node_id, (self.rank + 2) % self.world_size)
@dist_init
def test_rpc_profiling_async_function(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@single_threaded_process_group_agent
@dist_init
def test_rpc_profiling_async_function_single_threaded(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_remote_record_function(self):
# test that functions run over RPC with record_function show the expected
# profiled block.
if self.rank != 1:
return
dst_ranks = [i for i in range(self.world_size) if i != self.rank]
for dst_rank in dst_ranks:
dst_worker = worker_name(dst_rank)
with torch.autograd.profiler.profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=(-1, True))
fut.wait()
function_events = prof.function_events
record_function_remote_event = [
evt for evt in function_events if "##forward##" in evt.name
]
self.assertEqual(1, len(record_function_remote_event))
record_function_remote_event = record_function_remote_event[0]
self.assertEqual(record_function_remote_event.node_id, dst_rank)
remaining_remote_events = {
evt for evt in function_events if evt.node_id == dst_rank
} - {record_function_remote_event}
# These ops are created by the hack of casting record_function to a
# tensor, so they should not count in the actual UDF profiled time.
# TODO remove after https://github.com/pytorch/pytorch/issues/43868
# is resolved.
remote_events_denylist = [
"aten::zeros",
"aten::empty",
"aten::zero_",
"aten::fill_",
]
REMOTE_OP_STR = "#remote_op: "
def convert_remote_to_local(event_name):
remote_op_key = REMOTE_OP_STR
return event_name[event_name.find(remote_op_key) + len(remote_op_key) :]
# Ideally, we should validate that the sum of remote operations within
# record_function are less than record_function's CPU time. However,
# there is a known bug in profiling
# (https://github.com/pytorch/pytorch/issues/45160) due to which we
# can't do this. So, we just validate they are child events.
prof.key_averages()
# cpu_children only returns direct children, so here we get all
# children recursively.
def get_cpu_children(event):
if not event.cpu_children:
return []
cpu_children = event.cpu_children
for e in event.cpu_children:
cpu_children.extend(get_cpu_children(e))
return cpu_children
record_function_children_names = [
convert_remote_to_local(c.name)
for c in get_cpu_children(record_function_remote_event)
]
for evt in remaining_remote_events:
local_name = convert_remote_to_local(evt.name)
if local_name not in remote_events_denylist:
self.assertTrue(
local_name in record_function_children_names,
f"{local_name} not in {record_function_children_names}",
)
def validate_profiling_workload(self, dst, prof):
def convert_remote_to_local(event_name):
return event_name[event_name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR) :]
events = prof.function_events
remote_events = {
convert_remote_to_local(event.name): event
for event in events
if event.is_remote
}
self.assertTrue("aten::mul" in remote_events)
remote_mul_event = remote_events["aten::mul"]
self.assertEqual(remote_mul_event.node_id, dst)
self.check_profiling_info(
worker_name(self.rank),
worker_name(dst),
torch.mul,
remote_mul_event,
RPCExecMode.ASYNC,
)
def _run_test_profiler_with_autograd_context(self):
dst = (self.rank + 1) % self.world_size
if self.rank == 1:
# Cases where we can double wrap messages with profiling information and autograd info.
with dist_autograd.context() as context_id:
with torch.autograd.profiler.profile() as prof:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
# Ensure that flipped order of ctx managers results in events being
# recorded as expected.
with torch.autograd.profiler.profile() as prof:
with dist_autograd.context() as context_id:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_autograd_context_single_threaded(self):
self._run_test_profiler_with_autograd_context()
@dist_init
def test_profiler_with_autograd_context(self):
self._run_test_profiler_with_autograd_context()
def _profiler_test_with_rpc(self, rpc_exec_mode, func, args, use_record_function=False, dst=None):
dst = dst if dst is not None else (self.rank + 1) % self.world_size
# only run profiler on rank 1.
if self.rank == 1:
with torch.autograd.profiler.profile() as prof:
record_function_ctx_mgr = (
contextlib.suppress()
if not use_record_function
else torch.autograd.profiler.record_function(
"foo"
)
)
with record_function_ctx_mgr as rf:
if rpc_exec_mode == RPCExecMode.SYNC:
rpc.rpc_sync(worker_name(dst), func, args=args)
elif rpc_exec_mode == RPCExecMode.ASYNC:
fut = rpc.rpc_async(worker_name(dst), func, args=args)
fut.wait()
else:
self.assertTrue(rpc_exec_mode == RPCExecMode.REMOTE)
rref = rpc.remote(worker_name(dst), func, args=args)
rref.to_here()
# To avoid flakiness, wait for the RRef to be profiled. This
# means that we received the acknowledgement of successful
# creation on the owner and ran the callbacks responsible
# for recording the profiling event.
rref._get_profiling_future().wait()
events = prof.function_events
rpc_event = get_function_event(events, rpc_exec_mode.value)
# verify Node ID for this rpc event.
self.assertEqual(rpc_event.node_id, self.rank)
# Ensure recording of remote events.
remote_events = {event for event in events if event.node_id == dst} - {rpc_event}
self.assertGreaterEqual(len(remote_events), 1)
for remote_event in remote_events:
self.assertEqual(remote_event.node_id, dst)
if use_record_function:
scope_event = get_function_event(events, "foo")
# Since RPC call is within the scope, its CPU interval should be
# contained within foo's interval.
self.assertTrue(scope_event.time_range.start < rpc_event.time_range.start)
self.assertTrue(scope_event.time_range.end > rpc_event.time_range.end)
# the sender, dest worker, function run, and type of RPC should all
# be recorded.
self_worker_name = worker_name(self.rank)
dst_worker_name = worker_name(dst)
self.check_profiling_info(self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode)
if use_record_function:
# verify order by ensuring that the outer context comes
# before the rpc event.
foo_event_ix = next(i for i, event in enumerate(events) if "foo" in event.name)
rpc_event_idx = next(i for i, event in enumerate(events) if rpc_exec_mode.value in event.name)
self.assertLess(foo_event_ix, rpc_event_idx)
def _run_test_profiler_with_sync_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_sync_rpc_udf(self):
self._run_test_profiler_with_sync_rpc_udf()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_sync_rpc_udf_single_threaded(self):
self._run_test_profiler_with_sync_rpc_udf()
def _run_test_profiler_with_sync_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_sync_rpc_builtin(self):
self._run_test_profiler_with_sync_rpc_builtin()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_sync_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_sync_rpc_builtin()
def _run_test_profiler_with_async_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_async_rpc_udf(self):
self._run_test_profiler_with_async_rpc_udf()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_async_rpc_udf_single_threaded(self):
self._run_test_profiler_with_async_rpc_udf()
def _run_test_profiler_with_async_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_async_rpc_builtin(self):
self._run_test_profiler_with_async_rpc_builtin()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_async_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_async_rpc_builtin()
def _run_test_profiler_with_remote_udf(self):
self._profiler_test_with_rpc(RPCExecMode.REMOTE, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), dst=self.rank
)
@dist_init
def test_profiler_with_remote_udf(self):
self._run_test_profiler_with_remote_udf()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_remote_udf_single_threaded(self):
self._run_test_profiler_with_remote_udf()
def _run_test_profiler_with_remote_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
torch.mul,
args=(torch.ones(1), torch.ones(1)),
dst=self.rank,
)
@dist_init
def test_profiler_with_remote_builtin(self):
self._run_test_profiler_with_remote_builtin()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_remote_builtin_single_threaded(self):
self._run_test_profiler_with_remote_builtin()
def _run_test_profiler_with_script_async_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_async_rpc(self):
self._run_test_profiler_with_script_async_rpc()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_script_async_rpc_single_threaded(self):
self._run_test_profiler_with_script_async_rpc()
def _run_test_profiler_with_script_sync_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_sync_rpc(self):
self._run_test_profiler_with_script_sync_rpc()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_script_sync_rpc_single_threaded(self):
self._run_test_profiler_with_script_sync_rpc()
def _run_test_profiler_with_script_remote_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),), dst=self.rank
)
@dist_init
def test_profiler_with_script_remote_rpc(self):
self._run_test_profiler_with_script_remote_rpc()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_script_remote_rpc_single_threaded(self):
self._run_test_profiler_with_script_remote_rpc()
def _assert_top_level_events(self, process_global_events, expected_top_level_event_names):
top_level_event_names = []
for thread_local_events in process_global_events:
# Get top-level events from all events happened on a thread.
last_end_time = 0
for event in thread_local_events:
event_name = event.name
time_range = event.time_range
if time_range.start > last_end_time:
top_level_event_names.append(event_name)
last_end_time = time_range.end
self.assertEqual(sorted(top_level_event_names), sorted(expected_top_level_event_names))
@dist_init
def test_server_process_global_profiler(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
x = torch.tensor(1)
y = torch.tensor(2)
outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
outer_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.add, (x, y))
inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
inner_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.sub, (x, y))
inner_profile_rref.rpc_sync().__exit__(None, None, None)
outer_profile_rref.rpc_sync().__exit__(None, None, None)
inner_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (inner_profile_rref,))
self._assert_top_level_events(inner_events, ['aten::sub'])
outer_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (outer_profile_rref,))
self._assert_top_level_events(outer_events, ['aten::add', 'aten::sub'])
inner_profile_rref.rpc_sync().key_averages()
outer_profile_rref.rpc_sync().key_averages()
@dist_init
def test_async_record_function_double_end_callbacks(self):
num_sleep_seconds = 1
if self.rank == 1:
# Validate that calling the function twice results in an error.
with torch.autograd.profiler.profile() as pf:
with torch.autograd.profiler.record_function("foo") as rf:
fut = rpc.rpc_async(
worker_name(0), my_sleep_func, args=(num_sleep_seconds,)
)
rf._call_end_callbacks_on_future(fut)
with self.assertRaisesRegex(
RuntimeError, "can only be called once."
):
rf._call_end_callbacks_on_future(fut)
fut.wait()
@dist_init
def test_async_record_function_cbs_jit_call(self):
if self.rank == 1:
with torch.autograd.profiler.profile() as pf:
key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
torch._jit_internal._qualified_name(my_script_func),
"worker1",
"worker0",
)
with torch.autograd.profiler.record_function(key) as rf:
fut = rpc.rpc_async(
worker_name(0), my_script_func, args=(torch.tensor(1),)
)
# Intentionally calling record_function internals
fut = torch.ops.profiler._call_end_callbacks_on_jit_fut(rf.handle, fut)
result = fut.wait()
# Validate that the profiling future returns the same value as the RPC
# future.
expected = torch.add(torch.tensor(1), torch.tensor(1))
self.assertEqual(result, expected)
events = pf.function_events
rpc_event = get_function_event(
events, torch._jit_internal._qualified_name(my_script_func)
)
self.assertTrue(torch._jit_internal._qualified_name(my_script_func) in rpc_event.name)
@dist_init
def test_py_class_constructor(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), MyClass, args=(n,))
self.assertEqual(ret.a, n)
@dist_init
def test_py_class_instance_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass(2).my_instance_method, args=(n,)
)
self.assertEqual(ret, MyClass(2).my_instance_method(n))
@dist_init
def test_py_class_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_class_method, args=(n, n + 1)
)
self.assertEqual(ret, MyClass.my_class_method(n, n + 1))
@dist_init
def test_py_class_static_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_static_method, args=(n + 10,)
)
self.assertEqual(ret, MyClass.my_static_method(n + 10))
@dist_init
def test_py_multi_async_call(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker_info = rpc.get_worker_info(worker_name(dst_rank))
fut1 = rpc.rpc_async(dst_worker_info, MyClass.my_static_method, args=(n + 10,))
fut2 = rpc.rpc_async(dst_worker_info, min, args=(n, n + 1, n + 2))
self.assertEqual(fut1.wait(), MyClass.my_static_method(n + 10))
self.assertEqual(fut2.wait(), min(n, n + 1, n + 2))
@dist_init
def test_py_no_return_result(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), no_result)
self.assertEqual(ret, no_result())
@dist_init
def test_py_tensors(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, my_tensor_function(torch.ones(n, n), torch.ones(n, n)))
@dist_init
def test_py_tensors_multi_async_call(self):
futs = []
n = self.rank + 1
dst_rank = n % self.world_size
for i in range(100):
fut = rpc.rpc_async(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(i, i), torch.ones(i, i)),
)
futs.append(fut)
j = 0
for val in torch.futures.wait_all(futs):
self.assertEqual(
val, my_tensor_function(torch.ones(j, j), torch.ones(j, j))
)
j += 1
@dist_init
def test_py_tensors_in_container(self):
n = self.rank + 1
dst_rank = n % self.world_size
a = [torch.ones(n, n), torch.ones(n, n)]
b = TensorClass(build_complex_tensors())
c = {"foo": torch.ones(n, n), "bar": torch.ones(n, n)}
ret = rpc.rpc_sync(
worker_name(dst_rank), my_complex_tensor_function, args=(a, b, c)
)
self.assertEqual(ret, my_complex_tensor_function(a, b, c))
@dist_init
def test_py_nested_pickle(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
run_nested_pickle,
args=(MyPickleClass(), torch.ones(2, 2)),
)
m = MyPickleClass()
m.set(my_tensor_function(torch.ones(2, 2), torch.ones(2, 2)))
self.assertEqual(ret, run_nested_pickle(m, torch.ones(2, 2)))
@dist_init
def test_py_function_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
with self.assertRaises(TypeError):
ret = rpc.rpc_sync(worker_name(dst_rank), no_result, args=(10,))
@dist_init
def test_py_raise_in_user_func(self):
with captured_output() as (_, err):
# This barrier prevents a race condition where the main thread has
# not entered the context manager when the remote function runs.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func)
with self.assertRaisesRegex(ValueError, expected_err):
fut.wait()
# This barrier prevents a race condition where the main thread exits the
# context manager before the remote function has run.
dist.barrier()
# Validate that trainers log errors when running functions.
stderr_lines = err.getvalue()
self.assertTrue(expected_err in stderr_lines)
@dist_init
def test_py_raise_in_user_func_escaped_str(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func_escape)
try:
fut.wait()
except ValueError as e:
msg = str(e)
# Ensure newlines are unescaped to provide a better repr of error.
self.assertEqual(msg, msg.encode("utf-8").decode("unicode_escape"))
else:
self.assertTrue(False, "expected raise_func_escape to raise ValueError.")
@dist_init
def test_nested_rpc(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
nested_rpc,
args=(worker_name(self.rank),),
)
self.assertEqual(ret, torch.ones(2, 2) + 1)
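# Helper: fires `repeat` async RPCs of `f` at the next rank, asserts each
# returns 0, and prints how long the whole batch took.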
def _stress_test_rpc(self, f, repeat=1000, args=()):
n = self.rank + 1
dst_rank = n % self.world_size
futs = []
tik = time.time()
for _ in range(repeat):
fut = rpc.rpc_async(worker_name(dst_rank), f, args=args)
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
tok = time.time()
print(
"Rank {} finished testing {} times in {} seconds.".format(
self.rank, repeat, tok - tik
)
)
@dist_init
def test_stress_light_rpc(self):
self._stress_test_rpc(light_rpc)
@dist_init
def test_stress_heavy_rpc(self):
self._stress_test_rpc(heavy_rpc, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_stress_heavy_rpc_torchscript(self):
self._stress_test_rpc(heavy_rpc_torchscript, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_builtin_remote_ret(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(rref.to_here(), torch.ones(n, n) * 2)
@dist_init
def test_builtin_remote_self(self):
rref = rpc.remote(
worker_name(self.rank),
torch.add,
args=(torch.ones(2, 2), torch.ones(2, 2)),
)
self.assertEqual(rref.local_value(), torch.ones(2, 2) * 2)
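# Helper: issues `m` rpc.remote calls of `fn` to the same destination, with
# per-call args/kwargs built by args_fn/kwargs_fn, then checks each RRef via
# to_here() against the locally computed result.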
def _test_multi_remote_call(self, fn, args_fn=lambda x: (), kwargs_fn=lambda x: {}):
m = 10
n = self.rank + 1
dst_rank = n % self.world_size
rrefs = []
expected = []
for i in range(m):
n = n + i
rrefs.append(
rpc.remote(
worker_name(dst_rank),
fn,
args=args_fn(n),
kwargs=kwargs_fn(n),
)
)
expected.append(fn(*args_fn(n), **kwargs_fn(n)))
for i in range(m):
self.assertEqual(rrefs[i].to_here(), expected[i])
@dist_init
def test_multi_builtin_remote_ret(self):
def args_fn(n):
return (torch.ones(n, n), torch.ones(n, n))
self._test_multi_remote_call(torch.add, args_fn=args_fn)
@dist_init
def test_py_udf_remote(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(rref.to_here(), my_function(n, n + 1, n + 2))
@dist_init
def test_multi_py_udf_remote(self):
def kwargs_fn(n):
return {"a": torch.ones(n, n), "b": torch.ones(n, n), "c": torch.ones(n, n)}
self._test_multi_remote_call(my_function, kwargs_fn=kwargs_fn)
@dist_init
def test_py_rref_args(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init
def test_py_rref_args_user_share(self):
n = self.rank + 1
owner_rank = n % self.world_size
user_rank = (n + 1) % self.world_size
rref_a = rpc.remote(
worker_name(owner_rank), my_function, args=(torch.ones(n, n), 2, 0)
)
rref_b = rpc.remote(
worker_name(owner_rank), my_function, args=(torch.ones(n, n), 1, 0)
)
rref_c = rpc.remote(
worker_name(user_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init
def test_py_rpc_rref_args(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), my_function, args=(torch.ones(n, n), 2, 0)
)
rref_b = rpc.remote(
worker_name(dst_rank), my_function, args=(torch.ones(n, n), 1, 0)
)
c = rpc.rpc_sync(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(c, torch.ones(n, n) + 4)
@dist_init
def test_nested_remote(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.remote(
worker_name(dst_rank1),
nested_remote,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 3)
@dist_init
def test_nested_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref_of_rrefs = rpc.remote(
worker_name(dst_rank1),
nested_rref,
args=(worker_name(dst_rank2),),
)
# Say C has 2 OwnerRRefs.
# B has 2 UserRRefs to those 2 OwnerRRefs, respectively.
# This call is effectively A asking B to share its 2 UserRRefs.
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), torch.ones(2, 2) + 1)
self.assertEqual(rrefs[1].to_here(), torch.ones(2, 2) + 2)
@dist_init
def test_nested_rref_stress(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
all_rrefs = []
for _ in range(20):
all_rrefs.append(
rpc.remote(
worker_name(dst_rank1),
nested_rref,
args=(worker_name(dst_rank2),),
)
)
for i in range(20):
rref_of_rrefs = all_rrefs[i]
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), torch.ones(2, 2) + 1)
self.assertEqual(rrefs[1].to_here(), torch.ones(2, 2) + 2)
@dist_init
def test_multi_layer_nested_async_rpc(self):
# This test will exit right away, but there will be a chain of async
# RPCs. The termination algorithm should detect those messages properly.
# Otherwise, some peer could exit early, leaving others to hit timeout
# or connection-closed errors.
ttl = 20
n = self.rank + 1
dst_rank = n % self.world_size
multi_layer_nested_async_rpc(dst_rank, self.world_size, ttl)
@dist_init
def test_remote_with_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
# check ref to other workers
rref = rpc.remote(worker_name(dst_rank), raise_func)
with self.assertRaises(ValueError):
rref.to_here()
# check ref to itself
rref = rpc.remote(worker_name(self.rank), no_result, args=(10,))
with self.assertRaises(TypeError):
rref.to_here()
@dist_init
def test_rpc_return_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.rpc_sync(
worker_name(dst_rank1),
rpc_return_rref,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)
@dist_init
def test_rref_forward_chain(self):
ttl = 8
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
ret_rref = rref_forward_chain(dst_rank, self.world_size, rref, ttl)
for i in range(ttl):
self.assertEqual(len(ret_rref), 1)
ret_rref = ret_rref[0].to_here()
ret = ret_rref
self.assertEqual(ret, torch.add(torch.ones(n, n), 1))
@dist_init
def test_local_rref_no_fork(self):
local_rref = RRef(35)
self.assertEqual(local_rref.local_value(), 35)
@dist_init
def test_local_value_not_on_owner(self):
# ensure that an error is raised if a user tries to call
# local_value() on a non-owning node.
next_rank = (self.rank + 1) % self.world_size
rref = rpc.remote(
worker_name(next_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
with self.assertRaisesRegex(
RuntimeError, (
fr"For UserRRef\(rref_id=GloballyUniqueId\(created_on={self.rank}, local_id=0\), "
fr"fork_id=GloballyUniqueId\(created_on={self.rank}, local_id=1\)\), "
r"can't call localValue\(\) on user "
fr"WorkerInfo\(id={self.rank}, name={worker_name(self.rank)}\). "
fr"Call it on owner WorkerInfo\(id={next_rank}, name={worker_name(next_rank)}\)"
)
):
rref.local_value()
@dist_init
def test_return_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_list = rpc.rpc_sync(
worker_name(dst_rank), get_rref_list, args=([1, 2, 3],)
)
for rref in rref_list:
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, 10),
)
rets = [
rpc.rpc_sync(
rref.owner(), _call_method_on_rref, args=(MyClass.get_value, rref)
)
for rref in rref_list
]
self.assertEqual(rets, [11, 12, 13])
@dist_init
def test_rref_type(self):
def launched_rpc(events):
expected_name = "rpc_sync#_rref_typeof_on_owner"
return any([e.name.startswith(expected_name) for e in events])
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, torch.add, args=(torch.ones(2), 1))
with torch.autograd.profiler.profile() as p:
t = rref._get_type()
self.assertTrue(launched_rpc(p.function_events))
self.assertEqual(t, type(torch.ones(2)))
with torch.autograd.profiler.profile() as p:
for _ in range(10):
t = rref._get_type()
self.assertFalse(launched_rpc(p.function_events))
self.assertEqual(t, type(torch.ones(2)))
rref = rpc.remote(dst, MyClass, args=(0,))
self.assertEqual(rref._get_type(), MyClass)
@dist_init
def test_rref_type_with_error(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, raise_func)
with self.assertRaisesRegex(ValueError, "Expected error"):
rref._get_type()
@dist_init
def test_rref_type_owner(self):
rref = RRef(torch.ones(2) + 1)
self.assertEqual(rref._get_type(), type(torch.ones(2)))
rref = RRef(MyClass(0))
self.assertEqual(rref._get_type(), MyClass)
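# Helper that simulates a slow remote computation: sleeps for one second
# before returning the sum.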
@staticmethod
def _slow_add(x, y):
time.sleep(1)
return x + y
@dist_init
def test_rref_type_slow_init(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, RpcTest._slow_add, args=(torch.ones(2), 1))
self.assertEqual(rref._get_type(), type(torch.ones(2)))
@dist_init
def test_owner_equality(self):
a = RRef(40)
b = RRef(50)
other_rank = (self.rank + 1) % self.world_size
other_a = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_b = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_a.to_here() # to ensure clean termination
other_b.to_here()
self.assertNotEqual(a.owner(), 23)
self.assertEqual(other_a.owner(), other_b.owner())
self.assertNotEqual(a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_b.owner())
self.assertEqual(a.owner(), a.owner())
self.assertEqual(a.owner(), b.owner())
self.assertEqual(a.owner(), rpc.get_worker_info())
x = dict()
x[a.owner()] = a
x[other_a.owner()] = other_a
self.assertEqual(x[a.owner()], a)
self.assertEqual(x[b.owner()], a)
self.assertEqual(x[other_a.owner()], other_a)
self.assertEqual(x[other_b.owner()], other_a)
self.assertEqual(len(x), 2)
@dist_init
def test_pass_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker = worker_name(dst_rank)
rref = RRef(40)
self.assertEqual(
rpc.rpc_sync(dst_worker, add_rref_to_value, args=(rref, 50)), 90
)
self.assertEqual(
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 50)).wait(), 90
)
self.assertEqual(
rpc.remote(dst_worker, add_rref_to_value, args=(rref, 50)).to_here(), 90
)
@dist_init
def test_remote_same_worker(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init(setup_rpc=True)
def test_call_method_on_rref(self):
"""
Tests that it is possible to call an instance method on a remote object
by using rref.owner() as the destination of the call.
"""
vals = [10, 2, 5, 7]
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# creates a remote object
rref = rpc.remote(dst_worker, MyClass, args=(vals[0],))
# modifies state of the remote object
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[1]),
)
rpc.rpc_async(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[2]),
).wait()
rpc.remote(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[3]),
).to_here()
# queries state of the remote object
result = rpc.rpc_sync(
dst_worker, _call_method_on_rref, args=(MyClass.get_value, rref)
)
self.assertEqual(result, sum(vals))
# Notice `rpc.api.shutdown()` accesses
# `_delete_all_user_and_unforked_owner_rrefs` through
# `torch.distributed.rpc.api`, so patching
# `torch.distributed.rpc._delete_all_user_and_unforked_owner_rrefs` will
# not help.
@mock.patch.object(torch.distributed.rpc.api, "_delete_all_user_and_unforked_owner_rrefs")
def _test_rref_leak(self, _mock_delete_all_user_and_unforked_owner_rrefs, ignore_leak):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.ones(2, 2), 1),
)
import torch.distributed.rpc.api as api
if ignore_leak:
api._ignore_rref_leak = True
rpc.shutdown(graceful=True)
else:
api._ignore_rref_leak = False
with self.assertRaisesRegex(RuntimeError, "Leaking RRef"):
rpc.shutdown(graceful=True)
@dist_init(setup_rpc=False)
def test_rref_leak(self):
self._test_rref_leak(ignore_leak=False)
@dist_init(setup_rpc=False)
def test_ignore_rref_leak(self):
self._test_rref_leak(ignore_leak=True)
@dist_init
def test_rref_str(self):
rref1 = RRef(self.rank)
id_class = "GloballyUniqueId"
self.assertEqual(
"OwnerRRef({}(created_on={}, local_id=0))".format(id_class, self.rank), rref1.__str__()
)
dst_rank = (self.rank + 1) % self.world_size
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
self.assertEqual(
rref2.__str__(),
"UserRRef(RRefId = {0}(created_on={1}, local_id=1), ForkId = {0}(created_on={1}, local_id=2))".format(
id_class, self.rank
),
)
@dist_init
def test_rref_get_future(self):
# Tests that we can obtain the future corresponding to the creation of
# the RRef on the remote end
if self.rank == 0:
# Builtin
rref = rpc.remote(worker_name(1), torch.add, args=(1, 1))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# UDF
rref = rpc.remote(worker_name(1), foo_add, args=())
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# Script
rref = rpc.remote(worker_name(1), my_script_func, args=(torch.tensor(1), ))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
@dist_init
def test_rref_context_debug_info(self):
# This test checks local states that are modified by remote workers.
# This means that we need a barrier before and after every check.
# The barrier before a check makes sure that all previous states are
# cleared globally; the barrier after ensures that no subsequent state
# change leaks into the current check.
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Check 1: local RRef does not update owners_ map or add a pending user.
#################################################
rref1 = RRef(self.rank)
# don't need a barrier here as local RRef is handled by this thread
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertIn("num_pending_users", info)
# RRef on local value is not added to context until shared across RPC
self.assertEqual(0, int(info["num_owner_rrefs"]))
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after the check 1
dist.barrier()
# Check 2: Sharing RRef as an arg should update owners_ map
###########################################################
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(worker_name(dst_rank), set_global_rref, args=(rref1,))
# barrier before check 2
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(1, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 2
dist.barrier()
# clear states for check 2
rpc.rpc_sync(worker_name(dst_rank), clear_global_rref)
# Check 3: rpc.remote call should update owners_ map
####################################################
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref3 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref2.to_here()
rref3.to_here()
# barrier before check 3
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(2, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 3
dist.barrier()
@dist_init
def test_disable_gil_profiling(self):
# test that rpc.enable_gil_profiling(False) will result in
# GIL wait time not being recorded.
# GIL profiling should be disabled by default.
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertRaises(KeyError, lambda: info["agent.gil_average_wait_time_us"])
rpc.enable_gil_profiling(True)
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.gil_average_wait_time_us", info)
@dist_init(setup_rpc=False)
def test_local_shutdown(self):
# test that we can start RPC and then immediately locally shutdown
# without sending any messages.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init
def test_debug_info(self):
# only test keys in this test case. Values should be covered by
# individual module debug info tests
import torch.distributed.autograd as dist_autograd
info = _get_debug_info()
rref_info = _rref_context_get_debug_info()
agent_info = rpc.api._get_current_rpc_agent().get_debug_info()
autograd_info = dist_autograd._get_debug_info()
common_keys = rref_info.keys() & agent_info.keys() & autograd_info.keys()
self.assertEqual(0, len(common_keys))
expected = {}
expected.update(rref_info)
expected.update(agent_info)
expected.update(autograd_info)
# NB: Key ordering is only preserved in python 3.6+. So here, we
# manually check keys are equal.
for key in expected.keys():
self.assertIn(key, info.keys())
for key in info.keys():
self.assertIn(key, expected.keys())
@dist_init(setup_rpc=False)
@unittest.skipIf(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_handle_send_exceptions(self):
# test that if a callee node has gone down, we raise an appropriate
# exception instead of just crashing.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc._set_rpc_timeout(10)
# This barrier is needed to ensure that some workers do not exit before
# others have been brought up, for non-ProcessGroupAgent backends.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
if self.rank == 1:
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# allow destination worker to exit without joining
error_str = self.get_shutdown_error_regex()
wait_until_node_failure(dst_rank, error_str)
fut = rpc.rpc_async(dst_worker, torch.add, args=(torch.ones(1), 3))
# Shutdown sequence is not very well defined and as a result
# we can see any of the error messages defined in get_shutdown_error_regex.
with self.assertRaisesRegex(RuntimeError, error_str):
fut.wait()
# exit all workers non-gracefully.
rpc.shutdown(graceful=False)
@dist_init
def test_deadlock(self):
# this test is copied from https://github.com/pytorch/pytorch/issues/45089
if self.rank == 1:
dst1 = worker_name((self.rank + 1) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
rpc.rpc_async(dst1, RpcTest._slow_add, args=(x, y), timeout=15).wait()
dist_initialized = dist.is_initialized()
if not dist_initialized:
dist.init_process_group(
backend="gloo",
init_method=self.file_init_method,
rank=self.rank,
world_size=self.world_size,
)
@dist_init(setup_rpc=False)
def test_local_shutdown_with_rpc(self):
# test that we can start RPC, send RPCs, and then run local shutdown.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# A barrier is needed to ensure that all RPCs are processed.
# Otherwise, some RPCs can time out since the receiving end
# has terminated.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init(setup_rpc=False)
def test_set_and_get_default_rpc_timeout(self):
timeout = 0.5
# A new `RpcBackendOptions` is constructed
# when accessing `self.rpc_backend_options`.
rpc_backend_options = self.rpc_backend_options
rpc_backend_options.rpc_timeout = timeout
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
set_timeout = rpc.get_rpc_timeout()
self.assertEqual(timeout, set_timeout)
rpc.shutdown()
@dist_init
def test_default_timeout_used(self):
"""
Tests that if no timeout is passed into rpc_async and rpc_sync, then the
default timeout is used.
"""
dst_rank = (self.rank + 1) % self.world_size
rpc._set_rpc_timeout(0.001) # 1 ms
# Futures should time out and be marked with an exception indicating so.
futs = [
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=())
for _ in range(10)
]
expected_error = self.get_timeout_error_regex()
for fut in futs:
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# ensure that if a new timeout is set, old futures don't time out but new ones do.
rpc._set_rpc_timeout(200) # 200 seconds
# create a long-running RPC.
fut1 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
# now, set a short timeout.
rpc._set_rpc_timeout(0.001)
# fut2 should time out, fut1 should not.
fut2 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut2.wait()
fut1.wait()
# Zero timeout means infinity, so future should run to completion.
rpc._set_rpc_timeout(0)
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()).wait()
# reset to default timeout so shutdown messages can process cleanly.
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init
def test_rpc_timeouts(self):
# TODO: enable timeouts for rpc.remote/RRef (https://github.com/pytorch/pytorch/issues/33803)
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
timeout = 0.1 # 100 ms
expected_error = self.get_timeout_error_regex()
# Test async UDF
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if there is no timeout and we use the default
# RPC timeout.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)).wait()
# Test sync UDF
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
# Ensure run to completion if there is no timeout and we use the default
# RPC timeout.
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# If we set a default timeout for RPCs, it should be respected, though
# still overridden if we pass in a different timeout to the APIs.
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# The RPCs should run to completion since we override the timeout.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=5).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5)
# Passing in a zero timeout should ensure that the RPC won't time out.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=0)
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
def test_dist_init_decorator(self):
@dist_init(setup_rpc=False)
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
@dist_init
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
def test_use_rpc_pickler(self):
class TestPickler:
pass
test_pickler = TestPickler()
with _use_rpc_pickler(test_pickler):
self.assertTrue(torch.distributed.rpc.api._default_pickler is test_pickler)
self.assertTrue(
torch.distributed.rpc.api._default_pickler is _internal_rpc_pickler
)
@dist_init
def test_wait_all(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
self.assertTrue(len(_thread_local_var.future_list) == 1)
self.assertTrue(isinstance(_thread_local_var.future_list[0], torch._C.Future))
self.assertTrue(fut.done())
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_multiple_call(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
for i in range(20):
fut = rpc.rpc_async(dst, torch.add, (torch.ones(i, i), 1))
res = rpc.rpc_sync(dst, torch.add, (torch.ones(i, i), 1))
self.assertEqual(res, torch.ones(i, i) + 1)
self.assertEqual(fut.wait(), torch.ones(i, i) + 1)
self.assertTrue(len(_thread_local_var.future_list) == 20)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_timeout(self):
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
timeout = 0.1 # 100 ms
fut = rpc.rpc_async(dst, my_sleep_func, args=(1,), timeout=timeout)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_user_func(self):
with self.assertRaises(ValueError):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_body(self):
with self.assertRaises(ValueError):
with _wait_all():
raise_func()
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_function_not_on_callee(self):
# test that if a function does not exist on a callee, we don't crash;
# instead we get an AttributeError indicating that the func does not exist.
this_module = sys.modules[__name__]
caller_worker = "worker0"
callee_worker = "worker1"
if self.rank == 1:
# Use delattr to remove the binding of a func on this node
delattr(this_module, "foo_add")
# notify remote end that we have removed it.
rpc.rpc_sync(caller_worker, set_value, args=(self.rank,))
if self.rank == 0:
# func exists on caller, but not callee.
# wait for remote end to remove the binding of foo_add func.
wait_for_value_future()
# Ensure that we have the attribute on this module. Otherwise, the test could fail due to a caller-side pickling error.
self.assertTrue(hasattr(this_module, "foo_add"))
with self.assertRaisesRegex(
AttributeError, "RPC pickler does not serialize"
):
rpc.rpc_sync(callee_worker, foo_add, args=())
@dist_init
def test_non_garbage_collected_user_rref_due_to_local_circular_dependency(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
a = MyClass(1)
b = MyClass(2)
# This prevents Python from garbage collecting a and b.
a.other = b
b.other = a
n = self.rank
a.rref = rpc.remote(
dst_worker_name,
torch.add,
args=(torch.ones(n, n), 2)
)
@dist_init(setup_rpc=False)
def test_use_rref_after_shutdown(self):
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# pass in graceful=True to ensure that local UserRRefs are deleted.
rpc.shutdown(graceful=True)
with self.assertRaisesRegex(
RuntimeError, "Cannot call to_here\\(\\) on it after deletion."
):
rref.to_here()
with self.assertRaisesRegex(
RuntimeError, "Cannot call fork an UserRRef after deletion."
):
import torch.distributed.rpc.internal as internal
internal.serialize(rref)
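# Helpers that return or accept GPU tensors; the ProcessGroup CUDA test below
# uses them to verify that CUDA tensors are rejected in RPC messages.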
@staticmethod
def _return_gpu_tensor():
return torch.rand(3, 3).cuda(0)
@staticmethod
def _return_gpu_tensor_list():
return [torch.rand(3, 3).cuda(0), torch.rand(3, 3).cuda(1)]
@staticmethod
def _gpu_tensor_list_arg(tensor_list):
return torch.rand(3, 3)
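# Creates a UserRRef owned by the worker two ranks away; the confirmation
# tests below pass it on to a third worker.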
def _create_rref(self):
owner_rank = (self.rank + 2) % self.world_size
return rpc.remote(
worker_name(owner_rank),
torch.add,
args=(torch.zeros(2, 2), 1)
)
@dist_init
def test_user_rrefs_confirmed(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret = rpc.rpc_sync(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret, True)
@dist_init
def test_user_rrefs_confirmed_remote(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret_rref = rpc.remote(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret_rref.to_here(), True)
@dist_init
def test_rref_py_pickle_not_supported(self):
local_rref = RRef(35)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, "Can not pickle rref in python pickler"):
torch.save(local_rref, fname)
@dist_init
def test_remote_throw(self):
rref = rpc.remote(worker_name((self.rank + 1) % self.world_size),
raise_or_inc,
args=(torch.ones(2),))
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
rref.to_here()
@dist_init
def test_non_cont_tensors(self):
if self.rank == 0:
# Create a non-contiguous tensor.
t = torch.rand(5, 5)
t_view = t.narrow(1, 2, 2)
self.assertFalse(t_view.is_contiguous())
t_cont = t_view.contiguous()
self.assertTrue(t_cont.is_contiguous())
self.assertEqual(t_view, t_cont)
# Send non-cont tensor over RPC.
next_rank = (self.rank + 1) % self.world_size
t_ret = rpc.rpc_sync(worker_name(next_rank), non_cont_test, args=(t_view, t_cont))
# Verify the returned tensor.
self.assertEqual(t_view, t_ret)
self.assertFalse(t_ret.is_contiguous())
@dist_init
def test_callback_simple(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
def callback(fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
set_by_cb.set_result(ret.clone() + 1)
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
self.assertEqual(set_by_cb.result(), torch.ones(n, n) * 2 + 1)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_wrong_arg_num(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_fut = fut.then(my_function)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
with self.assertRaisesRegex(
RuntimeError,
"my\\_function\\(\\) missing 2 required positional arguments"
):
cb_fut.wait()
@dist_init
def test_callback_wrong_arg_type(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut0 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1))
fut1 = fut0.then(lambda x: x + 1)
with self.assertRaisesRegex(
RuntimeError,
"unsupported operand type\\(s\\) for \\+"
):
fut1.wait()
@dist_init
def test_callback_multi(self):
num_cbs = 10
n = self.rank + 1
def callback(idx, fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
return ret + idx
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_futs = []
for idx in range(num_cbs):
cb_futs.append(fut.then(partial(callback, idx)))
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
for idx in range(num_cbs):
self.assertEqual(
cb_futs[idx].wait(),
torch.ones(n, n) * 2 + idx
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_chain(self):
n = self.rank + 1
dst = worker_name(n % self.world_size)
def callback(fut):
return fut.wait() + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), 1)
)
num_cbs = 20
for _ in range(num_cbs):
fut = fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)
@dist_init
def test_callback_in_rpc(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
add_use_future_cb,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_callback_with_ret(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
fut2 = rpc.rpc_async(
dst,
torch.add,
args=(fut0.wait(), 1)
).then(lambda fut1: fut1.wait() + 1)
return fut2.wait()
fut3 = rpc.rpc_async(
dst,
torch.add,
args=(torch.ones(2, 2), 1)
).then(callback)
self.assertEqual(fut3.wait(), torch.ones(2, 2) + 3)
@dist_init
def test_callback_with_error(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
with self.assertRaisesRegex(ValueError, "Expected error"):
fut0.wait()
raise RuntimeError("Another expected error")
fut1 = rpc.rpc_async(dst, raise_func).then(callback)
with self.assertRaisesRegex(RuntimeError, "Another expected error"):
fut1.wait()
@dist_init
def test_callback_none(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
TypeError,
"incompatible function arguments."
):
rpc.rpc_async(dst, raise_func).then(None)
@dist_init
def test_add_done_callback(self):
set_by_cb = False
n = self.rank + 1
def callback(fut):
nonlocal set_by_cb
fut.wait()
set_by_cb = True
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.add_done_callback(callback)
fut_then = fut.then(lambda _: True)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
# We have no guarantee that the add_done_callback fn will execute before the test finishes.
# Add a 'then' callback that runs afterwards to guarantee we wait for the first callback.
fut_then.wait()
self.assertTrue(set_by_cb)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_mark_future_twice(self):
fut = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.zeros(2, 2), 1)
)
self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1)
with self.assertRaisesRegex(
RuntimeError,
"Future can only be marked completed once"
):
fut.set_result(1)
@dist_init
def test_pickle_future(self):
fut = torch.futures.Future()
errMsg = "Can not pickle torch.futures.Future"
dst = worker_name((self.rank + 1) % self.world_size)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_sync(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_async(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.remote(dst, fail_on_fut, args=(fut,))
@dist_init
def test_future_done(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, args=(torch.zeros(2), 1))
fut.wait()
self.assertTrue(fut.done())
@dist_init
def test_future_done_exception(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
self.assertTrue(fut.done())
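# Shared body for the future-callback tests: runs `func` on dst1 with dst2 and
# two addends, and checks the combined result.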
def _test_future_cb(self, func):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
func,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_future_in_rpc(self):
self._test_future_cb(add_use_future_set_result)
@dist_init
def test_future_nested_callback(self):
self._test_future_cb(add_use_future_nested_cb)
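# Dispatch helper: executes `fn` on `to` via rpc_sync, rpc_async (then wait),
# or rpc.remote (then to_here), depending on `mode`.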
def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None):
if mode == RPCExecMode.SYNC:
return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs)
elif mode == RPCExecMode.ASYNC:
return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait()
elif mode == RPCExecMode.REMOTE:
return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here()
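# The async-execution tests below reuse _run_func_in_mode to exercise the
# sync, async, and remote call paths.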
def _test_async_function_raise(self, mode):
with self.assertRaisesRegex(RuntimeError, "Expected error"):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_raise_func,
mode
)
@dist_init
def test_async_function_raise(self):
self._test_async_function_raise(RPCExecMode.SYNC)
@dist_init
def test_async_function_raise_async(self):
self._test_async_function_raise(RPCExecMode.ASYNC)
@dist_init
def test_async_function_raise_remote(self):
self._test_async_function_raise(RPCExecMode.REMOTE)
def _test_async_function_wrong_return_type(self, mode):
errMsg = (
"Functions decorated with @rpc\\.async_function must return a "
"torch\\.futures\\.Future object,"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_wrong_type,
mode
)
@dist_init
def test_async_function_wrong_return_type(self):
self._test_async_function_wrong_return_type(RPCExecMode.SYNC)
@dist_init
def test_async_function_wrong_return_type_async(self):
self._test_async_function_wrong_return_type(RPCExecMode.ASYNC)
@dist_init
def test_async_function_wrong_return_type_remote(self):
self._test_async_function_wrong_return_type(RPCExecMode.REMOTE)
@dist_init
def test_async_function_simple(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(dst1, async_add, args=(dst2, torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
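# Shared body for the chained async-execution tests: `fn` receives dst2 and
# two addends, and the result should equal the input tensor plus 3.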
def _test_async_function(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
args = (dst2, torch.ones(2, 2), 1, 2)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + 3)
@dist_init
def test_async_function_with_future_ctor(self):
self._test_async_function(async_add_with_future_ctor)
@dist_init
def test_async_function_with_future_ctor_remote(self):
self._test_async_function(
async_add_with_future_ctor,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_chained(self):
self._test_async_function(async_add_chained)
@dist_init
def test_async_function_chained_remote(self):
self._test_async_function(async_add_chained, RPCExecMode.REMOTE)
@dist_init
def test_async_function_nested(self):
self._test_async_function(async_add_nested)
@dist_init
def test_async_function_nested_remote(self):
self._test_async_function(async_add_nested, RPCExecMode.REMOTE)
@dist_init
def test_async_static_method(self):
self._test_async_function(AsyncExecutionClass.static_async_add)
@dist_init
def test_async_static_method_remote(self):
self._test_async_function(
AsyncExecutionClass.static_async_add,
RPCExecMode.REMOTE
)
@dist_init
def test_async_class_method(self):
self._test_async_function(AsyncExecutionClass.class_async_add)
@dist_init
def test_async_class_method_remote(self):
self._test_async_function(
AsyncExecutionClass.class_async_add,
RPCExecMode.REMOTE
)
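# Exercises all three RRef proxy flavors (rpc_sync(), rpc_async(), remote())
# against the async-execution methods of a remote AsyncExecutionClass instance.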
def _test_test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
rref = rpc.remote(dst1, AsyncExecutionClass)
x = torch.ones(2, 2)
y = torch.ones(2, 2) + 1
if mode == RPCExecMode.SYNC:
ret = rref.rpc_sync().static_async_add(dst2, x, x, y)
ret += rref.rpc_sync().class_async_add(dst2, x, x, y)
ret += rref.rpc_sync().bound_async_add(dst2, x, x, y)
elif mode == RPCExecMode.ASYNC:
ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait()
elif mode == RPCExecMode.REMOTE:
ret = rref.remote().static_async_add(dst2, x, x, y).to_here()
ret += rref.remote().class_async_add(dst2, x, x, y).to_here()
ret += rref.remote().bound_async_add(dst2, x, x, y).to_here()
self.assertEqual(ret, 3 * 4 * x)
@dist_init
def test_async_class_rref_proxy(self):
self._test_test_async_class_rref_proxy()
@dist_init
def test_async_class_rref_proxy_async(self):
self._test_test_async_class_rref_proxy(mode=RPCExecMode.ASYNC)
@dist_init
def test_async_class_rref_proxy_remote(self):
self._test_test_async_class_rref_proxy(mode=RPCExecMode.REMOTE)
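# Shared body for the multi-step async-execution tests: the callee applies
# `num` additions of `step`, so the result should be the input plus num * step.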
def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
num = 20
step = 3
args = (dst2, torch.ones(2, 2), num, step)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + num * step)
@dist_init
def test_async_function_multi_chained(self):
self._test_async_function_multi(async_add_chained_multi)
@dist_init
def test_async_function_multi_chained_async(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_chained_remote(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_multi_fanout(self):
self._test_async_function_multi(async_add_multi_fanout)
@dist_init
def test_async_function_multi_fanout_async(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_fanout_remote(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.REMOTE
)
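# Returning a torch.futures.Future from an RPC is not allowed; all three call
# modes should surface the pickling error.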
def _test_return_future(self, mode):
with self.assertRaisesRegex(
RuntimeError,
"Can not pickle torch.futures.Future"
):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
return_future,
mode
)
@dist_init
def test_return_future(self):
self._test_return_future(RPCExecMode.SYNC)
@dist_init
def test_return_future_async(self):
self._test_return_future(RPCExecMode.ASYNC)
@dist_init
def test_return_future_remote(self):
self._test_return_future(RPCExecMode.REMOTE)
@dist_init
def test_rref_timeout(self):
# This test is similar to ones in FaultyProcessGroupTest, but is meant to be
# run with other backends besides ProcessGroup.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# 10 ms timeout
rref = rpc.remote(dst_worker, my_sleep_func, args=(2, ), timeout=0.01)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
wait_until_owners_and_forks_on_rank(1, 1, rank=1)
@dist_init(setup_rpc=False)
@unittest.skipIf(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_pg_then_rpc(self):
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init(setup_rpc=False)
@unittest.skipIf(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_rpc_then_pg does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_rpc_then_pg(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init
def test_wait_all_with_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init
def test_wait_all_with_partial_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, torch.add, args=(torch.ones(2), 1)))
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init(setup_rpc=False)
@unittest.skipIf(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"Test does not work with TCP init, see https://github.com/pytorch/pytorch/issues/46491",
)
def test_init_rpc_twice(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
# Wait for all init to complete.
dist.barrier()
# Ensure rpc initialization works again.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Verify RPCs work after re-init.
dst = worker_name((self.rank + 1) % self.world_size)
rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
rpc.rpc_sync(dst, foo_add, args=())
rpc.shutdown()
def test_wrong_types(self):
with self.assertRaisesRegex(
TypeError,
"Argument backend must be a member of BackendType",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend="TENSORPIPE",
)
with self.assertRaisesRegex(
TypeError,
"Argument rpc_backend_options must be an instance of RpcBackendOptions",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=self.rpc_backend,
rpc_backend_options={"init_method": self.init_method}
)
def test_cannot_infer_backend_from_options(self):
# An exception should be raised if the backend isn't specified but
# options are given which are not an instance of any of the known
# agents' option classes.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(TypeError, "Could not infer backend for options"):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
@dist_init
def test_owner_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t1 = torch.rand(10, 10, requires_grad=True)
rref = rpc.RRef(t1.sum() + t1.sum())
rref.backward()
expected_grad = torch.ones_like(t1) * 2
self.assertEqual(expected_grad, t1.grad)
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id)
self.assertEqual(expected_grad, dist_autograd.get_gradients(context_id)[t1])
# Double backward.
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(expected_grad * 2, dist_autograd.get_gradients(context_id)[t1])
# Test errors.
with self.assertRaisesRegex(RuntimeError, "tensors does not require grad and does not have a grad_fn"):
rpc.RRef(torch.rand(10)).backward()
with self.assertRaisesRegex(RuntimeError, "grad can be implicitly created only for scalar outputs"):
rpc.RRef(torch.rand(10, requires_grad=True)).backward()
with self.assertRaisesRegex(RuntimeError, "Could not find autograd context with id: 100"):
rpc.RRef(torch.rand(10, requires_grad=True).sum()).backward(100)
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rpc.RRef("foo").backward()
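# Helpers for the user-RRef backward tests: _sum produces a scalar to
# backprop through, _identity forwards a non-tensor value to hit the error path.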
@staticmethod
def _sum(x):
return x.sum()
@staticmethod
def _identity(x):
return x
@dist_init
def test_user_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t = torch.rand(10, requires_grad=True)
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._sum, args=(t,))
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(torch.ones_like(t) * 2, dist_autograd.get_gradients(context_id)[t])
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._identity, args=("foo",))
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rref.backward(context_id)
with self.assertRaisesRegex(RuntimeError, "User RRefs require 'dist_autograd_ctx_id' to be specified"):
rref.backward()
class ProcessGroupAgentRpcTest(RpcAgentTestFixture):
def test_mismatched_type_for_options(self):
# An exception should be raised if the options are not an instance of
# ProcessGroupRpcBackendOptions.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(
TypeError, "`rpc_backend_options` must be a `ProcessGroupRpcBackendOptions`"
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=rpc.BackendType.PROCESS_GROUP,
rpc_backend_options=rpc_backend_options,
)
def test_infer_backend_from_options(self):
rpc_backend_options = rpc.ProcessGroupRpcBackendOptions(
init_method=self.init_method
)
with self.assertLogs("torch.distributed.rpc", logging.WARNING) as cm:
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
self.assertIn(
"To silence this warning pass `backend=BackendType.PROCESS_GROUP` explicitly.",
"\n".join(cm.output),
)
self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.ProcessGroupAgent)
def test_logs_deprecation_warning(self):
with self.assertLogs("torch.distributed.rpc", logging.WARNING) as cm:
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=rpc.BackendType.PROCESS_GROUP,
rpc_backend_options=self.rpc_backend_options,
)
self.assertIn(
"It is recommended to migrate to the TENSORPIPE backend.",
"\n".join(cm.output),
)
@skip_if_lt_x_gpu(2)
@dist_init
def test_cuda(self):
dst = worker_name((self.rank + 1) % self.world_size)
t1 = torch.rand(3, 3).cuda(0)
t2 = torch.rand(3, 3).cuda(1)
t3 = torch.rand(3, 3)
# cuda tensors as args fail.
with self.assertRaisesRegex(RuntimeError, "RPC backend only supports CPU tensors.*Found tensor on device: cuda:0"):
rpc.rpc_sync(dst, torch.add, args=(t1, t2))
# mix of cpu and cuda tensors as args fail.
with self.assertRaisesRegex(RuntimeError, "RPC backend only supports CPU tensors.*Found tensor on device: cuda:0"):
rpc.rpc_sync(dst, torch.add, args=(t1, t3))
# gpu tensor list as args fails.
with self.assertRaisesRegex(RuntimeError, "RPC backend only supports CPU tensors.*Found tensor on device: cuda:0"):
rpc.rpc_sync(dst, RpcTest._gpu_tensor_list_arg, args=([t1, t2]))
# cuda tensors as return values fail.
with self.assertRaisesRegex(RuntimeError, "RPC backend only supports CPU tensors.*Found tensor on device: cuda:0"):
rpc.rpc_sync(dst, RpcTest._return_gpu_tensor, args=())
# cuda tensors as a list of return value fails
with self.assertRaisesRegex(RuntimeError, "RPC backend only supports CPU tensors.*Found tensor on device: cuda:0"):
rpc.rpc_sync(dst, RpcTest._return_gpu_tensor_list, args=())
# Sending to self should fail too.
with self.assertRaisesRegex(RuntimeError, "RPC backend only supports CPU tensors.*Found tensor on device: cuda:0"):
rpc.rpc_sync(worker_name(self.rank), torch.add, args=(t1, t2))
def test_single_threaded_rref_owner(self):
# We need a process group in order to perform a barrier at the end.
initialize_pg(self.file_init_method, self.rank, self.world_size)
# This test aims to verify that the server can handle all internal RPC
# messages using just one thread.
caller_rank = 0
callee_rank = 1
rpc_backend_options = rpc.ProcessGroupRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_send_recv_threads=1
) if self.rank == callee_rank else self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
if self.rank == caller_rank:
dst = worker_name(callee_rank)
rrefs = []
# make sure there are no existing OwnerRRefs on dst
info = rpc.rpc_sync(dst, get_rref_debug_info)
self.assertEqual(0, int(info["num_owner_rrefs"]))
# creating RRefs on dst
for i in range(20):
rrefs.append(
rpc.remote(dst, delayed_add, args=(torch.zeros(2, 2), i))
)
# using RRefs on dst
futs = []
for i in range(len(rrefs)):
futs.append(
rpc.rpc_async(dst, my_rref_function, args=(rrefs[i], rrefs[i]))
)
# wait for results and check
for i in range(len(futs)):
self.assertEqual(2 * (torch.zeros(2, 2) + i), futs[i].wait())
# check we created the expected number of RRefs on dst
info = rpc.rpc_sync(dst, get_rref_debug_info)
num_owner_rrefs = int(info["num_owner_rrefs"])
self.assertEqual(len(futs), num_owner_rrefs)
# trigger RRef deletion
del futs
del rrefs
# wait until OwnerRRefs are cleared on dst
while num_owner_rrefs > 0:
info = rpc.rpc_sync(dst, get_rref_debug_info)
num_owner_rrefs = int(info["num_owner_rrefs"])
time.sleep(0.01)
# use a barrier to prevent messages sent during shutdown from occupying
# the only thread on the callee (rank == 1) too early.
dist.barrier()
rpc.shutdown()
def test_single_threaded_rref_to_here(self):
# We need a process group in order to perform a barrier at the end.
initialize_pg(self.file_init_method, self.rank, self.world_size)
# This test aims to verify that the server can handle all internal RPC
# messages using just one thread.
caller_rank = 0
callee_rank = 1
rpc_backend_options = rpc.ProcessGroupRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_send_recv_threads=1
) if self.rank == callee_rank else self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
if self.rank == caller_rank:
dst = worker_name(callee_rank)
rrefs = []
# makes sure there is no existing OwnerRRefs on dst
info = rpc.rpc_sync(dst, get_rref_debug_info)
self.assertEqual(0, int(info["num_owner_rrefs"]))
# creating RRefs on dst
for i in range(20):
rrefs.append(
rpc.remote(dst, delayed_add, args=(torch.zeros(2, 2), i))
)
# wait for results and check
for i in range(len(rrefs)):
self.assertEqual(torch.zeros(2, 2) + i, rrefs[i].to_here())
# check we created the expected number of RRefs on dst
info = rpc.rpc_sync(dst, get_rref_debug_info)
num_owner_rrefs = int(info["num_owner_rrefs"])
self.assertEqual(len(rrefs), num_owner_rrefs)
# trigger RRef deletion
del rrefs
# wait until OwnerRRefs are cleared on dst
while num_owner_rrefs > 0:
info = rpc.rpc_sync(dst, get_rref_debug_info)
num_owner_rrefs = int(info["num_owner_rrefs"])
time.sleep(0.01)
# use a barrier to prevent messages sent during shutdown from occupying the
# only send/recv thread on the callee (rank == 1) too early.
dist.barrier()
rpc.shutdown()
@dist_init
def test_process_group_debug_info(self):
rpc.enable_gil_profiling(True)
initialize_pg(self.file_init_method, self.rank, self.world_size)
NUM_THREAD = self.rpc_backend_options.num_send_recv_threads
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.num_pending_requests", info)
self.assertIn("agent.thread_pool_size", info)
self.assertIn("agent.num_idle_threads", info)
self.assertIn("agent.gil_average_wait_time_us", info)
self.assertEqual(int(info["agent.num_pending_requests"]), 0)
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREAD)
self.assertEqual(int(info["agent.num_idle_threads"]), NUM_THREAD)
# for the above check, add a barrier to ensure that another worker
# cannot send a request before we check num_idle_threads, since we'd
# use up an idle thread if we start processing that request.
dist.barrier()
dst_rank = (self.rank + 1) % self.world_size
fut = rpc.rpc_async(
worker_name(dst_rank), set_and_check_done, args=(dst_rank,)
)
# blocks until the request arrives
self.assertEqual(self.rank, VALUE_FUTURE.result())
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.num_pending_requests", info)
self.assertIn("agent.thread_pool_size", info)
self.assertIn("agent.num_idle_threads", info)
self.assertIn("agent.gil_average_wait_time_us", info)
self.assertGreaterEqual(float(info["agent.gil_average_wait_time_us"]), 0)
self.assertEqual(int(info["agent.num_pending_requests"]), 1)
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREAD)
num_idle_threads = int(info["agent.num_idle_threads"])
# as we cannot know for sure whether the send thread has returned, there
# might be either 1 or 2 busy threads
self.assertTrue(num_idle_threads in [NUM_THREAD - 1, NUM_THREAD - 2])
# add a barrier to make sure the request is not finished before checking
# num_pending_requests
dist.barrier()
DONE_FUTURE.set_result(self.rank)
self.assertEqual(dst_rank, fut.wait())
# add a barrier to make sure the dst_rank has finished processing the
# request
dist.barrier()
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.num_pending_requests", info)
self.assertIn("agent.thread_pool_size", info)
self.assertIn("agent.num_idle_threads", info)
self.assertEqual(int(info["agent.num_pending_requests"]), 0)
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREAD)
for retry in range(3):
# even if the future has completed, there is no guarantee that
# the local send/recv threads would have finished. We try three
# times. (NB: this might potentially be flaky. If flakiness does
# occur, then we have to relax the assert.)
info = rpc.api._get_current_rpc_agent().get_debug_info()
if int(info["agent.num_idle_threads"]) == NUM_THREAD:
break
time.sleep(0.1)
self.assertEqual(int(info["agent.num_idle_threads"]), NUM_THREAD)
# add a barrier to make sure SHUTDOWN message is not sent
dist.barrier()
@dist_init(setup_rpc=False)
def test_set_and_get_num_send_recv_threads(self):
NUM_THREADS = 27
rpc_backend_options = rpc.ProcessGroupRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_send_recv_threads=NUM_THREADS
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS)
rpc.shutdown()
@dist_init(setup_rpc=False)
def test_process_group_set_default_timeout(self):
timeout = 0.5
rpc_backend_options = rpc.ProcessGroupRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_send_recv_threads=self.rpc_backend_options.num_send_recv_threads,
rpc_timeout=timeout
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
default_timeout = rpc.get_rpc_timeout()
self.assertEqual(default_timeout, timeout)
rpc.shutdown()
@dist_init(setup_rpc=False)
def test_process_group_options_throw_on_timedelta_timeout(self):
from datetime import timedelta
timeout = timedelta()
# Ensure that constructing ProcessGroupRpcBackendOptions with timedelta fails
with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"):
rpc_backend_options = rpc.ProcessGroupRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_send_recv_threads=self.rpc_backend_options.num_send_recv_threads,
rpc_timeout=timeout,
)
class FaultyAgentRpcTest(RpcAgentTestFixture):
# no faulty_messages defined so this fails all retryable messages - see
# faulty_rpc_agent_test_fixture.py for the list of retryable messages.
@dist_init(messages_to_delay={})
def test_check_failed_messages(self):
if self.rank == 0:
dst_worker_b = worker_name((self.rank + 1) % self.world_size)
dst_worker_c = worker_name((self.rank + 2) % self.world_size)
# Worker0 sends RPC to Worker1 and creates an RRef there
rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2)))
# Worker0 sends an RPC to Worker2 with the RRef as an arg
rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2)))
# check if the output is as expected
self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2)))
# explicitly delete all User RRefs
_delete_all_user_and_unforked_owner_rrefs()
@dist_init
def test_verify_backend_options(self):
self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_PROCESS_GROUP)
self.assertEqual(self.rpc_backend_options.num_send_recv_threads, 8)
self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2)
self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"])
def test_custom_faulty_messages(self):
self.assertEqual(
set(["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"]),
set(self.rpc_backend_options.messages_to_fail),
)
@dist_init(faulty_messages=[])
def test_no_faulty_messages(self):
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 0)
@dist_init(messages_to_delay={"SCRIPT_CALL": 1.5})
def test_custom_messages_to_delay(self):
self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5})
def _test_remote_message_dropped_pickle(self, dst=None):
if self.rank != 0:
return
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, my_sleep_func, args=(1,))
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Attempt to fork the RRef should raise an error indicating the rpc.remote timeout.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref._serialize()
# Test that using RRef as arg over RPC (which forks) results in the same
# error
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1))
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle(self):
self._test_remote_message_dropped_pickle()
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle_to_self(self):
self._test_remote_message_dropped_pickle(self.rank)
def _test_remote_message_dropped_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# test the case where rpc.remote() message creation is completely dropped.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, func, args=args)
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Note: during shutdown, logs will indicate "Could not find OwnerRRef..."
# on the owning nodes, this is expected because the OwnerRRef was never
# successfully created. Therefore, delAllUsers will work as expected.
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args, dst=0)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout_to_self(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args, dst=0)
def _test_remote_message_delay_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# Test the case where remote message is eventually processed on the owner,
# but the future on the creator times out before the response comes back.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# 1 ms timeout
rref = rpc.remote(dst_worker, func, args=args, timeout=0.001)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# to_here() should now pick up that rpc.remote() creation has failed.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Test the case where rpc.remote() times out, but to_here() has already
# started blocking before.
# NOTE: we only test this when not sending to self, as to_here()
# calls localValue(), which does not send an RPC and thus does not have
# a timeout. This can be supported by allowing future.wait() to
# take in an optional timeout (https://github.com/pytorch/pytorch/issues/39280)
if dst_rank != self.rank:
slow_rref = rpc.remote(dst_worker, func, args=args, timeout=2)
with self.assertRaisesRegex(RuntimeError, expected_error):
# to_here() should raise timeout error, since it does not know about the
# status of rpc.remote().
slow_rref.to_here(0.001)
# Note: If we proceed with shutdown, UserRRef will send out a RRefUserDelete
# but this can be a noop since it may not exist on the owner yet. Later,
# the owner can process the RRef creation and wait for the delete message,
# thus leading to a timeout.
# Therefore, we wait until we get notification that pending owners have
# been confirmed before sending out RRefUserDeletes.
if dst_rank != self.rank:
wait_until_owners_and_forks_on_rank(2, 2, rank=dst_rank)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_delay_timeout(func, args)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout_to_self(self):
func = my_sleep_func
args = (1,)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout_to_self(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
def test_rref_to_here_timeout(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref.to_here(0.01)
rref.to_here()
@dist_init(faulty_messages=[])
def test_rpc_builtin_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
# PYTHON_CALL message types which correspond to Python UDF over RPC
# by default get a delay (see faulty_rpc_agent_test_fixture)
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(
dst_worker,
torch.add,
args=(torch.tensor(1), torch.tensor(1)),
timeout=1,
)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=1
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
self.assertEqual(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC, rpc.get_rpc_timeout())
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_rpc_script_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
self.assertEqual(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC, rpc.get_rpc_timeout())
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
class TensorPipeAgentRpcTest(RpcAgentTestFixture):
def test_mismatched_type_for_options(self):
# An exception should be raised if the options are not an instance of
# TensorPipeRpcBackendOptions.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(
TypeError, "`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`"
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=rpc.BackendType.TENSORPIPE,
rpc_backend_options=rpc_backend_options,
)
def test_infer_backend_from_options(self):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.init_method
)
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent)
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_set_and_get_num_worker_threads(self):
NUM_THREADS = 27
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=NUM_THREADS
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_set_default_timeout(self):
timeout = 0.5
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
default_timeout = rpc.get_rpc_timeout()
self.assertEqual(default_timeout, timeout)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_options_throw_on_timedelta_timeout(self):
from datetime import timedelta
timeout = timedelta()
# Ensure that constructing TensorPipeRpcBackendOptions with timedelta fails
with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout,
)
def _test_device_maps(self, options, errMsg="Invalid device_map"):
with self.assertRaisesRegex(ValueError, errMsg):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
self.assertFalse(rpc.api._is_current_rpc_agent_set())
@skip_if_lt_x_gpu(2)
def test_device_maps_wrong_worker_name(self):
options = self.rpc_backend_options
options.set_device_map("none_exist", {0: 1})
self._test_device_maps(options, "Wrong worker names")
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_local_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {torch.cuda.device_count(): 0})
self._test_device_maps(options)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_remote_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: torch.cuda.device_count()})
self._test_device_maps(options)
@skip_if_lt_x_gpu(2)
def test_device_maps_many_to_one(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
options.set_device_map(dst, {0: 0})
self._test_device_maps(options)
@skip_if_lt_x_gpu(2)
def test_device_maps_one_to_many(self):
if self.rank == 0:
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1})
with self.assertRaisesRegex(
ValueError, "`set_device_map` only supports 1-to-1 mapping"
):
options.set_device_map(dst, {0: 0})
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_min_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {-1: 0})
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {0: -1})
@staticmethod
def _gpu_add(x, y):
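# Runs on the callee: with the test's device map {0: 1, 1: 0}, tensors sent from the caller's cuda:0 arrive here on cuda:1, and the result is moved to cuda:0 so it maps back to the caller's cuda:1.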
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 1]):
return (x + y).to(0)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_gpu(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1, 1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentRpcTest._gpu_add,
args=(torch.zeros(2).to(0), torch.ones(2).to(0))
)
self.assertEqual(ret.device, torch.device(1))
self.assertEqual(ret, (torch.zeros(2) + torch.ones(2)).to(1))
rpc.shutdown()
@staticmethod
def _gpu_add_multi_gpu(x, y):
if all([x.is_cuda, x.device.index == 0, y.is_cuda, y.device.index == 1]):
return x + y.to(0), x.to(1) - y
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_multi_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {1: 0})
options.set_device_map(dst, {0: 1})
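# The two set_device_map calls merge into a single map {1: 0, 0: 1} for dst.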
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentRpcTest._gpu_add_multi_gpu,
args=(torch.zeros(2).to(1), torch.ones(2).to(0))
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_multi_gpu(dst)
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_multi_gpu(dst)
@staticmethod
def _gpu_add_return_to_gpu(x, y):
if x.device.type == 'cpu' and y.device.type == 'cpu':
return (x + y).to(0), (x - y).to(1), (x * y).to(2), (x / y).to(3)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_in_options(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
init_method=options.init_method,
num_worker_threads=options.num_worker_threads,
device_maps={dst: {0: 1, 1: 0}}
)
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentRpcTest._gpu_add_multi_gpu,
args=(torch.zeros(2).to(1), torch.ones(2).to(0))
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
def _test_device_maps_return_to_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 2})
options.set_device_map(dst, {2: 3})
options.set_device_map(dst, {3: 0})
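# The four calls accumulate into the map {0: 1, 1: 2, 2: 3, 3: 0}, so a return value on remote device i comes back on local device (i + 3) % 4, matching the assertions below.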
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentRpcTest._gpu_add_return_to_gpu,
args=(torch.zeros(2), torch.ones(2))
)
for i in range(len(rets)):
self.assertEqual(rets[i].device, torch.device((3 + i) % 4))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(3))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
self.assertEqual(rets[2], (torch.zeros(2) * torch.ones(2)).to(1))
self.assertEqual(rets[3], (torch.zeros(2) / torch.ones(2)).to(2))
rpc.shutdown()
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_return_to_gpu(dst)
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_return_to_gpu(dst)
@staticmethod
def _add_to_gpu(x, y):
return (x + y).to(0)
def _test_device_maps_missing_config(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = (
"TensorPipe RPC backend only supports CPU tensors by default.*"
"`set_device_map` on `TensorPipeRpcBackendOptions`"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(dst, torch.add, args=(torch.zeros(2).to(0), 1))
elif mode == RPCExecMode.REMOTE:
rpc.remote(dst, torch.add, args=(torch.zeros(2).to(0), 1)).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
def _test_device_maps_missing_config_response(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = "Response device mapping is not available"
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(
dst,
TensorPipeAgentRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
elif mode == RPCExecMode.REMOTE:
rpc.remote(
dst,
TensorPipeAgentRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config(self):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
def test_device_maps_missing_config_not_timeout(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options
)
timeout = rpc.get_rpc_timeout()
tik = time.time()
self._test_device_maps_missing_config(RPCExecMode.SYNC)
rpc.shutdown()
tok = time.time()
self.assertTrue(tok - tik < timeout)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote(self):
self._test_device_maps_missing_config(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(2)
def test_device_maps_remote(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(
dst,
TensorPipeAgentRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
self.assertEqual(rref.to_here(), torch.ones(2).to(1))
rpc.shutdown()
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
188c718d6467ae8fa888ac5aae2b97fa2aea6c74
|
5c0a0e9696a1b38ff6663440d21c94cbb6730ab1
|
/1 resnet training (CIFAR10).py
|
6ee2bf9da2c3570bf6250a3c8594527cda5c5469
|
[] |
no_license
|
DeBur19/FGE_reproduction_project
|
93107a9f9399022bf5c08e8f54d5fd22294a00ca
|
82143ac593dc6707d799378a699bd1c772e43126
|
refs/heads/master
| 2020-11-25T23:34:19.854559
| 2019-12-20T19:47:39
| 2019-12-20T19:47:39
| 228,890,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,416
|
py
|
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR, MultiStepLR
import random
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, datasets, utils, models
from tqdm import tnrange
from time import time
data_transform_train = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
data_transform_test = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
train_cifar10 = datasets.CIFAR10(root='cifar10/train', train=True, transform=data_transform_train, download=True)
test_cifar10 = datasets.CIFAR10(root='cifar10/test', train=False, transform=data_transform_test, download=True)
dataset_loader_train = torch.utils.data.DataLoader(train_cifar10, batch_size=32, shuffle=True, num_workers=4)
dataset_loader_test = torch.utils.data.DataLoader(test_cifar10, batch_size=32, shuffle=False, num_workers=4)
def train_epoch(model, data_iter, criterion, optimizer):
model.train()
losses = []
for _, batch in zip(tnrange(len(data_iter)), data_iter):
images, labels = batch
images, labels = images.cuda(), labels.cuda()
optimizer.zero_grad()
out = model(images)
loss = criterion(out, labels)
loss.backward()
optimizer.step()
losses.append(loss.item())
return losses
def val(model, data_iter, criterion):
model.eval()
losses = []
acc = 0.0
with torch.no_grad():
for _, batch in zip(tnrange(len(data_iter)), data_iter):
images, labels = batch
images, labels = images.cuda(), labels.cuda()
out = model(images)
loss = criterion(out, labels)
_, preds = torch.max(out, 1)
acc += torch.sum(preds == labels).type(torch.float)
losses.append(loss.item())
return losses, acc / len(data_iter.dataset)
def train(model, opt, crit, train_iter, val_iter, num_epochs, sched, checkpoint_epoch, n_model, n_classes):
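# checkpoint_epoch is the epoch index at which an intermediate checkpoint is written; n_model and n_classes only affect the checkpoint file names.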
train_log, val_log = [], []
val_accs = []
last_acc = 0.0
t1 = time()
for epoch in range(num_epochs):
tic = time()
train_loss = train_epoch(model, train_iter, crit, opt)
tac = time()
tic1 = time()
val_loss, val_acc = val(model, val_iter, crit)
tac1 = time()
val_accs.append(val_acc)
sched.step()
train_log.extend(train_loss)
val_log.append((len(train_iter) * (epoch + 1), np.mean(val_loss)))
t2 = time()
#print('EPOCH {}:'.format(epoch))
#print('Total time from start: {}min {}s'.format((t2 - t1) // 60, (t2 - t1) % 60))
#print('TRAIN: {}min {}s for epoch, mean loss = {}'.format((tac - tic) // 60, (tac - tic) % 60, np.mean(train_loss)))
#print('VAL: {}min {}s for epoch, mean loss = {}, acc = {}'.format((tac1 - tic1) // 60, (tac1 - tic1) % 60, np.mean(val_loss), val_acc))
last_acc = val_acc
if epoch == checkpoint_epoch:
torch.save(model.state_dict(), 'resnet_cifar{}_32ep_{}.pt'.format(n_classes, n_model))
torch.save(opt.state_dict(), 'resnet_cifar{}_32ep_{}_opt.pt'.format(n_classes, n_model))
return train_log, val_log, last_acc, model
# First model:
random.seed(42)
np.random.seed(42)
torch.manual_seed(42)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(42)
resnet_cifar10 = models.resnet152(pretrained=False, **{'num_classes' : 10})
resnet_cifar10 = resnet_cifar10.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(resnet_cifar10.parameters(), lr=0.01, weight_decay=0.0005, momentum=0.9)
sched = MultiStepLR(optimizer, milestones=[20,30], gamma=0.1)
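# MultiStepLR cuts the learning rate by 10x at epochs 20 and 30 (0.01 -> 0.001 -> 0.0001)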
train_log, val_log, resnet_cifar10_2_acc, resnet_cifar10 = train(resnet_cifar10, optimizer, criterion, dataset_loader_train, dataset_loader_test, 40, sched,31, 1, 10)
torch.save(resnet_cifar10.state_dict(), 'resnet_cifar10_40ep_1.pt')
|
[
"noreply@github.com"
] |
DeBur19.noreply@github.com
|
2a7e760536c47ab9bfd21b678dd36dc283094496
|
769518eaef4ad25335bd917409136eccade1c0b1
|
/label_gen/removenolabeldata.py
|
94e8562ef19ea53a8f271caf6e1a8d4bbbbd8b4d
|
[] |
no_license
|
xk-wang/polyphonic-omr
|
eaade114eccb42d2137b7a5c857659d628ed4220
|
264b17302f9029460840c79014d0aa75b68d38b5
|
refs/heads/main
| 2023-07-19T11:40:52.007262
| 2021-09-20T21:20:02
| 2021-09-20T21:20:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,957
|
py
|
# Removes the imgs that have no corresponding label
# python removenolabel.py -imgs <img dir> -labels <label dir>
import os
import argparse
def main():
"""
Main method
"""
# Parse command line arguments for input
parser = argparse.ArgumentParser()
parser.add_argument('-imgs', dest='imgs', type=str, required=True, help='Path to the directory with imgs.')
parser.add_argument('-labels', dest='labels', type=str, required=True, help='Path to the directory with labels.')
args = parser.parse_args()
num_missing = 0
num_total = 0
# Go through all files in input directory
for file_name in os.listdir(args.imgs):
if not file_name.endswith('.png'):
continue
num_total += 1
sample_id = file_name.split('-')[0]
num = file_name.split('-')[1].split('.')[0]
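# Image names look like <sample_id>-<num>.png; the label file may zero-pad <num>, so strip leading zeros here and try the padded variants when building candidate label names below.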
if num.startswith('00'):
num = num[2:]
elif num.startswith('0'):
num = num[1:]
sem_name1 = sample_id + '-' + num + '.semantic'
sem_name2 = sample_id + '-0' + num + '.semantic'
sem_name3 = sample_id + '-00' + num + '.semantic'
# Look for the matching label file (try names with/without leading 0s)
if not any(
    os.path.isfile(os.path.join(args.labels, sem_name))
    for sem_name in (sem_name1, sem_name2, sem_name3)
):
    num_missing += 1
    os.remove(os.path.join(args.imgs, file_name))
    continue
print(num_missing, num_total, num_total - num_missing)
if __name__ == "__main__":
main()
|
[
"sedirisooriya10@gmail.com"
] |
sedirisooriya10@gmail.com
|
6006a738df99a24e60f0d1202d8a0998e6f3c28b
|
45f93a9d47204d76b8bf25a71dfb79403e75c33c
|
/CodeForces/yes-or-yes.py
|
cb4db3b37842abd3b41cac071b12adb1ab078941
|
[] |
no_license
|
tahmid-tanzim/problem-solving
|
0173bce1973ac3e95441a76c10324c0e1b0a57c3
|
6ddb51de6772130f209474e76f39ca2938f444f0
|
refs/heads/master
| 2023-06-25T02:18:03.690263
| 2023-06-20T06:58:46
| 2023-06-20T06:58:46
| 137,173,850
| 4
| 1
| null | 2022-03-30T08:28:41
| 2018-06-13T06:44:25
|
Python
|
UTF-8
|
Python
| false
| false
| 189
|
py
|
# https://codeforces.com/problemset/problem/1703/A
# A. YES or YES?
if __name__ == "__main__":
for _ in range(int(input())):
print('YES' if input().lower() == 'yes' else 'NO')
|
[
"tahmid.tanzim@gmail.com"
] |
tahmid.tanzim@gmail.com
|
538ee5795f2f172fc105da720d85536515dbfad0
|
2a6627109f9e3caff5aea4a902f7c902d1e9ac40
|
/ee/clickhouse/views/person.py
|
6c15329824809af7c9ba886a4629a7b3387e8ab4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
suresh1999/posthog
|
0739d631e5efb96dd00f4d5544f14e885d567b7a
|
14d37dd38cd63b501324856e7dc6602a596efd1e
|
refs/heads/master
| 2023-01-24T22:49:19.026411
| 2020-12-04T16:13:52
| 2020-12-04T16:13:52
| 318,311,546
| 0
| 0
|
NOASSERTION
| 2020-12-04T16:18:23
| 2020-12-03T20:28:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,021
|
py
|
from rest_framework import request, response
from ee.clickhouse.models.person import delete_person
from ee.clickhouse.queries.clickhouse_retention import ClickhouseRetention
from ee.clickhouse.queries.trends.lifecycle import ClickhouseLifecycle
from posthog.api.person import PersonViewSet
from posthog.models import Event, Person
# TODO: Move grabbing all this to Clickhouse. See WIP-people-from-clickhouse branch.
class ClickhousePersonViewSet(PersonViewSet):
lifecycle_class = ClickhouseLifecycle
retention_class = ClickhouseRetention
def destroy(self, request: request.Request, pk=None, **kwargs): # type: ignore
team = self.team
person = Person.objects.get(team=team, pk=pk)
# TODO: Probably won't need this after a while
events = Event.objects.filter(team=team, distinct_id__in=person.distinct_ids)
events.delete()
delete_person(person.uuid, delete_events=True, team_id=team.pk)
person.delete()
return response.Response(status=204)
|
[
"noreply@github.com"
] |
suresh1999.noreply@github.com
|
0026affaf4db9c312ccfe2ba3d12b23bd58c0c57
|
3179016c72059999d5d35c18724d4dcf84b2a225
|
/test_scripts/test_permute.py
|
a202c18f9a93f422e6986fc2a0a6ab456d42c5c8
|
[] |
no_license
|
thematrixduo/DAIR
|
e51a1f5e7c13090e103afea8ffdbcb4a6504389a
|
5e6f76c97880114b606d054375250da0522babaa
|
refs/heads/master
| 2022-12-11T05:11:07.079300
| 2020-08-31T13:19:07
| 2020-08-31T13:19:07
| 291,719,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
import torch
import numpy as np
torch_array = []
np_array = []
for i in range(3):
data = np.random.normal(0,1,(64,3))
#print(data.shape)
np_array.append(data)
torch_array.append(torch.tensor(data))
stacked_tensor = torch.stack(torch_array)
print(stacked_tensor.size())
permuted_tensor = stacked_tensor.permute(1,0,2)
print(permuted_tensor.size())
#print((stacked_tensor[0]-permuted_tensor[:,0,:]).data)
permuted_np = permuted_tensor.numpy()
print(np_array[0] - permuted_np[:,0,:])
|
[
"wangduo1992@hotmail.com"
] |
wangduo1992@hotmail.com
|
ea458da44d9961bee35199279e20dc43ab8ce4f3
|
567055ea99ac7ca3fa3fd77c36f5535b8fb6f546
|
/2018-W-450-4/02-learning-curves/__init__.py
|
3ee4e18e89186a452bc3744e947f3ff5262e3079
|
[] |
no_license
|
lssnadia/JupyterNotebooks
|
f72e58101250a129792639c90f4e276154bb7e6d
|
d36756ee4f46718bc2cffa7ef2d43b95938fd879
|
refs/heads/master
| 2020-03-08T13:41:29.826373
| 2018-04-05T06:00:49
| 2018-04-05T06:00:49
| 128,164,318
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
from IPython.display import display
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline') # makes the picture show up
|
[
"ubuntu@ip-172-31-22-81.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-22-81.us-west-2.compute.internal
|
9ce6ce54431bf4e550f269c6bf8dc6421f443ae8
|
1770a4b13c908bc80b3cfc7e41b22648e434823e
|
/Assignments/homework_assignment_5/is_it_in_stock.py
|
12a31c5427831c3f701731c3635d0b9dfcd432a7
|
[] |
no_license
|
lshi0335/MSITM6341
|
51071ceafa9bb7df42e00ce42375e15f4bf1872e
|
1cbcf1237aaf8e8771eeb6817b72d5db193179b5
|
refs/heads/master
| 2020-07-12T12:38:19.398701
| 2019-12-17T04:13:45
| 2019-12-17T04:13:45
| 204,822,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 651
|
py
|
# Lei Shi
# 0985491
# MSITM6341
# 9/29/2019
# Assignment 5
# Initialize lists
menu_items_in_stock = ['Tacos', 'Chips', 'Salsa', 'Quesadilla']
customer_order = ['Tacos', 'Guacamole', 'Burrito', 'Chips', 'Salsa']
# Change all elements in lists to lower case
menu_items_in_stock = [x.lower() for x in menu_items_in_stock]
customer_order = [x.lower() for x in customer_order]
# Check if an item in customer_order is in menu_items_in_stock
for x in customer_order:
if x in menu_items_in_stock:
print('We have ' + x.capitalize() + ' in stock.')
else:
print('We do not have ' + x.capitalize() + ' in stock.')
|
[
"noreply@github.com"
] |
lshi0335.noreply@github.com
|
5f2d1ac25562db38ab4a821c2566217b269f5519
|
f8ab044c34f0d286195c8e5abfae6f451e6c8223
|
/test_arg.py
|
58c8a56dafab815d14bbf764635a534ade273ed5
|
[] |
no_license
|
liuyug/pytest
|
aa512e902cf4ba9afb91b6b5b5c5cb9dccdc6478
|
ffc14dbee70ff6fd9c8ab63a1c771fddc8bf5491
|
refs/heads/master
| 2020-05-19T12:20:57.958939
| 2016-09-06T03:01:38
| 2016-09-06T03:01:38
| 15,447,570
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 996
|
py
|
#!/usr/bin/env python
# -*- encoding:utf-8 -*-
import argparse
import unittest
class testArgs(unittest.TestCase):
def setUp(self):
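# Each entry below is a fake argv list exercising a different option style: a --foo value, repeated -a appends, and counted -v verbosity.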
self.args = (
['--foo', 'abc', 'abcdefg.ext'],
['-a', 'abc', '-a', 'bcd', '-a', 'cde', 'def.def'],
['-vvvv', 'abc.ea'],
#['--version'],
)
self.parser = argparse.ArgumentParser()
self.parser.add_argument('--foo', help='foo help', default='foo')
self.parser.add_argument('-a', '--all', help='all help', action='append')
self.parser.add_argument('-v', '--verbose', help='verbose help', action='count')
#self.parser.add_argument('--version', action='version', version='%(prog)s 0.3')
self.parser.add_argument('file', help='add filename')
def testargs(self):
for args in self.args:
print('args: ', args)
pargs = self.parser.parse_args(args)
print(pargs)
if __name__ == '__main__':
unittest.main()
|
[
"liuyug@gmail.com"
] |
liuyug@gmail.com
|
660bc9b69bbe66c2d9ce7f4e54e3b4a1dcabcda8
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/334/usersdata/294/100466/submittedfiles/listas.py
|
90a4557636f7b45bccefc918ec87c0a87bd5ae66
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
# -*- coding: utf-8 -*-
n = int(input('Enter the number of elements: '))
while n < 2:
    n = int(input('Enter the number of elements: '))
a = []
for i in range(0, n, 1):
    a.append(int(input('Enter element %d: ' % (i + 1))))
# the "degrau" (step) is the largest absolute difference between adjacent elements
degrau = 0
for i in range(0, n - 1, 1):
    dif = a[i] - a[i + 1]
    if dif < 0:
        dif = -dif
    if dif > degrau:
        degrau = dif
print(degrau)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
998989ef69a190607170515fdb349911ee57ef91
|
df7d030146c901790ab49402140f40fb26d03c8a
|
/PiTemperature/RPiWebThermometer.py
|
97b0ccc67ad3e3960d92a34216af4aa3910870ec
|
[] |
no_license
|
Adrian-Rosoga/code_pi
|
955316914a4ab51513e75a8da82cca8c3a71a14a
|
0a0b57489a4dce2021c03ee10a1d6002c3e34180
|
refs/heads/master
| 2021-06-23T11:35:12.992361
| 2021-05-25T17:05:47
| 2021-05-25T17:05:47
| 211,540,445
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,448
|
py
|
#!/usr/bin/python
#
# One-trick-pony web server serving page with temperature reading
# from DS18B20 sensor attached to a Raspberry Pi
#
#
# Configuration
#
PORT = 8000
DS18B20_OUTPUT_FILE = '/sys/bus/w1/devices/28-0000045bf342/w1_slave'
MAX_TEMPERATURE_ALERT = 22.0
MIN_TEMPERATURE_ALERT = 18.0
import re, time
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
regex = re.compile(r't=(\d+)')
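# The DS18B20 w1_slave output ends with "t=<temperature in millidegrees>"; capture the digits after "t="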
def get_temperature():
file = open(DS18B20_OUTPUT_FILE, 'r')
file.readline()
line = file.readline()
file.close()
data = regex.search(line)
# w1_slave reports the temperature in millidegrees Celsius, so convert to degrees
temperature = float(data.group(1)) / 1000.0
return temperature
body_template = '<HTML>\
<HEAD>\
<TITLE>Temperature</TITLE>\
<META HTTP-EQUIV=PRAGMA CONTENT=NO-CACHE>\
<META HTTP-EQUIV=EXPIRES CONTENT=-1>\
<META HTTP-EQUIV=REFRESH CONTENT=30>\
</HEAD>\
<BODY>\
<B><BR>\
<P ALIGN=CENTER>\
<FONT SIZE=12>Web Thermometer</FONT>\
<BR>\
<FONT SIZE=5>(Raspberry Pi, DS18B20)</FONT>\
<BR><BR><BR>\
<FONT SIZE=14 COLOR=%s>%s °C</FONT>\
<BR><BR><BR>\
<FONT SIZE=6>%s</FONT>\
<BR><BR>\
<FONT SIZE=5 COLOR=RED>%s</FONT>\
<BR><HR>\
<FONT SIZE=3>Page autorefreshes every 30 seconds.</FONT>\
</P>\
</B>\
</BODY>\
</HTML>'
class TemperatureHandler(SimpleHTTPRequestHandler, object):
def do_GET(self):
# Only the request for favicon is honored, for all else tell the temperature
if self.path == '/favicon.ico':
return SimpleHTTPRequestHandler.do_GET(self)
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
temperature = get_temperature()
print 'Temperature = ' + str(temperature)
alert = ''
color = 'GREEN'
if (temperature > MAX_TEMPERATURE_ALERT):
alert = 'ALERT - Temperature above ' + str(MAX_TEMPERATURE_ALERT) + ' °C!'
color = 'RED'
elif (temperature < MIN_TEMPERATURE_ALERT):
alert = 'ALERT - Temperature below ' + str(MIN_TEMPERATURE_ALERT) + ' °C!'
color = 'RED'
self.wfile.write(body_template % (color, temperature, time.ctime(), alert))
self.wfile.close()
try:
server = HTTPServer(('', PORT), TemperatureHandler)
print 'Started HTTP server on port ' + str(PORT) + '...'
server.serve_forever()
except KeyboardInterrupt:
print('^C received, shutting down...')
server.socket.close()
|
[
"arosoga@yahoo.com"
] |
arosoga@yahoo.com
|
445c6ff875319e02bf6c664717e3b20fcc1eeef2
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_peters.py
|
5c5535b7cf2fe2459518556a2fea0f4e76f710a9
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
#calss header
class _PETERS():
def __init__(self,):
self.name = "PETERS"
self.definitions = ['peter']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['peter']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
9390417b1a18bcc58789ad980862c773d4cafc48
|
ba2039cf6d664c733cf1ff62dae0bb30e10798a9
|
/SUSTILP3/urls.py
|
2535aceee788fca6a3907b92d654cdc26fac7eb1
|
[] |
no_license
|
PauloR1997/EXAM_SUS_PL3
|
0a11c9c1531534ad5496cebbab1f9b373ea2b074
|
8e64326321f934f25e6ba6c43c8c906bcaa5b360
|
refs/heads/main
| 2023-03-01T19:32:37.853716
| 2021-02-17T16:53:51
| 2021-02-17T16:53:51
| 339,769,444
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,367
|
py
|
"""SUSTILP3 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf import settings
from miapp import views
urlpatterns = [
path('admin/', admin.site.urls),
path('',views.index,name="index"),
path('inicio/',views.index, name="inicio"),
path('listar_region/', views.listar_region, name="listar_region"),
path('create-full-region/',views.create_full_region, name="create_full_region"),
path('eliminar_region/<int:id>',views.eliminar_region, name="eliminar_region"),
path('save-region/',views.save_region, name="save_region"),
path('listar_empleado/', views.listar_empleado, name="listar_empleado"),
path('create-full-empleado/',views.create_full_empleado, name="create_full_empleado"),
]
|
[
"oyepaulo57@gmail.com"
] |
oyepaulo57@gmail.com
|
da10a3b12aca6306765f2a69092175ef54860514
|
d11b6555f38c1b05998d85c4b9be3c93e9a7e819
|
/Hello.py
|
eaef9e1f7b0cbd8f5d6dc725eb06923b587f3617
|
[] |
no_license
|
Vatty20/LearningPython
|
949b789a9c2182630b94d167e885069ba9e29fd7
|
00212c9aa8f6c18ee78a7a96536354b5dc0b6b19
|
refs/heads/main
| 2023-08-28T00:35:44.812062
| 2021-10-19T06:42:48
| 2021-10-19T06:42:48
| 418,799,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38
|
py
|
print('Hello World')
print('Wassup')
|
[
"harlalka.vatsal@gmail.com"
] |
harlalka.vatsal@gmail.com
|
de8fae9c0ec7bb4c55a0e206af361d75c14fe612
|
b91bc02a5650597b8c217750c4fef238cb2b3363
|
/etcd_client/models/etcdserverpb_lease_time_to_live_request.py
|
8174a404dac146c49249468a580e235bf226a191
|
[] |
no_license
|
NimbleBoxAI/etcd-client-python
|
6d578fd5c588e7b691e6a41a45cb5f93afab279b
|
4d67692b0d75e4b14747f34f3d13998529216a3f
|
refs/heads/master
| 2023-07-13T19:04:36.118319
| 2021-08-21T15:29:11
| 2021-08-21T15:29:11
| 390,751,648
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,436
|
py
|
# coding: utf-8
"""
api/etcdserverpb/rpc.proto
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: version not set
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from etcd_client.configuration import Configuration
class EtcdserverpbLeaseTimeToLiveRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'keys': 'bool'
}
attribute_map = {
'id': 'ID',
'keys': 'keys'
}
def __init__(self, id=None, keys=None, _configuration=None): # noqa: E501
"""EtcdserverpbLeaseTimeToLiveRequest - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._id = None
self._keys = None
self.discriminator = None
if id is not None:
self.id = id
if keys is not None:
self.keys = keys
@property
def id(self):
"""Gets the id of this EtcdserverpbLeaseTimeToLiveRequest. # noqa: E501
ID is the lease ID for the lease. # noqa: E501
:return: The id of this EtcdserverpbLeaseTimeToLiveRequest. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this EtcdserverpbLeaseTimeToLiveRequest.
ID is the lease ID for the lease. # noqa: E501
:param id: The id of this EtcdserverpbLeaseTimeToLiveRequest. # noqa: E501
:type: str
"""
self._id = id
@property
def keys(self):
"""Gets the keys of this EtcdserverpbLeaseTimeToLiveRequest. # noqa: E501
keys is true to query all the keys attached to this lease. # noqa: E501
:return: The keys of this EtcdserverpbLeaseTimeToLiveRequest. # noqa: E501
:rtype: bool
"""
return self._keys
@keys.setter
def keys(self, keys):
"""Sets the keys of this EtcdserverpbLeaseTimeToLiveRequest.
keys is true to query all the keys attached to this lease. # noqa: E501
:param keys: The keys of this EtcdserverpbLeaseTimeToLiveRequest. # noqa: E501
:type: bool
"""
self._keys = keys
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(EtcdserverpbLeaseTimeToLiveRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EtcdserverpbLeaseTimeToLiveRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, EtcdserverpbLeaseTimeToLiveRequest):
return True
return self.to_dict() != other.to_dict()
|
[
"cshubhamrao@gmail.com"
] |
cshubhamrao@gmail.com
|
d4cc69a7a8a0f8fee5660e5ea167301c123bedaa
|
faa741e4a1a44eb00daacbf5cfe73027f5a44200
|
/SRC/Controllers/sentiment.py
|
335ad8a19893431098435d93199a005f2f8e34d3
|
[] |
no_license
|
uscoburgo/API-Sentiment
|
53383406fcc3ac983594699aa9856700680cd033
|
fa66321ccf0e73f418fcb0d07885df46f7d1a556
|
refs/heads/master
| 2023-05-25T03:50:32.510912
| 2020-07-14T15:44:36
| 2020-07-14T15:44:36
| 279,024,151
| 0
| 1
| null | 2023-05-22T23:32:54
| 2020-07-12T08:35:32
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,242
|
py
|
from src.Controllers.chats import getMessage
from src.app import app
from pymongo import MongoClient
from src.Helpers.errorHelpers import errorHelper ,Error404 ,APIError
from src.config import DBURL
import json
import nltk
import ast
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity as distance
import pandas as pd
from bson import ObjectId
client = MongoClient(DBURL, connectTimeoutMS=2000, serverSelectionTimeoutMS=2000)
db = client.get_database()
nltk.download("vader_lexicon")
sia = SentimentIntensityAnalyzer()
@app.route("/chat/<chat_id>/sentiment") #?lang=<lang>
@errorHelper
def getSentiment(chat_id):
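# Score every message in the chat with VADER and report the mean compound score as the overall sentiment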
sentimentMessage={}
for ID, message in getMessage(chat_id).items():
sentimentMessage[ID]={"message":message,"sentiment":sia.polarity_scores(message)}
sums=0
for v in sentimentMessage.values():
print(v)
sums+=v['sentiment']['compound']
average = sums/len(sentimentMessage)
sentimentMessage['overall sentiment']=average
return sentimentMessage
"""
@app.route("/user/<user_id>/recommend")
@errorHelper
def getRecommendation(user_id):
stringsDict = {}
cur = db.messages.find({},{'_id':0,'user_id':1,'text':1})
data = list(cur)
for e in range(len(data)):
stringsDict.update({data[e]['user_id'] : data[e]['text']})
sent1 = {}#k =user_id, v=text
for k,v in stringsDict.items():
sent1[k]=str(v)
count_vectorizer = CountVectorizer()
sparse_matrix = count_vectorizer.fit_transform(sent1.values())
doc_term_matrix = sparse_matrix.todense()
df = pd.DataFrame(doc_term_matrix,
columns=count_vectorizer.get_feature_names(),
index=sent1.keys())
similarity_matrix = distance(df,df)
sim_df = pd.DataFrame(similarity_matrix, columns=sent1.keys(), index=sent1.keys())
def get3closest(sim_df,user_id):#user is an ObjectId
col=sim_df[user_id].sort_values(ascending=False)[1:]
return list(col[0:3].index)
output= get3closest(sim_df,ObjectId(user_id))
output=[str(el) for el in output]
return {'recommended':output}
"""
|
[
"uscoburgo@gmail.com"
] |
uscoburgo@gmail.com
|
ec14651799152533ae1afb6b7c4b39d5335c4ecb
|
2e74cff6c9639f3903ccde662e79359d0724285e
|
/2019_late/20190829/swea_5105_미로의거리.py
|
a33cf516df96ab271637b2d73566c84324d7b61b
|
[] |
no_license
|
dodonmountain/algorithm
|
e29988071f651e51ba65e3926302f94a3d4074a5
|
ce33e0d74220839aed4b17a47fa0069458a4324e
|
refs/heads/master
| 2022-11-05T05:14:01.527015
| 2022-11-01T04:29:37
| 2022-11-01T04:29:37
| 200,008,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,349
|
py
|
import sys
sys.stdin = open('5105.txt')
from pprint import pprint
from collections import deque
def bfs(x, y):
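# BFS over the maze grid: D stores the step count to each visited cell, and shortest is set from the cell adjacent to the goal (value 3)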
global shortest
Q = deque()
visit[x][y] = True
Q.append((x, y))
dx = [0, 0, -1, 1]
dy = [-1, 1, 0, 0]
D = [[0] * N for _ in range(N)]  # stores the shortest distance to each cell
while Q:
xx, yy = Q.popleft()
for step in range(4):
nx = xx + dx[step]
ny = yy + dy[step]
if nx < 0 or ny < 0 or nx > N-1 or ny > N-1:
continue
if maze[nx][ny] == 3:
shortest = D[xx][yy]
return
if maze[nx][ny] == 0 and visit[nx][ny] == False:
                visit[nx][ny] = 1  # mark as visited
D[nx][ny] = D[xx][yy] + 1
Q.append((nx, ny))
T = int(input())
for t_case in range(T):
shortest = 0
N = int(input())
visit = [[0] * N for _ in range(N)]
maze = []
for _ in range(N):
maze.append(list(map(int,input())))
# pprint(maze, width=30)
for i in range(N):
for j in range(N):
if maze[i][j] == 2:
start = [i, j]
elif maze[i][j] == 3:
goal = [i, j]
bfs(start[0], start[1])
pprint(visit,width=40)
print('#{} {}'.format(t_case + 1, shortest))
|
[
"lkh151515@gmail.com"
] |
lkh151515@gmail.com
|
22c73317a38a0b6c83062567b5d226553b2468c6
|
17f74be7a38e5e86539a239e3a8d642bdf1b36ff
|
/PYTHON_SYNTAX/two_types_of_division.py
|
9c020eb74b20dc7cd9d42de0ebbc833dc7bc5591
|
[] |
no_license
|
vishal-1codes/python
|
5d56825a02bc27375b6f6fba65cf0cad1596f87a
|
53c41e4f04933c5e911b2b0473d1ecf886955869
|
refs/heads/master
| 2023-08-13T21:16:17.995213
| 2021-09-25T17:21:01
| 2021-09-25T17:21:01
| 345,704,819
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
# if we want a float result from the division
cucumbers=100
num_people=6
whole_cucumbers_per_person=cucumbers/num_people
float_cucumbers_per_person=float(cucumbers)/num_people
print(float_cucumbers_per_person)
|
[
"pawarvishal10k@gmail.com"
] |
pawarvishal10k@gmail.com
|
01d0c81a95e80f405f125fd99caa00848d8f6f63
|
a86ca34e23afaf67fdf858df9e47847606b23e0c
|
/lib/temboo/Library/MailChimp/ListSubscribe.py
|
1366bda24013c469dbaa94248b91bb53f0dbbfa1
|
[] |
no_license
|
miriammelnick/dont-get-mugged
|
6026ad93c910baaecbc3f5477629b0322e116fa8
|
1613ee636c027ccc49c3f84a5f186e27de7f0f9d
|
refs/heads/master
| 2021-01-13T02:18:39.599323
| 2012-08-12T23:25:47
| 2012-08-12T23:25:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,083
|
py
|
###############################################################################
#
# ListSubscribe
# Adds a subscriber to a MailChimp list.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
class ListSubscribe(Choreography):
"""
Create a new instance of the ListSubscribe Choreography. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
def __init__(self, temboo_session):
Choreography.__init__(self, temboo_session, '/Library/MailChimp/ListSubscribe')
def new_input_set(self):
return ListSubscribeInputSet()
def _make_result_set(self, result, path):
return ListSubscribeResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListSubscribeChoreographyExecution(session, exec_id, path)
"""
An InputSet with methods appropriate for specifying the inputs to the ListSubscribe
choreography. The InputSet object is used to specify input parameters when executing this choreo.
"""
class ListSubscribeInputSet(InputSet):
"""
Set the value of the APIKey input for this choreography. ((required, string) The API Key provided by Mailchimp.)
"""
def set_APIKey(self, value):
InputSet._set_input(self, 'APIKey', value)
"""
Set the value of the DoubleOptIn input for this choreography. ((optional, boolean) Flag to control whether a double opt-in confirmation message is sent. Specify '1' (true) or '0' (false). Defaults to 0.)
"""
def set_DoubleOptIn(self, value):
InputSet._set_input(self, 'DoubleOptIn', value)
"""
Set the value of the EmailAddress input for this choreography. ((conditional, string) The email address for the subscriber you want to create. Required unless the email address is included in the MergeVars input as part of your JSON object.)
"""
def set_EmailAddress(self, value):
InputSet._set_input(self, 'EmailAddress', value)
"""
Set the value of the EmailType input for this choreography. ((optional, string) Must be one of 'text', 'html', or 'mobile'. Defaults to html.)
"""
def set_EmailType(self, value):
InputSet._set_input(self, 'EmailType', value)
"""
    Set the value of the ListId input for this choreography. ((required, string) The id of the list that the subscriber will be added to.)
"""
def set_ListId(self, value):
InputSet._set_input(self, 'ListId', value)
"""
Set the value of the MergeVars input for this choreography. ((conditional, json) A JSON object of the merge fields for this subscriber. If the subscriber email address is not provided for the EmailAddress input, it must be specified here.)
"""
def set_MergeVars(self, value):
InputSet._set_input(self, 'MergeVars', value)
"""
Set the value of the ReplaceInterests input for this choreography. ((optional, boolean) A flag to determine whether to replace the interest groups with the groups provided or add the provided groups to the member's interest groups. Specify '1' (true) or '0' (false). Defaults to 1.)
"""
def set_ReplaceInterests(self, value):
InputSet._set_input(self, 'ReplaceInterests', value)
"""
Set the value of the SendWelcome input for this choreography. ((optional, boolean) If double_optin is false and this flag is true, a welcome email will be sent. Note that this does not apply when updating records. Specify '1' (true) or '0' (false). Defaults to 0.)
"""
def set_SendWelcome(self, value):
InputSet._set_input(self, 'SendWelcome', value)
"""
Set the value of the UpdateExisting input for this choreography. ((optional, boolean) Indicates that if the email already exists, this request will perform an update instead of an insert. Specify '1' (true) or '0' (false). Defaults to 1.)
"""
def set_UpdateExisting(self, value):
InputSet._set_input(self, 'UpdateExisting', value)
"""
A ResultSet with methods tailored to the values returned by the ListSubscribe choreography.
The ResultSet object is used to retrieve the results of a choreography execution.
"""
class ListSubscribeResultSet(ResultSet):
"""
Retrieve the value for the "Response" output from this choreography execution. (The response from Mailchimp. Returns the string "true" for success and an error description for failures.)
"""
def get_Response(self):
return self._output.get('Response', None)
class ListSubscribeChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListSubscribeResultSet(response, path)
|
[
"miriam@famulus"
] |
miriam@famulus
|
2627c321f4755387617ebb32925ede73ea39af0d
|
5fafdbc2818450646d83550a0be2fe080a6f6b4d
|
/src/wagtail_email_subscription/config.py
|
95a8c23beeebc142315f14ade1e222ff2b7668e9
|
[
"MIT"
] |
permissive
|
robmoorman/wagtail-email-subscription
|
527bbbc9e298a7e712319892040152dc05495d91
|
59b6874ecdf562201e4867a2465ac24a4f4c157d
|
refs/heads/main
| 2023-04-20T19:39:23.611378
| 2021-05-04T13:50:22
| 2021-05-04T13:52:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
from django.apps import AppConfig
class WagtailEmailSubscription(AppConfig):
name = "wagtail_email_subscription"
verbose_name = "Wagtail Email Subscription"
|
[
"maerteijn@gmail.com"
] |
maerteijn@gmail.com
|
a6bec2e6e27892e9e71ddf65399e880ac78e4293
|
f00ad57c98e554470a72511dda7a7bfd160aca19
|
/linear_structure/stack/infix_expression_ui.py
|
5a43f72bf018cb2935ef1500165bb647398f3437
|
[] |
no_license
|
fanzhangg/algorithm-problems
|
d60115210aaaffcd094b34b9db5b46dadf93fe9e
|
43b111ad625f197ba0905abceab9ee4484284e08
|
refs/heads/master
| 2021-07-12T20:24:46.265700
| 2020-07-06T17:58:31
| 2020-07-06T17:58:31
| 171,220,135
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
from infix_expression import *
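# Simple REPL: convert each infix expression to postfix, evaluate it, and print
# the result; an empty input line ends the session.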
def eval_infix_input():
while True:
infix = input(">>>")
if not infix:
print("Session ended. Thanks for using!")
break
try:
postfix = infix_to_postfix(infix)
answer = eval_postfix(postfix)
if int(answer) == answer:
answer = int(answer)
print(answer)
except SyntaxError:
print("Invalid syntax!")
if __name__ == "__main__":
eval_infix_input()
|
[
"vanadiumzhang@gmail.com"
] |
vanadiumzhang@gmail.com
|
1fb40fbffed2a13ac1dd57353e0da19e31b8beb0
|
ea1e7cb5499dd9273352f55788251341a1e20dac
|
/finchcollector_project/settings.py
|
c6e846e1b23931ea29ef615906be73a27c95022f
|
[] |
no_license
|
jpardike/Finch-collector-lab-views
|
68a06d024529fa2ac40fe25f642432469db9b648
|
05a1ab69126dee7ba2b6f9fe7923b2b7d5bf3341
|
refs/heads/master
| 2023-01-30T23:56:47.929203
| 2020-11-03T08:04:08
| 2020-11-03T08:04:08
| 320,217,551
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,243
|
py
|
"""
Django settings for finchcollector_project project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import django_on_heroku
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#846welivxg$0w9r-%ey+o1v$u-x7ey1y(ry2$l(0$!vhx^$3('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'main_app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'finchcollector_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'finchcollector_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'finchcollector',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = '/finches/'
LOGOUT_REDIRECT_URL = '/'
django_on_heroku.settings(locals())
|
[
"jpardike@gmail.com"
] |
jpardike@gmail.com
|
869bef9db0f826ce4bfa32b021923192fba92284
|
95ddd1469d43d3dc1a61e3195b6ece492eb96296
|
/lesson 3/promblem1/problem1.py
|
b8f393530c32dfac73968f7df30f46e4f6e6e1ff
|
[] |
no_license
|
agp123/U3_lesson3
|
1f82f70e4c3c1b51c16309146582e1d7d299760e
|
3ea55353c97f2dc9d21ff2d69559255dc7385eec
|
refs/heads/master
| 2020-04-09T09:14:35.118198
| 2018-12-14T14:41:19
| 2018-12-14T14:41:19
| 160,226,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 74
|
py
|
x1 = -12
y1 = 6
x2 = -4
y2 = 4
slope = (y2 - y1) / (x2 - x1)
print(slope)
|
[
"noreply@github.com"
] |
agp123.noreply@github.com
|
05dfdbb7f1fe32835a1c9b65abe2e372a8d9fad3
|
3eff0ac549dd24fbade02d63c3a541ab88db1e5b
|
/ultimate_python/pythonrefresh.py
|
36dd68b468c98ac1e89ac271b58821745d51e6d4
|
[] |
no_license
|
lisaolson/udemy
|
618410fb548db864b7878de5a2231e8293daa2ad
|
f40f947f6f79d692748f3efba02176fb360f0c4e
|
refs/heads/master
| 2020-03-28T20:14:23.028759
| 2018-09-18T19:45:32
| 2018-09-18T19:45:32
| 149,051,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,845
|
py
|
# VARIABLE
age = 21
name="Lisa"
# print("Hello my name is {} and I am {} years old".format(name, age))
if age < 25:
print('You are young')
def hello(name, age):
return 'Hello {} you are {} years old'.format(name, age)
sentence = hello('Lisa', 21)
print(sentence)
# LIST
dognames = ["Fido", "Sean", "Sally", "Mark"]
dognames.insert(0, "Jane")
print(dognames)
print(dognames[2])
print(len(dognames))
dognames[1] = "Lilo"
print(dognames)
# FOR LOOP
for dog in dognames:
print(dog)
for x in range(1,10):
print(x)
age = 0
# WHILE LOOP
while age < 18:
print(age)
age += 1
numbers = [76, 83, 16, 69, 52, 78, 10, 77, 45, 52, 32, 17, 58, 54, 79, 72, 55, 50, 81, 74, 45, 33, 38, 10, 40, 44, 70, 81, 79, 28, 83, 41, 14, 16, 27, 38, 20, 84, 24, 50, 59, 71, 1, 13, 56, 91, 29, 54, 65, 23, 60, 57, 13, 39, 58, 94, 94, 42, 46, 58, 59, 29, 69, 60, 83, 9, 83, 5, 64, 70, 55, 89, 67, 89, 70, 8, 90, 17, 48, 17, 94, 18, 98, 72, 96, 26, 13, 7, 58, 67, 38, 48, 43, 98, 65, 8, 74, 44, 92]
for number in numbers:
if number > 90:
print(number)
# DICTIONARY
dogs = {"Fido":8, "Sally":17, "Sean":2}
print(dogs["Sally"])
dogs["Sarah"] = 6
print(dogs)
# CLASS
class Dog:
dogInfo = "Hey dogs are cool!"
def bark(self, str): # self as the first parameter then you can add something for the second parameter
print('BARK!' + str)
mydog = Dog()
mydog.bark("bark bark bark bark")
mydog.name = "Fido"
mydog.age = 16
print(mydog.name)
print(mydog.age)
Dog.dogInfo = "Hey there"
print(Dog.dogInfo)
class Horse:
def __init__(self, name, age, furcolor): # you can call 'self' anything as long as it's the same everywhere
self.name = name
self.age = age
self.furcolor = furcolor
def neigh(self):
print("NEIGH!")
myhorse = Horse("Joker", 7, "Brown")
print(myhorse.age)
|
[
"olson.lisa94@gmail.com"
] |
olson.lisa94@gmail.com
|
a83a2545f465ee61b698cfbea8d5b6debcef42ec
|
c05e1c1ee73951856a49ec53612f91f8a656931a
|
/conf.py
|
1fc76619acfd81514b95d7d25835aeca29c9621a
|
[] |
no_license
|
pylgrym/pyscify
|
b3bb33009994236578271c71c21fc734d0f6df9d
|
5e7f722a1a4b40f01546b160083fa6269bff9fe9
|
refs/heads/master
| 2021-01-10T04:47:19.918697
| 2016-02-20T20:50:00
| 2016-02-20T20:50:00
| 52,174,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
# -*- coding: utf-8 -*-
from pysite.conf import PySiteConfiguration
from pysite.compat import PORTABLE_STRING
class HelloworldConf(PySiteConfiguration):
sitename = 'helloworld'
translations = ['en']
sitetitle = PORTABLE_STRING('yes')
def __init__(self,basedir):
super(HelloworldConf, self).__init__(basedir)
siteconf = HelloworldConf
|
[
"jgaardsted@gmail.com"
] |
jgaardsted@gmail.com
|
6171becb207e276db36bc6653ed1900ae7e3400d
|
a5424e248051f1cf2542aa82f040f7173aa8f8d8
|
/ExamenUII.2/ExamenU22/Examen/admin.py
|
6f540ae7584dda48774c6e956324debaa0392758
|
[] |
no_license
|
luisfbarajas/PWeb
|
dd23a744357e5b961b3a1c23756bf60cc6741b08
|
e514d0f5710e33c718de8e27a06321643a271e17
|
refs/heads/master
| 2020-03-28T07:11:33.107934
| 2018-12-06T07:56:17
| 2018-12-06T07:56:17
| 147,886,217
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
from django.contrib import admin
from .models import Personal, Activities
# Register your models here.
admin.site.register(Personal)
admin.site.register(Activities)
|
[
"luisf.barajas.btz@gamil.com"
] |
luisf.barajas.btz@gamil.com
|
09d99350cbbabc222a77496c6c03229d663ceb9a
|
f3438f7b9925a16efa3240ae35b08efb4f567fec
|
/python_tutorial/Session 1/shout_it_out.py
|
34253082f4c81336fcb18f5dc81bc5075b5c4df2
|
[] |
no_license
|
Bing-Violet/Code-Like-a-Girl
|
cf1a0c8bff2f1c9f9092e89a6d50c1bf7ec7db8d
|
c72aa2aed9ca8ed2a9d20172a2a2c681f544c8ab
|
refs/heads/master
| 2021-03-15T10:41:53.185751
| 2020-03-12T13:51:46
| 2020-03-12T13:51:46
| 246,844,560
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 82
|
py
|
sentence = "I'm hungry!"
sentence_shout = sentence.upper()
print(sentence_shout)
|
[
"Bing_Violet@hotmail.com"
] |
Bing_Violet@hotmail.com
|
f16f18958e39635b2ec69e9ef800bff4f89db0b2
|
b05761d771bb5a85d39d370c649567c1ff3eb089
|
/venv/lib/python3.10/site-packages/poetry/core/_vendor/tomlkit/api.py
|
23658477769d0ea33af12aa07d3fc063cfaeaef7
|
[] |
no_license
|
JawshyJ/Coding_Practice
|
88c49cab955eab04609ec1003b6b8c20f103fc06
|
eb6b229d41aa49b1545af2120e6bee8e982adb41
|
refs/heads/master
| 2023-02-19T10:18:04.818542
| 2023-02-06T21:22:58
| 2023-02-06T21:22:58
| 247,788,631
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 96
|
py
|
/home/runner/.cache/pip/pool/3c/67/92/c18b0ffb5593ff972c09bb41120b8c23e3d192f2ff675748efe3028432
|
[
"37465112+JawshyJ@users.noreply.github.com"
] |
37465112+JawshyJ@users.noreply.github.com
|
cf3c79f12e13974f1a2fc914f299f7f6b79b0301
|
49692b22e38afe5fc6cf7144cd830e11a9a43d47
|
/cse101/asgn4/random_generator.py
|
7d6633063b576641eb97f343be08a0df60dec5bf
|
[] |
no_license
|
juliofuentesUI/cse
|
1b0080cabe8301c3149d9b0ab498f17a7ca7a7e8
|
4e86df10ee9c23bdfeeafb61106d664aa0d136ff
|
refs/heads/main
| 2023-08-21T19:27:03.697081
| 2021-10-04T07:09:44
| 2021-10-04T07:09:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
import random
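# Pool every name after the first token on each line of the movie list, then
# write 10 random pairs to random.txt.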
actors = []
with open("cleaned_movielist.txt", 'r') as movie_list:
for line, cast in enumerate(movie_list.readlines()):
actor_list=cast.split()
for i in range(1,len(actor_list)):
actors.append(actor_list[i])
with open("random.txt",'w') as output:
for i in range(0,10):
rand1=random.randint(0,len(actors)-1)
rand2=random.randint(0,len(actors)-1)
output.write(actors[rand1]+" "+ actors[rand2]+"\n")
|
[
"zliu259@ussc.edu"
] |
zliu259@ussc.edu
|
aedb32df31a1c5530d48f12e8a44775169b392d2
|
9d9e6e909e3f94ca62eef648c9e0b43473fa132e
|
/anagrams.py
|
495d404764901a8b900808bf62c07565468dba8e
|
[] |
no_license
|
Adi1729/Coding_100
|
a73fe21344c5da42cd72a829018fdf593299e4b9
|
61bd8838b1c989b0199d486f0a7269f8386ae924
|
refs/heads/master
| 2021-06-29T04:28:05.570383
| 2020-08-28T07:30:48
| 2020-08-28T07:30:48
| 142,202,161
| 0
| 0
| null | 2018-08-01T08:40:46
| 2018-07-24T19:10:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,107
|
py
|
import collections

s= 'cbaebabacd'
a = 'abc'
s="ababababab"
a="aab"
a ='acb'
q= [i for i in a]
q.sort()
a = ''.join(q)
a_orig =a
b= 'b'
l=[]
i=0
i=1
pcounter = collections.Counter(a)
scounter = collections.Counter(s[:len(a)])
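# Brute force: rebuild the window Counter at every position and compare it with the pattern's Counter.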
for i in range(0,len(s)-len(a)+1):
scounter = collections.Counter(s[i:len(a)+i])
if pcounter==scounter:
l.append(i)
c='a'
a_orig ='abc'
a=a_orig
p=a
#Method 1 : Using Counters
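# Sliding window of width len(p): add the incoming character to the window Counter,
# compare against the pattern Counter, then drop the character that falls out on the left.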
result = []
pcounter = collections.Counter(p)
scounter = collections.Counter(s[:len(p) - 1])
begin = 0
for i in range(len(p) - 1, len(s)) :
scounter[s[i]] += 1
if scounter == pcounter :
print(scounter)
result.append(begin)
scounter[s[begin]] -= 1
if scounter[s[begin]] == 0 :
del(scounter[s[begin]])
begin += 1
print(result)  # 'return' is only valid inside a function; print the match indices instead
# Group-anagrams example:
# Input: ["eat", "tea", "tan", "ate", "nat", "bat"]
# Output: [["ate","eat","tea"], ["nat","tan"], ["bat"]]
a='ate'
l = [i for i in a]
l.sort()
q = ''.join(l)
|
[
"adiyadav.1729@gmail.com"
] |
adiyadav.1729@gmail.com
|
eb9232dcd2be9ba8cbd5bd42e4ba39ef045ea84e
|
babaa02f2c866c20d9753193a7c3193c149fd09f
|
/12month_report/report/__init__.py
|
1982030fa17f97418fa32dfb22dc26a17de0fb2f
|
[] |
no_license
|
solbutec/kmart
|
5a03a4743bc8e935669e4aadea732d402dd59867
|
b73316e6c0ce79bbc463d7198f5c9fe181f70531
|
refs/heads/master
| 2022-12-05T19:19:29.393040
| 2020-08-24T13:50:14
| 2020-08-24T13:50:14
| 290,629,983
| 0
| 1
| null | 2020-08-26T23:55:19
| 2020-08-26T23:55:18
| null |
UTF-8
|
Python
| false
| false
| 50
|
py
|
# -*- coding: utf-8 -*-
from . import twelvemonth
|
[
"kyawzinoo@asiamatrixsoftware.com"
] |
kyawzinoo@asiamatrixsoftware.com
|