blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bbb437e84e29a7a57b6e783426de789e1c3f6ad7 | 4cb288c8b3274b9dc7959ca3bc2d5e4b3bf04618 | /python/ccxt/async_support/bitopro.py | 611f663bd07e3270ce070643f4ab02e0aff6649b | [
"MIT"
] | permissive | yijixiuxin/ccxt | 7537f73148472efc912f3997040e373cabf2ae0c | d71cd424b9d19b82f2234d8be55dacf311e01a31 | refs/heads/master | 2022-10-01T18:39:29.356725 | 2022-09-20T21:28:02 | 2022-09-20T21:28:02 | 168,174,277 | 0 | 0 | MIT | 2019-01-29T15:05:10 | 2019-01-29T15:05:10 | null | UTF-8 | Python | false | false | 62,980 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class bitopro(Exchange):
    def describe(self):
        """
        Return the static exchange description: capabilities, endpoint map,
        fee schedule, network aliases and error mappings.

        NOTE(review): this file is auto-generated from the upstream sources,
        so any manual change here will be overwritten on the next build.
        """
        return self.deep_extend(super(bitopro, self).describe(), {
            'id': 'bitopro',
            'name': 'BitoPro',
            'countries': ['TW'],  # Taiwan
            'version': 'v3',
            'rateLimit': 100,
            'pro': True,
            # unified capability flags: True = implemented, False = not offered
            # by the exchange, None = unknown/untested
            'has': {
                'CORS': None,
                'spot': True,
                'margin': False,
                'swap': False,
                'future': False,
                'option': False,
                'cancelAllOrders': True,
                'cancelOrder': True,
                'cancelOrders': True,
                'createOrder': True,
                'editOrder': False,
                'fetchBalance': True,
                'fetchBorrowRate': False,
                'fetchBorrowRateHistories': False,
                'fetchBorrowRateHistory': False,
                'fetchBorrowRates': False,
                'fetchClosedOrders': True,
                'fetchCurrencies': True,
                'fetchDepositAddress': False,
                'fetchDeposits': True,
                'fetchFundingHistory': False,
                'fetchFundingRate': False,
                'fetchFundingRateHistory': False,
                'fetchFundingRates': False,
                'fetchIndexOHLCV': False,
                'fetchMarginMode': False,
                'fetchMarkets': True,
                'fetchMarkOHLCV': False,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenInterestHistory': False,
                'fetchOpenOrders': True,
                'fetchOrder': True,
                'fetchOrderBook': True,
                'fetchOrders': False,
                'fetchOrderTrades': False,
                'fetchPositionMode': False,
                'fetchPositions': False,
                'fetchPremiumIndexOHLCV': False,
                'fetchTicker': True,
                'fetchTickers': True,
                'fetchTime': False,
                'fetchTrades': True,
                'fetchTradingFee': False,
                'fetchTradingFees': True,
                'fetchTransactionFees': False,
                'fetchTransactions': False,
                'fetchTransfer': False,
                'fetchTransfers': False,
                'fetchWithdrawal': True,
                'fetchWithdrawals': True,
                'setLeverage': False,
                'setMarginMode': False,
                'transfer': False,
                'withdraw': True,
            },
            # unified timeframe -> exchange "resolution" parameter(identical here)
            'timeframes': {
                '1m': '1m',
                '5m': '5m',
                '15m': '15m',
                '30m': '30m',
                '1h': '1h',
                '3h': '3h',
                '6h': '6h',
                '12h': '12h',
                '1d': '1d',
                '1w': '1w',
                '1M': '1M',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/158227251-3a92a220-9222-453c-9277-977c6677fe71.jpg',
                'api': {
                    'rest': 'https://api.bitopro.com/v3',
                },
                'www': 'https://www.bitopro.com',
                'doc': [
                    'https://github.com/bitoex/bitopro-offical-api-docs/blob/master/v3-1/rest-1/rest.md',
                ],
                'fees': 'https://www.bitopro.com/fees',
            },
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
            },
            # endpoint templates; {placeholders} are substituted by implicit_api
            'api': {
                'public': {
                    'get': [
                        'order-book/{pair}',
                        'tickers',
                        'tickers/{pair}',
                        'trades/{pair}',
                        'provisioning/currencies',
                        'provisioning/trading-pairs',
                        'provisioning/limitations-and-fees',
                        'trading-history/{pair}',
                    ],
                },
                'private': {
                    'get': [
                        'accounts/balance',
                        'orders/history',
                        'orders/all/{pair}',
                        'orders/trades/{pair}',
                        'orders/{pair}/{orderId}',
                        'wallet/withdraw/{currency}/{serial}',
                        'wallet/withdraw/{currency}/id/{id}',
                        'wallet/depositHistory/{currency}',
                        'wallet/withdrawHistory/{currency}',
                    ],
                    'post': [
                        'orders/{pair}',
                        'orders/batch',
                        'wallet/withdraw/{currency}',
                    ],
                    'put': [
                        'orders',
                    ],
                    'delete': [
                        'orders/{pair}/{id}',
                        'orders/all',
                        'orders/{pair}',
                    ],
                },
            },
            # volume-tiered maker/taker fees; tier thresholds are 30-day TWD volume
            'fees': {
                'trading': {
                    'tierBased': True,
                    'percentage': True,
                    'maker': self.parse_number('0.001'),
                    'taker': self.parse_number('0.002'),
                    'tiers': {
                        'taker': [
                            [self.parse_number('0'), self.parse_number('0.002')],
                            [self.parse_number('3000000'), self.parse_number('0.00194')],
                            [self.parse_number('5000000'), self.parse_number('0.0015')],
                            [self.parse_number('30000000'), self.parse_number('0.0014')],
                            [self.parse_number('300000000'), self.parse_number('0.0013')],
                            [self.parse_number('550000000'), self.parse_number('0.0012')],
                            [self.parse_number('1300000000'), self.parse_number('0.0011')],
                        ],
                        'maker': [
                            [self.parse_number('0'), self.parse_number('0.001')],
                            [self.parse_number('3000000'), self.parse_number('0.00097')],
                            [self.parse_number('5000000'), self.parse_number('0.0007')],
                            [self.parse_number('30000000'), self.parse_number('0.0006')],
                            [self.parse_number('300000000'), self.parse_number('0.0005')],
                            [self.parse_number('550000000'), self.parse_number('0.0004')],
                            [self.parse_number('1300000000'), self.parse_number('0.0003')],
                        ],
                    },
                },
            },
            'options': {
                # unified network code -> exchange network id
                'networks': {
                    'ERC20': 'ERC20',
                    'ETH': 'ERC20',
                    'TRX': 'TRX',
                    'TRC20': 'TRX',
                },
            },
            'precisionMode': TICK_SIZE,
            # error-message -> unified-exception mappings('exact' = full match, 'broad' = substring)
            'exceptions': {
                'exact': {
                    'Unsupported currency.': BadRequest,  # {"error":"Unsupported currency."}
                    'Unsupported order type': BadRequest,  # {"error":"Unsupported order type"}
                    'Invalid body': BadRequest,  # {"error":"Invalid body"}
                    'Invalid Signature': AuthenticationError,  # {"error":"Invalid Signature"}
                    'Address not in whitelist.': BadRequest,
                },
                'broad': {
                    'Invalid amount': InvalidOrder,  # {"error":"Invalid amount 0.0000000001, decimal limit is 8."}
                    'Balance for ': InsufficientFunds,  # {"error":"Balance for eth not enough, only has 0, but ordered 0.01."}
                    'Invalid ': BadRequest,  # {"error":"Invalid price -1."}
                    'Wrong parameter': BadRequest,  # {"error":"Wrong parameter: from"}
                },
            },
            'commonCurrencies': {
            },
        })
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitopro api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.publicGetProvisioningCurrencies(params)
currencies = self.safe_value(response, 'data', [])
#
# {
# "data":[
# {
# "currency":"eth",
# "withdrawFee":"0.007",
# "minWithdraw":"0.001",
# "maxWithdraw":"1000",
# "maxDailyWithdraw":"2000",
# "withdraw":true,
# "deposit":true,
# "depositConfirmation":"12"
# }
# ]
# }
#
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
currencyId = self.safe_string(currency, 'currency')
code = self.safe_currency_code(currencyId)
deposit = self.safe_value(currency, 'deposit')
withdraw = self.safe_value(currency, 'withdraw')
fee = self.safe_number(currency, 'withdrawFee')
withdrawMin = self.safe_number(currency, 'minWithdraw')
withdrawMax = self.safe_number(currency, 'maxWithdraw')
limits = {
'withdraw': {
'min': withdrawMin,
'max': withdrawMax,
},
'amount': {
'min': None,
'max': None,
},
}
result[code] = {
'id': currencyId,
'code': code,
'info': currency,
'type': None,
'name': None,
'active': deposit and withdraw,
'deposit': deposit,
'withdraw': withdraw,
'fee': fee,
'precision': None,
'limits': limits,
}
return result
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitopro
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetProvisioningTradingPairs()
markets = self.safe_value(response, 'data', [])
#
# {
# "data":[
# {
# "pair":"shib_twd",
# "base":"shib",
# "quote":"twd",
# "basePrecision":"8",
# "quotePrecision":"6",
# "minLimitBaseAmount":"100000",
# "maxLimitBaseAmount":"5500000000",
# "minMarketBuyQuoteAmount":"1000",
# "orderOpenLimit":"200",
# "maintain":false,
# "orderBookQuotePrecision":"6",
# "orderBookQuoteScaleLevel":"5"
# }
# ]
# }
#
result = []
for i in range(0, len(markets)):
market = markets[i]
active = not self.safe_value(market, 'maintain')
id = self.safe_string(market, 'pair')
uppercaseId = id.upper()
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
limits = {
'amount': {
'min': self.safe_number(market, 'minLimitBaseAmount'),
'max': self.safe_number(market, 'maxLimitBaseAmount'),
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
'leverage': {
'min': None,
'max': None,
},
}
result.append({
'id': id,
'uppercaseId': uppercaseId,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': base,
'quoteId': quote,
'settle': None,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'derivative': False,
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'limits': limits,
'precision': {
'price': self.parse_number(self.parse_precision(self.safe_string(market, 'quotePrecision'))),
'amount': self.parse_number(self.parse_precision(self.safe_string(market, 'basePrecision'))),
},
'active': active,
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
#
# {
# "pair":"btc_twd",
# "lastPrice":"1182449.00000000",
# "isBuyer":false,
# "priceChange24hr":"-1.99",
# "volume24hr":"9.13089740",
# "high24hr":"1226097.00000000",
# "low24hr":"1181000.00000000"
# }
#
marketId = self.safe_string(ticker, 'pair')
market = self.safe_market(marketId, market)
symbol = self.safe_string(market, 'symbol')
return self.safe_ticker({
'symbol': symbol,
'timestamp': None,
'datetime': None,
'high': self.safe_string(ticker, 'high24hr'),
'low': self.safe_string(ticker, 'low24hr'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': None,
'open': None,
'close': self.safe_string(ticker, 'lastPrice'),
'last': self.safe_string(ticker, 'lastPrice'),
'previousClose': None,
'change': None,
'percentage': self.safe_string(ticker, 'priceChange24hr'),
'average': None,
'baseVolume': self.safe_string(ticker, 'volume24hr'),
'quoteVolume': None,
'info': ticker,
}, market)
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitopro api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = await self.publicGetTickersPair(self.extend(request, params))
ticker = self.safe_value(response, 'data', {})
#
# {
# "data":{
# "pair":"btc_twd",
# "lastPrice":"1182449.00000000",
# "isBuyer":false,
# "priceChange24hr":"-1.99",
# "volume24hr":"9.13089740",
# "high24hr":"1226097.00000000",
# "low24hr":"1181000.00000000"
# }
# }
#
return self.parse_ticker(ticker, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitopro api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTickers()
tickers = self.safe_value(response, 'data', [])
#
# {
# "data":[
# {
# "pair":"xrp_twd",
# "lastPrice":"21.26110000",
# "isBuyer":false,
# "priceChange24hr":"-6.53",
# "volume24hr":"102846.47084802",
# "high24hr":"23.24460000",
# "low24hr":"21.13730000"
# }
# ]
# }
#
return self.parse_tickers(tickers, symbols)
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitopro api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
if limit is not None:
request['limit'] = limit
response = await self.publicGetOrderBookPair(self.extend(request, params))
#
# {
# "bids":[
# {
# "price":"1175271",
# "amount":"0.00022804",
# "count":1,
# "total":"0.00022804"
# }
# ],
# "asks":[
# {
# "price":"1176906",
# "amount":"0.0496",
# "count":1,
# "total":"0.0496"
# }
# ]
# }
#
return self.parse_order_book(response, market['symbol'], None, 'bids', 'asks', 'price', 'amount')
def parse_trade(self, trade, market):
#
# fetchTrades
# {
# "timestamp":1644651458,
# "price":"1180785.00000000",
# "amount":"0.00020000",
# "isBuyer":false
# }
#
# fetchMyTrades
# {
# "tradeId":"5685030251",
# "orderId":"9669168142",
# "price":"11821.8",
# "action":"SELL",
# "baseAmount":"0.01",
# "quoteAmount":"118.218",
# "fee":"0.236436",
# "feeSymbol":"BNB",
# "isTaker":true,
# "timestamp":1644905714862,
# "createdTimestamp":1644905714862
# }
#
id = self.safe_string(trade, 'tradeId')
orderId = self.safe_string(trade, 'orderId')
timestamp = None
if id is None:
timestamp = self.safe_timestamp(trade, 'timestamp')
else:
timestamp = self.safe_integer(trade, 'timestamp')
marketId = self.safe_string(trade, 'pair')
market = self.safe_market(marketId, market)
symbol = self.safe_string(market, 'symbol')
price = self.safe_string(trade, 'price')
type = self.safe_string_lower(trade, 'type')
side = self.safe_string_lower(trade, 'action')
if side is None:
isBuyer = self.safe_value(trade, 'isBuyer')
if isBuyer:
side = 'buy'
else:
side = 'sell'
amount = self.safe_string(trade, 'amount')
if amount is None:
amount = self.safe_string(trade, 'baseAmount')
fee = None
feeAmount = self.safe_string(trade, 'fee')
feeSymbol = self.safe_currency_code(self.safe_string(trade, 'feeSymbol'))
if feeAmount is not None:
fee = {
'cost': feeAmount,
'currency': feeSymbol,
'rate': None,
}
isTaker = self.safe_value(trade, 'isTaker')
takerOrMaker = None
if isTaker is not None:
if isTaker:
takerOrMaker = 'taker'
else:
takerOrMaker = 'maker'
return self.safe_trade({
'id': id,
'info': trade,
'order': orderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'takerOrMaker': takerOrMaker,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': None,
'fee': fee,
}, market)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitopro api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = await self.publicGetTradesPair(self.extend(request, params))
trades = self.safe_value(response, 'data', [])
#
# {
# "data":[
# {
# "timestamp":1644651458,
# "price":"1180785.00000000",
# "amount":"0.00020000",
# "isBuyer":false
# }
# ]
# }
#
return self.parse_trades(trades, market, since, limit)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitopro api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.publicGetProvisioningLimitationsAndFees(params)
tradingFeeRate = self.safe_value(response, 'tradingFeeRate', {})
first = self.safe_value(tradingFeeRate, 0)
#
# {
# "tradingFeeRate":[
# {
# "rank":0,
# "twdVolumeSymbol":"\u003c",
# "twdVolume":"3000000",
# "bitoAmountSymbol":"\u003c",
# "bitoAmount":"7500",
# "makerFee":"0.001",
# "takerFee":"0.002",
# "makerBitoFee":"0.0008",
# "takerBitoFee":"0.0016"
# }
# ],
# "orderFeesAndLimitations":[
# {
# "pair":"BTC/TWD",
# "minimumOrderAmount":"0.0001",
# "minimumOrderAmountBase":"BTC",
# "minimumOrderNumberOfDigits":"0"
# }
# ],
# "restrictionsOfWithdrawalFees":[
# {
# "currency":"TWD",
# "fee":"15",
# "minimumTradingAmount":"100",
# "maximumTradingAmount":"1000000",
# "dailyCumulativeMaximumAmount":"2000000",
# "remarks":"",
# "protocol":""
# }
# ],
# "cryptocurrencyDepositFeeAndConfirmation":[
# {
# "currency":"TWD",
# "generalDepositFees":"0",
# "blockchainConfirmationRequired":""
# }
# ],
# "ttCheckFeesAndLimitationsLevel1":[
# {
# "currency":"TWD",
# "redeemDailyCumulativeMaximumAmount":"",
# "generateMinimumTradingAmount":"",
# "generateMaximumTradingAmount":"",
# "generateDailyCumulativeMaximumAmount":""
# }
# ],
# "ttCheckFeesAndLimitationsLevel2":[
# {
# "currency":"TWD",
# "redeemDailyCumulativeMaximumAmount":"20000000",
# "generateMinimumTradingAmount":"30",
# "generateMaximumTradingAmount":"10000000",
# "generateDailyCumulativeMaximumAmount":"10000000"
# }
# ]
# }
#
result = {}
maker = self.safe_number(first, 'makerFee')
taker = self.safe_number(first, 'takerFee')
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': first,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
self.safe_integer(ohlcv, 'timestamp'),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'volume'),
]
    async def fetch_ohlcv(self, symbol, timeframe='5m', since=None, limit=None, params={}):
        """
        fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
        :param str symbol: unified symbol of the market to fetch OHLCV data for
        :param str timeframe: the length of time each candle represents
        :param int|None since: timestamp in ms of the earliest candle to fetch
        :param int|None limit: the maximum amount of candles to fetch
        :param dict params: extra parameters specific to the bitopro api endpoint
        :returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
        """
        await self.load_markets()
        market = self.market(symbol)
        resolution = self.timeframes[timeframe]
        request = {
            'pair': market['id'],
            'resolution': resolution,
        }
        # we need to have a limit argument because "to" and "from" are required
        if limit is None:
            limit = 500
        timeframeInSeconds = self.parse_timeframe(timeframe)
        # alignedSince is "since" rounded down to a candle boundary(in ms);
        # it seeds the gap-filling in insert_missing_candles below
        alignedSince = None
        if since is None:
            # no start given: request the last `limit` candles ending now
            request['to'] = self.seconds()
            request['from'] = request['to'] - (limit * timeframeInSeconds)
        else:
            timeframeInMilliseconds = timeframeInSeconds * 1000
            alignedSince = int(math.floor(since / timeframeInMilliseconds)) * timeframeInMilliseconds
            # the endpoint expects "from"/"to" in seconds, not milliseconds
            request['from'] = int(math.floor(since / 1000))
            request['to'] = self.sum(request['from'], limit * timeframeInSeconds)
        response = await self.publicGetTradingHistoryPair(self.extend(request, params))
        data = self.safe_value(response, 'data', [])
        #
        # {
        #     "data":[
        #         {
        #             "timestamp":1644581100000,
        #             "open":"1214737",
        #             "high":"1215110",
        #             "low":"1214737",
        #             "close":"1215110",
        #             "volume":"0.08423959"
        #         }
        #     ]
        # }
        #
        # the exchange omits zero-volume candles, so the parsed series is
        # sparse; backfill the gaps before returning
        sparse = self.parse_ohlcvs(data, market, timeframe, since, limit)
        return self.insert_missing_candles(sparse, timeframeInSeconds, alignedSince, limit)
    def insert_missing_candles(self, candles, distance, since, limit):
        """
        Fill gaps in a sparse OHLCV series with synthetic zero-volume candles.

        :param [[int]] candles: sparse, time-ordered candle list
        :param int distance: candle interval in seconds
        :param int|None since: aligned start timestamp in ms, or None to start
            at the first available candle
        :param int limit: maximum number of candles to return
        :returns [[int]]: a gap-free series of at most `limit` candles
        """
        # the exchange doesn't send zero volume candles so we emulate them instead
        # otherwise sending a limit arg leads to unexpected results
        length = len(candles)
        if length == 0:
            return candles
        result = []
        # copyFrom always holds the most recent real-or-synthetic candle,
        # whose close price is used to flat-fill any gap that follows it
        copyFrom = candles[0]
        timestamp = None
        if since is None:
            timestamp = copyFrom[0]
        else:
            timestamp = since
        i = 0
        candleLength = len(candles)
        resultLength = 0
        # walk a regular timestamp grid; consume a real candle when it matches,
        # otherwise synthesize one from the previous close with zero volume
        while((resultLength < limit) and (i < candleLength)):
            candle = candles[i]
            if candle[0] == timestamp:
                result.append(candle)
                i = self.sum(i, 1)
            else:
                # array_concat against [] makes a shallow copy of copyFrom
                copy = self.array_concat([], copyFrom)
                copy[0] = timestamp
                # set open, high, low to close
                copy[1] = copy[4]
                copy[2] = copy[4]
                copy[3] = copy[4]
                copy[5] = self.parse_number('0')
                result.append(copy)
            # advance the grid by one interval(distance is in seconds)
            timestamp = self.sum(timestamp, distance * 1000)
            resultLength = len(result)
            copyFrom = result[resultLength - 1]
        return result
def parse_balance(self, response):
#
# [{
# "currency":"twd",
# "amount":"0",
# "available":"0",
# "stake":"0",
# "tradable":true
# }]
#
result = {
'info': response,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
amount = self.safe_string(balance, 'amount')
available = self.safe_string(balance, 'available')
account = {
'free': available,
'total': amount,
}
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitopro api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetAccountsBalance(params)
balances = self.safe_value(response, 'data', [])
#
# {
# "data":[
# {
# "currency":"twd",
# "amount":"0",
# "available":"0",
# "stake":"0",
# "tradable":true
# }
# ]
# }
#
return self.parse_balance(balances)
def parse_order_status(self, status):
statuses = {
'-1': 'open',
'0': 'open',
'1': 'open',
'2': 'closed',
'3': 'closed',
'4': 'canceled',
}
return self.safe_string(statuses, status, None)
def parse_order(self, order, market=None):
#
# createOrder
# {
# orderId: '2220595581',
# timestamp: '1644896744886',
# action: 'SELL',
# amount: '0.01',
# price: '15000',
# timeInForce: 'GTC'
# }
#
# fetchOrder
# {
# "id":"8777138788",
# "pair":"bnb_twd",
# "price":"16000",
# "avgExecutionPrice":"0",
# "action":"SELL",
# "type":"LIMIT",
# "timestamp":1644899002598,
# "status":4,
# "originalAmount":"0.01",
# "remainingAmount":"0.01",
# "executedAmount":"0",
# "fee":"0",
# "feeSymbol":"twd",
# "bitoFee":"0",
# "total":"0",
# "seq":"BNBTWD548774666",
# "timeInForce":"GTC",
# "createdTimestamp":1644898944074,
# "updatedTimestamp":1644899002598
# }
#
id = self.safe_string_2(order, 'id', 'orderId')
timestamp = self.safe_integer_2(order, 'timestamp', 'createdTimestamp')
side = self.safe_string(order, 'action')
side = side.lower()
amount = self.safe_string_2(order, 'amount', 'originalAmount')
price = self.safe_string(order, 'price')
marketId = self.safe_string(order, 'pair')
market = self.safe_market(marketId, market, '_')
symbol = self.safe_string(market, 'symbol')
orderStatus = self.safe_string(order, 'status')
status = self.parse_order_status(orderStatus)
type = self.safe_string_lower(order, 'type')
average = self.safe_string(order, 'avgExecutionPrice')
filled = self.safe_string(order, 'executedAmount')
remaining = self.safe_string(order, 'remainingAmount')
timeInForce = self.safe_string(order, 'timeInForce')
fee = None
feeAmount = self.safe_string(order, 'fee')
feeSymbol = self.safe_currency_code(self.safe_string(order, 'feeSymbol'))
if Precise.string_gt(feeAmount, '0'):
fee = {
'currency': feeSymbol,
'cost': feeAmount,
}
return self.safe_order({
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': self.safe_integer(order, 'updatedTimestamp'),
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'amount': amount,
'cost': None,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': None,
'info': order,
}, market)
    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        """
        create a trade order
        :param str symbol: unified symbol of the market to create an order in
        :param str type: 'market' or 'limit'
        :param str side: 'buy' or 'sell'
        :param float amount: how much of currency you want to trade in units of base currency
        :param float|None price: the price at which the order is to be fullfilled, in units of the quote currency, ignored in market orders
        :param dict params: extra parameters specific to the bitopro api endpoint
        :returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'type': type,
            'pair': market['id'],
            'action': side,
            'amount': self.amount_to_precision(symbol, amount),
            # the signed request includes a client-side timestamp in ms
            'timestamp': self.milliseconds(),
        }
        orderType = type.upper()
        # LIMIT and STOP_LIMIT both require a price
        if orderType == 'LIMIT':
            request['price'] = self.price_to_precision(symbol, price)
        if orderType == 'STOP_LIMIT':
            request['price'] = self.price_to_precision(symbol, price)
            # accept either the unified 'triggerPrice' or the legacy 'stopPrice' param
            stopPrice = self.safe_value_2(params, 'triggerPrice', 'stopPrice')
            params = self.omit(params, ['triggerPrice', 'stopPrice'])
            if stopPrice is None:
                raise InvalidOrder(self.id + ' createOrder() requires a stopPrice parameter for ' + orderType + ' orders')
            else:
                request['stopPrice'] = self.price_to_precision(symbol, stopPrice)
            # 'condition' selects the trigger comparison(e.g. >= or <=) and is mandatory
            condition = self.safe_string(params, 'condition')
            if condition is None:
                raise InvalidOrder(self.id + ' createOrder() requires a condition parameter for ' + orderType + ' orders')
            else:
                request['condition'] = condition
        # NOTE(review): the trailing `params` argument looks redundant since
        # params are already merged into the request - confirm against the
        # generated endpoint method signature before removing it
        response = await self.privatePostOrdersPair(self.extend(request, params), params)
        #
        # {
        #     orderId: '2220595581',
        #     timestamp: '1644896744886',
        #     action: 'SELL',
        #     amount: '0.01',
        #     price: '15000',
        #     timeInForce: 'GTC'
        # }
        #
        return self.parse_order(response, market)
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitopro api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires the symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'id': id,
'pair': market['id'],
}
response = await self.privateDeleteOrdersPairId(self.extend(request, params))
#
# {
# "orderId":"8777138788",
# "action":"SELL",
# "timestamp":1644899002465,
# "price":"16000",
# "amount":"0.01"
# }
#
return self.parse_order(response, market)
async def cancel_orders(self, ids, symbol=None, params={}):
"""
cancel multiple orders
:param [str] ids: order ids
:param str symbol: unified market symbol
:param dict params: extra parameters specific to the bitopro api endpoint
:returns dict: an list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
id = market['uppercaseId']
request = {}
request[id] = ids
response = await self.privatePutOrders(self.extend(request, params))
#
# {
# "data":{
# "BNB_TWD":[
# "5236347105",
# "359488711"
# ]
# }
# }
#
return response
    async def cancel_all_orders(self, symbol=None, params={}):
        """
        cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of self symbol are cancelled when symbol is not None
        :param dict params: extra parameters specific to the bitopro api endpoint
        :returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """
        await self.load_markets()
        request = {
            # 'pair': market['id'],  # optional
        }
        # privateDeleteOrdersAll or privateDeleteOrdersPair
        # NOTE(review): self.options is not expected to contain a
        # 'privateDeleteOrdersPair' key, so this effectively always resolves
        # to the default 'privateDeleteOrdersAll' - the lookup key looks like
        # it may have been intended to be a method-selection option name;
        # confirm against the upstream source before changing
        method = self.safe_string(self.options, 'privateDeleteOrdersPair', 'privateDeleteOrdersAll')
        if symbol is not None:
            # a symbol narrows the cancellation to a single pair
            market = self.market(symbol)
            request['pair'] = market['id']
            method = 'privateDeleteOrdersPair'
        response = await getattr(self, method)(self.extend(request, params))
        result = self.safe_value(response, 'data', {})
        #
        # {
        #     "data":{
        #         "BNB_TWD":[
        #             "9515988421",
        #             "4639130027"
        #         ]
        #     }
        # }
        #
        return result
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitopro api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires the symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'pair': market['id'],
}
response = await self.privateGetOrdersPairOrderId(self.extend(request, params))
#
# {
# "id":"8777138788",
# "pair":"bnb_twd",
# "price":"16000",
# "avgExecutionPrice":"0",
# "action":"SELL",
# "type":"LIMIT",
# "timestamp":1644899002598,
# "status":4,
# "originalAmount":"0.01",
# "remainingAmount":"0.01",
# "executedAmount":"0",
# "fee":"0",
# "feeSymbol":"twd",
# "bitoFee":"0",
# "total":"0",
# "seq":"BNBTWD548774666",
# "timeInForce":"GTC",
# "createdTimestamp":1644898944074,
# "updatedTimestamp":1644899002598
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetches information on multiple orders made by the user
:param str symbol: unified market symbol of the market orders were made in
:param int|None since: the earliest time in ms to fetch orders for
:param int|None limit: the maximum number of orde structures to retrieve
:param dict params: extra parameters specific to the bitopro api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires the symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
# 'startTimestamp': 0,
# 'endTimestamp': 0,
# 'statusKind': '',
# 'orderId': '',
}
if since is not None:
request['startTimestamp'] = since
if limit is not None:
request['limit'] = limit
response = await self.privateGetOrdersAllPair(self.extend(request, params), params)
orders = self.safe_value(response, 'data')
if orders is None:
orders = []
#
# {
# "data":[
# {
# "id":"2220595581",
# "pair":"bnb_twd",
# "price":"15000",
# "avgExecutionPrice":"0",
# "action":"SELL",
# "type":"LIMIT",
# "createdTimestamp":1644896744886,
# "updatedTimestamp":1644898706236,
# "status":4,
# "originalAmount":"0.01",
# "remainingAmount":"0.01",
# "executedAmount":"0",
# "fee":"0",
# "feeSymbol":"twd",
# "bitoFee":"0",
# "total":"0",
# "seq":"BNBTWD8540871774",
# "timeInForce":"GTC"
# }
# ]
# }
#
return self.parse_orders(orders, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'statusKind': 'OPEN',
}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetches information on multiple closed orders made by the user
:param str symbol: unified market symbol of the market orders were made in
:param int|None since: the earliest time in ms to fetch orders for
:param int|None limit: the maximum number of orde structures to retrieve
:param dict params: extra parameters specific to the bitopro api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
request = {
'statusKind': 'DONE',
}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitopro api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires the symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = await self.privateGetOrdersTradesPair(self.extend(request, params))
trades = self.safe_value(response, 'data', [])
#
# {
# "data":[
# {
# "tradeId":"5685030251",
# "orderId":"9669168142",
# "price":"11821.8",
# "action":"SELL",
# "baseAmount":"0.01",
# "quoteAmount":"118.218",
# "fee":"0.236436",
# "feeSymbol":"BNB",
# "isTaker":true,
# "timestamp":1644905714862,
# "createdTimestamp":1644905714862
# }
# ]
# }
#
return self.parse_trades(trades, market, since, limit)
def parse_transaction_status(self, status):
states = {
'COMPLETE': 'ok',
'INVALID': 'failed',
'PROCESSING': 'pending',
'WAIT_PROCESS': 'pending',
'FAILED': 'failed',
'EXPIRED': 'failed',
'CANCELLED': 'failed',
'EMAIL_VERIFICATION': 'pending',
'WAIT_CONFIRMATION': 'pending',
}
return self.safe_string(states, status, status)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
# {
# "serial":"20220214X766799",
# "timestamp":"1644833015053",
# "address":"bnb1xml62k5a9dcewgc542fha75fyxdcp0zv8eqfsh",
# "amount":"0.20000000",
# "fee":"0.00000000",
# "total":"0.20000000",
# "status":"COMPLETE",
# "txid":"A3CC4F6828CC752B9F3737F48B5826B9EC2857040CB5141D0CC955F7E53DB6D9",
# "message":"778553959",
# "protocol":"MAIN",
# "id":"2905906537"
# }
#
# fetchWithdrawals or fetchWithdraw
# {
# "serial":"20220215BW14069838",
# "timestamp":"1644907716044",
# "address":"TKrwMaZaGiAvtXCFT41xHuusNcs4LPWS7w",
# "amount":"8.00000000",
# "fee":"2.00000000",
# "total":"10.00000000",
# "status":"COMPLETE",
# "txid":"50bf250c71a582f40cf699fb58bab978437ea9bdf7259ff8072e669aab30c32b",
# "protocol":"TRX",
# "id":"9925310345"
# }
#
# withdraw
# {
# "serial":"20220215BW14069838",
# "currency":"USDT",
# "protocol":"TRX",
# "address":"TKrwMaZaGiAvtXCFT41xHuusNcs4LPWS7w",
# "amount":"8",
# "fee":"2",
# "total":"10"
# }
#
currencyId = self.safe_string(transaction, 'coin')
code = self.safe_currency_code(currencyId, currency)
id = self.safe_string(transaction, 'serial')
txId = self.safe_string(transaction, 'txid')
timestamp = self.safe_integer(transaction, 'timestamp')
amount = self.safe_number(transaction, 'total')
address = self.safe_string(transaction, 'address')
tag = self.safe_string(transaction, 'message')
status = self.safe_string(transaction, 'status')
fee = self.safe_number(transaction, 'fee')
return {
'info': transaction,
'id': id,
'txid': txId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'network': None,
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': None,
'amount': amount,
'currency': code,
'status': self.parse_transaction_status(status),
'updated': None,
'fee': {
'currency': code,
'cost': fee,
'rate': None,
},
}
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitopro api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
if code is None:
raise ArgumentsRequired(self.id + ' fetchDeposits() requires the code argument')
await self.load_markets()
currency = self.safe_currency(code)
request = {
'currency': currency['id'],
# 'endTimestamp': 0,
# 'id': '',
# 'statuses': '', # 'ROCESSING,COMPLETE,INVALID,WAIT_PROCESS,CANCELLED,FAILED'
}
if since is not None:
request['startTimestamp'] = since
if limit is not None:
request['limit'] = limit
response = await self.privateGetWalletDepositHistoryCurrency(self.extend(request, params))
result = self.safe_value(response, 'data', [])
#
# {
# "data":[
# {
# "serial":"20220214X766799",
# "timestamp":"1644833015053",
# "address":"bnb1xml62k5a9dcewgc542fha75fyxdcp0zv8eqfsh",
# "amount":"0.20000000",
# "fee":"0.00000000",
# "total":"0.20000000",
# "status":"COMPLETE",
# "txid":"A3CC4F6828CC752B9F3737F48B5826B9EC2857040CB5141D0CC955F7E53DB6D9",
# "message":"778553959",
# "protocol":"MAIN",
# "id":"2905906537"
# }
# ]
# }
#
return self.parse_transactions(result, currency, since, limit, {'type': 'deposit'})
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitopro api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
if code is None:
raise ArgumentsRequired(self.id + ' fetchWithdrawals() requires the code argument')
await self.load_markets()
currency = self.safe_currency(code)
request = {
'currency': currency['id'],
# 'endTimestamp': 0,
# 'id': '',
# 'statuses': '', # 'PROCESSING,COMPLETE,EXPIRED,INVALID,WAIT_PROCESS,WAIT_CONFIRMATION,EMAIL_VERIFICATION,CANCELLED'
}
if since is not None:
request['startTimestamp'] = since
if limit is not None:
request['limit'] = limit
response = await self.privateGetWalletWithdrawHistoryCurrency(self.extend(request, params))
result = self.safe_value(response, 'data', [])
#
# {
# "data":[
# {
# "serial":"20220215BW14069838",
# "timestamp":"1644907716044",
# "address":"TKrwMaZaGiAvtXCFT41xHuusNcs4LPWS7w",
# "amount":"8.00000000",
# "fee":"2.00000000",
# "total":"10.00000000",
# "status":"COMPLETE",
# "txid":"50bf250c71a582f40cf699fb58bab978437ea9bdf7259ff8072e669aab30c32b",
# "protocol":"TRX",
# "id":"9925310345"
# }
# ]
# }
#
return self.parse_transactions(result, currency, since, limit, {'type': 'withdrawal'})
async def fetch_withdrawal(self, id, code=None, params={}):
"""
fetch data on a currency withdrawal via the withdrawal id
:param str id: withdrawal id
:param str code: unified currency code of the currency withdrawn, default is None
:param dict params: extra parameters specific to the bitopro api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
if code is None:
raise ArgumentsRequired(self.id + ' fetchWithdrawal() requires the code argument')
await self.load_markets()
currency = self.safe_currency(code)
request = {
'serial': id,
'currency': currency['id'],
}
response = await self.privateGetWalletWithdrawCurrencySerial(self.extend(request, params))
result = self.safe_value(response, 'data', {})
#
# {
# "data":{
# "serial":"20220215BW14069838",
# "address":"TKrwMaZaGiAvtXCFT41xHuusNcs4LPWS7w",
# "amount":"8.00000000",
# "fee":"2.00000000",
# "total":"10.00000000",
# "status":"COMPLETE",
# "txid":"50bf250c71a582f40cf699fb58bab978437ea9bdf7259ff8072e669aab30c32b",
# "protocol":"TRX",
# "id":"9925310345",
# "timestamp":"1644907716044"
# }
# }
#
return self.parse_transaction(result, currency)
    async def withdraw(self, code, amount, address, tag=None, params={}):
        """
        make a withdrawal
        :param str code: unified currency code
        :param float amount: the amount to withdraw
        :param str address: the address to withdraw to
        :param str|None tag: memo/message to attach to the withdrawal, if required
        :param dict params: extra parameters specific to the bitopro api endpoint
        :returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
        """
        tag, params = self.handle_withdraw_tag_and_params(tag, params)
        await self.load_markets()
        self.check_address(address)
        currency = self.currency(code)
        request = {
            'currency': currency['id'],
            'amount': self.number_to_string(amount),
            'address': address,
        }
        if 'network' in params:
            # translate the unified network code into the exchange-specific
            # protocol id; 'network' must be stripped from params before the
            # request is sent, hence the omit() below
            networks = self.safe_value(self.options, 'networks', {})
            requestedNetwork = self.safe_string_upper(params, 'network')
            params = self.omit(params, ['network'])
            networkId = self.safe_string(networks, requestedNetwork)
            if networkId is None:
                raise ExchangeError(self.id + ' invalid network ' + requestedNetwork)
            request['protocol'] = networkId
        if tag is not None:
            request['message'] = tag
        response = await self.privatePostWalletWithdrawCurrency(self.extend(request, params))
        result = self.safe_value(response, 'data', {})
        #
        # {
        #     "data":{
        #         "serial":"20220215BW14069838",
        #         "currency":"USDT",
        #         "protocol":"TRX",
        #         "address":"TKrwMaZaGiAvtXCFT41xHuusNcs4LPWS7w",
        #         "amount":"8",
        #         "fee":"2",
        #         "total":"10"
        #     }
        # }
        #
        return self.parse_transaction(result, currency)
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the url, method, body and headers for a request.

        Private endpoints are authenticated with HMAC-SHA384 over a
        base64-encoded payload: the JSON request body for POST/PUT, or a
        JSON nonce document(millisecond timestamp) for GET/DELETE.
        """
        url = '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if headers is None:
            headers = {}
        headers['X-BITOPRO-API'] = 'ccxt'
        if api == 'private':
            self.check_required_credentials()
            if method == 'POST' or method == 'PUT':
                # the signed payload is the request body itself
                body = self.json(params)
                payload = self.string_to_base64(body)
                signature = self.hmac(payload, self.encode(self.secret), hashlib.sha384)
                headers['X-BITOPRO-APIKEY'] = self.apiKey
                headers['X-BITOPRO-PAYLOAD'] = payload
                headers['X-BITOPRO-SIGNATURE'] = signature
            elif method == 'GET' or method == 'DELETE':
                if query:
                    url += '?' + self.urlencode(query)
                # no request body to sign - sign a nonce document instead
                nonce = self.milliseconds()
                rawData = {
                    'nonce': nonce,
                }
                rawData = self.json(rawData)
                payload = self.string_to_base64(rawData)
                signature = self.hmac(payload, self.encode(self.secret), hashlib.sha384)
                headers['X-BITOPRO-APIKEY'] = self.apiKey
                headers['X-BITOPRO-PAYLOAD'] = payload
                headers['X-BITOPRO-SIGNATURE'] = signature
        elif api == 'public' and method == 'GET':
            if query:
                url += '?' + self.urlencode(query)
        url = self.urls['api']['rest'] + url
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to the default error handler
if code >= 200 and code < 300:
return
feedback = self.id + ' ' + body
error = self.safe_string(response, 'error')
self.throw_exactly_matched_exception(self.exceptions['exact'], error, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
raise ExchangeError(feedback) # unknown message
| [
"travis@travis-ci.org"
] | travis@travis-ci.org |
af515b669ae560887ad5dc37d62f08810616b261 | ee0cd5c10058a0fc2e29b27139b89706755ca53e | /html/shell/firefox.sh | 18f27d1faa6545cd843bcc49ea8e1b4821958b20 | [] | no_license | meetann/finalcloudproject | 44ac1e36b27cedfc43f6f24035f8477f876709c9 | d8bce6f4fe18d4155900caf0f63eae737ae25309 | refs/heads/master | 2020-06-16T10:07:53.544074 | 2019-07-06T11:57:10 | 2019-07-06T11:57:10 | 195,534,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | sh | #!/usr/bin/python
import os
# Launch Firefox on the remote host with X11 forwarding back to this machine.
# NOTE(review): the root password is hardcoded and passed via `sshpass -p`,
# which also exposes it in the local process list - prefer key-based auth.
os.system('sshpass -p t ssh -X -o StrictHostKeyChecking=no root@192.168.1.5 firefox')
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
c1ecba608b38e7e151190d9428b136119b3a8902 | 3b9b4049a8e7d38b49e07bb752780b2f1d792851 | /src/third_party/skia/gyp/icu.gyp | 4a985032c26d61b2145ef092b2b838626d4a11de | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-public-domain"
] | permissive | webosce/chromium53 | f8e745e91363586aee9620c609aacf15b3261540 | 9171447efcf0bb393d41d1dc877c7c13c46d8e38 | refs/heads/webosce | 2020-03-26T23:08:14.416858 | 2018-08-23T08:35:17 | 2018-09-20T14:25:18 | 145,513,343 | 0 | 2 | Apache-2.0 | 2019-08-21T22:44:55 | 2018-08-21T05:52:31 | null | UTF-8 | Python | false | false | 3,713 | gyp | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'includes': [
    'common_variables.gypi',
  ],
  # Overridable knobs: 'component' picks static vs shared linkage,
  # 'icu_directory' points at the vendored ICU checkout.
  'variables': {
    'component%': 'static_library',
    'icu_directory': '../third_party/externals/icu'
  },
  # Single target: ICU's "common" library (icuuc), built from source.
  'targets': [
    {
      'target_name': 'icuuc',
      'type': '<(component)',
      'sources': [
        '<!@(python find.py ../third_party/externals/icu/source/common "*.c*")'
      ],
      'defines': [
        'U_COMMON_IMPLEMENTATION',
        'U_HIDE_DATA_SYMBOL',
        'U_USING_ICU_NAMESPACE=0',
        'HAVE_DLOPEN=0',
        'UCONFIG_NO_NON_HTML5_CONVERSION=1',
      ],
      'include_dirs': [ '<(icu_directory)/source/common', ],
      'direct_dependent_settings': {
        'defines': [
          'U_USING_ICU_NAMESPACE=0',
          'U_ENABLE_DYLOAD=0',
        ],
        'include_dirs': [ '<(icu_directory)/source/common', ],
        'conditions': [
          [
            'component=="static_library"', {
              'defines': [
                'U_STATIC_IMPLEMENTATION',
              ],
            }
          ],
        ],
      },
      'cflags': [ '-w' ],
      'cflags_cc': [ '-frtti', ],
      # Per-platform sources and toolchain flags follow.
      'conditions': [
        [
          'component=="static_library"', {
            'defines': [ 'U_STATIC_IMPLEMENTATION', ],
          }
        ],
        [
          'OS == "win"', {
            'sources': [
              '<(icu_directory)/source/stubdata/stubdata.c',
            ],
            'copies': [
              {
                'destination': '<(PRODUCT_DIR)',
                'files': [ '<(icu_directory)/windows/icudt.dll', ],
              },
            ],
            'msvs_disabled_warnings': [4005, 4068, 4244, 4355, 4996, 4267],
            'msvs_settings': {
              'VCCLCompilerTool': {
                'AdditionalOptions': [ '/EHsc', ],
              },
            },
            'configurations': {
              'Debug': {
                'msvs_settings': {
                  'VCCLCompilerTool': {
                    'RuntimeTypeInfo': 'true', # /GR
                  },
                },
              },
              'Release': {
                'msvs_settings': {
                  'VCCLCompilerTool': {
                    'RuntimeTypeInfo': 'true', # /GR
                  },
                },
              },
            },
            'all_dependent_settings': {
              'msvs_settings': {
                'VCLinkerTool': {
                  'AdditionalDependencies': [
                    'advapi32.lib',
                  ],
                },
              },
            },
          }
        ],
        [
          'OS == "win" and skia_clang_build', {
            'msvs_settings': {
              'VCCLCompilerTool': {
                'AdditionalOptions': [
                  # See http://bugs.icu-project.org/trac/ticket/11122
                  '-Wno-inline-new-delete',
                  '-Wno-implicit-exception-spec-mismatch',
                ],
              },
            },
          }
        ],
        [
          'skia_os == "android"', {
            'sources': [ '<(icu_directory)/android/icudtl_dat.S', ],
          }
        ],
        [
          'skia_os == "linux"', {
            'sources': [ '<(icu_directory)/linux/icudtl_dat.S', ],
          }
        ],
        [
          'skia_os == "mac"', {
            'sources': [ '<(icu_directory)/mac/icudtl_dat.S', ],
            'xcode_settings': {
              'GCC_ENABLE_CPP_RTTI': 'YES', # -frtti
              'WARNING_CFLAGS': [ '-w' ],
            },
          }
        ],
      ], # conditions
    },
  ], # targets
}
| [
"changhyeok.bae@lge.com"
] | changhyeok.bae@lge.com |
3835b22d8900c6b757de48417b42a1a6aa1eda45 | ceabe6221dd70ef6b8e25c14dce2943e8732c453 | /keras06_RMSE.py | c2b71a160e31dcdd92def3c84c4dcc7728728c1d | [] | no_license | seheonpark/Keras | 6a68753ba869becb43f3d46146c73701bcccb676 | cbad985b0e80068e0868228998f888c45c84c81a | refs/heads/master | 2020-09-23T01:16:32.819268 | 2019-12-04T11:54:35 | 2019-12-04T11:54:35 | 225,343,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | from keras.models import Sequential
from keras.layers import Dense
import numpy as np
# Identity regression y = x; the test range (11-20) lies entirely outside the
# training range (1-10), so evaluation measures extrapolation.
x_train = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
y_train = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
x_test = np.array([11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
y_test = np.array([11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
# x_predict = np.array([21, 22, 23, 24, 25])
# Fully-connected regression network: 1 input -> several wide hidden layers -> 1 output.
model = Sequential()
# model.add(Dense(501, input_dim=1, activation='relu'))
model.add(Dense(500, input_shape=(1, ), activation='relu'))
model.add(Dense(497))
model.add(Dense(495))
model.add(Dense(493))
model.add(Dense(491))
model.add(Dense(1))
model.summary()
model.compile(loss='mse', optimizer='adam', metrics=['mse'])  # metrics=['accuracy']
# acc : 1.0 loss : 1.6951153725131007e-07
# acc : 1.0916937576155306e-08 loss : 1.0916937576155306e-08
model.fit(x_train, y_train, epochs=700)
# evaluate() returns [loss, metric...]; both are MSE here
loss, mse = model.evaluate(x_test, y_test)
print("mse : ", mse)
print("loss : ", loss)
y_predict = model.predict(x_test)
print(y_predict)
# RMSE 구하기
from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_predict):
    # Root mean squared error between targets and predictions.
    return np.sqrt(mean_squared_error(y_test, y_predict))
print("RMSE : ", RMSE(y_test, y_predict)) | [
"noreply@github.com"
] | seheonpark.noreply@github.com |
fbfb7242f59b6de918bafe71b564c981ed3c02db | decc60052f0d9e8c84bfae731a3a1e4f86da7659 | /classes/Instrument.py | 48c5ea07e8f97388fcef35fbd833b6e95caa0582 | [] | no_license | dxcv/research | 8370c2baac66b098f61424d6e323233f358239e2 | 153d7633f526a5eefd02c650b1c6264e1f93e73b | refs/heads/master | 2020-09-08T16:02:15.130239 | 2019-11-08T15:50:05 | 2019-11-08T15:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py | import pandas as pd
class Instrument(object):
    """A single tradable instrument backed by a raw price DataFrame."""

    def __init__(self, symbol, df_data, target_freq):
        self.symbol = symbol
        self.raw_data = df_data
        self.date_index = df_data.index
        self.frequency = target_freq
        # Expose every column as an attribute, stripping spaces from the name
        # (e.g. 'Close Price' -> self.ClosePrice).
        # todo: only set attribute for certain data (e.g. Close, Open, High,...)
        for column in df_data:
            setattr(self, column.replace(' ', ''), self.raw_data[column])

    def get_df_view(self, attributes_list):
        """Return a DataFrame assembled from the named column attributes."""
        frame = pd.DataFrame()
        for attr_name in attributes_list:
            column = getattr(self, attr_name)
            column.name = attr_name
            frame = pd.concat([frame, column], axis=1)
        return frame
| [
"28ideas@gmail.com"
] | 28ideas@gmail.com |
ef10e6dd4322d781b7c31c1a350b7397e6a752ac | 4ee1d690aee51b13091cb2397bcad8254da446f1 | /word_select.py | a23ad1608880d4e6f343eecaa86f7e0cae210973 | [] | no_license | xyl576807077/BackgroundRemove | 4a80752e09d6e3791f22e726cd4eef0154ec1216 | c6df04e764b3fd172caf89a90e53e1da62c077a7 | refs/heads/master | 2020-03-09T23:06:35.023744 | 2018-05-16T02:52:52 | 2018-05-16T02:52:52 | 129,051,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,116 | py | import copy
import json
import os
import sys
import numpy as np
from char_process import *
from langconv import Converter
from select_rule import *
from util import *
class WordSelect:
    """Samples character sequences according to per-language frequency tables.

    Language codes used throughout (as produced by hard_encode/classify):
    0 = simplified Chinese, 1 = traditional Chinese, 2 = latin characters,
    3 = symbols.
    """
    def __init__(self, label_path, frequency_path):
        # label_path: JSON dict whose keys are the label characters
        # ('$$' is a placeholder and is dropped);
        # frequency_path: JSON dict mapping character -> remaining count.
        with open(label_path, 'r') as f:
            _ = json.load(f)
        labels = list(_.keys())
        labels.remove('$$')
        self.language_ratio = {}
        for label in labels:
            index = hard_encode(label)
            self.language_ratio[index] = self.language_ratio.get(index, 0) + 1
        # self.freq[i] maps each character of language i to its remaining count
        self.freq = [{}, {}, {}, {}]
        with open(frequency_path, 'r') as f:
            tmp_freq = json.load(f)
        tmp = classify(labels)
        for i in range(len(tmp)):
            words = tmp[i]
            for w in words:
                self.freq[i][w] = tmp_freq[w]
        # NOTE(review): this overwrites the ratios computed above with fixed
        # weights (only language 0 can be drawn) - confirm this is intentional
        self.language_ratio = {0:1028916, 1:0, 2:0, 3:0}
        # empirical sequence-length distribution: length -> weight
        self.len_ratio = {1: 22963, 2: 21997, 3: 73336, 4: 25366,
                          5: 12318, 6: 9778, 7: 7699, 8: 6191,
                          9: 4090, 10: 3356, 11: 2763, 12: 1841,
                          13: 1429, 14: 1052, 15: 900, 16: 700,
                          17: 593, 18: 454, 19: 405, 20: 295,
                          21: 2492}
    def get_language(self):
        # Draw a language id weighted by language_ratio.
        language = random_interval_select(self.language_ratio)
        # self.language_ratio[language] -= 1
        return language
    def update_freq(self, select_words):
        # Decrement the remaining count of each selected character; when a
        # character is about to be exhausted its language ratio shrinks too.
        for word in select_words:
            code = hard_encode(word)
            if self.freq[code][word] == 1:
                self.language_ratio[code] -= 1
            self.freq[code][word] = max(self.freq[code][word] - 1, 0)
    def get_canditate(self):
        # Return, per language, the characters whose remaining count is > 0.
        res = [[], [], [], []]
        for i in range(4):
            words = self.freq[i]
            for key, value in words.items():
                if value != 0:
                    res[i].append(key)
        return res
    def get_seq_len(self):
        # Draw a sequence length weighted by len_ratio.
        length = random_interval_select(self.len_ratio)
        return length
    def get_word_language(self, flag=None):
        """Decide how many characters of each language the next sequence uses.

        Returns a dict mapping language code -> character count.
        """
        # Check whether a mixed-language sequence can still be formed
        chinese = cal_dict_sum(self.freq[0]) + cal_dict_sum(self.freq[1])
        char = cal_dict_sum(self.freq[2])
        symbol = cal_dict_sum(self.freq[3])
        # if flag == None:
        #     language = self.get_language()
        # else:
        #     if flag <= 1028916:
        #         language = 0
        #     elif flag <= 1028916 + 409527:
        #         language = 2
        #     else:
        #         language = 3
        #     assert language != 1
        language = 0
        length = self.get_seq_len()
        # print(language, length)
        # re-draw: a symbol-only sequence of length 1 is not allowed
        while language == 3 and length == 1:
            # print('可能死循环')
            length = self.get_seq_len()
        res = {}
        if language == 0:
            # Chinese: pick whichever script (simplified/traditional) can
            # still supply the requested length, shrinking length if needed.
            simple = cal_dict_sum(self.freq[0])
            tradition = cal_dict_sum(self.freq[1])
            flag = -1
            if simple > length:
                flag = 0
            elif tradition > length:
                flag = 1
            else:
                if simple > tradition:
                    length = simple
                    flag = 0
                else:
                    length = tradition
                    flag = 1
            assert length != 0
            for i in range(length):
                res[flag] = res.get(flag, 0) + 1
            print('***\n')
        elif language == 2:
            # symbols only: cap the length by the remaining symbol supply
            symbol_num = cal_dict_sum(self.freq[3])
            length = length if length < symbol_num else symbol_num
            for i in range(length):
                res[3] = res.get(3, 0) + 1
        elif language == 3:
            # mixed sequence: pick a language combination that is still feasible
            combination = ['01', '02', '12', '012']
            if chinese == 0:
                combination = ['12']
            if char == 0:
                combination = ['02']
            if symbol == 0:
                combination = ['01']
            print(chinese, char, symbol,combination)
            assert chinese + char + symbol != 0
            index = int(np.random.uniform(0, len(combination)))
            select = combination[index]
            for num in select:
                if int(num) != 0:
                    res[int(num) + 1] = 1
                else:
                    # '0' means Chinese: choose simplified vs traditional
                    if cal_dict_sum(self.freq[0]) > 0 and cal_dict_sum(self.freq[1]) > 0:
                        prob = np.random.uniform(0, 1)
                        if prob > 0.5:
                            res[0] = 1
                        else:
                            res[1] = 1
                    elif cal_dict_sum(self.freq[0]) > 0:
                        res[0] = 1
                    else:
                        res[1] = 1
            if length < len(select):
                length = len(select)
            else:
                # fill the remaining slots with random still-available characters
                difference = length - len(select)
                canditate = self.get_canditate()
                tmp = []
                for l in canditate:
                    tmp.extend(l)
                canditate = tmp
                cp = CharProcess()
                for i in range(difference):
                    index = int(np.random.uniform(0, len(canditate)))
                    code = hard_encode(canditate[index])
                    res[code] = res.get(code, 0) + 1
                    canditate.pop(index)
        return res
    def get_words(self, func=[], flag=None):
        """Draw one character sequence; returns None once candidates run out.

        :param func: optional filters applied to the candidate lists,
            each called as f(lang_word_dic, canditate).
        """
        lang_word_dic = self.get_word_language(flag)
        canditate = self.get_canditate()
        for f in func:
            canditate = f(lang_word_dic, canditate)
        flag = 0
        for i in range(4):
            flag += len(canditate[i])
        if flag == 0:
            return None
        # if len(canditate[3]) == 0:
        #     raise NameError("used all symbols")
        words = ''
        # print(lang_word_dic)
        for key, value in lang_word_dic.items():
            for j in range(value):
                w = random_interval_select(self.freq[key])
                self.update_freq([w])
                words += w
        # persist the remaining frequencies as a checkpoint
        tmp_dict = {}
        tmp_dict.update(self.freq[0])
        tmp_dict.update(self.freq[1])
        tmp_dict.update(self.freq[2])
        tmp_dict.update(self.freq[3])
        with open('./tmp-1.json', 'w') as f:
            json.dump(tmp_dict, f, ensure_ascii=False)
        return words
# wordselect = WordSelect('./data/all_chars_dict.json', './data/5_16_big.json')
# i = 1
# while(1):
# words = wordselect.get_words(func=[only_full, only_half])
# i += 1
# if words == None:
# break
# print(i)
# with open('./5_16_big.txt', 'a') as f:
# f.write(words + '\n')
# with open('./log.txt', 'a') as f:
# tmp = ''
# for key, value in wordselect.language_ratio.items():
# tmp = tmp + str(key) + ':' + str(value) + '\t'
# tmp += '\n'
# f.write(tmp)
| [
"xyl576807077@gmail.com"
] | xyl576807077@gmail.com |
1e1d5ccfdb2caa614c32a09ee07729393624758c | 4c672231bd8b7c23bd5773ef990404cc3146712a | /shipmaster/server/celery.py | 8e24f72855c7e156d14e3e37290140aeabcf16b0 | [
"BSD-3-Clause"
] | permissive | AzureCloudMonk/shipmaster | b0e82f93308ecc829e6f6b3cb3156f11dcfbadd4 | cf596be7ea689c26c4bf47acb67dfd15169d3c46 | refs/heads/master | 2020-11-30T01:51:32.010852 | 2018-03-03T21:47:17 | 2018-03-03T21:47:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | import os
# Must be set before importing Celery/Django settings below.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shipmaster.server.settings')
from celery import Celery
from django.conf import settings
# Celery application for the shipmaster.server project: configuration comes
# from the Django settings module and tasks are auto-discovered per app.
app = Celery('shipmaster.server')
app.config_from_object(settings)
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
| [
"lex@damoti.com"
] | lex@damoti.com |
60a9319cb5e51a72ea6172acb56753d27d908782 | 9aa52f7e5902ea8f4a2810809218d9631446345d | /backend/course/api/v1/serializers.py | 94b376e43c63bba2216fc46a5939adf50d3f51d9 | [] | no_license | crowdbotics-apps/merchandising-plays-21542 | e662e42b8766a2fc24d6e0ab926580de0b580461 | c0298b28a45a617b88984d074af4a69f4ea00700 | refs/heads/master | 2022-12-29T10:31:41.304017 | 2020-10-15T18:39:00 | 2020-10-15T18:39:00 | 304,412,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,622 | py | from rest_framework import serializers
from course.models import (
Recording,
Event,
Subscription,
Course,
Group,
Module,
PaymentMethod,
SubscriptionType,
Enrollment,
Lesson,
Category,
)
class GroupSerializer(serializers.ModelSerializer):
    """Serialize every field of the Group model."""

    class Meta:
        fields = "__all__"
        model = Group
class SubscriptionTypeSerializer(serializers.ModelSerializer):
    """Serialize every field of the SubscriptionType model."""

    class Meta:
        fields = "__all__"
        model = SubscriptionType
class RecordingSerializer(serializers.ModelSerializer):
    """Serialize every field of the Recording model."""

    class Meta:
        fields = "__all__"
        model = Recording
class CategorySerializer(serializers.ModelSerializer):
    """Serialize every field of the Category model."""

    class Meta:
        fields = "__all__"
        model = Category
class EventSerializer(serializers.ModelSerializer):
    """Serialize every field of the Event model."""

    class Meta:
        fields = "__all__"
        model = Event
class CourseSerializer(serializers.ModelSerializer):
    """Serialize every field of the Course model."""

    class Meta:
        fields = "__all__"
        model = Course
class ModuleSerializer(serializers.ModelSerializer):
    """Serialize every field of the Module model."""

    class Meta:
        fields = "__all__"
        model = Module
class LessonSerializer(serializers.ModelSerializer):
    """Serialize every field of the Lesson model."""

    class Meta:
        fields = "__all__"
        model = Lesson
class PaymentMethodSerializer(serializers.ModelSerializer):
    """Serialize every field of the PaymentMethod model."""

    class Meta:
        fields = "__all__"
        model = PaymentMethod
class EnrollmentSerializer(serializers.ModelSerializer):
    """Serialize every field of the Enrollment model."""

    class Meta:
        fields = "__all__"
        model = Enrollment
class SubscriptionSerializer(serializers.ModelSerializer):
    """Serialize every field of the Subscription model."""

    class Meta:
        fields = "__all__"
        model = Subscription
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
dd4c5f0cf3d049124539bf2e96145945474a60c3 | 389569a591284a2adcdc38046114e7b1038afd94 | /python-script/trax/main.py | 9c8e8741e2a3e128b672f4596ae761a0f61aea50 | [] | no_license | xytysingle/AnnotationTool | b797daf2fd472f602341b16f24fb1ed9b702aef1 | a217d4376ceee739e0d8c43515c403133982e86e | refs/heads/master | 2020-04-11T18:16:10.438919 | 2019-07-31T10:21:18 | 2019-07-31T10:21:18 | 161,992,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,827 | py | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.command import Command
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver import Firefox
from selenium.webdriver.support.events import EventFiringWebDriver, AbstractEventListener
from selenium.webdriver.common.action_chains import ActionChains
import time
import requests
import json
import os
import math
from urllib import request
def login():
    """Log in to Trax, open Explorer -> Scenes, apply the date filter, then crawl.

    Relies on module-level globals: browser, wait, lastPageScenceId.
    NOTE(review): credentials and the date range are hardcoded below - move
    them to configuration before sharing or committing this script.
    """
    global lastPageScenceId
    getLogin_url = 'https://services.trax-cloud.cn'
    username = wait.until(EC.presence_of_element_located((By.NAME, "username")))
    # username = browser.find_element_by_name("username")
    # submit_next = browser.find_element_by_name("login")
    submit_next = wait.until(EC.presence_of_element_located((By.NAME, "login")))
    username.clear()
    username.send_keys("chenqinghai@swirebev.com")
    time.sleep(1)
    submit_next.click()
    # password_input = browser.find_element_by_name("password")
    # submit_login = browser.find_element_by_name("login")
    password_input = wait.until(EC.presence_of_element_located((By.NAME, "password")))
    submit_login = wait.until(EC.presence_of_element_located((By.NAME, "login")))
    password_input.clear()
    password_input.send_keys("Trax12345")
    time.sleep(1)
    submit_login.click()
    Explorer = wait.until(EC.presence_of_element_located((By.XPATH, "/html/body/ui-view/div/ui-view/div/div/div[1]/div[2]/a")))
    Explorer.click()
    # Explorer = browser.find_element_by_xpath("/html/body/ui-view/div/ui-view/div/div/div[1]/div[2]/a").click()
    # /html/body/ui-view/div/ui-view/div/div/div[1]/div[2]/a
    Scenes = browser.find_element_by_xpath("/html/body/ui-view/div/ui-view/ui-view/div/div[2]/div[2]").click()
    DateRange = wait.until(EC.presence_of_element_located((By.XPATH, "/html/body/ui-view/div/ui-view/ui-view/ui-view/div/div[1]/div/ui-view/div/div/trax-date-picker/div/div"))).click()
    # https://services.trax-cloud.cn/trax-one/api/projects/swirecn/explore/scenes/all/?limit=200&from=2019-02-01&to=2019-02-02&direction=first
    FromDate = wait.until(EC.presence_of_element_located((By.XPATH, "/html/body/ui-view/div/ui-view/ui-view/ui-view/div/div[1]/div/ui-view/div/div/trax-date-picker/div/div[2]/div[1]/input[1]")))
    ToDate = wait.until(EC.presence_of_element_located((By.XPATH, "/html/body/ui-view/div/ui-view/ui-view/ui-view/div/div[1]/div/ui-view/div/div/trax-date-picker/div/div[2]/div[1]/input[2]")))
    # '12 Mar, 2019' '14 Mar, 2019' Mar Feb Jan
    FromDate.clear()
    FromDate.send_keys("13 Mar, 2019")
    ToDate.clear()
    ToDate.send_keys("13 Mar, 2019")
    time.sleep(1)
    Apply_btn = wait.until(EC.presence_of_element_located((By.XPATH, "/html/body/ui-view/div/ui-view/ui-view/ui-view/div/div[1]/div/ui-view/div/div/trax-date-picker/div/div[2]/div[6]/button[2]")))
    Apply_btn.click()
    #
    # page = browser.page_source
    # enter the scene list and resume crawling from the last known scene
    time.sleep(5)
    # getFirstScencesList()
    getNextScencesList(lastPageScenceId)
def saveCookies():
    """Persist the current browser session cookies to cookies.json."""
    session_cookies = browser.get_cookies()
    serialized = json.dumps(session_cookies)
    with open('cookies.json', 'w') as f:
        f.write(serialized)
    print(session_cookies)
# read back the saved cookies
def getCookies():
    """Return a 'name=value; ...' cookie header string built from cookies.json."""
    with open('cookies.json', 'r', encoding='utf-8') as f:
        stored = json.loads(f.read())
    pairs = [item["name"] + "=" + item["value"] for item in stored]
    return '; '.join(pairs)
# read a value out of the browser's localStorage
def getLocalStorage(key):
    """Return localStorage[key]; *key* must already be a quoted JS string literal."""
    print(key)
    return browser.execute_script("return localStorage.getItem({})".format(key))
def getLabelResults(index):
    """Fetch the annotation results for scene *index*, save the JSON and probe images.

    Relies on module-level globals/helpers defined elsewhere in the file:
    browser (via getLocalStorage/getCookies), date_path, mkdir,
    saveResultsByJson, saveimage.
    """
    print('发起请求...')
    base_url = 'https://services.trax-cloud.cn/trax-one/api/projects/swirecn/scene/' + str(index)
    headers = {
        "authentication_token": getLocalStorage("'authentication_token'"),
        "authorization_token": getLocalStorage("'authorization_token'"),
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36",
        "refresh_token": getLocalStorage("'refresh_token'"),
        "cookie": getCookies()
    }
    try:
        rec_response = requests.get(base_url, headers=headers).text
        rec_response = json.loads(rec_response)
        scence_path = date_path + "/{}".format(str(index))
        mkdir(scence_path)
        # saveResults(scence_path + "/{}".format(str(index)), rec_response)
        saveResultsByJson(scence_path + "/{}".format(str(index)), rec_response)
        imagesList = rec_response["probeImages"]
        for img in imagesList:
            # rebuild the original-resolution image url from the s3 path
            img_url = 'https://services.traxretail.com/images/traxus' + img["probe_image_path"].partition('http://traxus.s3.amazonaws.com')[2] + '/original'
            img_name = img["probe_image_path"].split('/')[-1]
            try:
                saveimage(img_url, scence_path + "/{}.jpeg".format(img_name))
            except Exception as e:
                print("图片保存失败:", e)
        print('爬取成功...')
    except Exception:
        # BUGFIX: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed while crawling
        print("爬取失败")
    time.sleep(2)
    # print(rec_response)
def goToNextPage():
    """Advance the scene list to the next page and open its first scene."""
    # span.xp-navigate-description.trax-tst-pagination-paging-summary
    page_location = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'span.xp-navigate-description.trax-tst-pagination-paging-summary')))
    print('page_location:', page_location.text)
    wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'span[title="next"]'))).click()
    # enter the first scene on the new page
    time.sleep(5)
    wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'a[href^="trax-one/swirecn/explore/scene/"]'))).click()
def getNextSence():
    # In the viewer, read the "current / total" scene counter, click the
    # next-scene arrow, and log the newly displayed scene position and index.
    scence_location = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'body > ui-view > div > ui-view > ui-view > div > div.is-subheader.is-viewer-subheader.sp-flex-shrink > span.is-subheader-center > ui-view > div > siblings-navigator > span > span > span.items-list.trax-tst-viewer-serializationText')))
    wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'body > ui-view > div > ui-view > ui-view > div > div.is-subheader.is-viewer-subheader.sp-flex-shrink > span.is-subheader-center > ui-view > div > siblings-navigator > span > span > span.trax-icons.trax-icons-page-back.rotated-to-down-arrow.trax-tst-viewer-next'))).click()
    scence_index = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'body > ui-view > div > ui-view > ui-view > div > div.is-subheader.is-viewer-subheader.sp-flex-shrink > span.is-subheader-left > ui-view > div > span > span:nth-child(4)')))
    print('scence_location:', scence_location.text, 'scence_index:', scence_index.text)
def getFirstScencesList():
    """Request the first page of the scene list and crawl every scene on it.

    Side effects: saves the raw page JSON to disk, updates the module-level
    `pageNumber`/`totalPages` counters, and recurses into getNextScencesList()
    after a full (200-item) page.
    """
    global pageNumber
    global totalPages
    print('发起场景列表请求...')
    base_url = 'https://services.trax-cloud.cn/trax-one/api/projects/swirecn/explore/scenes/all/'
    headers = {
        "authentication_token": getLocalStorage("'authentication_token'"),
        "authorization_token": getLocalStorage("'authorization_token'"),
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36",
        "refresh_token": getLocalStorage("'refresh_token'"),
        "cookie": getCookies()
    }
    request_data = {
        "limit": 200,
        "from": from_date,
        "to": to_date,
        "direction": 'first',
        # "last_known_primary_key": last_known_primary_key
    }
    scencesList_res = requests.get(url=base_url, headers=headers, params=request_data).text
    scencesList_res = json.loads(scencesList_res)
    saveResultsByJson(date_path + '/' + date + '_' + str(pageNumber + 1), scencesList_res)
    print(scencesList_res)
    totalItemsCount = scencesList_res["totalItems"]["total_items"]
    items = scencesList_res["items"]
    print("totalItemsCount:", totalItemsCount, "items:", items)
    pageNumber += 1
    totalPages = math.ceil(int(totalItemsCount) / 200)
    # Iterate over what the server actually returned instead of a hard-coded
    # range(0, 200): the original indexed items[i] unconditionally and raised
    # IndexError whenever a page held fewer than 200 scenes.
    for i, item in enumerate(items):
        index = item["scene_id"]
        print("正在爬取第{}页的第{}条,共{}页,共{}条".format(pageNumber, i+1, totalPages, totalItemsCount))
        try:
            getLabelResults(index)
            # After the last item of a *full* page, fetch the following page.
            if i == 199 and pageNumber <= totalPages:
                getNextScencesList(index)
        except Exception as e:
            print('获取下个场景失败', e)
def getNextScencesList(last_known_primary_key):
    """Request the next scene-list page (keyed by the last scene id crawled),
    persist the raw JSON, and crawl every scene on that page.

    Updates the module-level `pageNumber`/`totalPages` counters and recurses
    after each full (200-item) page until the final page is reached.
    """
    global pageNumber
    global totalPages
    print('发起场景列表请求...')
    base_url = 'https://services.trax-cloud.cn/trax-one/api/projects/swirecn/explore/scenes/all/'
    headers = {
        "authentication_token": getLocalStorage("'authentication_token'"),
        "authorization_token": getLocalStorage("'authorization_token'"),
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36",
        "refresh_token": getLocalStorage("'refresh_token'"),
        "cookie": getCookies()
    }
    request_data = {
        "limit": 200,
        "from": from_date,
        "to": to_date,
        "direction": 'next',
        "last_known_primary_key": last_known_primary_key
    }
    scencesList_res = requests.get(url=base_url, headers=headers, params=request_data).text
    scencesList_res = json.loads(scencesList_res)
    # print(scencesList_res)
    # saveResultsByJson(str(2019), scencesList_res)
    saveResultsByJson(date_path + '/' + date + '_' + str(pageNumber + 1), scencesList_res)
    print(scencesList_res)
    totalItemsCount = scencesList_res["totalItems"]["total_items"]
    items = scencesList_res["items"]
    print("totalItemsCount:", totalItemsCount, "items:", items)
    pageNumber += 1
    totalPages = math.ceil(int(totalItemsCount) / 200)
    # Iterate over the items actually returned; the original hard-coded
    # range(0, 200) and raised IndexError on a short (final) page.
    for i, item in enumerate(items):
        index = item["scene_id"]
        print("正在爬取第{}页的第{}条,共{}页,共{}条".format(pageNumber, i + 1, totalPages, totalItemsCount))
        try:
            getLabelResults(index)
            if i == 199 and pageNumber <= totalPages:
                getNextScencesList(index)
        except Exception as e:
            print('获取下个场景失败', e)
def saveimage(imgUrl, imgPath):
    # Download imgUrl straight to disk at imgPath.
    # NOTE(review): `request` is presumably `urllib.request` imported under that
    # alias at the top of the file — confirm against the import block.
    request.urlretrieve(imgUrl, imgPath)
def saveResults(filename, data):
    """Write an already-serialized string to `<filename>.json` (UTF-8)."""
    out = open("{}.json".format(filename), "w", encoding='utf-8')
    try:
        out.write(data)
    finally:
        out.close()
def saveResultsByJson(filename, data):
    """Serialize `data` as JSON (keeping non-ASCII characters literal) to `<filename>.json`."""
    with open("{}.json".format(filename), 'w', encoding='utf-8') as fh:
        fh.write(json.dumps(data, ensure_ascii=False))
def mkdir(path):
    """Create `path` (and parents) if absent.

    Strips surrounding whitespace and a trailing backslash first.
    Returns True when the directory was created, False when it already existed.
    """
    cleaned = path.strip().rstrip("\\")
    if os.path.exists(cleaned):
        print("{}已存在".format(cleaned))
        return False
    os.makedirs(cleaned)
    print("{}创建成功".format(cleaned))
    return True
if __name__ == "__main__":
    # Crawl configuration: the date window to scrape and where output is stored.
    from_date = '2019-03-13'
    to_date = '2019-03-13'
    date = from_date.replace('-', '')
    date_path = "./scence/{}".format(date)
    # Resume state — assumed to be the last scene id of the previously completed
    # page and the page counter to resume from; TODO confirm against crawl logs.
    lastPageScenceId = 9237427
    pageNumber = 5
    totalPages = 0
    mkdir(date_path)
    # chromeOptions = webdriver.ChromeOptions()
    # chromeOptions.add_argument('--proxy-server=https://210.16.189.230:16816')
    # browser = webdriver.Chrome(chrome_options=chromeOptions)
    browser = webdriver.Chrome()
    wait = WebDriverWait(browser, 10)
    login()
| [
"2463072824@qq.com"
] | 2463072824@qq.com |
e781d00d0e62eba41dfcc5c1dbf741b1d1c2e5d3 | 3b40c1bc2a0616ae2ad1ab94f4b8c68ac43c5cb3 | /AmazonSpider/AmazonSpider/spiders/Amazon_spider.py | bc88f030efa0c3cacf552051d866105abf926021 | [] | no_license | clamli/Amazon-Dataset-API | f0341b54bb0bcbd1ee4d42b94485ff18e7d13a55 | ae68be8a5f311d0f6a11a3330b68b97875a46df1 | refs/heads/master | 2020-03-17T01:39:22.903599 | 2018-05-16T13:33:58 | 2018-05-16T13:33:58 | 133,161,573 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | import scrapy
class AmazonSpider(scrapy.Spider):
    """Spider that yields dataset names and download links from the SNAP
    Amazon product-graph category index page."""
    name = "Amazon"

    def start_requests(self):
        # Single seed page listing the per-category review files.
        start_urls = [
            "http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/"
        ]
        for page_url in start_urls:
            yield scrapy.Request(url=page_url, callback=self.parse)

    def parse(self, response):
        # Pair each link's anchor text with its href from the index table.
        names = response.css("table td a::text").extract()
        links = response.css("table td a::attr(href)").extract()
        for dataset_name, link in zip(names, links):
            yield {
                'dataset': dataset_name,
                'url': link,
            }
| [
"boyizjuer2017@gmail.com"
] | boyizjuer2017@gmail.com |
1c6e1b9c12df767cb7a6b9f2532ec92383cd2c87 | 739324fe968beecf2814792c0a85f6690e56a26a | /Codes/Dalily_Flash/Week_2/23Jan/Python/Program1.py | 3babf00ed107d5e5dfc944e100facdbb79e59e38 | [] | no_license | Kunal17sarpatil/kunal_personal | 76e5cd1002c23dc30c1d7d86d495b72992a4c24d | b1c4f8de8b73e34253743deb3c26e00b2b02ef76 | refs/heads/master | 2023-03-22T03:47:46.132195 | 2020-06-17T10:53:19 | 2020-06-17T10:53:19 | 151,773,565 | 0 | 0 | null | 2021-03-20T04:23:24 | 2018-10-05T20:18:31 | Java | UTF-8 | Python | false | false | 91 | py | for row in range(1,5):
for no in range(0,row):
print(row,end=" ")
print();
| [
"hawkeye@pop-os.localdomain"
] | hawkeye@pop-os.localdomain |
35c3037b8282d6a32e0538752d0f07e27be1b439 | f6dd42c6e7cef402c08bccd92c9a0fd3423eded9 | /ex015.py | 53540053af3f85482fdc757dfc8d7f59c5d0e765 | [] | no_license | ronicson/ronicson | 3e122e6fa8a534abc1799ef765ec85323b71f451 | 74b5738bc7467b7f25451d33e9c16f7a733731ea | refs/heads/master | 2023-07-14T23:24:09.419400 | 2021-08-25T18:17:59 | 2021-08-25T18:17:59 | 390,477,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | km = float(input('Quantos kilometros você percorreu:'))
d = int(input('Quantos dias você permaneceu com o veiculo:'))
p1 = d * 60
p2 = km * 0.15
print('O valor em diárias é de R${} de kms é de R${:.2f} e total de R${:.2f} a pagar.'.format(p1, p2, p1 + p2))
| [
"ronikism@gmail.com"
] | ronikism@gmail.com |
964ebf4b9298853c334657ff5e86b55de08c732c | 33cc5bd288a4f98be57df6ac880d49ab9937f9e7 | /examples.py | 0dcb1b9bbc3670a7a148b9f28110a7b9bfbd95ee | [] | no_license | stefantalpalaru/morelia-pcre | d848d69aab718a27c72ddca6ee5c2a803fd270bc | 814b0b664d3a449889698775e5f1e40217ba77c1 | refs/heads/master | 2021-01-01T17:47:21.614705 | 2015-07-05T17:58:02 | 2015-07-05T17:58:02 | 4,858,206 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | #!/usr/bin/env python
import pcre
from pprint import pprint
# Compile a case-insensitive pattern with one named capture group ("sna")
# and match it against the subject string.
# NOTE: xrange and the bare print statement below make this Python 2 only.
pattern = r'(?<sna>fo{2})b'
subject = 'foobar FoObaz'
options = pcre.PCRE_CASELESS
compiled = pcre.pcre_compile(pattern, options)
extra = pcre.pcre_study(compiled)  # optional study step to speed up repeated matching
result = pcre.pcre_exec(compiled, subject, extra=extra)
# find the first match
print('%d matches:' % result.num_matches)
for i in xrange(result.num_matches):
    print(' "%s"' % repr(result.matches[i]))
print('named substrings:')
pprint(result.named_matches)
# find all the matches
results = pcre.pcre_find_all(compiled, subject, extra=extra)
print '*** find all ***'
for result in results:
    for i in xrange(result.num_matches):
        print(' "%s"' % repr(result.matches[i]))
| [
"stefantalpalaru@yahoo.com"
] | stefantalpalaru@yahoo.com |
5222fa56f1244047dbb221e092b2f2d0ca6f8e32 | e88436ade391f11aa69138056d926a15bd335ab8 | /home/views.py | c80486e77d5f1712c6c643efac5051f938625f28 | [
"Apache-2.0"
] | permissive | Ethan-Jeong/Django | a492a6c4f5af373e18015db560c03707adfb420b | 01268256f782bc7ca36b8f7116380309def97ea5 | refs/heads/master | 2023-06-24T13:39:27.662976 | 2021-07-29T07:58:35 | 2021-07-29T07:58:35 | 384,298,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
def index(request):
    """Render a Korean greeting whose text depends on whether '/home' was requested."""
    if request.path == '/home':
        body = '<h1>여기는 Home 입니다.</h1>'
    else:
        body = '<h1>여기는 master 입니다.</h1>'
    return HttpResponse(body)
def index01(request):
    """Render index.html with a fixed first/second name pair."""
    context = {'first': 'Ethan', 'second': 'Jeong'}
    return render(request, 'index.html', context=context)
def index02(request):
    """Render index.html echoing the 'first' and 'second' GET parameters.

    Raises a KeyError (-> server error) if either parameter is missing,
    matching the original behavior.
    """
    params = request.GET
    context = {'first': params['first'], 'second': params['second']}
    return render(request, 'index.html', context=context)
"jds88guy@gmail.com"
] | jds88guy@gmail.com |
9107cc0a1e028e20c237a6840a6fc50e903497c8 | ca18cd3b72d75e3877bcb8ba1fdccd95b188dc46 | /python_set.py | a03e891b245b87e37a4529f809fedac57138f47a | [] | no_license | ramshree123/ramya | e64490ea6828db1ee952bdbbd35034d0ed487885 | 88df88c198d29dcf50ac699962a21d20fc213b30 | refs/heads/master | 2021-05-16T14:09:42.601497 | 2018-03-19T10:01:17 | 2018-03-19T10:01:17 | 118,075,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | set1={1,2,3,4}
set2={3,4,6,8}
print("set1&set2")
| [
"noreply@github.com"
] | ramshree123.noreply@github.com |
0925941fe477c086703afaa2c02bab0a1f36fd82 | 50c1f7e4a3084ecd0ef72c9b20f8ea218cebe14c | /movie/urls.py | 85d544f939f483dd2237188e0c01883acb0a8856 | [] | no_license | rahulshivan05/Coder | 922240a494207d0fcf1a553d1749eb7c09c6425b | 79340971c4c1ac3123e5a65fc9fb423f87eac972 | refs/heads/main | 2023-02-17T13:50:51.491016 | 2021-01-13T15:55:22 | 2021-01-13T15:55:22 | 329,347,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls.i18n import i18n_patterns
from django.utils.translation import gettext_lazy as _
from .views import *
from .import views
# URL routes for the movie app: the root path maps to the `movie` view.
urlpatterns = [
    path('', views.movie, name='movie'),
    # path('detail', views.detail, name='detail'),
    # path('<str:slug>', views.blogPost, name='blogPost'),
]
"rahulshivan05@gmail.com"
] | rahulshivan05@gmail.com |
2e73afe47864bd9ebaf4f8f37c8ee656dbe4e152 | 17713d586f680821759b0bba8e25046c2fa6a90b | /build/turtlebot3/turtlebot3_description/catkin_generated/pkg.develspace.context.pc.py | 949b141090dd163690ebe5bdd2ab60d94da91ef9 | [] | no_license | Saipriyavk/Delivery-Robot | fb899a8330815b02de118275dc6d7a1fad70bc8e | 9255791e683b32573227a1877b5fb92f000dfca2 | refs/heads/master | 2023-06-14T16:32:25.444170 | 2021-07-19T21:43:13 | 2021-07-19T21:43:13 | 387,573,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "urdf;xacro".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot3_description"
PROJECT_SPACE_DIR = "/home/saipriya/projects_ai_cse/devel"
PROJECT_VERSION = "1.2.1"
| [
"saipriya.vk31@gmail.com"
] | saipriya.vk31@gmail.com |
8e6782093b61600cbe22a17ae9e9302e33c03002 | 22e88f41deec7cbfda7d6094d514e5941adc218e | /Bag3D_package.V5/delete_dtseq.py | f8471fcac22e82060bad0320ed026852162b42c5 | [] | no_license | SPURc-Lab/NGS-D9 | 6a2df9c96b4a029dd3672bd3189163e7baa3d1dc | 3e0da35912edb5ecf3b8c6b1b7acf1bc7a5853b4 | refs/heads/master | 2021-01-01T15:55:31.348720 | 2015-06-03T08:15:14 | 2015-06-03T08:15:14 | 34,987,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,309 | py | #!/usr/bin/python
# Usage: delete_dtseq.py <Original FASTA format file> <redundant contig name file>
# <Original FASTA format file> e.g. Dt_all_seq.fa
# <redundant contig name file> is the redundant contig name file generated from Program 3: get_gene_model.py, e.g. deleted_dtnames.txt
# Python program to filter the multiple FASTA format sequence file with the redundant contigs
import sys
def delete_dtseq():
    """Filter a multi-FASTA file, dropping contigs listed as redundant.

    sys.argv[1]: input FASTA file (e.g. Dt_all_seq.fa)
    sys.argv[2]: redundant contig name list (e.g. deleted_dtnames.txt)
    Output is written to Dt_non-redundant_seq.fa.
    Assumes both files are ordered by the same locus/transcript key — TODO confirm.
    """
    inFile = open(sys.argv[1], 'r')
    dtFile = open(sys.argv[2], 'r')
    outFile = open("Dt_non-redundant_seq.fa", 'w')
    dt_names = []
    # Build a sortable numeric key per redundant contig: locus number plus the
    # transcript fraction, paired with its confidence tag.
    # e.g. "Locus_12_Transcript_3/7_Confidence_0.5" -> [12 + 3/7, "0.5"]
    for line in dtFile.readlines():
        dt_names.append([float(line.strip().split('_Transcript')[0].split('Locus_')[1]) + \
        float(line.strip().split('_')[3].split('/')[0]) / float(line.strip().split('_')[3].split('/')[1]), line.strip().split('Confidence_')[1]])
    dt_names.sort(key=lambda x: x[0])
    #for item in dt_names:
    #    print item
    #sys.exit(0)
    cur_dt_index = 0
    deleting = False   # True while skipping the sequence lines of a redundant contig
    breaking = False   # True once every redundant name has been passed; copy the rest verbatim
    for line in inFile:
        if '>' in line and breaking == False:
            # Advance the redundant-name cursor past keys smaller than this header's key.
            while cur_dt_index < len(dt_names) and dt_names[cur_dt_index][0] < float(line.strip().split('_Transcript')[0].split('Locus_')[1]) + \
            float(line.strip().split('_')[3].split('/')[0]) / float(line.strip().split('_')[3].split('/')[1]):
                cur_dt_index = cur_dt_index + 1
            if cur_dt_index >= len(dt_names):
                outFile.write(line)
                breaking = True
                continue
            elif dt_names[cur_dt_index][0] == float(line.strip().split('_Transcript')[0].split('Locus_')[1]) + \
            float(line.strip().split('_')[3].split('/')[0]) / float(line.strip().split('_')[3].split('/')[1]):# and dt_names[cur_dt_index][1] in line:
                deleting = True
                #print dt_names[cur_dt_index][0]
                #cur_dt_index = cur_dt_index + 1
            else:
                deleting = False
                outFile.write(line)
        elif breaking == True or deleting == False:
            outFile.write(line)
    inFile.close()
    outFile.close()
if __name__=='__main__':
if len(sys.argv) < 2:
print 'Invalid arguments!\n'
sys.exit(0)
delete_dtseq()
print 'I am DONE. Please check the output. :)'
| [
"yaolina@215-244.priv27.nus.edu.sg"
] | yaolina@215-244.priv27.nus.edu.sg |
0c9ea3acd8dfbd941580cf0d8e50eff68889ee0b | 6c56f13050a3a8c1208030bb96e5a9dddeabd584 | /experiment/cpu_predictions_likelyhood_final_adaptation/nupic_output.py | aaaa36eb45b4a56edfe8fc530767497676dfbd8b | [] | no_license | baselm/self-healing-latex | 089dd97dbc818f65e176d977a65a6fea469684b8 | b43d5fa117aa40e9d4cf256f62f7945391d8681e | refs/heads/master | 2020-03-29T12:51:57.235243 | 2018-12-30T11:47:01 | 2018-12-30T11:47:01 | 149,924,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,438 | py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Provides two classes with the same signature for writing data out of NuPIC
models.
(This is a component of the One Hot Gym Anomaly Tutorial.)
"""
import csv
from collections import deque
from abc import ABCMeta, abstractmethod
from nupic.algorithms import anomaly_likelihood
# Try to import matplotlib, but we don't have to.
try:
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.dates import date2num, DateFormatter
except ImportError:
pass
WINDOW = 200
HIGHLIGHT_ALPHA = 0.3
ANOMALY_HIGHLIGHT_COLOR = 'red'
WEEKEND_HIGHLIGHT_COLOR = 'yellow'
ANOMALY_THRESHOLD = 0.75
class NuPICOutput(object):
    """Abstract sink for NuPIC model output rows
    (timestamp, value, prediction, anomaly score)."""

    __metaclass__ = ABCMeta  # Python-2 style ABC declaration

    def __init__(self, name):
        self.name = name
        # Converts raw anomaly scores into anomaly likelihoods over time.
        self.anomalyLikelihoodHelper = anomaly_likelihood.AnomalyLikelihood()

    @abstractmethod
    def write(self, timestamp, value, predicted, anomalyScore):
        pass

    @abstractmethod
    def close(self):
        pass
class NuPICFileOutput(NuPICOutput):
    """NuPICOutput that appends each row (plus computed anomaly likelihood)
    to a CSV file named `<name>_out.csv`."""

    def __init__(self, *args, **kwargs):
        super(NuPICFileOutput, self).__init__(*args, **kwargs)
        self.outputFiles = []
        self.outputWriters = []
        self.lineCount = 0
        headerRow = [
            'timestamp', 'CPU', 'prediction',
            'anomaly_score', 'anomaly_likelihood'
        ]
        outputFileName = "%s_out.csv" % self.name
        print "Preparing to output %s data to %s" % (self.name, outputFileName)
        self.outputFile = open(outputFileName, "w")
        self.outputWriter = csv.writer(self.outputFile)
        self.outputWriter.writerow(headerRow)

    def write(self, timestamp, value, predicted, anomalyScore):
        # Skip rows without a timestamp; otherwise augment with the anomaly
        # likelihood and append one CSV row.
        if timestamp is not None:
            anomalyLikelihood = self.anomalyLikelihoodHelper.anomalyProbability(
                value, anomalyScore, timestamp
            )
            outputRow = [timestamp, value, predicted, anomalyScore, anomalyLikelihood]
            self.outputWriter.writerow(outputRow)
            self.lineCount += 1

    def close(self):
        self.outputFile.close()
        print "Done. Wrote %i data lines to %s." % (self.lineCount, self.name)
def extractWeekendHighlights(dates):
    """Return (start, end, color, alpha) spans covering consecutive
    Saturday/Sunday runs in `dates` (objects with a .weekday() method)."""
    spans = []
    start = None
    for idx, day in enumerate(dates):
        if day.weekday() in (5, 6):
            # Entering (or continuing) a weekend run.
            if start is None:
                start = idx
        elif start is not None:
            # Weekday after a weekend run: close the span at this index.
            spans.append((start, idx, WEEKEND_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA))
            start = None
    if start is not None:
        # Still inside a weekend at the end of the sequence.
        spans.append((start, len(dates) - 1, WEEKEND_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA))
    return spans
def extractAnomalyIndices(anomalyLikelihood):
    """Return (start, end, color, alpha) spans over runs where the anomaly
    likelihood is at or above ANOMALY_THRESHOLD."""
    spans = []
    start = None
    for idx, likelihood in enumerate(anomalyLikelihood):
        if likelihood >= ANOMALY_THRESHOLD:
            if start is None:
                start = idx
        elif start is not None:
            # Dropped below threshold: close the span at this index.
            spans.append((start, idx, ANOMALY_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA))
            start = None
    if start is not None:
        # Still anomalous at the end of the sequence.
        spans.append((start, len(anomalyLikelihood) - 1,
                      ANOMALY_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA))
    return spans
class NuPICPlotOutput(NuPICOutput):
    """NuPICOutput that renders a live matplotlib dashboard: actual/predicted
    values on top, anomaly score and likelihood below, over a sliding window
    of WINDOW points, with weekends and likely anomalies shaded."""

    def __init__(self, *args, **kwargs):
        super(NuPICPlotOutput, self).__init__(*args, **kwargs)
        # Turn matplotlib interactive mode on.
        plt.ion()
        self.dates = []
        self.convertedDates = []
        self.value = []
        self.allValues = []          # full history (not windowed), used for y-limits
        self.predicted = []
        self.anomalyScore = []
        self.anomalyLikelihood = []
        self.actualLine = None
        self.predictedLine = None
        self.anomalyScoreLine = None
        self.anomalyLikelihoodLine = None
        self.linesInitialized = False
        self._chartHighlights = []   # axvspan handles, cleared each write()
        fig = plt.figure(figsize=(16, 10))
        gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
        self._mainGraph = fig.add_subplot(gs[0, 0])
        plt.title(self.name)
        plt.ylabel('KW Energy Consumption')
        plt.xlabel('Date')
        self._anomalyGraph = fig.add_subplot(gs[1])
        plt.ylabel('Percentage')
        plt.xlabel('Date')
        # Maximizes window
        mng = plt.get_current_fig_manager()
        mng.resize(*mng.window.maxsize())
        plt.tight_layout()

    def initializeLines(self, timestamp):
        # Seed all deques with the first timestamp / zeros so every plot line
        # has WINDOW points from the start.
        print "initializing %s" % self.name
        anomalyRange = (0.0, 1.0)
        self.dates = deque([timestamp] * WINDOW, maxlen=WINDOW)
        self.convertedDates = deque(
            [date2num(date) for date in self.dates], maxlen=WINDOW
        )
        self.value = deque([0.0] * WINDOW, maxlen=WINDOW)
        self.predicted = deque([0.0] * WINDOW, maxlen=WINDOW)
        self.anomalyScore = deque([0.0] * WINDOW, maxlen=WINDOW)
        self.anomalyLikelihood = deque([0.0] * WINDOW, maxlen=WINDOW)
        actualPlot, = self._mainGraph.plot(self.dates, self.value)
        self.actualLine = actualPlot
        predictedPlot, = self._mainGraph.plot(self.dates, self.predicted)
        self.predictedLine = predictedPlot
        self._mainGraph.legend(tuple(['actual', 'predicted']), loc=3)
        anomalyScorePlot, = self._anomalyGraph.plot(
            self.dates, self.anomalyScore, 'm'
        )
        anomalyScorePlot.axes.set_ylim(anomalyRange)
        self.anomalyScoreLine = anomalyScorePlot
        anomalyLikelihoodPlot, = self._anomalyGraph.plot(
            self.dates, self.anomalyScore, 'r'
        )
        anomalyLikelihoodPlot.axes.set_ylim(anomalyRange)
        self.anomalyLikelihoodLine = anomalyLikelihoodPlot
        self._anomalyGraph.legend(
            tuple(['anomaly score', 'anomaly likelihood']), loc=3
        )
        dateFormatter = DateFormatter('%m/%d %H:%M')
        self._mainGraph.xaxis.set_major_formatter(dateFormatter)
        self._anomalyGraph.xaxis.set_major_formatter(dateFormatter)
        self._mainGraph.relim()
        self._mainGraph.autoscale_view(True, True, True)
        self.linesInitialized = True

    def highlightChart(self, highlights, chart):
        # Shade each span and keep the handle so it can be removed next write().
        for highlight in highlights:
            # Each highlight contains [start-index, stop-index, color, alpha]
            self._chartHighlights.append(chart.axvspan(
                self.convertedDates[highlight[0]], self.convertedDates[highlight[1]],
                color=highlight[2], alpha=highlight[3]
            ))

    def write(self, timestamp, value, predicted, anomalyScore):
        # We need the first timestamp to initialize the lines at the right X value,
        # so do that check first.
        if not self.linesInitialized:
            self.initializeLines(timestamp)
        anomalyLikelihood = self.anomalyLikelihoodHelper.anomalyProbability(
            value, anomalyScore, timestamp
        )
        self.dates.append(timestamp)
        self.convertedDates.append(date2num(timestamp))
        self.value.append(value)
        self.allValues.append(value)
        self.predicted.append(predicted)
        self.anomalyScore.append(anomalyScore)
        self.anomalyLikelihood.append(anomalyLikelihood)
        # Update main chart data
        self.actualLine.set_xdata(self.convertedDates)
        self.actualLine.set_ydata(self.value)
        self.predictedLine.set_xdata(self.convertedDates)
        self.predictedLine.set_ydata(self.predicted)
        # Update anomaly chart data
        self.anomalyScoreLine.set_xdata(self.convertedDates)
        self.anomalyScoreLine.set_ydata(self.anomalyScore)
        self.anomalyLikelihoodLine.set_xdata(self.convertedDates)
        self.anomalyLikelihoodLine.set_ydata(self.anomalyLikelihood)
        # Remove previous highlighted regions
        for poly in self._chartHighlights:
            poly.remove()
        self._chartHighlights = []
        weekends = extractWeekendHighlights(self.dates)
        anomalies = extractAnomalyIndices(self.anomalyLikelihood)
        # Highlight weekends in main chart
        self.highlightChart(weekends, self._mainGraph)
        # Highlight anomalies in anomaly chart
        self.highlightChart(anomalies, self._anomalyGraph)
        maxValue = max(self.allValues)
        self._mainGraph.relim()
        self._mainGraph.axes.set_ylim(0, maxValue + (maxValue * 0.02))
        self._mainGraph.relim()
        self._mainGraph.autoscale_view(True, scaley=False)
        self._anomalyGraph.relim()
        self._anomalyGraph.autoscale_view(True, True, True)
        plt.draw()

    def close(self):
        # Leave the final figure on screen until the user closes it.
        plt.ioff()
        plt.show()
# Register the concrete implementations as virtual subclasses of the ABC.
NuPICOutput.register(NuPICFileOutput)
NuPICOutput.register(NuPICPlotOutput)
| [
"baz@Basels-MacBook-Pro.local"
] | baz@Basels-MacBook-Pro.local |
13c272c3444b1b202eff2b15a9ed3f9dac8dbc7b | 6f293744f62c2e1f33250ade7cebfabc8bc24eda | /sentry_a/views.py | ec3227d201756bc62aa9a4d53a895d705ff13d47 | [] | no_license | 40huo/sentry_test | 1026552f8240404a0b1d7d0178c86a35a7e63b4b | cd57a666a41ad67a4b0f39eddd794ca5acdb4e06 | refs/heads/master | 2020-12-07T10:01:27.203541 | 2020-01-09T01:56:06 | 2020-01-09T01:56:06 | 232,698,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | from django.shortcuts import HttpResponse
# Create your views here.
def index(request):
    """Trivial view: always returns the plain-text body "hello"."""
    return HttpResponse("hello")
| [
"git@40huo.cn"
] | git@40huo.cn |
2543a1e62c58b091daee385222a1fcbed751cfba | 060c40375aee04f1f68352339ffa24eb74da56ef | /read-n-characters-given-read4-2.py | b25c2b8cc6d2362834bf2664b9b04ed6336dff7d | [] | no_license | cannium/leetcode | be7d9bfb3e2a999eabe6c466a5390005656cec2b | 70f16a872cb203f77eeddb812e734ad1d46df79d | refs/heads/master | 2021-01-23T20:14:47.821782 | 2020-04-12T09:34:20 | 2020-04-12T09:34:20 | 18,369,249 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,709 | py | """
The read4 API is already defined for you.
@param buf, a list of characters
@return an integer
def read4(buf):
# Below is an example of how the read4 API can be called.
file = File("abcdefghijk") # File is "abcdefghijk", initially file pointer (fp) points to 'a'
buf = [' '] * 4 # Create buffer with enough space to store characters
read4(buf) # read4 returns 4. Now buf = ['a','b','c','d'], fp points to 'e'
read4(buf) # read4 returns 4. Now buf = ['e','f','g','h'], fp points to 'i'
read4(buf) # read4 returns 3. Now buf = ['i','j','k',...], fp points to end of file
"""
class Solution(object):
    """read4-based reader supporting multiple read() calls by buffering
    characters fetched past the end of a request."""

    def __init__(self):
        # Leftover characters fetched by read4 but not yet consumed by read().
        self.bf = []
        # Count of valid characters currently held in self.bf.
        self.old_size = 0

    def read(self, buf, n):
        """
        :type buf: Destination buffer (List[str])
        :type n: Number of characters to read (int)
        :rtype: The number of actual characters read (int)
        """
        nn = 0
        while nn < n:
            # First drain any characters left over from a previous call
            # (only possible on the first loop iteration, when nn == 0).
            if self.old_size > 0:
                if self.old_size >= n:
                    # Leftovers alone satisfy the whole request.
                    buf[nn:n] = self.bf[:n-nn]
                    self.bf = self.bf[n:]
                    self.old_size = self.old_size - n
                    return n
                else:
                    buf[nn:nn+self.old_size] = self.bf
                    nn += self.old_size
                    self.bf = []
                    self.old_size = 0
            bf = [' '] * 4
            n4 = read4(bf)
            if nn + n4 > n:
                # read4 overshot: copy what fits, stash the rest for next call.
                buf[nn:n] = bf[:n-nn]
                self.bf = bf[n-nn:]
                self.old_size = nn+n4-n
                return n
            # NOTE(review): assigning the full 4-slot bf resizes the slice when
            # n4 < 4, appending padding spaces past the reported length —
            # consider bf[:n4] here.
            buf[nn:nn+n4] = bf
            nn += n4
            if n4 < 4:
                # Fewer than 4 returned means end of file.
                return nn
        return nn
| [
"can@canx.me"
] | can@canx.me |
9f4bf716ff6b31f433999da81b499aaa0e0761c6 | 724f23eaa94c64b9a72abcb6df90b6ed72114a3c | /day1/var1.py | af7bcd99732d01d19b0b9bd467d099b831f631b3 | [] | no_license | wxwssg/leraning_python | e30f65fa65dd95707a7db9c3966b9514b3b1f880 | 282eae1b10ba6e1e3171e86b427a751918f85eae | refs/heads/master | 2020-04-30T08:05:53.941876 | 2019-08-06T08:47:26 | 2019-08-06T08:47:26 | 176,701,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | # -*- coding:utf-8 -*-
# Author:wizard_wang
name1 = 'wizard'
name2 = name1
print('My name is',name1,name2)
#内存指向 name1 指向被修改 但是值改了但是name2还是指向‘wizard’
name1 = 'old wang'
print(name1,name2)
#变量 奇葩使用 中文做变量 只有python3支持
名字 = '奇葩的中文变量'
print(名字)
#python没有常量 大写表示声明常量
NAME = '声明是常量'
print(NAME) | [
"wangxingwu17@qq.com"
] | wangxingwu17@qq.com |
701836e019bed7dc29f4b99b1ca2d9c0b7724046 | c929fe7a8983f162345ce8a9a5f2bd038c94db65 | /Track2/videowalk/code/utils_videowalk/visualize.py | e01642585e73132e56b1612367e229f950006990 | [] | no_license | YJingyu/UVO_Challenge | 7078cfb3482174badeaa0f708e4b5cb2fde0e299 | 78d1cb6d9299218fae08682e5e081eacbac29e4b | refs/heads/main | 2023-08-29T03:56:34.878106 | 2021-10-29T14:53:49 | 2021-10-29T14:53:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,772 | py | # Video's features
import numpy as np
from sklearn.decomposition import PCA
import cv2
import imageio as io
import visdom
import time
import PIL
import torchvision
import torch
import matplotlib.pyplot as plt
from matplotlib import cm
def pca_feats(ff, K=1, solver='auto', whiten=True, img_normalize=True):
    ## expect ff to be N x C x H x W
    # Project per-pixel C-dim features onto 3*K PCA components so that each
    # group of three components can be rendered as an RGB image.
    N, C, H, W = ff.shape
    pca = PCA(
        n_components=3*K,
        svd_solver=solver,
        whiten=whiten
    )
    # Flatten to (N*H*W, C): one PCA sample per spatial location.
    ff = ff.transpose(1, 2).transpose(2, 3)
    ff = ff.reshape(N*H*W, C).numpy()
    pca_ff = torch.Tensor(pca.fit_transform(ff))
    pca_ff = pca_ff.view(N, H, W, 3*K)
    pca_ff = pca_ff.transpose(3, 2).transpose(2, 1)
    # Split the component dimension into K 3-channel (RGB) images per frame.
    pca_ff = [pca_ff[:, kk:kk+3] for kk in range(0, pca_ff.shape[1], 3)]
    if img_normalize:
        # Min-max normalize each group jointly into [0, 1] for display.
        pca_ff = [(x - x.min()) / (x.max() - x.min()) for x in pca_ff]
    return pca_ff[0] if K == 1 else pca_ff
def make_gif(video, outname='/tmp/test.gif', sz=256):
    # Convert a video tensor in [0, 1] into a stack of sz x sz uint8 frames and
    # write an animated GIF; with outname=None, return the frame stack instead.
    if hasattr(video, 'shape'):
        video = video.cpu()
        if video.shape[0] == 3:
            video = video.transpose(0, 1)  # (C, T, H, W) -> (T, C, H, W)
        video = video.numpy().transpose(0, 2, 3, 1)  # -> (T, H, W, C)
        video = (video*255).astype(np.uint8)
    video = [cv2.resize(vv, (sz, sz)) for vv in video]
    if outname is None:
        return np.stack(video)
    io.mimsave(outname, video, duration = 0.2)
def draw_matches(x1, x2, i1, i2):
    # Cross-checked brute-force matching between two feature maps (x1, x2 are
    # C x (H*W) descriptors), drawn over the two images resized to 400x400.
    # Returns a CHW uint8 image of the match visualization.
    # x1, x2 = f1, f2/
    detach = lambda x: x.detach().cpu().numpy().transpose(1,2,0) * 255
    i1, i2 = detach(i1), detach(i2)
    i1, i2 = cv2.resize(i1, (400, 400)), cv2.resize(i2, (400, 400))
    for check in [True]:
        bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=check)
        # matches = bf.match(x1.permute(0,2,1).view(-1, 128).cpu().detach().numpy(), x2.permute(0,2,1).view(-1, 128).cpu().detach().numpy())
        h = int(x1.shape[-1]**0.5)  # assumes a square h x h spatial grid of descriptors
        matches = bf.match(x1.t().cpu().detach().numpy(), x2.t().cpu().detach().numpy())
        scale = i1.shape[-2] / h
        # Keypoints on a regular h x h grid, centered within each grid cell.
        grid = torch.stack([torch.arange(0, h)[None].repeat(h, 1), torch.arange(0, h)[:, None].repeat(1, h)])
        grid = grid.view(2, -1)
        grid = grid * scale + scale//2
        kps = [cv2.KeyPoint(grid[0][i], grid[1][i], 1) for i in range(grid.shape[-1])]
        matches = sorted(matches, key = lambda x:x.distance)
        # img1 = img2 = np.zeros((40, 40, 3))
        out = cv2.drawMatches(i1.astype(np.uint8), kps, i2.astype(np.uint8), kps,matches[:], None, flags=2).transpose(2,0,1)
    return out
import wandb
class Visualize(object):
    """Thin wrapper around a visdom server connection plus lazy wandb logging."""

    def __init__(self, args):
        self._env_name = args.name
        self.vis = visdom.Visdom(
            port=args.port,
            server='http://%s' % args.server,
            env=self._env_name,
        )
        self.args = args
        self._init = False  # wandb is initialized lazily by wandb_init()

    def wandb_init(self, model):
        # Initialize wandb exactly once and attach gradient/parameter watching.
        if not self._init:
            self._init = True
            wandb.init(project="videowalk", group="release", config=self.args)
            wandb.watch(model)

    def log(self, key_vals):
        return wandb.log(key_vals)

    def nn_patches(self, P, A_k, prefix='', N=10, K=20):
        # Delegate to the module-level nn_patches with this instance's visdom handle.
        nn_patches(self.vis, P, A_k, prefix, N, K)

    def save(self):
        self.vis.save([self._env_name])
def get_stride(im_sz, p_sz, res):
    """Spacing between patch origins so that `res` patches of size `p_sz`
    evenly span an image of size `im_sz` (integer floor division)."""
    span = im_sz - p_sz
    return span // (res - 1)
def nn_patches(vis, P, A_k, prefix='', N=10, K=20):
    # produces nearest neighbor visualization of N patches given an affinity matrix with K channels
    P = P.cpu().detach().numpy()
    # Normalize the patch bank to [0, 1] for display.
    P -= P.min()
    P /= P.max()
    A_k = A_k.cpu().detach().numpy() #.transpose(-1,-2).numpy()
    # assert np.allclose(A_k.sum(-1), 1)
    A = np.sort(A_k, axis=-1)           # affinity values, ascending
    I = np.argsort(-A_k, axis=-1)       # neighbor indices, best first
    vis.text('', opts=dict(width=10000, height=1), win='%s_patch_header' %(prefix))
    # Visualize N randomly chosen query patches with their top-K neighbors.
    for n,i in enumerate(np.random.permutation(P.shape[0])[:N]):
        p = P[i]
        vis.text('', opts=dict(width=10000, height=1), win='%s_patch_header_%s' % (prefix, n))
        # vis.image(p, win='%s_patch_query_%s' % (prefix, n))
        for k in range(I.shape[0]):
            vis.images(P[I[k, i, :K]], nrow=min(I.shape[-1], 20), win='%s_patch_values_%s_%s' % (prefix, n, k))
            vis.bar(A[k, i][::-1][:K], opts=dict(height=150, width=500), win='%s_patch_affinity_%s_%s' % (prefix, n, k))
def compute_flow(corr):
    """Convert a batched affinity matrix into (u, v) displacement fields.

    corr: N x (H*W) x H x W batched affinity; assumes H == W.
    Returns integer tensors u (x-displacement) and v (y-displacement) of the
    argmax correspondence relative to each pixel's own grid coordinate.
    Requires CUDA (rr is allocated on the GPU, as in the original).
    """
    # assume batched affinity, shape N x H * W x W x H
    h = w = int(corr.shape[-1] ** 0.5)
    # x1 -> x2
    corr = corr.transpose(-1, -2).view(*corr.shape[:-1], h, w)
    nnf = corr.argmax(dim=1)
    u = nnf % w  # column of the matched flat index
    # Use floor division for the row index: on modern PyTorch `/` performs
    # true division and returned floats here (the original relied on legacy
    # integer tensor division, per its "rounds automatically" comment).
    v = nnf // h
    rr = torch.arange(u.shape[-1])[None].long().cuda()
    # Subtract each pixel's own coordinate to turn indices into displacements.
    for i in range(u.shape[-1]):
        u[:, i] -= rr
    for i in range(v.shape[-1]):
        v[:, :, i] -= rr
    return u, v
def vis_flow_plt(u, v, x1, x2, A):
    """Quiver-plot the (u, v) flow field over image x1; arrow colors encode
    the negative entropy of each pixel's affinity row. Returns the plt module."""
    flows = torch.stack([u, v], dim=-1).cpu().numpy()
    I, flows = x1.cpu().numpy(), flows[0]
    H, W = flows.shape[:2]
    Ih, Iw, = I.shape[-2:]
    # Arrow origins: centers of the H x W flow-grid cells mapped onto the image.
    mx, my = np.mgrid[0:Ih:Ih/(H+1), 0:Iw:Iw/(W+1)][:, 1:, 1:]
    skip = (slice(None, None, 1), slice(None, None, 1))
    ii = 0
    fig, ax = plt.subplots()
    im = ax.imshow((I.transpose(1,2,0)),)
    # Per-arrow color from softmaxed (negative) entropy of the affinity rows.
    C = cm.jet(torch.nn.functional.softmax((A * A.log()).sum(-1).cpu(), dim=-1))
    ax.quiver(my[skip], mx[skip], flows[...,0][skip], flows[...,1][skip]*-1, C)#, scale=1, scale_units='dots')
    # ax.quiver(mx[skip], my[skip], flows[...,0][skip], flows[...,1][skip])
    return plt
def frame_pair(x, ff, mm, t1, t2, A, AA, xent_loss, viz):
    """Push a suite of debug visualizations for frames t1/t2 of the first
    batch element to the `viz` client: input images, flow field, keypoint
    correspondences, PCA of features, and a per-cell rendering of the
    affinity tinted by the cross-entropy loss."""
    normalize = lambda xx: (xx-xx.min()) / (xx-xx.min()).max()
    spatialize = lambda xx: xx.view(*xx.shape[:-1], int(xx.shape[-1]**0.5), int(xx.shape[-1]**0.5))
    N = AA.shape[-1]
    H = W = int(N**0.5)
    AA = AA.view(-1, H * W, H, W)
    ##############################################
    ## Visualize PCA of Embeddings, Correspondences
    ##############################################
    # import pdb; pdb.set_trace()
    # Drop a singleton patch dimension if present.
    if (len(x.shape) == 6 and x.shape[1] == 1):
        x = x.squeeze(1)
    if len(x.shape) < 6: # Single image input, no patches
        # X here is B x C x T x H x W
        x1, x2 = normalize(x[0, :, t1]), normalize(x[0, :, t2])
        f1, f2 = ff[0, :, t1], ff[0, :, t2]
        ff1 , ff2 = spatialize(f1), spatialize(f2)
        xx = torch.stack([x1, x2]).detach().cpu()
        viz.images(xx, win='imgs')
        # Flow
        u, v = compute_flow(A[0:1])
        flow_plt = vis_flow_plt(u, v, x1, x2, A[0])
        viz.matplot(flow_plt, win='flow_quiver')
        # Keypoint Correspondences
        kp_corr = draw_matches(f1, f2, x1, x2)
        viz.image(kp_corr, win='kpcorr')
        # # PCA VIZ
        pca_ff = pca_feats(torch.stack([ff1,ff2]).detach().cpu())
        pca_ff = make_gif(pca_ff, outname=None)
        viz.images(pca_ff.transpose(0, -1, 1, 2), win='pcafeats', opts=dict(title=f"{t1} {t2}"))
    else: # Patches as input
        # X here is B x N x C x T x H x W
        x1, x2 = x[0, :, :, t1], x[0, :, :, t2]
        m1, m2 = mm[0, :, :, t1], mm[0, :, :, t2]
        pca_ff = pca_feats(torch.cat([m1, m2]).detach().cpu())
        pca_ff = make_gif(pca_ff, outname=None, sz=64).transpose(0, -1, 1, 2)
        # Grids of PCA-colored patches and of the raw patches for both frames.
        pca1 = torchvision.utils.make_grid(torch.Tensor(pca_ff[:N]), nrow=int(N**0.5), padding=1, pad_value=1)
        pca2 = torchvision.utils.make_grid(torch.Tensor(pca_ff[N:]), nrow=int(N**0.5), padding=1, pad_value=1)
        img1 = torchvision.utils.make_grid(normalize(x1)*255, nrow=int(N**0.5), padding=1, pad_value=1)
        img2 = torchvision.utils.make_grid(normalize(x2)*255, nrow=int(N**0.5), padding=1, pad_value=1)
        viz.images(torch.stack([pca1,pca2]), nrow=4, win='pca_viz_combined1')
        viz.images(torch.stack([img1.cpu(),img2.cpu()]), opts=dict(title=f"{t1} {t2}"), nrow=4, win='pca_viz_combined2')
    ##############################################
    # LOSS VIS
    ##############################################
    color = cm.get_cmap('winter')
    xx = normalize(xent_loss[:H*W])
    # Upsample each cell's affinity map to 50x50 for visibility...
    img_grid = [cv2.resize(aa, (50,50), interpolation=cv2.INTER_NEAREST)[None]
                for aa in AA[0, :, :, :, None].cpu().detach().numpy()]
    # ...tint it by its normalized loss value, then renormalize per cell.
    img_grid = [img_grid[_].repeat(3, 0) * np.array(color(xx[_].item()))[:3, None, None] for _ in range(H*W)]
    img_grid = [img_grid[_] / img_grid[_].max() for _ in range(H*W)]
    img_grid = torch.from_numpy(np.array(img_grid))
    img_grid = torchvision.utils.make_grid(img_grid, nrow=H, padding=1, pad_value=1)
    # img_grid = cv2.resize(img_grid.permute(1, 2, 0).cpu().detach().numpy(), (1000, 1000), interpolation=cv2.INTER_NEAREST).transpose(2, 0, 1)
    viz.images(img_grid, win='lossvis')
| [
"dulucas24@gmail.com"
] | dulucas24@gmail.com |
da255a406ffda289b1c78e5f386fd921f2efbcdd | abe02872257b18a9ad6179a057be71fe04825587 | /task_1/svm/SVM.py | 33340e6abb0e1a75b50ef17f52c0822adb0b7b56 | [] | no_license | georgeahill/tdads | 98924b9b28b826a8bbf1e374a2e1cb12db878c99 | dd7b1742452c205aca2f2abb603bd65be3807721 | refs/heads/master | 2023-03-13T09:28:29.215662 | 2021-03-03T22:20:29 | 2021-03-03T22:20:29 | 274,945,760 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,139 | py | # Preprocesses the dataset using PCA, and increasing image contrast
import numpy as np
from sklearn import svm
from sklearn.decomposition import PCA
# The threshold at which a colour value is increased instead of reduced
# These values were calculated using manual testing
CONTRAST_THRESHOLD = 70 # 70
CONTRAST_REDUCTION = 80 # 80
CONTRAST_INCREASE = 70 # 70
def increaseContrast(value):
    """Push an 8-bit pixel value away from the threshold: darken values below
    CONTRAST_THRESHOLD, brighten values at/above it, clamped to [0, 255].

    Bug fix: the original used min(0, ...) / max(255, ...), which always
    returned the clamp bound instead of clamping to it (dark pixels became
    negative, bright pixels were forced to >= 255).
    """
    if value < CONTRAST_THRESHOLD:
        # Darken, but never drop below 0.
        return max(0, value - CONTRAST_REDUCTION)
    else:
        # Brighten, but never exceed 255.
        return min(255, value + CONTRAST_INCREASE)
# Element-wise version applied to whole image arrays below.
VECTORISED_CONTRAST = np.vectorize(increaseContrast)
def fit(x_train, y_train):
    """Train an RBF-kernel SVM on contrast-boosted, PCA-reduced digit images.

    Returns a two-element list [classifier, fitted PCA transformer] so that
    predict() can re-apply the identical projection at inference time.
    """
    # Flatten each 28x28 image to a 784-long vector and boost its contrast.
    flattened = np.asarray(x_train).reshape((len(x_train), 784))
    features = VECTORISED_CONTRAST(flattened)
    # Keep enough principal components to explain 90% of the variance.
    projector = PCA(0.9).fit(features)
    reduced = projector.transform(features)
    model = svm.SVC(kernel="rbf", C=10, gamma=2e-7)
    model.fit(reduced, y_train)
    return [model, projector]
def predict(x_test, fit_return_list):
    """Classify digit images with the [classifier, pca] pair built by fit()."""
    model = fit_return_list[0]
    projector = fit_return_list[1]
    # Apply exactly the same preprocessing pipeline used during training.
    features = VECTORISED_CONTRAST(np.asarray(x_test).reshape((len(x_test), 784)))
    return model.predict(projector.transform(features))
| [
"oliver.little12@gmail.com"
] | oliver.little12@gmail.com |
462ff12ed72a87b6f46032cc0eeb6fd1d11f6baf | af669dbef653dd69474f4c0836582bf14262c80f | /price-test/frame/lib/commonlib/configure/configunit.py | d59369edd113378ff64e2167f6f76406ff180d06 | [] | no_license | siki320/fishtest | 7a3f91639d8d4cee624adc1d4d05563611b435e9 | 7c3f024192e1c48214b53bc45105bdf9e746a013 | refs/heads/master | 2021-01-19T21:58:36.807126 | 2017-04-19T09:56:37 | 2017-04-19T09:56:37 | 88,729,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | #!/usr/bin/env python
# -*- coding: GB18030 -*-
'''
Created on 2012-3-10
@author: tongdangdang
'''
class ConfigUnit(object):
    """A single key/value leaf of a ub conf configuration tree.

    Besides its key and value, each unit keeps a reference to its parent
    node (``father``) and an optional trailing ``note``.  ``level`` starts
    at -1 and is expected to be filled in by the owning container.
    """
    def __init__(self, key, value, father, note=""):
        self.key = key
        self.value = value
        # Depth inside the configuration tree; -1 means "not placed yet".
        self.level = -1
        self.father = father
        self.note = note
    def __str__(self):
        # Printing a unit shows only its value.
        return self.value
    def __getitem__(self, key):
        # Subscripting ignores the key: a leaf always yields its own value.
        return self.value
| [
"lisiqi_i@didichuxing.com"
] | lisiqi_i@didichuxing.com |
88ff3ce0379daf9f75d75ac23c9191cfe9db7fb9 | d61c8fed77acfb0b977425a7cb9bbbb6983bfd17 | /blog/forms.py | cc95e11a08a2770858a0e67065adefc525aaea9a | [] | no_license | soukaina-debug/CodeStorm | cc6fcf9bc9e0209a37c2dc87b985a73c65d4dbe4 | b5a02a4520b05e4a0004c4ce62e474fca1821ac6 | refs/heads/master | 2023-08-10T20:31:18.182705 | 2021-09-29T09:42:30 | 2021-09-29T09:42:30 | 411,614,438 | 1 | 0 | null | 2021-09-29T09:41:40 | 2021-09-29T09:41:39 | null | UTF-8 | Python | false | false | 257 | py | from django import forms
from .models import Doubt,Reply
class DoubtForm(forms.ModelForm):
class Meta:
model = Doubt
fields = ['ask']
class ReplyForm(forms.ModelForm):
class Meta:
model = Reply
fields = ['reply']
| [
"apurvaajmera10@gmail.com"
] | apurvaajmera10@gmail.com |
f2e0abd8107144d4e884b8bd567c77d9c0a60711 | 850f5d34fdf43cd87ddf4028f35f94b0a6214411 | /HandwrittenDigitsClassifierStreamlit.py | 7fccfa0d7adbc25bf2c528177ada7f2a89aa7420 | [
"MIT"
] | permissive | khanfarhan10/TextGeneration | 419d11bf27e9c67580802c6d9a4252acce71a496 | 3f4680e8763a3002bcae98d776137b6dfde2f985 | refs/heads/main | 2023-02-03T16:33:31.373820 | 2020-12-09T17:43:05 | 2020-12-09T17:43:05 | 316,436,503 | 1 | 1 | MIT | 2020-12-05T19:42:06 | 2020-11-27T07:58:42 | Python | UTF-8 | Python | false | false | 3,424 | py | # streamlit run HandwrittenDigitsClassifierStreamlit.py
from streamlit_drawable_canvas import st_canvas
import streamlit as st
import pandas as pd
import numpy as np
import sklearn
import pickle
from sklearn.svm import SVC # import Support Vector Classifier
# Imports PIL module
from PIL import Image
import warnings
warnings.filterwarnings("ignore")
st.title('Handwritten Digit Classifier')
st.subheader("using t-SNE and Streamlit")
st.title('Drawable Canvas First')
# Basic 28*28 frame to get back an image from
# Specify canvas parameters in application
stroke_width = 10
stroke_color = "#000000"
bg_color = "#ffffff"
dirt2 = """
bg_image = st.sidebar.file_uploader("Background image:", type=["png", "jpg"])
drawing_mode = st.sidebar.selectbox(
"Drawing tool:", ("freedraw", "line", "rect", "circle", "transform")
)
realtime_update = st.sidebar.checkbox("Update in realtime", True)
"""
bg_image = False
drawing_mode = "freedraw"
realtime_update = True
# Create a canvas component
canvas_result = st_canvas(
fill_color="rgba(255, 165, 0, 0.3)", # Fixed fill color with some opacity
stroke_width=stroke_width,
stroke_color=stroke_color,
background_color="" if bg_image else bg_color,
background_image=Image.open(bg_image) if bg_image else None,
update_streamlit=realtime_update,
height=200, width=200,
drawing_mode=drawing_mode,
key="canvas",
)
# print(canvas_result.image_data)
data = canvas_result.image_data
def equal_array(a):
    """Return True when every element of the array holds the same value."""
    # Any element differing from the maximum means the array is not constant.
    return bool(np.all(a == a.max()))
@st.cache()
def load_model():
    """Load the pickled SVC digit model from disk (cached by Streamlit)."""
    filename = 'non_linear_svc.sav'
    # NOTE(review): the file handle opened here is never closed explicitly;
    # consider a with-block.
    loaded_model = pickle.load(open(filename, 'rb'))
    return loaded_model
get_preds = False
svc = load_model()
# or (not equal_array(data))
if data is not None:
numpy_img = data
# print(data.max())
img = numpy_img.squeeze().astype(np.uint8)
PIL_image = Image.fromarray(img).convert('RGB').convert('L')
img_reshaped = np.array(PIL_image.resize((28, 28)))
img_reshaped = img_reshaped.flatten()
img_reshaped = img_reshaped.reshape(1, 784)
# print(img_reshaped.shape)
PIL_image.save('Input.png')
img_reshaped = Image.open("Input.png").convert('L')
img_reshaped = np.array(img_reshaped.resize(
(28, 28))).flatten().reshape(1, 784)
print(img_reshaped.shape)
if not equal_array(data):
get_preds = True
# print(sklearn.__version__) # 0.22.1
if get_preds == True:
svc = load_model()
confidence = int(100*np.random.rand())
y_pred = svc.predict(img_reshaped)
print(y_pred)
results = y_pred[0]
st.header("Predicted : " + str(results))
# st.header("Confidence = "+str(confidence)+" % ")
# Add a placeholder
latest_iteration = st.empty()
bar = st.progress(0)
for i in range(confidence):
latest_iteration.text(f'Confidence : {i+1}')
bar.progress(i + 1)
z = """
if canvas_result.image_data is not None:
st.image(canvas_result.image_data)
"""
# print(type(canvas_result.image_data))
# Do something interesting with the image data and paths
# wait till I hit the predict button
y = """
if canvas_result.image_data is not None:
st.image(canvas_result.image_data) # numpy array
if canvas_result.json_data is not None:
st.dataframe(pd.json_normalize(canvas_result.json_data["objects"]))
"""
| [
"njrfarhandasilva10@gmail.com"
] | njrfarhandasilva10@gmail.com |
d93c54dd443bb190b38b1b80e83a4f17d08b5b2d | e8ce8f41f8875b2c76d62a69e2aadd1e2503e4e0 | /Alpaca_Blog_First/Alpaca_Blog_First/urls.py | 1804a2b39b0c81fcd6e3f3693e95a29fa19da504 | [] | no_license | Alpaca-H/Pyweb | 9bdc41c9f10360b54fc5a973a4bd054211b0ef4a | 7e7c6bdde9bf24acb5f7931638e9f734a310ff3b | refs/heads/master | 2020-03-19T18:01:12.586189 | 2018-08-19T13:50:54 | 2018-08-19T13:50:54 | 136,789,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | """Alpaca_Blog_First URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',include('load.urls')),
    # NOTE(review): every entry below mounts load.urls at the same '' prefix
    # with a different namespace; Django resolves patterns top-down, so the
    # first include above appears to answer all requests -- confirm intent.
    path('',include(('load.urls','load'),namespace="index")),
    path('',include(('load.urls','load'),namespace="about")),
    path('',include(('load.urls','load'),namespace="contact")),
    path('',include(('load.urls', 'load'),namespace="full")),
    path('',include(('load.urls', 'load'),namespace="readGo")),
]
| [
"1097690268@qq.com"
] | 1097690268@qq.com |
ef1b5e460b554cf63dd75e50f5f716da3096df96 | 9b483cba8b680c280becd611e136329724b4d4fb | /t_rank.py | 8d07e469f4fd931ed079ca1d0dfc34a2178e6d64 | [] | no_license | dmlicht/FMAssembler | eac29ab82ff81dac196e738b0ba4fe53085c4b2e | 3842390ea3be7cb7d9de1356a0f2a116a965023b | refs/heads/master | 2021-01-01T20:16:26.243439 | 2016-12-18T22:55:18 | 2016-12-18T22:55:18 | 8,988,810 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,362 | py | class TRank(object):
"""Maintains checkpoints of t-ranks for last row in BW matrix
values of characters at checkpoint is 0 if character has not been seen at all"""
def __init__(self, bwt, characters, cp_interval=4):
self.checkpoints = {}
self.characters = characters
self.bwt = bwt
self.cp_interval = cp_interval
#setting all values to nergative 1, because t-rank should be 0 at first occurence
self.char_occurences = { c: -1 for c in characters }
self.create_checkpoints()
def create_checkpoints(self):
"""iterates through last column of BW matrix recording num occurences of
each character seen. Stores current counts in checkpoint rows"""
for i, c in enumerate(self.bwt):
self.char_occurences[c] += 1 #track occurence of letter
if i % self.cp_interval == 0: #at specified interval
self.add_checkpoint(i) #add checkpoint
def add_checkpoint(self, checkpoint_row):
"""saves number of occurences of each character at checkpoint row"""
self.checkpoints[checkpoint_row] = self.char_occurences.copy()
def rank_at_row(self, char, row):
"""returns number of character occurences up to given row.
INCLUDING occurences at given row"""
#save ourselves trouble if the character does not occur at all
if char not in self.characters or row < 0:
return -1
distance_from_prev_cp = row % self.cp_interval
prev_cp_index = row - distance_from_prev_cp
t_rank_at_prev_index = self.checkpoints[prev_cp_index][char]
#previous cp_index + 1 because the checkpoints count occurences that happen on their index
occurences_after_checkpoint = self.count_up(char, prev_cp_index + 1, row + 1)
# print prev_cp_index, row
return t_rank_at_prev_index + occurences_after_checkpoint
def count_up(self, char, from_index, to_index):
"""counts occurences of char between from_index and to_index"""
occurences = 0
# print 'from_index:', from_index
# print 'to_index', to_index
# print len(self.bwt)
for i in xrange(from_index, to_index):
# print char
if self.bwt[i] == char:
occurences += 1
return occurences | [
"Dlichte5@jhu.edu"
] | Dlichte5@jhu.edu |
ad673bc733be8c44e976879669998549baebfa69 | 9180b7c014860820ea28af5876d374c935a8c8d3 | /01.flask/05.static.py | 5ec4c9832c93e58ed58152ba44237f616df0cac0 | [] | no_license | leele91/flask-web | 05d2ce3769b1c0a7196c828acfa3b35072fd7c97 | 7c66205d09cbcc1b044ad31e1ea6a7e882727afb | refs/heads/main | 2023-03-10T21:15:04.997561 | 2021-02-26T06:47:15 | 2021-02-26T06:47:15 | 320,127,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | from flask import Flask, render_template
import os
app = Flask(__name__)
@app.route('/')
def index():
    """Render 05.index.htm, passing the cat image's mtime for cache busting."""
    img_file = os.path.join(app.root_path, 'static/img/cat.jpg')
    mtime =int(os.stat(img_file).st_mtime)
    # Passing mtime makes the browser re-fetch the image whenever the file
    # changes (the same mtime query-string is applied inside the HTML too).
    return render_template('05.index.htm', mtime = mtime)
if __name__ == '__main__':
app.run(debug=True) | [
"wlsduddl013@gmail.com"
] | wlsduddl013@gmail.com |
ae929b0b384dcb6a29d72a3456cc2e90c9ff266c | 55a29a059e19d6d9ef830f36bdc2a94174811379 | /test_db_1.py | 58fbafdc59c6f03a6847aae52998c7fe1893f9e3 | [] | no_license | slyrx/flask_web_app | c321c79d809dcd4e0f28d18a67a7e037c90cbd7a | 8a1eaa414a11a8e7628b412de579cc0092df560d | refs/heads/master | 2020-06-03T20:46:08.859774 | 2020-03-02T02:19:45 | 2020-03-02T02:19:45 | 191,725,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,544 | py | import os
from flask import Flask, render_template, session, redirect, url_for, flash
from flask_script import Manager
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import Required
from flask_sqlalchemy import SQLAlchemy
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config['SECRET_KEY'] = 'hard to guess string'
app.config['SQLALCHEMY_DATABASE_URI'] =\
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
manager = Manager(app)
bootstrap = Bootstrap(app)
moment = Moment(app)
#db = SQLAlchemy(app)
class NameForm(FlaskForm):
    """Single-field form asking the visitor for their name."""
    name = StringField('What is your name?', validators=[Required()])
    submit = SubmitField('Submit')
@app.errorhandler(404)
def page_not_found(e):
    """Serve the custom 404 template for unmatched routes."""
    return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
    """Serve the custom 500 template for unhandled server errors."""
    return render_template('500.html'), 500
@app.route('/', methods=['GET', 'POST'])
def index():
    """Show the name form; on submit, flash if the name changed, remember it
    in the session, and redirect (post/redirect/get pattern)."""
    form = NameForm()
    if form.validate_on_submit():
        old_name = session.get('name')
        if old_name is not None and old_name != form.name.data:
            flash('Looks like you have changed your name!')
        session['name'] = form.name.data
        return redirect(url_for('index'))
    return render_template('index.html', form=form, name=session.get('name'))
if __name__ == '__main__':
#db.create_all()
manager.run()
| [
"slyrx2006@gmail.com"
] | slyrx2006@gmail.com |
caf4b456838e4066cfe9191405c63b482a8eda64 | 036f11eaae82a9c7838580d141375ab3c03f739a | /unsupervised-semantic-audio-embeddings/main.py | 53c20702308eebe08c34ffa43db3737d513dda3c | [] | no_license | silvadirceu/experiments | 8b6f1739a51803f73da89c137d07871505ddf712 | 2390392726a43aa5587e02d8ee2a423cf281463c | refs/heads/master | 2022-02-19T11:18:52.485742 | 2019-09-26T14:43:51 | 2019-09-26T14:43:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,422 | py | from __future__ import division, print_function
import zounds
import argparse
from data import dataset
from deformations import make_pitch_shift, make_time_stretch, additive_noise
from training_data import TripletSampler
from train import Trainer
from network import EmbeddingNetwork
import torch
import numpy as np
import cPickle as pickle
from search import TreeSearch
# resample all audio in our dataset to this rate
samplerate = zounds.SR11025()
# produce a base class for our audio processing graph, which will do some
# basic preprocessing and transcoding of the signal
BaseModel = zounds.resampled(resample_to=samplerate, store_resampled=True)
# the length in samples of the audio segments we'll be creating embeddings for
window_size_samples = 8192
slice_duration = samplerate.frequency * window_size_samples
# segments occurring within ten seconds of our anchor will be considered
# semantically similar
temporal_proximity = zounds.Seconds(10)
# a collection of the audio deformations we'll use during training. Temporal
# proximity is included implicitly
deformations = [
make_time_stretch(samplerate, window_size_samples),
make_pitch_shift(samplerate),
additive_noise
]
# Persist processed features in LMDB under /hdd/sounddb2 (map_size ~1e11 bytes).
@zounds.simple_lmdb_settings(
    '/hdd/sounddb2', map_size=1e11, user_supplied_id=True)
class Sound(BaseModel):
    """
    An audio processing graph, that will resample each audio file to 11025hz
    and store the results in an LMDB database
    """
    # Half-overlapping, Ogg-Vorbis-windowed frames of the resampled audio.
    short_windowed = zounds.ArrayWithUnitsFeature(
        zounds.SlidingWindow,
        wscheme=zounds.HalfLapped(),
        wfunc=zounds.OggVorbisWindowingFunc(),
        needs=BaseModel.resampled)
    # Short-time Fourier transform of those frames (used by the UI below
    # as the visualization feature).
    stft = zounds.ArrayWithUnitsFeature(
        zounds.FFT,
        needs=short_windowed)
def train(network, batch_size, device, checkpoint, weights_file_path):
    """
    Train the model indefinitely
    """
    # Triplets are sampled using the audio deformations and temporal
    # proximity configured at module level.
    sampler = TripletSampler(
        Sound, slice_duration, deformations, temporal_proximity)
    trainer = Trainer(
        network=network,
        triplet_sampler=sampler,
        learning_rate=1e-4,
        batch_size=batch_size,
        triplet_loss_margin=0.25).to(device)
    # trainer.train() yields one loss value per batch, forever.
    for batch_num, error in enumerate(trainer.train()):
        print('Batch: {batch_num}, Error: {error}'.format(**locals()))
        # Persist weights every `checkpoint` batches.
        if batch_num % checkpoint == 0:
            torch.save(network.state_dict(), weights_file_path)
def compute_all_embeddings(network):
    """
    A generator that will compute embeddings for every non-overlapping segment
    of duration window_size_samples in the database
    """
    for snd in Sound:
        # Non-overlapping windows of window_size_samples each.
        windowed = snd.resampled.sliding_window(
            samplerate * window_size_samples).astype(np.float32)
        # Run the network over the windows, 64 at a time.
        arr = zounds.learn.apply_network(
            network, windowed, chunksize=64)
        # Re-attach the time dimension so each embedding keeps its timestamp.
        ts = zounds.ArrayWithUnits(
            arr, [windowed.dimensions[0], zounds.IdentityDimension()])
        print(snd._id)
        yield snd._id, ts
def build_search_index(network, search_file_path, n_trees=32):
    """
    Build both a brute force search index, as well as an index that uses a tree
    of random hyperplane splits
    """
    # Load a previously pickled brute-force index if one exists; otherwise
    # compute embeddings for the entire database and cache the result.
    try:
        with open(search_file_path, 'rb') as f:
            search = pickle.load(f)
    except IOError:
        search = zounds.BruteForceSearch(
            compute_all_embeddings(network), distance_metric='cosine')
        with open(search_file_path, 'wb') as f:
            pickle.dump(search, f, pickle.HIGHEST_PROTOCOL)
    print('building tree...')
    # The tree index is rebuilt on every call; only the brute-force index
    # is cached on disk.
    tree_search = TreeSearch(search, n_trees=n_trees)
    return search, tree_search
def visualize_embeddings(network, search_file_path):
    """Produce a 2-D t-SNE scatter plot (t-SNE.png) of up to 2e4 embeddings
    drawn from sounds whose ids match a handful of known genre examples,
    colored by genre label."""
    from matplotlib import cm
    from sklearn.manifold import TSNE
    from matplotlib import pyplot as plt
    # map labels/categories to some known examples of sounds that fall into
    # that category
    class_to_id = {
        'piano': {'AOC11B', 'CHOPINBallades-NEWTRANSFER'},
        'pop': {'02.LostInTheShadowsLouGramm', '08Scandalous'},
        'jazz': {'Free_20s_Jazz_Collection'},
        'hip-hop': {'LucaBrasi2', 'Chance_The_Rapper_-_Coloring_Book'},
        'speech': {
            'Greatest_Speeches_of_the_20th_Century', 'The_Speeches-8291'},
        'nintendo': {
            'CastlevaniaNESMusicStage10WalkingOnTheEdge',
            'SuperMarioBros3NESMusicWorldMap6'}
    }
    # map a color to each category
    color_map = cm.Paired
    color_index = dict(
        (key, color_map(x)) for x, key
        in zip(np.linspace(0, 1, len(class_to_id)), class_to_id.iterkeys()))
    # map sound ids to their labels
    id_index = dict()
    for snd in Sound:
        for label, _ids in class_to_id.iteritems():
            for _id in _ids:
                if _id in snd._id:
                    id_index[snd._id] = label
    # reduce the entire database of computed embeddings to just those with the
    # ids we care about
    search, tree_search = build_search_index(
        network, search_file_path, n_trees=1)
    # build up two sequences, one that contains the indices we're interested in
    # and the other that contains the color that should be assigned to that
    # data point
    indices = []
    labels = []
    for index, pair in enumerate(search._ids):
        _id, _ = pair
        try:
            label = id_index[_id]
            labels.append(label)
            indices.append(index)
        except KeyError:
            continue
    indices = np.array(indices)
    labels = np.array(labels)
    # shuffle indices and take the first N
    new_indices = np.random.permutation(len(indices))[:int(2e4)]
    indices = indices[new_indices]
    labels = labels[new_indices]
    embeddings = search.index[indices]
    print(embeddings.shape)
    # dist = cosine_distances(embeddings, embeddings)
    # print(dist.shape)
    # Project to 2-D with t-SNE under the same metric the search uses.
    model = TSNE(metric='cosine')
    points = model.fit_transform(embeddings)
    print(points.shape)
    plt.figure(figsize=(15, 15))
    # One scatter call per label so the legend gets one entry per genre.
    for label in class_to_id.iterkeys():
        label_indices = np.where(labels == label)[0]
        p = points[label_indices]
        color = color_index[label]
        plt.scatter(p[:, 0], p[:, 1], c=[color], label=label, edgecolors='none')
    plt.xticks([])
    plt.yticks([])
    plt.legend()
    plt.savefig('t-SNE.png')
def compare_search_indices(network, search_file_path):
    """Benchmark tree-based search against brute force for several tree
    counts and plot the comparison."""
    search, tree_search = build_search_index(
        network, search_file_path, n_trees=64)
    tree_search.compare_and_plot(
        n_trees=[1, 2, 4, 8, 16, 32, 64],
        n_iterations=50,
        n_results=50)
def visualize_tree(network, search_file_path):
    """Render a visualization of a single random-hyperplane tree."""
    search, tree_search = build_search_index(
        network, search_file_path, n_trees=1)
    tree_search.visualize_tree()
def demo_negative_mining(network, batch_size, device):
    """Save negative_mining.png: a 4x4 grid of plots comparing anchor-to-
    positive, anchor-to-negative, and mined anchor-to-negative distances."""
    from matplotlib import pyplot as plt, gridspec
    from itertools import product
    sampler = TripletSampler(
        Sound, slice_duration, deformations, temporal_proximity)
    trainer = Trainer(
        network=network,
        triplet_sampler=sampler,
        learning_rate=1e-4,
        batch_size=batch_size,
        triplet_loss_margin=0.25).to(device)
    spec = gridspec.GridSpec(4, 4, wspace=0.25, hspace=0.25)
    fig = plt.figure(figsize=(15, 15))
    # One demo batch per grid cell.
    for x, y in product(xrange(4), xrange(4)):
        anchor_to_positive, anchor_to_negative, mined_anchor_to_negative = \
            trainer.negative_mining_demo()
        ax = plt.subplot(spec[x, y])
        ax.plot(anchor_to_positive, label='anchor-to-positive')
        ax.plot(anchor_to_negative, label='anchor-to-negative')
        ax.plot(mined_anchor_to_negative, label='mined-anchor-to-negative')
        ax.set_xticks([])
        ax.set_ylim(0, 1.0)
    plt.legend(bbox_to_anchor=(1, 0), loc="lower right")
    plt.savefig('negative_mining.png', format='png')
    fig.clf()
if __name__ == '__main__':
parser = argparse.ArgumentParser(parents=[
zounds.ui.AppSettings()
])
parser.add_argument(
'--ingest',
help='should data be ingested',
action='store_true')
parser.add_argument(
'--batch-size',
help='Batch size to use when training',
type=int)
parser.add_argument(
'--checkpoint',
help='save network weights every N batches',
type=int)
parser.add_argument(
'--weights-file-path',
help='the name of the file where weights should be saved')
parser.add_argument(
'--search',
help='test the search',
action='store_true')
parser.add_argument(
'--search-file-path',
help='the path where a pre-built search should be stored',
required=False)
parser.add_argument(
'--demo-negative-mining',
help='run a demo of within-batch semi-hard negative mining',
action='store_true')
parser.add_argument(
'--compare-search-indices',
help='run a comparison of search indices',
action='store_true')
parser.add_argument(
'--visualize-tree',
help='produce a visualization of one hyperplane tree',
action='store_true')
parser.add_argument(
'--visualize-embeddings',
help='produce a 2d visualiation of the embeddings using t-SNE',
action='store_true'
)
args = parser.parse_args()
if args.ingest:
zounds.ingest(dataset, Sound, multi_threaded=True)
network, device = EmbeddingNetwork.load_network(args.weights_file_path)
if args.search:
search, tree_search = build_search_index(
network=network,
search_file_path=args.search_file_path)
elif args.demo_negative_mining:
demo_negative_mining(network, args.batch_size, device)
elif args.compare_search_indices:
compare_search_indices(network, args.search_file_path)
elif args.visualize_tree:
visualize_tree(network, args.search_file_path)
elif args.visualize_embeddings:
visualize_embeddings(network, args.search_file_path)
else:
train(
network=network,
batch_size=args.batch_size,
device=device,
checkpoint=args.checkpoint,
weights_file_path=args.weights_file_path)
app = zounds.ZoundsApp(
model=Sound,
visualization_feature=Sound.stft,
audio_feature=Sound.ogg,
globals=globals(),
locals=locals())
app.start(port=args.port)
| [
"john.vinyard@gmail.com"
] | john.vinyard@gmail.com |
9f7caf4fed95bc81250d025326f45ff1780ff678 | 0a995834cfd7cce1defc2e0e5e0bee8103dca2b5 | /luffy_permission/rbac/views/role.py | 7654a58cd7c5a0575b3ef191abdf7e8c98d3e419 | [] | no_license | mling17/rbac | 9bcbc6e3c0cfd66d7399a7c9599801ff9271b20e | 969b4e0c52e1eb8bd29cf67b02b26ae5b1203fe1 | refs/heads/master | 2020-11-29T19:30:56.315369 | 2019-12-26T05:05:57 | 2019-12-26T05:05:57 | 230,197,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,451 | py | from django.shortcuts import render, redirect, HttpResponse
from django.urls import reverse
from rbac import models
from rbac.forms.role import RoleModelForm
def role_list(request):
    """Render the role management list page (GET only; other methods fall
    through and return None)."""
    if request.method == 'GET':
        roles = models.Role.objects.all()
        return render(request, 'rbac/role_list.html', {'roles': roles})
def role_add(request):
    """Create a role: empty form on GET, save and redirect on valid POST."""
    if request.method == 'GET':
        form = RoleModelForm()
        return render(request, 'rbac/change.html', {'form': form})
    form = RoleModelForm(data=request.POST)
    if form.is_valid():
        form.save()
        return redirect(reverse('rbac:role_list'))
    # Invalid POST: redisplay the form with validation errors.
    return render(request, 'rbac/change.html', {'form': form})
def role_edit(request, pk):
    """Edit an existing role: prefilled form on GET, save and redirect on
    valid POST; a plain message is returned when the pk is unknown."""
    obj = models.Role.objects.filter(id=pk).first()
    if not obj:
        return HttpResponse('没有此角色')
    if request.method == 'GET':
        form = RoleModelForm(instance=obj)
        return render(request, 'rbac/change.html', {'form': form})
    form = RoleModelForm(instance=obj, data=request.POST)
    if form.is_valid():
        form.save()
        return redirect(reverse('rbac:role_list'))
    # Invalid POST: re-display the edit form with validation errors.  The
    # original rendered role_list.html here (which expects a 'roles' context,
    # not 'form'); use change.html like role_add so errors are shown.
    return render(request, 'rbac/change.html', {'form': form})
def role_del(request, pk):
    """Show a confirmation page on GET; delete the role and return to the
    role list otherwise."""
    origin_url = reverse('rbac:role_list')
    if request.method == 'GET':
        # 'cancel' links back to the role list from the confirmation page.
        return render(request, 'rbac/delete.html', {'cancel': origin_url})
    models.Role.objects.filter(id=pk).delete()
    return redirect(origin_url)
| [
"mling17@163.com"
] | mling17@163.com |
033635d8b06972430571c7945b9aca52940092cf | 073affd8807eb668f9e7013414a9cc39a6c34f26 | /endgame.py | 68cdc9087984a18a713aa9218c2371b002ef0439 | [] | no_license | dencynluv/Hangman | fca2a972f07cf23b96a75f15661df168b6fc92ee | 36ba194ed0f602a996b36fe45f3a17a94af3555e | refs/heads/master | 2021-01-19T11:53:15.145780 | 2017-02-21T08:39:04 | 2017-02-21T08:39:04 | 82,270,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | # end game module
# imports global variables needed into this module
from setup import good_letters, bad_letters, unique_letters, secret_word, guess_limit
# function to end the game
def is_game_over():
# both lists have to match in length in order to win the game
# or else
# bad letters and the guess limit have to match to lose the game
# both statements return True which ends the game loop
if len(unique_letters) == len(good_letters):
print "\n" * 15
print "You win! The word was {}".format(secret_word)
print "\n" * 15
return True
elif len(bad_letters) == guess_limit:
print "\n" * 15
print "You didn't guess it! My word was {}".format(secret_word)
print "\n" * 15
return True
else:
return False
| [
"sotocyn@gmail.com"
] | sotocyn@gmail.com |
9aef497bd51f3ca575e9f0400deb92ea47e6ae5f | df3c449a70bc985246e9d6263cebcd685781d05b | /Blink_Detection/blinky.py | e2c5901a077b74cdde7fb7c99f79cba749c0cf39 | [
"MIT"
] | permissive | vaaiibhav/Blink-Detection-using-Webcam-in-Python--and-OPenCV | d800a16125e94ed8816b07f0b8c034a9f0286e71 | d0ab756f6a5088223f18b96f962b711daa08db65 | refs/heads/master | 2022-04-23T22:49:07.915855 | 2020-04-28T14:48:54 | 2020-04-28T14:48:54 | 259,666,059 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | #!/usr/bin/env python
"""blinky.py:
Starting point of blinky
"""
import extract
import webcam
import pylab
import numpy as np
def main(args):
    """Run both blink-detection strategies over the video described by
    ``args`` and write one "time,blinks" CSV per strategy."""
    # Extract video first
    data = webcam.video2csv(args)
    # Strategy 1: blinks detected from edge content (see extract module).
    edgyBlinks = extract.find_blinks_using_edge(data)
    outfile = "%s_blinks_using_edges.csv" % args['video_file']
    print("[INFO] Writing to outfile %s" % outfile)
    np.savetxt(outfile, np.array(edgyBlinks).T, delimiter=","
            , header = "time,blinks")
    # Strategy 2: blinks detected from raw pixel values.
    pixalBlinks = extract.find_blinks_using_pixals(data)
    outfile = "%s_blinks_using_pixals.csv" % args['video_file']
    print("[INFO] Writing to outfile %s" % outfile)
    np.savetxt(outfile, np.array(pixalBlinks).T, delimiter=","
            , header = "time,blinks")
if __name__ == '__main__':
    import argparse
    # Argument parser.
    # NOTE(review): description is placeholder text.
    description = '''description'''
    parser = argparse.ArgumentParser(description=description)
    # Plain object used as the namespace that parse_args fills in below.
    class Args: pass
    args = Args()
    parser.add_argument('--video-file', '-f'
            , required = True
            , help = 'Path of the video file'
            )
    parser.add_argument('--bbox', '-b'
            , required = False
            , nargs = '+'
            , type = int
            , help = 'Bounding box : topx topy width height'
            )
    parser.parse_args(namespace=args)
    main(vars(args))
| [
"vaaiibhav@live.com"
] | vaaiibhav@live.com |
da03b2d8448602dd00e2ae9394f8c2537ee7e71f | b4be1cf1ac616abdf12297b3a69226d98ae8c70b | /src/coboljsonifier/fields/field_alphanumeric_ebcdic.py | 32d18049debfef65a752fc01c42d9a4f6a62b9bf | [] | no_license | jrperin/cobol-copybook.jsonifier | 08dc7cd6d6cb95bcb7e0b59b9db949e490b179a6 | 7aefd72956e3c26456327dcd222723a04a115bf3 | refs/heads/master | 2023-09-01T00:05:08.776147 | 2023-08-22T01:28:11 | 2023-08-22T01:28:11 | 405,270,494 | 13 | 7 | null | 2023-08-22T01:23:17 | 2021-09-11T03:11:16 | Python | UTF-8 | Python | false | false | 482 | py | from .field import Field
class FieldAlphanumericEbcdic(Field):
    """Composite Pattern leaf: a fixed-width EBCDIC alphanumeric field."""
    def __init__(self, type: str, name: str, size: int):
        # Alphanumeric fields have no decimal places, hence the trailing 0.
        super(FieldAlphanumericEbcdic, self).__init__(type, name, size, 0)
    def parse(self, data_in):
        """Decode the first ``size`` bytes as EBCDIC (cp500) into ``_value``
        (None when blank) and return the unconsumed remainder of ``data_in``."""
        if not data_in:
            return
        decoded = data_in[:self._size].decode('cp500').strip()
        self._value = decoded if decoded else None
        return data_in[self._size:]
| [
"jrperin@gmail.com"
] | jrperin@gmail.com |
fe7bb503a0a28eaefde625c1ebbd8c2563154270 | 6d9d83eee49c5729afb3c6b4c3f1013d2e65107b | /05_boarding_pass/solution.py | e5154ae8202d5cae334fa061d10e8b62b695af05 | [] | no_license | jpclark6/advent_of_code_2020 | 7aa706849d159ff9cd9117556588d877a9c98c80 | b7255da50c1d555aa671f87b0d44cdb39777c117 | refs/heads/main | 2023-02-18T14:33:31.408534 | 2021-01-14T22:43:36 | 2021-01-14T22:43:36 | 317,354,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,442 | py | """
Advent of code
Day 5
"""
def parse_row(boarding_pass):
    """Binary-partition the first seven characters ('F'/'B') into a row 0-127."""
    seats = range(128)
    for letter in boarding_pass[:7]:
        half = len(seats) // 2
        # 'F' keeps the front (lower) half; anything else keeps the back half.
        seats = seats[:half] if letter == 'F' else seats[half:]
    return seats[0]
def parse_column(boarding_pass):
    """Binary-partition the trailing characters ('L'/'R') into a column 0-7."""
    seats = range(8)
    for letter in boarding_pass[7:]:
        half = len(seats) // 2
        # 'L' keeps the left (lower) half; anything else keeps the right half.
        seats = seats[:half] if letter == 'L' else seats[half:]
    return seats[0]
def find_seat_id(boarding_pass):
    """Seat ID = row * 8 + column, per the puzzle's definition."""
    return parse_row(boarding_pass) * 8 + parse_column(boarding_pass)
def part_1(passes):
    """Print the highest seat ID found among *passes*."""
    highest = 0
    for bp in passes:
        highest = max(highest, find_seat_id(bp))
    print('Part 1:', highest)
def part_2(passes):
    """Print your seat ID: the single missing ID in the sorted list.

    The original scanned every index i and read ``all_ids[i + 1]``, which
    raises IndexError at the last element when no gap exists; iterating
    over adjacent pairs never reads past the end.
    """
    all_ids = sorted(find_seat_id(bp) for bp in passes)
    for lower, upper in zip(all_ids, all_ids[1:]):
        if lower + 1 != upper:
            print('part 2:', lower + 1)
            return
if __name__ == "__main__":
    # Read the puzzle input (one boarding pass per line) and run both parts.
    # NOTE(review): the file handle is never closed; a `with` block would be
    # tidier, but harmless for a short-lived script.
    filename = './input.txt'
    text = open(filename)
    lines = text.read().split('\n')
    part_1(lines)
    part_2(lines)
"jpclark6@gmail.com"
] | jpclark6@gmail.com |
8bc1f3af1ca811d884a225dbd76851c0ad13c46a | 1da15a0ec8eb771d4584b3997d44d2af23d53484 | /D3/1220.Magnetic.py | 2da7b7c711faaafaf1586b556cbc79aeea42fe62 | [] | no_license | cdh3261/Algorithm_Problems | 1e9ad0310490ffe5396f8cef3205885d62ebefb7 | d9ad791e9a0bcdd1c13b8e18fa993b784a53b064 | refs/heads/master | 2020-08-29T07:27:04.331917 | 2020-03-06T11:33:57 | 2020-03-06T11:33:57 | 217,966,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | ####### N극 #######
####### S극 #######
# Ten test cases: read an n x n grid (1 = N-pole magnet, 2 = S-pole magnet,
# 0 = empty) and count the "deadlocked" positions after the magnets fall.
for t in range(1, 11):
    n = int(input())
    arr = [list(map(int, input().split())) for i in range(n)]
    # Compact each column: keep only its non-zero entries, top to bottom.
    col = []
    for i in range(n):
        a = []
        for j in range(n):
            if arr[j][i] != 0:
                a.append(arr[j][i])
        col.append(a)
    # A deadlock forms wherever a 2 sits directly below a non-2 (i.e. a 1)
    # in the compacted column; the very first entry can never deadlock.
    cnt = 0
    for i in range(n):
        for j in range(len(col[i])):
            if j != 0 and col[i][j] == 2 and col[i][j - 1] != 2:
                cnt += 1
    print(f'#{t} {cnt}')
"cdh3261@naver.com"
] | cdh3261@naver.com |
488a72882522860ee0646184122fed08d6f759bc | 5491c6305d3ce2000ad5a509b845c4e9846f6140 | /lle_isomap_plot.py | 9a81c278df20a4b52388c22ec32106fa2873743c | [] | no_license | Eason-Sun/Effects-Of-Different-Dimensionality-Reductions-On-Image-Data | 468bcaee38a8fbef53503959ebe28451ccf4a6c3 | 9697462eca9834b7f1ec01d48e30692866305854 | refs/heads/master | 2020-07-09T06:37:10.524241 | 2019-08-23T02:58:34 | 2019-08-23T02:58:34 | 203,907,911 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,986 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import manifold
# Load the labelled dataset; 'gnd' holds the class label per sample.
df = pd.read_csv('DataB.csv', index_col=0)
class_matrix = df['gnd'].values
# Slice the data matrix so that only samples in class '3' are taken.
digit_3_df = df.loc[df['gnd'] == 3].copy()
digit_3_df.drop(columns=['gnd'], inplace=True)
digit_3_matrix = digit_3_df.values
# Feature matrix for the full dataset (labels removed).
df.drop(columns=['gnd'], inplace=True)
data_matrix = df.astype(float).values
n_neighbors = 5  # Set the number of nearest neighbour to 5.
# Plot the image based on the first and second components of LLE or ISOMAP.
def plot(X, title=None, min_dist=4e-3):
    """Scatter the digit thumbnails at their 2-D embedding coordinates.

    X is the (n_samples, 2) projected matrix; samples closer than
    *min_dist* (squared, in normalized units) to an already-drawn one are
    skipped to avoid overplotting.  Reads the module-level
    ``digit_3_matrix`` for the 28x28 images.
    """
    x_min, x_max = np.min(X, 0), np.max(X, 0)
    X = (X - x_min) / (x_max - x_min) # Min-Max Normalization.
    plt.figure()
    ax = plt.subplot(111)
    # Seed far outside [0, 1]^2 so the first real sample is always drawn.
    shown_images = np.array([[1., 1.]])
    for i in range(X.shape[0]):
        # Discard those samples that appear too near in the figure.
        dist = np.sum((X[i] - shown_images) ** 2, 1)
        if np.min(dist) < min_dist:
            continue
        shown_images = np.r_[shown_images, [X[i]]]
        # Map each image to their corresponding coordinates provided by the first 2 components of the projected matrix.
        imagebox = offsetbox.AnnotationBbox(offsetbox.OffsetImage(digit_3_matrix[i].reshape((28, 28)), cmap=plt.cm.gray_r), X[i],
                                            frameon=False)
        ax.add_artist(imagebox)
    plt.xticks([]), plt.yticks([])
    if title is not None:
        plt.title(title)
# Apply LLE to the dataset in class '3'.
# (4 components are computed; only the first two are plotted below.)
lle = manifold.LocallyLinearEmbedding(n_neighbors, n_components=4, method='standard')
lle.fit(digit_3_matrix)
X_lle = lle.transform(digit_3_matrix)
plot(X_lle[:, 0:2], "LLE Projection", 3e-3)
# Apply ISOMAP to the dataset in class '3'.
iso = manifold.Isomap(n_neighbors, n_components=4)
iso.fit(digit_3_matrix)
X_iso = iso.transform(digit_3_matrix)
plot(X_iso[:, 0:2], "ISOMAP Projection")
plt.show()
| [
"easonsyx@gmail.com"
] | easonsyx@gmail.com |
1275d2dfcb26f9a06de524ae94252ec4c0261fcb | b6df9e7962725e6b4dda7873576a1bc23f748d93 | /.history/crawling_20210906175955.py | d7af46376b00aacfd63666971793f5d6ccf0b9dd | [
"MIT"
] | permissive | jk7g14/keywordcount | 8500288c0d248fc7b9227897170b8d50c8eaf0c6 | 89187bd865716a8cb8a1f50bc80265a2edf95cea | refs/heads/main | 2023-07-21T20:22:05.016630 | 2021-09-08T00:34:10 | 2021-09-08T00:34:10 | 403,511,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,250 | py | from requests_html import HTML, HTMLSession
from time import sleep
import csv
import datetime
# Scrape paginated review comments and append them to a timestamped CSV.
# NOTE(review): the scraping body is commented out, so the loop below only
# increments the page counter; with the `break` also commented out the loop
# never terminates, making file.close() and the final prints unreachable.
file = open(f'data-{datetime.datetime.now()}', 'w', newline='')
writer = csv.writer(file)
writer.writerow(["name", "date", "content"])
session = HTMLSession()
url = 'https://www.ybtour.co.kr/promotion/incRepList.yb'
data = {'bnrMstNo': 20000016490, 'pageNo': 1}
while(True):
    # r = session.post(url, data=data)
    # items = r.html.find('.box_prom_temp .desc_cmt')
    # if len(items) == 0:
    #     break
    # for item in items:
    #     name = item.find('.list_user .name', first=True).text
    #     date = item.find('.list_user .date', first=True).text
    #     content = item.find('.txt_desc', first=True).text
    #     writer.writerow([name, date, content])
    #     print(f'작성자: {name}, 작성일: {date} \n')
    #     print(f'---------------------------------- \n')
    #     print(f'내용: {content}')
    #     print('------------------------------------------------------')
    data['pageNo'] += 1
    print(data['pageNo'])
    sleep(1)
file.close()
print('---------------------------완료')
print('---------------------------완료')
print('---------------------------완료')
print('---------------------------완료')
| [
"llyppp@revhat.com"
] | llyppp@revhat.com |
4d0eb742be6248d77f5c482bc1869c2dcbcc2143 | bb1a8acd8b17b687e6ab5e25628ef11c885b9735 | /wc_script/mots_dans_codes_fiscaux.py | c4e49aab1c8ee51a7357b3643dff75b64ad04be5 | [] | no_license | adrienpacifico/french-law-word-cout | d1e4b99fcb568fa14fd29878b0204503c643680d | dfc960058beb67660a1bbef446321e33b332decc | refs/heads/master | 2021-01-10T14:55:44.653122 | 2015-12-10T17:45:54 | 2015-12-10T17:45:54 | 47,777,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | # -*- coding: utf-8 -*-
# Python 2 script (note the print statement): counts the words in the French
# tax code's markdown files, excluding README files, via find | wc -w.
import os, sys, logging, shutil, ntpath
execution_directory = os.getcwd()
#os.system("pdflatex " + "-output-directory='/Users/adrienpacifico/.tmp_latex'" + " " + '"' + tex_file + '"' )
#TODO : allows for relatives path.
cgi_path = "/Users/adrienpacifico/Informatique/loi_versionne_sous_git/textes_de_lois/codes-juridiques-francais/codes-en-vigueur/code-general-des-impots-cgi"
#os.chdir("/Users/adrienpacifico/Informatique/loi_versionne_sous_git/textes_de_lois/codes-juridiques-francais/codes-en-vigueur/code-general-des-impots-cgi")
import re
import subprocess
# wc's final summary line looks like "  12345 total"; capture the count.
last_line_re = re.compile('\s*(?P<n>\d+)\s+total$')
# Total word count over every .md file in the repo.
output = subprocess.check_output(["find . -type f -name '*.md' | xargs wc -w"],cwd = cgi_path, shell = True)
last_line = output.rstrip().split("\n")[-1]
match = last_line_re.match(last_line)
words_count = int(match.group('n'))
# Word count of README.md files only, subtracted from the total below.
output = subprocess.check_output(["find . -type f -name 'README.md' | xargs wc -w"],cwd = cgi_path, shell = True)
last_line = output.rstrip().split("\n")[-1]
match = last_line_re.match(last_line)
readme_words_count = int(match.group('n'))
print "program output:", words_count - readme_words_count
#print 'float_print', float(out[-15:-6])
#import ipdb
#ipdb.set_trace()
#find . -name '*' ! -name 'README.md' |xargs wc -w
| [
"adrienpacifico@gmail.com"
] | adrienpacifico@gmail.com |
52258e928c4847ff0ce541f546f10238923ecc97 | 086c483bec404a0aaafdd81dd2eb20760d8b6d90 | /prueba_hilos.py | 0e94a7bdfda78d5ca92d7386c99887699f9d4b22 | [] | no_license | MariaFernandaG/raspberry | 9faa8eaa23c9df2597f22e09d5cd825e3d2614d5 | 176ce4b26ef01007d92740ecacbf47b02844f808 | refs/heads/main | 2023-07-17T12:29:13.281874 | 2021-09-02T20:49:49 | 2021-09-02T20:49:49 | 367,141,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,790 | py | #OXIMETRO, PESA, TEMPERATURA Y ALTURA
#lIBRERIAS
import RPi.GPIO as GPIO
import time
from time import sleep
import serial
import threading
#TEMPERATURA
import board
import adafruit_mlx90614
GPIO.setwarnings(False)
#GPIO Mode (BOARD / BCM)
GPIO.setmode(GPIO.BCM)
#set GPIO Pins
GPIO_TRIGGER = 18
GPIO_ECHO = 24
GPIO_TRIGGER_AL = 16
GPIO_ECHO_AL = 25
#set GPIO direction (IN / OUT)
GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN)
GPIO.setup(GPIO_TRIGGER_AL, GPIO.OUT)
GPIO.setup(GPIO_ECHO_AL, GPIO.IN)
#VARIABLES ALTURA
global altura
altura = 0.0
#VARIABLES TEMPERATURA
global promedioTemperatura
promedioTemperatura = 0.0
#VARIABLES PESO
global flag_peso
#VARIABLES OXIGENO
flag_ox = 0
flag_peso = 0
#SERIAL
global ser
ser = serial.Serial('/dev/ttyUSB0', 115200, timeout=1)
#EQUIVALENTE A MILLIS() DE ARDUINO
millis = lambda: int(round(time.time() * 1000))
def readTemperatura():
    """Sample the MLX90614 IR thermometer and estimate core temperature.

    Takes 10 object-temperature readings smoothed with a scalar Kalman
    filter, then converts the filtered skin temperature to a core-body
    estimate using ambient-dependent thresholds.  The result is stored
    in the module-level ``promedioTemperatura``.
    """
    global promedioTemperatura
    promedioTemperatura = 0.0
    i2c = board.I2C()
    mlx = adafruit_mlx90614.MLX90614(i2c)
    temp = 0.0
    tLow = 0.0
    tHigh = 0.0
    TA = 0.0
    TF = 0.0
    TCore = 0.0
    # Kalman filter state; variances were determined empirically.
    varTemp = 0.007358834
    varProcess = 1e-9
    Pc = 0.0
    G = 0.0
    P = 1.0
    Xp = 0.0
    Zp = 0.0
    Xe = 0.0
    for i in range(10):
        temp = mlx.object_temperature
        # Retry while the reading is outside the plausible 0-45 C range.
        while temp < 0 or temp > 45:
            print("Sensor no responde")
            #mlx.writeEmissivity(0.98);
            # BUG FIX: time.sleep takes seconds; the original slept 100 s
            # between retries (an Arduino delay(100) ms port).  0.1 s is
            # the intended retry pause.
            time.sleep(0.1)
            temp = mlx.object_temperature
        print(temp)
        #FILTRO KALMAN
        Pc = P + varProcess
        G = Pc / (Pc + varTemp)
        P = (1 - G) * Pc
        Xp = Xe
        Zp = Xp
        Xe = G * (temp - Zp) + Xp
        time.sleep(0.01)  # 10 ms between samples
    TA = mlx.ambient_temperature
    # Ambient-compensated thresholds for the skin-to-core conversion.
    if TA <= 25:
        tLow = 32.66 + 0.186 * (TA - 25)
        tHigh = 34.84 + 0.148 * (TA - 25)
    if TA > 25:
        tLow = 32.66 + 0.086 * (TA - 25)
        tHigh = 34.84 + 0.100 * (TA - 25)
    TF = Xe
    if TF < tLow:
        TCore = 36.3 + (0.551658273 + 0.021525068 * TA) * (TF - tLow)
    if tLow < TF and TF < tHigh:
        TCore = 36.3 + 0.5 / (tHigh - tLow) * (TF - tLow)
    if TF > tHigh:
        TCore = 36.8 + (0.829320618 + 0.002364434 * TA) * (TF - tHigh)
    promedioTemperatura = TCore
def medicionTemperatura():
    """Wait for a forehead 4-6 cm from sensor 1, then measure temperature.

    Blocks until the ultrasonic distance falls inside (4, 6] cm, then
    runs readTemperatura() and prints the result (or an out-of-range
    message).  NOTE(review): the 12 s timeout at the bottom is only
    checked *after* the wait loop exits, so it can never abort the wait.
    """
    distance = 0.0;
    distanceValid = 0
    t_inicio = millis()
    while distanceValid == 0:
        t_final = millis()
        t_total = t_final - t_inicio
        getAltura(1)  # use ultrasonic sensor 1 (temperature station)
        distance = altura
        #print(t_total)
        if distance > 4 and distance <= 6:
            distanceValid = 1
        else:
            distanceValid = 0
    if distanceValid == 1:
        readTemperatura()
        # Accept only physiologically plausible readings.
        if promedioTemperatura > 34 and promedioTemperatura < 42.1:
            print("Temperatura: ",promedioTemperatura)
        else:
            print("Temperatura fuera de los límites: ",promedioTemperatura)
        #print(distance)
        distanceValid = 2
    if t_total > 12000:
        print("No se realizó la medición")
        distanceValid = 2
def readAltura(n_sensor):
    """Take one raw distance reading (cm) from ultrasonic sensor 1 or 2.

    Pulses the trigger pin, times the echo pulse, and converts the
    elapsed time to centimeters (speed of sound 34300 cm/s, halved for
    the round trip).  NOTE(review): the echo-wait loops have no timeout
    and can hang if the sensor never responds; an n_sensor outside
    {1, 2} raises UnboundLocalError on `distance`.
    """
    # Sensor 1: temperature-station distance sensor.
    if n_sensor == 1:
        # set Trigger to HIGH
        GPIO.output(GPIO_TRIGGER, True)
        # set Trigger after 0.01ms to LOW
        time.sleep(0.00001)
        GPIO.output(GPIO_TRIGGER, False)
        StartTime = time.time()
        StopTime = time.time()
        # save StartTime
        while GPIO.input(GPIO_ECHO) == 0:
            StartTime = time.time()
        # save time of arrival
        while GPIO.input(GPIO_ECHO) == 1:
            StopTime = time.time()
        # time difference between start and arrival
        TimeElapsed = StopTime - StartTime
        # multiply with the sonic speed (34300 cm/s)
        # and divide by 2, because there and back
        distance = (TimeElapsed * 34300) / 2
        #distance = TimeElapsed * 0.01715
    # Sensor 2: height-measurement distance sensor.
    if n_sensor == 2:
        # set Trigger to HIGH
        GPIO.output(GPIO_TRIGGER_AL, True)
        # set Trigger after 0.01ms to LOW
        time.sleep(0.00001)
        GPIO.output(GPIO_TRIGGER_AL, False)
        StartTime = time.time()
        StopTime = time.time()
        # save StartTime
        while GPIO.input(GPIO_ECHO_AL) == 0:
            StartTime = time.time()
        # save time of arrival
        while GPIO.input(GPIO_ECHO_AL) == 1:
            StopTime = time.time()
        # time difference between start and arrival
        TimeElapsed = StopTime - StartTime
        # multiply with the sonic speed (34300 cm/s)
        # and divide by 2, because there and back
        distance = (TimeElapsed * 34300) / 2
        #distance = TimeElapsed * 0.01715
    return distance
def getAltura(n_sensor):
    """Kalman-filter 25 raw readings and publish the result in ``altura``.

    Repeats the 25-sample pass until the filtered distance lands inside
    the plausible [0, 250] cm range.  NOTE(review): `prom` (the plain
    average) is computed but never used; only the Kalman estimate XeD
    (plus 1 cm offset) is kept.
    """
    global altura
    varDistance = 0.0567274773869351 #variance determined using excel and reading samples of raw sensor data
    varProcessD = 1e-8
    temp = 0.0
    prom = 0
    PcD = 0.0
    GD = 0.0
    PD = 1.0
    XpD = 0.0
    ZpD = 0.0
    XeD = 0.0
    flag_alt = 0
    while flag_alt == 0:
        for i in range(25):
            temp = round(readAltura(n_sensor))
            prom += temp
            # Kalman filter update
            PcD = PD + varProcessD
            GD = PcD / (PcD + varDistance)
            PD = (1 - GD) * PcD
            XpD = XeD
            ZpD = XpD
            XeD = GD * (temp - ZpD) + XpD
            time.sleep(0.01)  # 10 ms between samples
        prom /= 25
        prom += 1
        altura = (XeD) + 1
        #print(altura)
        # Reject implausible results and re-measure.
        if altura > 250 or altura < 0:
            flag_alt = 0
            #print("fuera límite: ", altura)
        else:
            flag_alt = 1
def medicionAltura():
    """Interactive height measurement using ultrasonic sensor 2.

    First captures a reference distance (sensor to floor, presumably),
    then on each 'y' answer measures again and reports the difference as
    the person's height.  Answer 'n' to exit.
    """
    global altura
    menu = 0
    if menu == 0:
        getAltura(2)  # use ultrasonic sensor 2 (height station)
        refaltura = altura
        print("Referencia: ", refaltura)
        menu = 1
    while menu == 1:
        print("Desea medir altura y/n")
        entrada = input()
        if entrada == "y":
            getAltura(2)
            # Height = reference distance minus the current distance.
            altura = refaltura - altura
            print("Altura: ",altura)
            #menu = 0
        elif entrada == "n":
            print("No se midió altura")
            menu = 0
        else:
            print("Instrucción no válida")
def lectura_calibracion():
    """Worker thread: forward calibration keystrokes to the serial device.

    While the shared ``flag_peso`` flag is 1 (set during scale
    calibration/measurement), reads '+', '-' or 'salir' from stdin and
    writes the matching command over the serial link; 'salir' also
    clears the flag and ends the calibration session.
    """
    global flag_peso
    global ser
    while True:
        if flag_peso == 1:
            enviar = input()
            if enviar == "+":
                ser.write(b"+\n")
                #print("mas")
            elif enviar == "-":
                ser.write(b"-\n")
                #print("menos")
            elif enviar == "salir":
                ser.write(b"salir\n")
                flag_peso = 0
                #print("salir")
            else:
                print("comando inválido")
if __name__ == '__main__':
    try:
        ser.flush()
        #e = threading.Event()
        # Spawn the serial calibration worker thread.
        #hilo1 = threading.Thread(target=lectura_calibracion, args=(e,))
        hilo1 = threading.Thread(target=lectura_calibracion)
        hilo1.start()
        # Main menu loop: dispatch on the measurement the user asks for.
        while True:
            print("Insertar opción: oxigeno, temperatura, altura o peso")
            medicion = input()
            #print(medicion)
            if medicion == "temperatura":
                print("Medir temperatura")
                medicionTemperatura()
            elif medicion == "altura":
                print("Medir altura")
                medicionAltura()
            elif medicion == "oxigeno":
                print("Medir oxigeno")
                # NOTE(review): flag_ox is never cleared, so this relay
                # loop echoes serial lines forever once started.
                flag_ox = 1
                ser.write(b"oxigeno\n")
                while flag_ox == 1:
                    if ser.in_waiting > 0:
                        line = ser.readline().decode('utf-8').rstrip()
                        print(line)
                #time.sleep(1)
            elif medicion == "peso":
                ser.write(b"peso\n")
                print("Opción: calibrar o medir")
                medicion2 = input()
                if medicion2 == "calibrar":
                    ser.write(b"calibrar\n")
                    print("Coloque una masa conocida sobre la pesa e inserte el valor de dicha masa.")
                    masa = input()
                    # NOTE(review): input() returns a string, so `masa != 0`
                    # is always true; marked for review in the original too.
                    if masa != 0:
                        ser.write(masa.encode('utf-8')) #REVISAR ESTO
                    # flag_peso hands control to the calibration thread,
                    # which clears it when the user types 'salir'.
                    flag_peso = 1
                    while flag_peso == 1:
                        if ser.in_waiting > 0:
                            line = ser.readline().decode('utf-8').rstrip()
                            print(line)
                            #if line == "salir":
                            #flag_peso = 0
                            #e.clear()
                elif medicion2 == "medir":
                    ser.write(b"medir\n")
                    flag_peso = 1
                    # Echo serial lines until the device sends "finp",
                    # then print the final weight line and stop.
                    while flag_peso == 1:
                        if ser.in_waiting > 0:
                            line = ser.readline().decode('utf-8').rstrip()
                            print(line)
                            if line == "finp":
                                line = ser.readline().decode('utf-8').rstrip()
                                print(line)
                                flag_peso = 0
                else:
                    print("Instrucción no válida")
            else:
                print("Instrucción no válida")
    # Reset by pressing CTRL + C
    except KeyboardInterrupt:
        print("Measurement stopped by User")
        GPIO.cleanup()
| [
"noreply@github.com"
] | MariaFernandaG.noreply@github.com |
3a9ee750bfdb695244f25d1a4fe174c316d25b0b | 0125394835e95f5fb4b47ec2a1bc7d5a555b81c4 | /card_layout test.py | db268c3417a9c214541f02a291029381766e7690 | [] | no_license | jayesh59/05-Blackjack | a7c91214bcba446eb2e48502ba5e55ff90ecd344 | ee75275fcc5fcc04b9bca18e40585106ee824ecb | refs/heads/master | 2023-04-13T22:43:38.336361 | 2021-04-27T03:02:45 | 2021-04-27T03:02:45 | 257,320,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,097 | py | import cv2
import numpy as np
import matplotlib.pyplot as plt
#Initiation:
Spade = cv2.imread('Spade.jpg')
Diamond = cv2.imread('Diamond.jpg')
Club = cv2.imread('Club.jpg')
Heart = cv2.imread('Heart.jpg')
shapes = [Spade, Diamond, Club, Heart]
for s in shapes: _, s = cv2.threshold(s, 127, 255, cv2.THRESH_BINARY)
resized_shapes = []
for s in shapes:
s_copy = s.copy()
s_copy = cv2.resize(s_copy, (0,0), s_copy, 0.2, 0.2)
resized_shapes.append(s_copy)
#Base For Cards:
shape = (133, 75, 3)
img = np.ones(shape, dtype = np.uint16) * 255
img_copy = img.copy
c = 0
cards = []
for s in resized_shapes:
img = np.ones(shape, dtype = np.uint16) * 255
img_copy = img.copy()
y = int((img.shape[0] - s.shape[0])/2)
x = int((img.shape[1] - s.shape[1])/2)
w,h = (img.shape[0]-3, img.shape[1]-24)
img_copy = cv2.putText(img_copy, '10', (0, 15), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,0), 2)
img_copy[y:y+s.shape[0], x:x+s.shape[1]] = s
cards.append(img_copy)
for i in cards:
plt.imshow(i)
plt.show()
plt.imshow(cards[0])
plt.show()
| [
"51175318+jayesh59@users.noreply.github.com"
] | 51175318+jayesh59@users.noreply.github.com |
f150726c58233152b08d58900602edbd74120923 | 8f6aa9ac9c8c2e409875bbf36fbc49b3eb37d88b | /enthought/util/updates/info_file.py | 8b990c36ca53d32b4dff8ed8376bbc10a937fb35 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | enthought/etsproxy | 5660cf562c810db2ceb6b592b6c12274bce96d73 | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | refs/heads/master | 2023-03-27T04:51:29.297305 | 2020-12-02T09:05:18 | 2020-12-02T09:05:18 | 1,632,969 | 3 | 1 | NOASSERTION | 2020-12-02T09:05:20 | 2011-04-18T22:29:56 | Python | UTF-8 | Python | false | false | 6,573 | py |
import warnings
warnings.warn("Module is deprecated.", DeprecationWarning)
# NOTE(review): `import md5` is the Python 2 module (hashlib replaced it),
# and `warnings` is imported twice (above and below); kept as-is.
import md5
import os
import re
import warnings
from xml.etree import ElementTree as ET

#========================================================================
# Regex helpers for parsing file names and validating checksums.
#========================================================================
# Matches "<name>-<version>..." file names, e.g. "pkg-1.2.3.egg".
_version_in_name = re.compile("(\S*)[-](\d+\.\d+\.*\d*)\S*")
def _get_filename(filename):
    """Extract the base name (the part before '-<version>') from *filename*.

    Raises ValueError when no name/version pattern is found.
    (Python 2 raise syntax; this module predates Python 3.)
    """
    match = _version_in_name.search(filename)
    if match is None:
        raise ValueError, "Could not find name in filename: " + filename
    return match.group(1)
def _get_version(filename):
    """Extract the dotted version string embedded in *filename*.

    Raises ValueError when no version pattern is found.
    """
    match = _version_in_name.search(filename)
    if match is None:
        raise ValueError, "Could not find version in filename: " + filename
    return match.group(2)
def _get_checksum(filename):
    """Return the md5 hex digest of the target file.

    The target path is *filename* with its extension stripped (the .info
    suffix), read in its entirety from disk.
    """
    base, ext = os.path.splitext(filename)
    data = open(base).read()
    return md5.new(data).hexdigest()
# Regexes for the "key: value" lines of the .info format, plus a lookup
# table mapping each field to its regex and its fallback extractor
# (used by InfoFile.from_info_file when the field is absent).
filename_re = re.compile('filename: \s*(.*)\n')
version_re = re.compile('version: \s*(.*)\n')
checksum_re = re.compile('checksum: \s*(.*)\n')
desc_re = re.compile('\ndescription:\n')
codedict = {'filename':{'re':filename_re,
                        'get':_get_filename},
            'version': {'re':version_re,
                        'get':_get_version},
            'checksum': {'re':checksum_re,
                        'get':_get_checksum}
            }
class InfoFile:
    """Representation of an .info file, which provides metadata of another
    file (its "target").

    Important methods:

    from_info_file(filename)
        classmethod --- construct an InfoFile from a .info file on disk.
        Missing fields are recovered heuristically:
        filename/version: extracted from the .info file's own name
        checksum: md5 computed from the target file
        description: everything after the "description:" line

    to_xml() / to_xml_str() / to_info_str()
        serialize this object as an ElementTree element, an XML string,
        or the .info text format.
    """

    # The filename of the update_file. This is not the full path -
    # see **location** below.
    filename = ""

    # The version of the target file
    version = None

    # Checksum of the target file
    checksum = None

    # A multi-line HTML document describing the changes between
    # this version and the previous version
    description = ""

    # The reported location of where self.filename can be found. This gets
    # prepended to self.filename to form the full path. Typically this will be
    # an HTTP URL, but this can be a URI for a local or LAN directory.
    # This field usually gets set by an external tool, and is not present
    # in the .info format.
    location = "./"

    # A function that takes a string (self.version) and returns something
    # that can be used to compare against the version-parsed version of
    # another InfoFile object.
    version_parser = None

    #========================================================================
    # Constructors
    #========================================================================

    @classmethod
    def from_info_file(cls, filename):
        """ Construct an InfoFile instance from a .info file on disk.
        """
        # Renamed local from `str` to avoid shadowing the builtin.
        text = open(filename).read()
        obj = cls()
        for attr in ['filename', 'version', 'checksum']:
            funcdict = codedict[attr]
            match = funcdict['re'].search(text)
            if match is None:
                # Field absent from the file: derive it from the filename
                # (or, for checksum, from the target file's contents).
                value = funcdict['get'](filename)
            else:
                value = match.group(1)
            setattr(obj, attr, value)
        match = desc_re.search(text)
        if match is None:
            warnings.warn("Info file " + filename + " lacks a description: field")
        else:
            beg, end = match.span()
            start = text.find('\n', end)
            obj.description = text[start:]
        return obj

    @classmethod
    def from_target_file(cls, filename):
        """ Construct an InfoFile given the filename of the target file.
        """
        obj = cls(filename=filename)
        # Try to glean a version number from the file name
        try:
            version = _get_version(filename)
            obj.version = version
        except ValueError:
            pass
        return obj

    @classmethod
    def from_xml(cls, bytes):
        """ Returns a new InfoFile instance from a multi-line string of
        XML data
        """
        raise NotImplementedError

    def __init__(self, **kwargs):
        # Do a strict Traits-like construction: only the known attributes
        # are accepted; unknown keyword arguments are ignored.
        for attr in ("filename", "version", "checksum", "description",
                     "location", "version_parser"):
            if attr in kwargs:
                setattr(self, attr, kwargs[attr])
        return

    #========================================================================
    # Public methods
    #========================================================================

    def to_xml(self):
        """ Return an ElementTree <file> element describing this object. """
        root = ET.Element("file")
        for attrname in ("version", "filename", "location", "checksum", "description"):
            node = ET.SubElement(root, attrname)
            node.text = getattr(self, attrname)
        return root

    def to_xml_str(self):
        """ Returns a multi-line string of XML representing the information in
        this object.
        """
        return ET.tostring(self.to_xml())

    def to_info_str(self):
        """ Returns a multi-line string in the .info file format
        """
        lines = []
        for attr in ["filename", "version", "checksum"]:
            lines.append(attr + ": " + getattr(self, attr))
        return "\n".join(lines) + "\ndescription:\n" + self.description + "\n"

    def __cmp__(self, other):
        """ Allows for comparing two InfoFile objects so they can
        be presented in version-sorted order. This is where we parse
        and interpret the **version** string attribute.
        """
        # TODO: Do something more intelligent here, if version parsers are missing
        if self.version_parser is not None:
            self_ver = self.version_parser(self.version)
        else:
            self_ver = self.version
        if other.version_parser is not None:
            other_ver = other.version_parser(other.version)
        else:
            other_ver = other.version
        if self_ver < other_ver:
            return -1
        # BUG FIX: the original read `self.ver`, an attribute that does not
        # exist, so comparing two equal versions raised AttributeError.
        elif self_ver == other_ver:
            return 0
        else:
            return 1
| [
"robert.kern@gmail.com"
] | robert.kern@gmail.com |
fd91de5e21ca19d5f64f3219747ad6809612c353 | 10c40d2a919a69f4a680efffd17e326e25e9e73d | /sigopt/magics.py | d65810ab869a51a5098b128e4a519ada018c39f7 | [
"MIT"
] | permissive | XiaozhiShenNovelis/sigopt-python | 34b8930397bd364376a0daef6baaac7f3d34e8fc | 3ba596cfae37a8f987c61f87055b8081a12aeaf0 | refs/heads/master | 2023-08-23T00:59:19.555102 | 2021-10-19T19:25:57 | 2021-10-19T19:25:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,430 | py | import http
import io
import sys
import yaml
import IPython
from IPython.core.magic import (
Magics,
cell_magic,
magics_class,
)
from .config import config
from .interface import get_connection
from .log_capture import NullStreamMonitor, SystemOutputStreamMonitor
from .run_context import global_run_context
from .factory import SigOptFactory
from .defaults import get_default_project
from .validate import validate_experiment_input, ValidationError
from .logging import print_logger
from .exception import ApiException
def get_ns():
    """Return the locals of the user's interactive IPython frame.

    Walks up the call stack until it finds a frame whose locals bind
    ``exit`` to IPython's ExitAutocall -- the marker of the interactive
    namespace.  sys._getframe raises ValueError if the walk runs past
    the top of the stack without a match.
    """
    # NOTE(taylor): inspired by https://github.com/ipython/ipython/blob/master/IPython/core/interactiveshell.py
    # Walk up the stack trace until we find the 'exit' command
    stack_depth = 1
    while True:
        frame = sys._getframe(stack_depth)
        f_locals = frame.f_locals
        try:
            if isinstance(f_locals['exit'], IPython.core.autocall.ExitAutocall):
                return f_locals
        except KeyError:
            pass
        stack_depth += 1
@magics_class
class SigOptMagics(Magics):
    """IPython cell magics for SigOpt: %%experiment, %%run and %%optimize."""

    def __init__(self, shell):
        super().__init__(shell)
        self._connection = get_connection()
        # Set by the %%experiment magic; required before %%optimize.
        self._experiment = None
        self._factory = SigOptFactory(get_default_project())

    def setup(self):
        """Tag API traffic with the notebook/IPython user agent."""
        config.set_user_agent_info([
            'Notebook',
            '/'.join(['IPython', IPython.__version__]),
        ])

    @cell_magic
    def experiment(self, _, cell):
        """Create a SigOpt experiment from the cell body.

        The cell is evaluated in the user namespace; a dict is used
        directly, anything else is parsed as YAML.  Validation and
        bad-request API errors are logged rather than raised.
        """
        ns = get_ns()
        # pylint: disable=eval-used
        cell_value = eval(cell, ns)
        # pylint: enable=eval-used
        if isinstance(cell_value, dict):
            experiment_body = dict(cell_value)
        else:
            experiment_body = yaml.safe_load(io.StringIO(cell_value))
        self.setup()
        try:
            validated = validate_experiment_input(experiment_body)
        except ValidationError as validation_error:
            print_logger.error("ValidationError: %s", str(validation_error))
            return
        try:
            self._experiment = self._factory.create_prevalidated_experiment(validated)
        except ApiException as api_exception:
            if api_exception.status_code == http.HTTPStatus.BAD_REQUEST:
                print_logger.error("ApiException: %s", str(api_exception))

    def exec_cell(self, run_context, cell, ns):
        """Execute a notebook cell inside *run_context*.

        Installs the run as the global run context, optionally logs the
        cell source, captures stdout/stderr when log collection is
        enabled, and always clears the global run context afterwards.
        """
        global_run_context.set_run_context(run_context)
        try:
            if config.cell_tracking_enabled:
                run_context.log_source_code(content=cell)
            stream_monitor = SystemOutputStreamMonitor() if config.log_collection_enabled else NullStreamMonitor()
            with stream_monitor:
                # pylint: disable=exec-used
                exec(cell, ns)
                # pylint: enable=exec-used
            stream_data = stream_monitor.get_stream_data()
            if stream_data:
                stdout, stderr = stream_data
                run_context.set_logs({'stdout': stdout, 'stderr': stderr})
        finally:
            global_run_context.clear_run_context()

    @cell_magic
    def run(self, line, cell):
        """Execute the cell once inside a fresh SigOpt run (name = line)."""
        ns = get_ns()
        name = None
        if line:
            name = line
        self.setup()
        run_context = self._factory.create_run(name=name)
        with run_context:
            self.exec_cell(run_context, cell, ns)

    @cell_magic
    def optimize(self, line, cell):
        """Execute the cell once per run of the experiment's optimization loop."""
        ns = get_ns()
        if self._experiment is None:
            raise Exception('Please create an experiment first with the %%experiment magic command')
        name = None
        if line:
            name = line
        self.setup()
        for run_context in self._experiment.loop(name=name):
            with run_context:
                self.exec_cell(run_context, cell, ns)
| [
"noreply@github.com"
] | XiaozhiShenNovelis.noreply@github.com |
9bde88a2b2416002fb786ff4cd29779e70a3c2e9 | b5be3680c2b3404ec9c6156466b7c276bff0cf01 | /salon/admin.py | 7596574aa4e46d3d86c1088f779911155c0efef1 | [] | no_license | aselya0012777/beauty-salon | e2cf861055fd10d6a6f29fcc8306c4be0199a117 | 86abc8c3c90b494bdd16275051cdab8b4b2493fb | refs/heads/master | 2023-07-13T22:09:16.991190 | 2021-09-04T11:37:11 | 2021-09-04T11:37:11 | 370,702,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | from django.contrib import admin
from .models import EmployeeSchedule, Salon, SalonServices,Employee,EmployeeSchedule
@admin.register(Salon)
class SalonAdmin(admin.ModelAdmin):
    # Change-list columns and sidebar filters for salons.
    list_display = ['name', 'address','number','rating']
    list_filter = ('name','address', 'rating')
@admin.register(SalonServices)
class SalonServicesAdmin(admin.ModelAdmin):
    # Change-list columns and sidebar filters for salon services.
    list_display = ['salon','name','price','duration']
    list_filter = ('salon', 'name','price','duration')
@admin.register(Employee)
class EmployeeAdmin(admin.ModelAdmin):
    # Change-list columns and sidebar filters for employees.
    list_display = ['name']
    list_filter = ('name', 'service')
@admin.register(EmployeeSchedule)
class EmployeeScheduleAdmin(admin.ModelAdmin):
    # Change-list columns and sidebar filters for employee schedules.
    list_display = ['employee','date','start_hour','end_hour']
    list_filter = ('employee','date','start_hour','end_hour')
| [
"aselya.0012@yahoo.com"
] | aselya.0012@yahoo.com |
6debc3f00966a216dd43e092a1d80453f069d876 | cfe737ca092cf949f99af67743e2a935f53baecf | /Natural-Language-Processing_prepprocessing_and_feature_extraction/TFIDF.py | 5ef16440787fbe0ebb5ab28f3b7296ae59f086b4 | [] | no_license | Sakil786/Natural_Language_processing | 409147d9c0fac900265c92724161c53d7a574372 | 0dd11c3c848b92837a73f5bc84e239cf3e36f27f | refs/heads/master | 2022-06-26T22:47:59.125026 | 2020-05-10T14:08:11 | 2020-05-10T14:08:11 | 255,063,391 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,020 | py |
import nltk
paragraph = """I have three visions for India. In 3000 years of our history, people from all over
the world have come and invaded us, captured our lands, conquered our minds.
From Alexander onwards, the Greeks, the Turks, the Moguls, the Portuguese, the British,
the French, the Dutch, all of them came and looted us, took over what was ours.
Yet we have not done this to any other nation. We have not conquered anyone.
We have not grabbed their land, their culture,
their history and tried to enforce our way of life on them.
Why? Because we respect the freedom of others.That is why my
first vision is that of freedom. I believe that India got its first vision of
this in 1857, when we started the War of Independence. It is this freedom that
we must protect and nurture and build on. If we are not free, no one will respect us.
My second vision for India’s development. For fifty years we have been a developing nation.
It is time we see ourselves as a developed nation. We are among the top 5 nations of the world
in terms of GDP. We have a 10 percent growth rate in most areas. Our poverty levels are falling.
Our achievements are being globally recognised today. Yet we lack the self-confidence to
see ourselves as a developed nation, self-reliant and self-assured. Isn’t this incorrect?
I have a third vision. India must stand up to the world. Because I believe that unless India
stands up to the world, no one will respect us. Only strength respects strength. We must be
strong not only as a military power but also as an economic power. Both must go hand-in-hand.
My good fortune was to have worked with three great minds. Dr. Vikram Sarabhai of the Dept. of
space, Professor Satish Dhawan, who succeeded him and Dr. Brahm Prakash, father of nuclear material.
I was lucky to have worked with all three of them closely and consider this the great opportunity of my life.
I see four milestones in my career"""
# Cleaning the texts
import re
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
# NOTE(review): `ps` (the stemmer) is created but never used; only the
# lemmatizer is applied below.
ps = PorterStemmer()
wordnet=WordNetLemmatizer()
sentences = nltk.sent_tokenize(paragraph)
corpus = []
# Per sentence: keep letters only, lowercase, drop English stopwords,
# lemmatize, and rejoin into a cleaned string.
for i in range(len(sentences)):
    review = re.sub('[^a-zA-Z]', ' ', sentences[i])
    review = review.lower()
    review = review.split()
    review = [wordnet.lemmatize(word) for word in review if not word in set(stopwords.words('english'))]
    review = ' '.join(review)
    corpus.append(review)
# Creating the TF-IDF model
from sklearn.feature_extraction.text import TfidfVectorizer
cv = TfidfVectorizer()
X = cv.fit_transform(corpus).toarray()
| [
"sakilansari4@gmail.com"
] | sakilansari4@gmail.com |
b012696da3f84d3dfe7b35af80765087353bc29f | a8aa0624c8367d77079366a16671793978f47716 | /traine_genetic.py | 3542b9d4d11f49c39ac4edfad0392a187d79f0af | [] | no_license | Arty-Facts/battleships | 35a976dddf2224999ea9d99663a24479468ccaee | 5c7ede1d5dd49cd5925d4e3715728222411babfa | refs/heads/master | 2022-11-22T06:01:26.600460 | 2020-03-02T07:10:43 | 2020-03-02T07:10:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,987 | py | from ML.neural_tagger_trainer import train_neural
from ML.nn_agent import NN_Agent
from ML.training_agent import Train
from ML.evaluvate import bench
from lib.world import World
from lib.state import State
from lib.ship import Ship
from config import *
from time import time
from random import shuffle
from pathlib import Path
import torch
PATH
def train(model):
    """Run one training round starting from *model* and save the checkpoint.

    *model* is a checkpoint path (or "" for a fresh start) whose file name
    encodes "<score>.<generation>"; the result is saved under PATH as
    "<benchmark score>.<generation + 1>".
    """
    start = time()
    # Split "<score>.<generation>" out of the file name; a dot-less name
    # means generation 1.
    # NOTE(review): a name containing more than one '.' makes the
    # two-element unpacking below raise ValueError — confirm checkpoint
    # names are always "<score>.<gen>".
    name_gen = Path(model).name.split(".")
    gen = 1
    if len(name_gen) == 1:
        name = name_gen[0]
    else:
        name, gen = name_gen
    print("Started Taining")
    print("Models start at:", name, "Genaration", gen)
    # Train/State/World/Ship and TRAINING_ROUNDS come from the project
    # imports at the top of the file.
    network , optimizer = train_neural(Train ,State , World, Ship, n=TRAINING_ROUNDS, model=model)
    print("Training Done in {:.2f} s".format((time() - start)))
    score = bench(network, BENCHMARK)
    print("Resulting score:", score)
    # Persist model + optimizer state; the file name records the benchmark
    # score and the incremented generation counter.
    torch.save({
        'model_state_dict': network.model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }, f"{PATH}/{int(score)}.{int(gen)+1}")
def main():
    """Endless evolutionary loop: train every checkpoint in PATH, then prune.

    Checkpoint files are named "<score>.<generation>"; each pass retrains
    every survivor and then deletes duplicate-score and overflow files so
    the population stays near GENARATIONS.
    """
    while True:
        path = Path(PATH)
        # Each entry is [score_str, generation_str, Path].
        # NOTE(review): files without exactly one '.' break the 3-element
        # shape assumed by the unpacking/indexing below — confirm PATH only
        # contains checkpoint files.
        models = [str(x.name).split('.') + [x] for x in path.iterdir() if x.is_file()]
        # NOTE(review): scores are compared as *strings*, so the order is
        # lexicographic ("100" sorts before "99"); sorting on int(score)
        # looks intended — TODO confirm before relying on the pruning order.
        models = sorted(models)
        for m in models:
            print(m)
        if len(models) == 0:
            train("")
        else:
            for s,g, m in models:
                train(m)
        models = sorted([str(x.name).split('.') + [x] for x in path.iterdir() if x.is_file()])
        removed = 0
        # Prune same-score duplicates while the population exceeds 1.5x GENARATIONS.
        if len(models) > GENARATIONS*1.5:
            for i in range(1, len(models)):
                if models[i-1][0] == models[i][0]:
                    models[i-1][2].unlink()
                    removed += 1
                    if not (len(models) - removed > GENARATIONS*1.5):
                        break
        # Hard cap: drop everything past the first GENARATIONS+1 entries.
        if len(models) > GENARATIONS:
            for s, g , m in models[GENARATIONS+1:]:
                m.unlink()
if __name__ == "__main__":
    import sys  # NOTE(review): imported but never used
    main()
"artal938@student.liu.se"
] | artal938@student.liu.se |
0cf7cd7851932bf83ba266a23941fc3516d97cab | 722905ff407bbabcfe9c3a0b1f848ef3bb7a9571 | /randomforest1.py | 8bf8cf1c364c09a6cd292713d3bb6697559f92be | [] | no_license | Anupwilson/datascience-python_code | 469d4178ab3b272e84d67b953b632c36438ab0c2 | 5e067248866c241dc1ece30044540e8a969196e0 | refs/heads/main | 2023-04-01T17:06:21.993684 | 2021-04-23T16:10:07 | 2021-04-23T16:10:07 | 309,037,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | import pandas as pd
import streamlit as st
from sklearn.ensemble import RandomForestClassifier
st.title('Model Deployment: random forest classifier')
st.sidebar.header('User Input Parameters')
def user_input_features():
    """Collect the four numeric model inputs from the Streamlit sidebar.

    Returns a one-row DataFrame with the upper-case column names the rest
    of the page expects.
    """
    caller_id = st.sidebar.number_input("Insert the callerid")
    open_by = st.sidebar.number_input("Insert the open by")
    loc = st.sidebar.number_input("Insert the loc")
    category = st.sidebar.number_input("Insert the category")
    row = {
        'CALLERID': caller_id,
        'OPENBY': open_by,
        'LOC': loc,
        'CATEGORY': category,
    }
    return pd.DataFrame(row, index=[0])
df = user_input_features()
st.subheader('User Input parameters')
st.write(df)
# Train a fresh model on every page run (no caching).
incident = pd.read_csv("final_data.csv")
incident = incident.dropna()
X = incident.loc[:, ['caller_id', 'open_by', 'loc', 'category']]
Y = incident.loc[:, 'i_impact']
clf = RandomForestClassifier()
clf.fit(X, Y)
# NOTE(review): `df` uses upper-case column names ('CALLERID', ...) while the
# model was fitted on lower-case ones ('caller_id', ...); recent scikit-learn
# versions warn or error on mismatched feature names — confirm/rename.
prediction = clf.predict(df)  # NOTE(review): computed but never displayed
prediction_proba = clf.predict_proba(df)
st.subheader('Predicted Result')
# Assumes binary classification with the positive class in column 1 — TODO confirm.
st.write('high impact' if prediction_proba[0][1] > 0.5 else 'not high impact')
st.subheader('Prediction Probability')
st.write(prediction_proba)
"noreply@github.com"
] | Anupwilson.noreply@github.com |
68630c5fbfab57458f2f69b296ec886ae0ac252a | 869bbe81819d15a5a1bfdf591e909d15d41eba33 | /src/nlp_utils/simple_tokenizer.py | c357607198bd69cc76fed14c4ef35a3a5f1949c0 | [] | no_license | LindaMoreau08/nlp_utils | c6b5899b46760c1306c8bf01028d46444fb81d0e | b32582343b764a94af6630ecde196e5e27773c2c | refs/heads/main | 2023-07-02T06:08:42.412320 | 2021-08-04T08:09:42 | 2021-08-04T08:09:42 | 392,092,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,960 | py | # -*- coding: utf-8 -*-
# TODO: turn this into a class and implement it correctly!
import numpy
import regex
import stopwords
from w3lib.html import replace_entities
lang_stopwords = stopwords.get_stopwords('en')
NORM_prefix = 'norm_'
# TOKEN TYPES (TT_)l
TT_email = 'email'
TT_emoji = 'emoji'
TT_hashtag = 'hashtag'
TT_normtag = 'norm_tag'
TT_number = 'number'
TT_punct = 'punct'
TT_quotation = 'quotation'
TT_space = 'space'
TT_symbol = 'symbol'
TT_url = 'url'
TT_username = 'username'
TT_unk = 'unk'
TT_word = 'word'
# TOKEN SUBTYPES
TS_emoticon = 'emoticon'
TS_emoji = 'emoji'
TS_flag = 'flag'
TS_ordinal = 'ordinal'
TS_cardinal = 'cardinal'
TS_money = 'monetary'
TS_acronym = 'acronym'
TS_abbreviation = 'abbreviation'
# single codepoint flags
Base_flags = ['⚐', '⚑', '⛳', '⛿', '🎌', '🏁', '🏱', '🏳', '🏴', '🚩']
Space_reguex = r'[\p{Z}\p{C}]+'
# TODO: add from unicode chart
Punct_pairs = {")": "(", "]": "[", "}": "{", "»": "«", ">": "<", "´": "`", "/": "\\"}
Char_subs = {"0": 'o', "1": 'i', "3": 'e', "4": 'a', "5": "s", "7": 't', "8": 'a', "9": 'g', "@": 'a', "$": 's', "!": 'i'}
emoji = r'[\p{Emoji=Yes}&&[^\u00A9\u00Ae\u0030-\u0039\u203c\u2049\u2122]]'
emoticon_1 = r'(?:[:;][*\-]?[Ddpb\)\(\}\{o\]])|(?:[Ddpb\)\(\}\{o\]][*\-]?[:;])'
emoticon_2 = r'[03578BbPpOODdXx×VvÞþ][\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}\p{Lm}\p{InBOX_DRAWING}]+[03578BbPpOODdXx×VvÞþ]?0?3?'
emoticon_3 = r'[\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}\p{InBOX_DRAWING}]+[\p{L}\p{N}]{1,2}[\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}\p{Lm}\p{InBOX_DRAWING}]*[oO0.]?3?[\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}\p{InBOX_DRAWING}]*'
patterns = {
'html_codes': r'((?:&(?:#?[0-9a-f]+|[a-z]+);)',
'twitter_usernames': r'(?:(?<![\p{L}\p{N}])@+[\w_]+)',
'hashtags': r'(?:\#+[\w_]+[\w\'_\-]*[\w_]+)',
'norm_tags': r'(?:norm__?[_A-Z]+)',
'tags': r'(?:<\w+\/>)',
'email': r'(?:\b[\p{L}\p{N}][\p{L}\p{N}._%+\!-]+@(?:[A-Za-z0-9.\-]+\.[A-Za-z]{2,4}|\[?\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\]?))',
'url': r'(?:(?:http|ftp|file|mailto|data|irc)s?:\/\/\S+)|(?:www2?\.\S+)|(?:\S+\.(?:com|edu|gov|org|info|biz|mil|net|name|museum|[a-z]{2}\b)(?:\/\S+)?)',
'acronyms': r'(?:\b\p{L}{1,3}\.(?:\p{L}{1,3}\.)+(?:\p{L}\b)?)|(?:\b[\p{L}&&[^aeiouAEIOU]]{1,3}\.)|(?:\b[aeiouAEIOU][\p{L}&&[^aeiouAEIOU]]{1,3}\.)|(?:\b\p{L}\.)',
'slash_abbreviation': r'(?:\p{L}\/\p{L}(?:\/\p{L})*)',
'emoticon_1': r'(?:(?<![\p{L}\p{N}])[03578BbPpOODdXx×VvÞþ][\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}\p{Lm}\p{InBOX_DRAWING}]+[03578BbPpOODdXx×VvÞþ]?0?3?(?![\p{L}\p{N}\p{P}\p{S}]))',
'emoticon_2': r'(?:(?<![\p{L}\p{N}])[\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}\p{InBOX_DRAWING}]+[\p{L}\p{N}]{1,2}[\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}\p{Lm}\p{InBOX_DRAWING}]*[oO0.]?3?[\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}\p{InBOX_DRAWING}]*(?![\p{L}\p{N}\p{P}]))',
'ordinals': r'(?:\b[0-9]*(?:1st|2nd|3rd|11th|12th|13th|[4-9]th)\b)',
'masked_words_numalpha': r'(?:\b\p{N}{1,2}\p{L}[\p{L}\p{N}\p{M}@$#*+✞]*)',
'digits': r'(?:[.+\-]?\p{N}+(?:[.,\-:\/]*\p{N}+)*)',
'contractions': r'(?:(?:n[\'’]t\b)|(?:[\'’](?:[sdm]|(?:ld)|(?:ll)|(?:re)|(?:ve)|(?:nt))\b))',
'taboo_punct': r'(?:[$*@][\p{L}\p{M}\p{N}@$#*+✞]+(?:[\-\'’,.][\p{L}\p{M}\p{N}]+)*)',
'words': r'(?:\p{L}[\p{L}\p{M}\p{N}@$#*+✞]*(?:[\-\'’][\p{L}\p{M}\p{N}@$#*+✞]+)*)',
'punct_repeat': r'(?:(?:\.\.+|--+|__+|~~+|[!?][!?]+|\*\*+|//+|##+)[\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}°\p{InBOX_DRAWING}\p{InGEOMETRIC_SHAPES}]*)',
'punct': r'(?:[\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}°\p{InBOX_DRAWING}\p{InGEOMETRIC_SHAPES}]+)',
'key_caps': r'(?:[#\*0-9]\ufe0f?\u20e3)',
'flags': r'(?:[\U0001F1E6-\U0001F1FF]{2})|(?:[⚐⚑⛳⛿🎌🏁🏱🏳🏴🚩🏴](?:[\u200d\uFE0F]{1,2}\p{So}\uFE0F?)?)',
'emojis': r'(?:'+emoji+r'[\U0001f3fb-\U0001f3ff]?\uFE0F?(?:\u200D'+emoji+r'[\U0001f3fb-\U0001f3ff]?\uFE0F?)*)',
'symbols': r'(?:\p{S}+)',
'space': r'(?:[\p{Z}\p{C}]))'
}
def join_patterns(ignore_emoticons=False, ignore_taboo=False, split_punct=True, split_repeated_punct=False):
    """Build the master tokenization regex by OR-ing the `patterns` table.

    Flags optionally drop the emoticon/taboo patterns, substitute a
    single-char punctuation pattern, or drop the repeated-punctuation one.
    Returns a compiled `regex` pattern (V1 mode).

    NOTE(review): the 'html_codes' pattern contains one unmatched '(' and
    the 'space' pattern one extra ')'; the joined expression only balances
    because those two patterns are always included (first and last), which
    wraps the whole alternation in one group. Fix the individual patterns
    with care — changing either alone breaks compilation.
    """
    token_reguex = ''
    patterns_added = 0
    for pattern_name, pattern in patterns.items():
        # (removed a no-op `pattern_name = pattern_name` self-assignment)
        if split_repeated_punct and pattern_name == 'punct_repeat':
            continue
        if split_punct and pattern_name == 'punct':
            # Match single punctuation characters instead of runs.
            pattern = r'(?:\p{P})'
        if ignore_emoticons and pattern_name.find('emoticon') >= 0:
            continue
        if ignore_taboo and pattern_name.find('taboo') >= 0:
            continue
        if patterns_added > 0:
            token_reguex += '|'
        token_reguex += pattern
        patterns_added += 1
    return regex.compile(token_reguex, regex.V1)
big_regex = join_patterns()
def tokenize(text):
    """Return every token in *text* matched by the pre-compiled master pattern."""
    return big_regex.findall(text)
# Assemble token patterns into various regexes for use by the token tests.
NORM_TAG_REGEX = regex.compile(patterns['norm_tags'], regex.V1)
IS_NORM_TAG_REGEX = regex.compile('^norm_(?<norm_opt>[a-zA-Z_]+)$', regex.V1)
TOKEN_REGEX = big_regex
EMAIL_REGEX = regex.compile(patterns['email'], regex.V1) # contains email
IS_EMAIL_REGEX = regex.compile('^' + patterns['email'] + '$', regex.V1)
URL_REGEX = regex.compile(patterns['url'], regex.IGNORECASE|regex.V1) # contains url
IS_URL_REGEX = regex.compile('^' + patterns['url'] + '$', regex.IGNORECASE|regex.V1)
IS_DIGITS_REGEX = regex.compile("^" + patterns['digits'] + "$", regex.V1)
IS_MONEY_REGEX = regex.compile(r'^\p{Sc}' + patterns['digits'] + "$", regex.V1)
DIGITS_REGEX = IS_DIGITS_REGEX # TODO: for Salton, update when Salton changes have been pushed
IS_ORDINAL_REGEX = regex.compile("^" + patterns['ordinals'] + "$", regex.V1)
IS_SPACE_REGEX = regex.compile('^' + Space_reguex + '$', regex.V1)
SPACE_REGEX_SPLIT = regex.compile(Space_reguex, regex.V1)
SPACE_REGEX_KEEP = regex.compile('(' + Space_reguex + ')', regex.V1)
IS_PUNCT_REGEX = regex.compile(r'^\p{P}+$', regex.V1) # sequence of punctuation marks
IS_SYMBOL_REGEX = regex.compile("^" + patterns['symbols'] + "$", regex.V1)
IS_WORD_REGEX = regex.compile("^" + patterns['words'] + "$", regex.V1)
IS_ACRONYM_REGEX = regex.compile("^" + patterns['acronyms'] + "$", regex.V1)
STRIP_REGEX = r'^(?:[\s"\']|\\")+|(?:[\s"\']|\\")+$' # leading or trailing quotes (possibly escaped) and spaces
QUOTATION_REGEX = r'["“”][^"“”]+?["“”]'
IS_QUOTATION_REGEX = regex.compile(r'^["“”«][^"“”«»]+?["“”»]$', regex.V1)
USERNAME_REGEX = regex.compile(patterns['twitter_usernames'], regex.V1)
IS_USERNAME_REGEX = regex.compile('^' + patterns['twitter_usernames'] + '$', regex.V1)
HASHTAG_REGEX = regex.compile(patterns['hashtags'], regex.V1)
IS_HASHTAG_REGEX = regex.compile('^' + patterns['hashtags'] + '$', regex.V1)
EMOTICON_REGEX = regex.compile(patterns['emoticon_1'] + '|' + patterns['emoticon_2'], regex.V1)
IS_EMOTICON_REGEX = regex.compile('^((?:' + emoticon_1 + ')|(?:' + emoticon_2 + ')|(?:' + emoticon_3 + '))$', regex.V1)
EMOJI_REGEX = regex.compile(patterns['key_caps'] + '|' + patterns['flags'] + '|' + patterns['emojis'], regex.V1)
IS_EMOJI_REGEX = regex.compile(
'^' + patterns['key_caps'] + '|' + patterns['flags'] + '|' + patterns['emojis'] + '$', regex.V1)
CONTRACTION_REGEX = regex.compile(r'(?:(?:n[\'’]t\b)|(?:[\'’](?:[sdm]|(?:ld)|(?:ll)|(?:re)|(?:ve)|(?:nt))\b))', regex.V1)
IS_CONTRACTION_REGEX = regex.compile(r'^(?:(?:n[\'’]t\b)|(?:[\'’](?:[sdm]|(?:ld)|(?:ll)|(?:re)|(?:ve)|(?:nt))\b))$', regex.V1)
CONTRACTION_SPLIT_REGEX = regex.compile(
r'(?:^(?<word>\p{L}[\p{L}\p{M}]*?)(?<contraction>(?:n[\'’]t)|(?:[\'’](?:s|d|m|ld|ll|re|ve|nt)))$)', regex.V1)
FLAG_REGEX = regex.compile(patterns['flags'], regex.V1)
IS_FLAG_REGEX = regex.compile('^' + patterns['flags'] + '$', regex.V1)
KEY_CAP_REGEX = regex.compile(patterns['flags'], regex.V1)
IS_KEY_CAP_REGEX = regex.compile('^' + patterns['flags'] + '$', regex.V1)
IS_SLASH_ABBREVIATION_REGEX = regex.compile('^' + patterns['slash_abbreviation'] + '$', regex.V1)
# TODO: this is not terribly efficient to scan with so many regexes
def pre_process_text(text, options):
    """Apply the pre-tokenization normalisations selected in *options*.

    Recognised (all optional) keys: 'norm_codes' (decode HTML entities),
    'norm_quotations' (collapse quoted spans to the 'norm_quotation' tag),
    and 'norm_space' + 'keep_space' (collapse whitespace runs to ' ').
    Missing keys are treated as False, so partial option dicts are safe.
    """
    if options.get('norm_codes'):
        text = replace_entities(text)
    if options.get('norm_quotations'):
        # Replace each quoted span with the normalisation tag. The original
        # substituted the literal string "#{NORM_QUOTATION}" — Ruby string
        # interpolation left over from a port, not a defined Python name.
        text = regex.sub(QUOTATION_REGEX, NORM_prefix + TT_quotation, text)
    if options.get('norm_space') and options.get('keep_space'):
        text = regex.sub(SPACE_REGEX_SPLIT, ' ', text)
    return text
def regex_tokenize(text, keep_space=False, norm_space=False):
    """Backward-compatible alias for rgx_tokenize.

    Bug fix: the original called rgx_tokenize but dropped its result, so it
    always returned None.
    """
    return rgx_tokenize(text, keep_space=keep_space, norm_space=norm_space)
def rgx_tokenize(text, keep_space=False, norm_space=False, pre_process=True, ignore_emoticons=False, split_punct=False,
                 split_repeated_punct=False):
    """Tokenize *text* with the assembled master pattern; return a flat list.

    Bug fixes versus the original (a Ruby-to-Python port):
    - the options dict used the boolean *values* as its keys; proper string
      keys are passed now ('norm_codes'/'norm_quotations' default to False
      — TODO confirm the intended defaults),
    - Python str has no .scan(); regex.findall is used instead,
    - str.split() cannot take a compiled pattern; regex.split is used,
    - the map + numpy flatten of ragged lists is replaced by list.extend.
    """
    if pre_process:
        text = pre_process_text(text, {'norm_codes': False, 'norm_quotations': False,
                                       'norm_space': norm_space, 'keep_space': keep_space})
    tokenization_reguex = join_patterns(ignore_emoticons=ignore_emoticons, split_punct=split_punct,
                                        split_repeated_punct=split_repeated_punct)
    if keep_space:
        # Whitespace tokens are wanted: scan the text in one pass.
        tokens = regex.findall(tokenization_reguex, text)
    else:
        # Split on whitespace first, then scan each chunk.
        tokens = []
        for chunk in regex.split(SPACE_REGEX_SPLIT, text):
            tokens.extend(regex.findall(tokenization_reguex, chunk))
    return tokens
def retokenize(tokens, opts):
    """Re-scan *tokens* with a stricter pattern set and return a flat list.

    Tokens already classified (non-unk, or taboo words) pass through
    unchanged; money and unknown tokens are re-split with a pattern set
    that excludes emoticon/taboo patterns.

    Bug fixes: Python str has no .scan() (a Ruby-ism) — regex.findall is
    used; numpy.array(...).flatten() cannot flatten a ragged mix of strings
    and lists — list.extend is used instead.
    """
    tokenization_reguex = join_patterns(ignore_emoticons=True, ignore_taboo=True, split_punct=opts['split_punct'],
                                        split_repeated_punct=opts['split_repeated_punct'])
    retokenized = []
    for token in tokens:
        token_type = get_token_type(token)
        if is_money(token):
            retokenized.extend(regex.findall(tokenization_reguex, token))
        elif is_taboo(token) or token_type[0] != TT_unk:
            retokenized.append(token)
        else:
            retokenized.extend(regex.findall(tokenization_reguex, token))
    return retokenized
# TODO: Port taboo list
def is_taboo(text):
    """Placeholder taboo-word check with two hard-coded entries.

    (The real taboo list is still to be ported — see the TODO above.)
    """
    return text in ('shit', 'hell')
def is_stopword(text):
    """Return True if *text* (stripped, case-folded) is an English stopword."""
    return text.strip().lower() in lang_stopwords
# TODO: implement this loading elsewhere
def is_lang_stopword(text, lang='en'):
    """Return True if *text* is a stopword in language *lang* (ISO code).

    Bug fix: the original fetched the requested language's stopword list
    but then always tested against the module-level English list.
    """
    if lang == 'en':
        return is_stopword(text)
    the_stopwords = stopwords.get_stopwords(lang)
    return text.strip().lower() in the_stopwords
# TODO: implement features
#def has_feature(text, feature, language):
# my_featurizer.has_feature?(text, feature, language)
# --- Pattern-based extract/contains/is helpers -------------------------------
# `extract_*` return all matches, `contains_*` return a Match (truthy) or
# None, and `is_*` test whether the whole (usually stripped) text matches.
# NOTE(review): only is_email normalises its result to a bool with
# `is not None`; the other is_* helpers return Match-or-None — fine in a
# boolean context but inconsistent for callers comparing against True/False.
def extract_emails(text):
    return regex.findall(EMAIL_REGEX, text)
def contains_email(text):
    return regex.search(EMAIL_REGEX, text)
def is_email(text):
    return regex.match(IS_EMAIL_REGEX, text.strip()) is not None
def extract_urls(text):
    return regex.findall(URL_REGEX, text)
def contains_url(text):
    return regex.search(URL_REGEX, text)
def is_url(text):
    return regex.match(IS_URL_REGEX, text.strip())
def extract_hashtags(text):
    return regex.findall(HASHTAG_REGEX, text)
def contains_hashtag(text):
    return regex.search(HASHTAG_REGEX, text)
def is_hashtag(text):
    return regex.match(IS_HASHTAG_REGEX, text.strip())
def extract_usernames(text):
    return regex.findall(USERNAME_REGEX, text)
def contains_username(text):
    return regex.search(USERNAME_REGEX, text)
def is_username(text):
    return regex.match(IS_USERNAME_REGEX, text.strip())
def extract_emojis(text):
    return regex.findall(EMOJI_REGEX, text)
def contains_emoji(text):
    return regex.search(EMOJI_REGEX, text)
def is_emoji(text):
    return regex.match(IS_EMOJI_REGEX, text.strip())
def extract_emoticons(text):
    return regex.findall(EMOTICON_REGEX, text)
def contains_emoticon(text):
    return regex.search(EMOTICON_REGEX, text)
# TODO: reimplement emoticon file reading with detection of unknown emoticons, etc.
def is_emoticon(text):
    return regex.match(IS_EMOTICON_REGEX, text.strip())
def is_emoji_or_emoticon(text):
    return is_emoji(text) or is_emoticon(text)
def is_digits(text):
    return regex.match(IS_DIGITS_REGEX, text.strip())
def is_money(text):
    return regex.match(IS_MONEY_REGEX, text.strip())
def is_ordinal(text):
    return regex.match(IS_ORDINAL_REGEX, text.strip())
# NOTE(review): the helpers below match on the raw text (no .strip()),
# unlike those above — presumably deliberate for space/punct — confirm.
def is_space(text):
    return regex.match(IS_SPACE_REGEX, text)
def is_punct(text):
    return regex.match(IS_PUNCT_REGEX, text)
def is_symbol(text):
    return regex.match(IS_SYMBOL_REGEX, text)
def is_normtag(text):
    return regex.match(IS_NORM_TAG_REGEX, text)
def is_word(text):
    return regex.match(IS_WORD_REGEX, text)
def is_acronym(text):
    return regex.match(IS_ACRONYM_REGEX, text)
def is_slash_abbreviation(text):
    return regex.match(IS_SLASH_ABBREVIATION_REGEX, text)
def is_quotation(text):
    """True for a quoted span or for the quotation normalisation tag.

    Bug fix: the original compared against the literal string
    '#{NORM_prefix}#{TT_quotation}' — Ruby interpolation syntax left over
    from a port — which could never equal the tag actually produced.
    """
    text = text.strip().lower()
    return text == (NORM_prefix + TT_quotation).lower() or regex.match(IS_QUOTATION_REGEX, text)
def has_contraction(text):
    # Match (truthy) if *text* contains an English contraction suffix.
    return regex.search(CONTRACTION_REGEX, text)
def is_contraction(text):
    # NOTE(review): delegates to has_contraction, i.e. a *search*, not a
    # whole-string match — "don't stop" counts as a contraction. Confirm
    # this is intended; IS_CONTRACTION_REGEX exists but is unused here.
    return has_contraction(text)
def is_flag(text):
    # Single-codepoint base flags, or regional-indicator pairs / tag flags.
    return text.strip() in Base_flags or regex.match(IS_FLAG_REGEX, text.strip())
def all_symbol_or_punct(text, ignore_space=True):
    # True if every char is punctuation/symbol/mark (spaces optionally removed).
    if ignore_space:
        text = regex.sub(r'\p{Z}', '', text)
    return regex.match(r'^[\p{P}\p{S}\p{M}\p{Lm}]+$', text)
def all_emoji_chars(text, ignore_space=True):
    # Like all_symbol_or_punct but also admits box-drawing/geometric chars.
    # NOTE(review): uses \p{In_BOX_DRAWING} (underscored) while the pattern
    # table above uses \p{InBOX_DRAWING} — confirm both spellings are
    # accepted by the `regex` module.
    if ignore_space:
        text = regex.sub(r'\p{Z}', '', text)
    return regex.match(r'^[へ\p{P}\p{S}\p{M}\p{Lm}\p{In_BOX_DRAWING}\p{In_GEOMETRIC_SHAPES}]+$', text)
# TODO: consider whether \p{Lm} belongs
def symbol_mark_punct_count(text):
    """Count the punctuation, mark, and symbol characters in *text*."""
    hits = regex.findall(r'[\p{P}\p{M}\p{S}]', text)
    # findall returns [] when nothing matches, so len() covers both cases.
    return len(hits)
# get the token type and subtype, if applicable
def get_token_type(token_text):
    """Classify *token_text*; return [type, subtype] ('' when no subtype).

    Predicates are tried in priority order — the first that matches wins —
    so reordering the table changes classification.
    """
    ordered_checks = (
        (is_space, TT_space, ''),
        (is_quotation, TT_quotation, ''),
        (is_normtag, TT_normtag, ''),
        (is_email, TT_email, ''),
        (is_url, TT_url, ''),
        (is_acronym, TT_word, TS_acronym),
        (is_slash_abbreviation, TT_word, TS_abbreviation),
        (is_word, TT_word, ''),
        (is_hashtag, TT_hashtag, ''),
        (is_ordinal, TT_number, TS_ordinal),
        (is_digits, TT_number, TS_cardinal),
        (is_money, TT_number, TS_money),
        (is_username, TT_username, ''),
        (is_flag, TT_emoji, TS_flag),
        (is_emoji, TT_emoji, TS_emoji),
        (is_emoticon, TT_emoji, TS_emoticon),
        (is_punct, TT_punct, ''),
        (is_symbol, TT_symbol, ''),
    )
    for predicate, token_type, subtype in ordered_checks:
        if predicate(token_text):
            return [token_type, subtype]
    return [TT_unk, '']
| [
"lindamoreau08@gmail.com"
] | lindamoreau08@gmail.com |
e1d6a76f3ee7691607e6831868d4cd7850d105a8 | 60cde9d4107b3bb5724ec36618af4b4ecd692165 | /camera.py | 6a2f93f5e942721d3bb04ce5cbb64fdbe2e7022a | [] | no_license | Yemy/security-camera-in-python | 54cc176170c20c9b67dedefe037ced7410beb0d2 | eb9b2a3b549045c5980b12a50c9b43c143659e4a | refs/heads/main | 2023-08-02T16:56:27.780886 | 2021-10-09T10:41:52 | 2021-10-09T10:41:52 | 415,277,859 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | import cv2
capture = cv2.VideoCapture("http://192.168.1.5:8080/video")
while True:
_, frame = capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
mirror = cv2.flip(gray, -1)
cv2.imshow('Live', mirror)
if cv2.waitKey(1) == ord("q"):
break
capture.release()
cv2.destroyAllWindows()
| [
"yemybold@gmail.com"
] | yemybold@gmail.com |
d4b0ecde11724f063f3638035d6adf52935d4f31 | 211f7fab75d54945e98be68974d35ff2ab8ac94a | /client/constants.py | fd9e555539f15c3e61b257342c9a62ddd2a26e7b | [
"MIT"
] | permissive | heni/rem | de0693e7dbdf63539b564ca7923860c3b56d9fff | 7472bc3b106f512355cfc2ca646e8290d19661c6 | refs/heads/master | 2022-02-23T15:06:59.926472 | 2022-01-19T11:52:42 | 2022-01-19T11:52:42 | 9,545,619 | 15 | 6 | null | 2015-11-17T05:33:49 | 2013-04-19T13:05:55 | Python | UTF-8 | Python | false | false | 297 | py | #for Job class
#one week
NOTIFICATION_TIMEOUT = 604800
#two weeks
KILL_JOB_DEFAULT_TIMEOUT = 1209600
#Packet`s names policy
IGNORE_DUPLICATE_NAMES_POLICY = 0b001
WARN_DUPLICATE_NAMES_POLICY = 0b010
DENY_DUPLICATE_NAMES_POLICY = 0b100
DEFAULT_DUPLICATE_NAMES_POLICY = DENY_DUPLICATE_NAMES_POLICY
| [
"lexplua@41d65440-b5be-11dd-afe3-b2e846d9b4f8"
] | lexplua@41d65440-b5be-11dd-afe3-b2e846d9b4f8 |
f32dc0bf2193c95d85801ddc0ca99e3a0991e3fe | 133dbe47cf8d64d11dfaf2a1109f472c67f56136 | /tests/objects/test_time.py | 1b32001d7b6594eabc6b56e2ad8db8d78cbdecca | [
"MIT"
] | permissive | My-Novel-Management/storybuilderunite | a2bae6f3d79a8bc22d141663a2b09dde12556299 | c003d3451e237f574c54a87ea7d4fd8da8e833be | refs/heads/master | 2021-07-13T16:35:31.922363 | 2021-01-29T00:38:04 | 2021-01-29T00:38:04 | 230,050,595 | 1 | 0 | MIT | 2020-06-24T01:49:03 | 2019-12-25T06:05:42 | Python | UTF-8 | Python | false | false | 966 | py | # -*- coding: utf-8 -*-
'''
Time class test
===============
'''
import datetime
import unittest
from tests.testutils import print_testtitle, validate_with_fail
from builder.objects import time as tm
class TimeTest(unittest.TestCase):
    # Unit tests for builder.objects.time.Time.
    @classmethod
    def setUpClass(cls):
        # Print a banner once for the whole test class.
        print_testtitle(tm.__name__, 'Time class')
    def test_instance(self):
        # Each row: a leading bool (expected-success flag, presumably
        # consumed by validate_with_fail — confirm against testutils),
        # then constructor args followed by expected attribute values.
        data = [
            # (name, hour, minute, expect, exp_h, exp_m, exp_time)
            (True, 'test', 5, 20, 'test', 5, 20, datetime.time(5,20)),
        ]
        def checker(name, hour, minute, expect, exp_h, exp_m, exp_time):
            # Build a Time and verify every exposed attribute.
            tmp = tm.Time(name, hour, minute)
            self.assertIsInstance(tmp, tm.Time)
            self.assertEqual(tmp.name, expect)
            self.assertEqual(tmp.hour, exp_h)
            self.assertEqual(tmp.minute, exp_m)
            self.assertEqual(tmp.time, exp_time)
        validate_with_fail(self, 'class instance', checker, data)
| [
"nagisc007@yahoo.co.jp"
] | nagisc007@yahoo.co.jp |
a0bcdf633e6f26afa8a550121748d1798216dddb | 7994559f1baad2049751ac3c9d20455115d4a48b | /mySpartaSns/urls.py | c9d7678e8b3cf24e3b52e15eb598cf729615e6b6 | [] | no_license | normaljeon/mySpartaSns | 3f76f95aced89484520a8e6ad626b52ed73bfb6e | f934fac265a413dbcf74b04abde9eacb19ceb276 | refs/heads/main | 2023-06-07T01:40:06.608213 | 2021-06-26T16:13:56 | 2021-06-26T16:13:56 | 380,543,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | """mySpartaSns URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from . import views
# Route table: admin, two demo views, then the user and tweet apps
# mounted at the site root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('test/', views.base_response, name = 'first_test'),
    path('first/', views.first_view, name = 'first_view'),
    path('', include('user.urls')),
    path('', include('tweet.urls')),
]
| [
"inputjsh@gmail.com"
] | inputjsh@gmail.com |
e03435734d25d798b79681af85b6574d5cbf61f6 | a3ac96f0c7da0b6b03b4b52a35fbae551f488c1c | /Assignment_3_sol.py | 9f978d91192edcbd63e299340d61007e5743209e | [] | no_license | Sohaib-50/OOP-assignment-3 | 4b59c98e74c2ba892d6e746f52f736878c84bee6 | 2974a00927070d5cb328908f6cf01b50d7141a00 | refs/heads/master | 2022-11-08T05:56:38.310520 | 2020-06-30T20:02:01 | 2020-06-30T20:02:01 | 276,196,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,173 | py | #### Q1
##class Circle:
## def setRadius(self, r):
## self.radius = r
##
## def getRadius(self):
## return self.radius
##
## def setColor(self, c):
## self.color = c
##
## def getColor(self):
## return self.color
##
## def getCircumference(self):
## return 2 * (22 / 7) * self.radius
##
## def getArea(self):
## return (22 / 7) * (self.radius ** 2)
##
##a = Circle()
##a.setRadius(32)
##print(f"Radius of a: {a.getRadius()}")
##a.setColor("Blue")
##print(f"Area and circumference of a: {a.getArea()}, {a.getCircumference()}")
##print(f"Color of a: {a.getColor()}")
##a.setRadius(2)
##print(f"New radius of a: {a.getRadius()}")
#### Q2
##class BankAccount:
## current_balance = 0
##
## def withdraw(self):
## amount = float(input("Enter amount to be withdrawn: "))
## if amount <= self.current_balance:
## self.current_balance -= amount
## else:
## print("Not enough balance.")
##
## def deposit(self):
## amount = float(input("Enter amount to be deposited: "))
## self.current_balance += amount
##
## def balance(self):
## return self.current_balance
##
##Ahmed_account = BankAccount()
##Ahmed_account.deposit()
##Ahmed_account.withdraw()
##print(Ahmed_account.balance())
##Ahmed_account.current_balance += 800
##print(Ahmed_account.current_balance)
##Ahmed_account.withdraw()
##print(Ahmed_account.balance())
#### Q3
##class BankAccount:
## __current_balance = 0
##
## def withdraw(self):
## amount = float(input("Enter amount to be withdrawn: "))
## if amount <= self.__current_balance:
## self.__current_balance -= amount
## else:
## print("Not enough balance.")
##
## def deposit(self):
## amount = float(input("Enter amount to be deposited: "))
## self.__current_balance += amount
##
## def balance(self):
## return self.__current_balance
##
##Ahmed_account = BankAccount()
##Ahmed_account.deposit()
##Ahmed_account.withdraw()
##print(Ahmed_account.balance())
##Ahmed_account._BankAccount__current_balance += 800
##print(Ahmed_account._BankAccount__current_balance)
##Ahmed_account.withdraw()
##print(Ahmed_account.balance())
#### Q4
##class Worker:
## def setHoursWorked(self, h):
## self.__hoursWorked = h
##
## def changeRate(self, r):
## self.__wageRate = r
##
## def pay(self):
## return self.__hoursWorked * self.__wageRate
##
##Ahmed = Worker()
##Ahmed.setHoursWorked(2)
##Ahmed.changeRate(10)
##print(Ahmed.pay())
##Ahmed.setHoursWorked(4)
##print(Ahmed.pay())
##Ahmed.changeRate(20)
##print(Ahmed.pay())
#### Q5
##class Worker:
## def __init__(self, h=0, r=0):
## self.__hoursWorked = h
## self.__wageRate = r
##
## def setHoursWorked(self, h):
## self.__hoursWorked = h
##
## def changeRate(self, r):
## self.__wageRate = r
##
## def pay(self):
## return self.__hoursWorked * self.__wageRate
##
##Ahmed = Worker(r = 10)
##print(Ahmed.pay())
##Ahmed.setHoursWorked(3)
##print(Ahmed.pay())
##Ali = Worker(3)
##print(Ali.pay())
##Kamran = Worker(3, 5)
##print(Kamran.pay())
#### Q6
##class Vehicle:
## def __init__(self, w = 4, c = "white", m = 0):
## self.__noOfWheels = w
## self.__color = c
## self.__modelNo = m
##
## def getNumWheels(self):
## return self.__noOfWheels
##
## def getColor(self):
## return self.__color
##
## def getModelNum(self):
## return self.__modelNo
##
## def setColor(self, c):
## self.__color = c
##
##a = Vehicle(8, 'black', 2007)
##print(f"a: wheels = {a.getNumWheels()}, color = {a.getColor()}, Model Number = {a.getModelNum()}")
##a.setColor("green")
##print(f"a: wheels = {a.getNumWheels()}, color = {a.getColor()}, Model Number = {a.getModelNum()}")
##b = Vehicle(m = 2019)
##print(f"b: wheels = {b.getNumWheels()}, color = {b.getColor()}, Model Number = {b.getModelNum()}")
#### Q7
##class Engine:
## def __init__(self, e = 0, d = 1900):
## self.__engineNo = e
## self.__dateOfManufacture = d
##
## def getEngineNo(self):
## return self.__engineNo
##
## def getDateOfManufacture(self):
## return self.__dateOfManufacture
##
##a = Engine(d = 2012, e = 492)
##b = Engine(625)
##print(f"a: Engine Number = {a.getEngineNo()}, Date of manufacture = {a.getDateOfManufacture()}")
##print(f"b: Engine Number = {b.getEngineNo()}, Date of manufacture = {b.getDateOfManufacture()}")
#### Q8
##class Int:
## def __init__(self, i = 0):
## try:
## self.__i = int(i)
## except ValueError:
## print("Invalid value for integer.")
## del self
##
## def setValue(self, i=0):
## self.__init__(i)
##
## def getValue(self):
## return self.__i
##
## def displayValue(self):
## print(self.__i)
##
## def add(self, a):
## return Int(self.__i + a.getValue())
##
##x = Int()
##y = Int("32")
##z = Int(-5)
##
##x = y.add(z)
##x.displayValue()
#### Q9
##class TollBooth:
## def __init__(self):
## self.__numCars = 0
## self.__moneyCollected = 0
##
## def payingCar(self):
## self.__numCars += 1
## self.__moneyCollected += 50
##
## def nopaycar(self):
## self.__numCars += 1
##
## def display(self):
## print(f"Cars passed: {self.__numCars}, Cash collected: {self.__moneyCollected}")
##
##TollBooth_x = TollBooth()
##import keyboard
##while 1:
## inp = input("Press 1 to count paying car, 2 to count nonpaying car, 0 to exit: ")
## if inp == "1":
## TollBooth_x.payingCar()
## elif inp == "2":
## TollBooth_x.nopaycar()
## elif inp == "0":
## TollBooth_x.display()
## print()
## break
## else:
## print("Invalid input")
## print()
#### Q10
##class Time:
## def __init__(self, hours=0, minutes=0, seconds=0):
## if any((type(seconds) != int, type(minutes) != int, type(hours) != int)):
## raise ValueError("Time values can only be ints")
## else:
## self.__seconds = int(seconds) % 60
## self.__minutes = ((seconds // 60) + int(minutes)) % 60
## self.__hours = ((minutes // 60) + int(hours)) % 24
##
##
## def displayTime(self):
## print(f"{self.__hours:02}:{self.__minutes:02}:{self.__seconds:02}")
##
## def addTime(self, t):
## if not isinstance(t, Time):
## raise ValueError("Can not add non-Time type object to Time type object")
## self.__seconds += t._Time__seconds
## self.__minutes += (self._Time__seconds // 60) + t._Time__minutes
## self.__hours += (self._Time__minutes // 60) + t._Time__hours
## self.__seconds %= 60
## self.__minutes %= 60
## self.__hours %= 24
##
##t1 = Time(23, 59,25)
##t1.displayTime()
##
##t2 = Time(61,5,45)
##t2.displayTime()
##
##t1.addTime(t2)
##t1.displayTime()
## Q11
##In ocean navigation, locations are measured in degrees and minutes of latitude and longitude.
##Thus if you’re lying off the mouth of Papeete Harbor in Tahiti, your location is 149 degrees 34.8
##minutes west longitude, and 17 degrees 31.5 minutes south latitude. This is written as 149°34.8’ W,
##17°31.5’ S. There are 60 minutes in a degree (an older system also divided a minute into 60 seconds,
## but the modern approach is to use decimal minutes instead).
##Longitude is measured from 0 to 180 degrees, east or west from Greenwich, England, to the international
##dateline in the Pacific. Latitude is measured from 0 to 90 degrees, north or south from the equator to
##the poles. Write code to create a class Angle that includes three member variables: int for degrees, a
##float for minutes, and a char for the direction letter (N, S, E, or W). This class can hold either a latitude
##variable or a longitude variable. Write one method to obtain an angle value (in degrees and minutes) and a direction
##from the user, and a second to display the angle value in 179°59.9’ E format. Also write a three-argument constructor.
##Write a main program that displays an angle initialized with the constructor, and then, within a loop, allows the user
##to input any angle value, and then displays the value. You can use the hex character constant ‘\xF8’, which usually
##prints a degree (°) symbol.
#### Q12
##class Tracker:
## count = 0
## def __init__(self):
## Tracker.count += 1
## self.__serialNo = Tracker.count
##
## def tellSerialNo(self):
## print(f"I am object number {self.__serialNo}")
##
##a = Tracker()
##b = Tracker()
##c = Tracker()
##a.tellSerialNo()
##b.tellSerialNo()
##c.tellSerialNo()
## Q13
| [
"noreply@github.com"
] | Sohaib-50.noreply@github.com |
6e39adc45883cef0bb9f57391ecb69ad5f28f226 | 77ed9133c4c184a93539d0eea303f2da9e4448cf | /models/CNN/LeNet.py | 0350cd1c51aef3a7d7ecd34a5eadd4fd2e175f43 | [] | no_license | AgFeather/StudyNote | 16e1bc34196cd2bce2ef26aed7eb3010e6adba5e | 4c258e73153cf38a6392937a75fa560b2a6bcc97 | refs/heads/master | 2020-06-22T11:31:10.176663 | 2019-10-10T13:08:49 | 2019-10-10T13:08:49 | 197,707,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,594 | py | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
IMAGE_SIZE = 28
NUM_CHANNELS = 1
NUM_LABELS = 10
# 第一层卷积层的尺寸和深度
CONV1_DEEP = 32
CONV1_SIZE = 5
# 第二层卷积层的尺寸和深度
CONV2_DEEP = 64
CONV2_SIZE = 5
# 全连接层的节点个数
FC_SIZE = 512
class LeNetModel():
    def __init__(self, flags):
        """Store the runtime flags and build the TF graph immediately."""
        self.flags = flags
        # Construct the network ops first, then the training op
        # (build_optimizer is defined later in the file).
        self.build_model()
        self.build_optimizer()
def build_model(self):
self.input_x = tf.placeholder(tf.float32, [None, IMAGE_SIZE * IMAGE_SIZE], name='input_x')
self.target_y = tf.placeholder(tf.float32, [None, NUM_LABELS], name='target_y')
self.keep_drop = tf.placeholder(tf.float32)
image_x = tf.reshape(self.input_x, [-1, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS])
with tf.variable_scope('layer1-conv1'):
conv1_weights = tf.get_variable('weight',
[CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
initializer=tf.truncated_normal_initializer(stddev=0.1))
conv1_biases = tf.get_variable('bias',
[CONV1_DEEP], initializer=tf.constant_initializer(0.0))
conv1 = tf.nn.conv2d(image_x, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
with tf.variable_scope('layer1-pool1'):
pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
with tf.variable_scope('layer2-conv2'):
conv2_weights = tf.get_variable('weight',
[CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
initializer=tf.truncated_normal_initializer(stddev=0.1))
conv2_baises = tf.get_variable('bias',
[CONV2_DEEP], initializer=tf.constant_initializer(0.0))
conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_baises))
with tf.variable_scope('layer2-pool2'):
pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
pool_shape = pool2.get_shape().as_list()
flatten_size = pool_shape[1] * pool_shape[2] * pool_shape[3]
flatting = tf.reshape(pool2, [-1, flatten_size])
with tf.variable_scope('layer3-fc1'):
fc1_weights = tf.get_variable('weight',
[flatten_size, FC_SIZE],
initializer=tf.truncated_normal_initializer(stddev=0.1))
fc1_biases = tf.get_variable('bias',
[FC_SIZE],
initializer=tf.constant_initializer(0.0))
fc1 = tf.nn.relu(tf.matmul(flatting, fc1_weights) + fc1_biases)
fc1 = tf.nn.dropout(fc1, self.keep_drop)
with tf.variable_scope('layer4-fc2'):
fc2_weights = tf.get_variable("weight", [FC_SIZE, NUM_LABELS],
initializer=tf.truncated_normal_initializer(stddev=0.1))
fc2_biases = tf.get_variable('bias', [NUM_LABELS], initializer=tf.constant_initializer(0.1))
self.logits = tf.matmul(fc1, fc2_weights) + fc2_biases
def build_optimizer(self):
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
logits=self.logits, labels=self.target_y), name='loss')
self.accuracy = tf.reduce_mean(tf.cast(tf.equal(
tf.argmax(self.logits, axis=1),
tf.argmax(self.target_y, axis=1)), tf.float32), name='accuracy')
self.optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(self.loss)
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('accuracy', self.accuracy)
self.merge_op = tf.summary.merge_all()
def train(self, mnist):
saver = tf.train.Saver()
global_step = 0
each_step = 500
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(self.flags.tensorboard_log_path, sess.graph)
for epoch in range(self.flags.num_epochs):
for _ in range(each_step):
global_step += 1
batch_x, batch_y = mnist.train.next_batch(self.flags.batch_size)
feed_dict = {self.input_x: batch_x, self.target_y: batch_y, self.keep_drop:0.5}
accu, loss, _, summary_str = sess.run([self.accuracy, self.loss, self.optimizer, self.merge_op],
feed_dict)
writer.add_summary(summary_str, global_step)
if global_step % 50 == 0:
print('Epoch: {}; Global Step: {}; accuracy: {:.2f}%; loss: {:.4f}'.
format(epoch + 1, global_step, accu*100, loss))
saver.save(sess, self.flags.model_save_path)
print('trained model has been saved in epoch:{}'.format(epoch+1))
def eval(self, mnist):
new_saver = tf.train.Saver()
with tf.Session() as new_sess:
# 获取参数到new_sess 中
graph = tf.get_default_graph()
new_saver.restore(new_sess, self.flags.model_save_path)
input_x = graph.get_tensor_by_name('input_x:0')
output_y = graph.get_tensor_by_name('target_y:0')
accuracy = graph.get_tensor_by_name('accuracy:0')
feed = {input_x: mnist.test.images, output_y: mnist.test.labels, self.keep_drop:1.0}
accu = new_sess.run(accuracy, feed_dict=feed)
print('test accuarcy:{:.2f}%'.format(accu * 100))
if __name__ == '__main__':
    # Download/load MNIST with one-hot labels.
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    FLAGS = tf.app.flags.FLAGS
    # NOTE(review): the model is constructed BEFORE the flags below are
    # defined; this only works because LeNetModel does not dereference any
    # flag until train()/eval() are called -- confirm before reordering.
    lenet_model = LeNetModel(FLAGS)
    tf.app.flags.DEFINE_string('model_save_path', 'trained_model/lenet/lenet_model.ckpt', 'model_save_path')
    # NOTE(review): 'learning_rate' is defined but never used -- the
    # optimizer hard-codes 0.01 in build_optimizer().
    tf.app.flags.DEFINE_float('learning_rate', 0.002, 'learning_rate')
    tf.app.flags.DEFINE_integer('batch_size', 64, 'number of batch size')
    tf.app.flags.DEFINE_integer('num_epochs', 1, 'number of epoch to train')
    tf.app.flags.DEFINE_string('tensorboard_log_path', 'tensorboard_log/lenet/', 'tensorboard_log_path')
    lenet_model.train(mnist)
    lenet_model.eval(mnist)
"18843740508@163.com"
] | 18843740508@163.com |
ad91cb706816e164109bb744100c0b735da2602a | b8584fd8c5d6f7c44e92aa45c82d63ec79f6ec01 | /main.py | 149d3b22d7efaaf4d4d783f07c8e87b99ea7ff99 | [] | no_license | qcwthu/cnn-relation-extraction-with-ranking-loss | 24b2c37881f5cf6793a2a031df12aa90e98f052a | 3a73c445d3ee9bae6693e60188e7aeb5eb4c7c60 | refs/heads/master | 2022-01-13T22:36:36.918461 | 2019-05-22T08:24:50 | 2019-05-22T08:24:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,253 | py | import torch
import pandas as pd
import numpy as np
import csv
import spacy
import os
import re
from torchtext import data, datasets
import argparse
import train as trains
import model
import datetime
# Command-line configuration for the CRCNN relation classifier.
# NOTE(review): several help strings state defaults that disagree with the
# actual default= values (e.g. -lr says 0.001 but defaults to 0.025) --
# trust the default= values.
print('parse arguments.')
parser = argparse.ArgumentParser(description='CRCNN text classificer')
# learning
parser.add_argument('-lr', type=float, default=0.025, help='initial learning rate [default: 0.001]')
parser.add_argument('-epochs', type=int, default=300, help='number of epochs for train [default: 16]')
parser.add_argument('-batch-size', type=int, default=100, help='batch size for training [default: 256]')
parser.add_argument('-log-interval', type=int, default=100, help='how many steps to wait before logging training status [default: 500]')
parser.add_argument('-dev-interval', type=int, default=300, help='how many steps to wait before testing [default: 100]')
parser.add_argument('-save-interval', type=int, default=500, help='how many steps to wait before saving [default:500]')
parser.add_argument('-save-dir', type=str, default='snapshot', help='where to save the snapshot')
parser.add_argument('-early-stop', type=int, default=2000, help='iteration numbers to stop without performance increasing')
# NOTE(review): type=bool does not parse "False" from the command line
# (bool('False') is True); use action='store_true'/'store_false' instead.
parser.add_argument('-save-best', type=bool, default=True, help='whether to save when get best performance')
# data
parser.add_argument('-shuffle', action='store_true', default=False, help='shuffle the data every epoch')
# model
parser.add_argument('-dropout', type=float, default=0.75, help='the probability for dropout [default: 0.5]')
parser.add_argument('-max-norm', type=float, default=0, help='l2 constraint of parameters [default: 3.0]')
parser.add_argument('-embed-dim', type=int, default=300, help='number of embedding dimension [default: 128]')
parser.add_argument('-kernel-num', type=int, default=500, help='number of each kind of kernel')
parser.add_argument('-kernel-sizes', type=str, default='2,3,4,5', help='comma-separated kernel size to use for convolution')
parser.add_argument('-static', action='store_true', default=False, help='fix the embedding')
# device
parser.add_argument('-device', type=int, default=2, help='device to use for iterate data, -1 mean cpu [default: -1]')
# option
parser.add_argument('-snapshot', type=str, default=None, help='filename of model snapshot [default: None]')
parser.add_argument('-test', action='store_true', default=False, help='train or test')
args = parser.parse_args()

# Echo the effective configuration.
print("\nParameters:")
for attr, value in sorted(args.__dict__.items()):
    print("\t{}={}".format(attr.upper(),value))

# Fixed task constants for SemEval-2010 task 8: max sentence length, 19
# relation classes, and position-embedding size.
args.sent_len = 90
args.class_num = 19
args.pos_dim = 90
# mPos/mNeg/gamma: presumably the margins and scale of the CRCNN ranking
# loss -- TODO confirm against model.CRCNN.
args.mPos = 2.5
args.mNeg = 0.5
args.gamma = 0.05
# args.device = torch.device(args.device)
# Parse '2,3,4,5' into a list of convolution window sizes.
args.kernel_sizes = [int(k) for k in args.kernel_sizes.split(',')]

# spaCy pipeline used only for tokenization (see tokenizer below).
nlp = spacy.load('en_core_web_sm')
def tokenizer(text):
    """Split *text* with the spaCy tokenizer and return the token strings."""
    tokens = []
    for tok in nlp.tokenizer(text):
        tokens.append(tok.text)
    return tokens
def emb_tokenizer(l):
    """Parse a string holding a list of lists (e.g. "[[1, 2], [3, 4]]")
    and return the flattened list of its elements.

    Used as the torchtext tokenizer for the pos_embed CSV column. Uses
    ast.literal_eval instead of eval: the input comes straight from a data
    file, and eval would execute any expression embedded in it.
    """
    import ast  # local import keeps the module's import block untouched
    return [y for x in ast.literal_eval(l) for y in x]
# torchtext fields: TEXT holds the tokenized sentence (padded/truncated to
# sent_len), LABEL the relation class (unknowns map to 'OTHER'), POS_EMB the
# precomputed position features -- presumably two offsets per token, hence
# fix_length=2*sent_len (TODO confirm against the CSV format).
TEXT = data.Field(sequential=True, tokenize=tokenizer,fix_length=args.sent_len)
LABEL = data.Field(sequential=False, unk_token='OTHER')
POS_EMB = data.Field(sequential=True,unk_token=0,tokenize=emb_tokenizer,use_vocab=False,pad_token=0,fix_length=2*args.sent_len)

print('loading data...')
# Tab-separated CSV splits with columns: relation, sentence, pos_embed.
train,valid,test = data.TabularDataset.splits(path='../data/SemEval2010_task8_all_data',
                                              train='SemEval2010_task8_training/TRAIN_FILE_SUB.CSV',
                                              validation='SemEval2010_task8_training/VALID_FILE.CSV',
                                              test='SemEval2010_task8_testing_keys/TEST_FILE_FULL.CSV',
                                              format='csv',
                                              skip_header=True,csv_reader_params={'delimiter':'\t'},
                                              fields=[('relation',LABEL),('sentence',TEXT),('pos_embed',POS_EMB)])

# Build vocabularies from the training split; word vectors are GloVe 300d.
TEXT.build_vocab(train,vectors='glove.6B.300d')
LABEL.build_vocab(train)

args.vocab = TEXT.vocab
args.cuda = torch.cuda.is_available()
# args.cuda = False
# Each run saves into a timestamped subdirectory of save_dir.
args.save_dir = os.path.join(args.save_dir,datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))

# One iterator per split; valid/test use a single full-size batch.
train_iter, val_iter, test_iter = data.Iterator.splits((train,valid,test),
                                                       batch_sizes=(args.batch_size,len(valid),len(test)),
                                                       device=args.device,
                                                       sort_key=lambda x: len(x.sentence),
                                                       # sort_within_batch=False,
                                                       repeat=False)

print('build model...')
cnn = model.CRCNN(args)
if args.snapshot is not None:
    # Resume from a saved state dict.
    print('\nLoding model from {}...'.format(args.snapshot))
    cnn.load_state_dict(torch.load(args.snapshot))

if args.cuda:
    torch.cuda.set_device(args.device)
    cnn = cnn.cuda()

if args.test:
    try:
        trains.eval(test_iter,cnn,args)
    except Exception as e:
        # NOTE(review): this swallows the real error and only prints a
        # generic message; consider logging `e` or re-raising.
        print("\n test wrong.")
else:
    trains.train(train_iter,val_iter,cnn,args)
"xisikongji2354@gmail.com"
] | xisikongji2354@gmail.com |
dc2b280772e197fed6525ef2bb9002c0ee5025f7 | 6e7463cfc51a2b190a42ea45b416cdcade6cffe8 | /Examenes/E1.3.Final/main.py | c993af08190cc29a774e5422c23db332c32ca9cc | [] | no_license | JoanAndoni/CyPS_2018 | f8b510c0c53b6426831678869549abaad169c056 | 945fe2f335f05ecaf829a53ea25d670c8e8c0bc6 | refs/heads/master | 2020-03-25T16:02:39.900345 | 2018-11-20T23:41:29 | 2018-11-20T23:41:29 | 143,912,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,008 | py | # Joan Andoni Gonzalez Rioz
# A00569929
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
import csv
driver = webdriver.Chrome()
driver.get("http://fiware01.cem.itesm.mx:3000/~certificatec/pronosticos")
driver.find_element_by_id("login_username").send_keys("ariel.garcia@itesm.mx")
driver.find_element_by_id("login_password").send_keys("1234")
driver.find_element_by_id("login_submit").click()
sleep(1)
driver.find_element_by_xpath('//*[@id="nav"]/div/ul/li[7]/a').click()
sleep(1)
driver.find_element_by_xpath('//*[@id="sidebar"]/ul[1]/li[5]/a').click()
sleep(1)
table = len(driver.find_elements_by_tag_name("tr"))+1
alumnos = [['Matricula', 'Nombre', 'Materia', 'Codigo']]
for row in xrange(1,table):
matricula = driver.find_element_by_xpath('//*[@id="candidatos_container"]/tbody/tr['+str(row)+']/td[1]/a').text
nombre = driver.find_element_by_xpath('//*[@id="candidatos_container"]/tbody/tr['+str(row)+']/td[2]/a').text
#print(matricula)
#print(nombre)
driver.find_element_by_xpath('//*[@id="candidatos_container"]/tbody/tr['+str(row)+']/td[1]/a').click()
sleep(1)
driver.find_element_by_xpath('//*[@id="sidebar"]/ul[1]/li[3]/a').click()
sleep(1)
tableMaterias = len(driver.find_elements_by_tag_name("tr"))+1
for row2 in xrange(1,tableMaterias):
codigo = driver.find_element_by_xpath('//*[@id="pronosticos_container"]/tbody/tr['+str(row2)+']/td[1]').text
nombreMateria = driver.find_element_by_xpath('//*[@id="pronosticos_container"]/tbody/tr['+str(row2)+']/td[2]').text
#print(codigo)
#print(nombreMateria)
sleep(1)
alumnos.append([matricula.encode('utf-8'), nombre.encode('utf-8'), nombreMateria.encode('utf-8'), codigo.encode('utf-8')])
driver.back()
driver.back()
sleep(1)
#print(alumnos)
with open('pronosticos.csv', 'a') as csvFile:
writer = csv.writer(csvFile)
for col in alumnos:
writer.writerow(col)
csvFile.close()
| [
"mcr_joan@hotmail.com"
] | mcr_joan@hotmail.com |
fe619f1da9f99173a18dd7df28146699dca5644e | e133ce013b0a3ca56120db66a95884f464308f2f | /learning_logs/forms.py | 102a5d3d7097bca6f7bca5bc17359a91f583d6f9 | [] | no_license | zhouyl02/learninglogs | 27e2ec011cbc3c27db46c66ed42aa849186e1c81 | d23ad9a0bfc72c813140bca07a898ee1e22293a8 | refs/heads/master | 2021-04-01T13:23:45.175845 | 2020-12-04T12:30:54 | 2020-12-04T12:30:54 | 124,403,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | from django import forms
from .models import Topic, Entry
class TopicForm(forms.ModelForm):
    """ModelForm for creating/editing a Topic; exposes only its text field."""
    class Meta:
        model = Topic
        fields = ['text']
        labels = {'text': ''}  # suppress the auto-generated field label
class EntryForm(forms.ModelForm):
    """ModelForm for creating/editing an Entry; exposes only its text field."""
    class Meta:
        model = Entry
        fields = ['text']
        labels = {'text': ''}  # suppress the auto-generated field label
        widgets = {'text': forms.Textarea(attrs={'cols': 80})}  # widen the textarea
"915186072@qq.com"
] | 915186072@qq.com |
fe0fe5ceb581258a2127b48eb65b8201e67adcf1 | 2310bf22a57aae02b09866169ca228a04a160fa3 | /pysfOp/vsGraphOp.py | 686ac1fba2c5b6e905c832cc58f0a307f1e2e4d0 | [] | no_license | lobosKobayashi/PythonSfCp932 | e16f8e94c38cea89879021b3ae4378f37a39e5e1 | 78127ce59cadc54e291d5e0afd4440240be630a1 | refs/heads/master | 2021-01-23T13:42:08.187821 | 2017-01-03T20:21:38 | 2017-01-03T20:21:38 | 11,274,335 | 0 | 0 | null | null | null | null | SHIFT_JIS | Python | false | false | 8,733 | py | # -*- encoding: cp932 -*-
from __future__ import division
"""'
english:
PythonSf pysfOp\vsGraphOp.py
https://github.com/lobosKobayashi
http://lobosKobayashi.github.com/
Copyright 2016, Kenji Kobayashi
All program codes in this file was designed by kVerifierLab Kenji Kobayashi
I release souce codes in this file under the GPLv3
with the exception of my commercial uses.
2016y 12m 28d Kenji Kokbayashi
japanese:
PythonSf pysfOp\vsGraphOp.py
https://github.com/lobosKobayashi
http://lobosKobayashi.github.com/
Copyright 2016, Kenji Kobayashi
このファイルの全てのプログラム・コードは kVerifierLab 小林憲次が作成しました。
作成者の小林本人に限っては商用利用を許すとの例外条件を追加して、
このファイルのソースを GPLv3 で公開します。
2016年 12月 28日 小林憲次
'"""
import sfFnctnsOp as sf
__obj2dDisplayGeneratedStt = None # to enable overlap plot
def plotTrajectory(arg, color = sf.cyan, xyRate=True, radiusRate = 80.0
                                       , blAxes = True):
    """' plot 2D/3D trajectory. You can hand over list of length 2 element at 2D
    or length 3 element at 3D.
    The line radius is 1/200 for max display size. The line radius can be
    changed by radiusRate.
    If blAxes = False then the RGB axis is not displayed.
    At 2D plot, if xyRage == False then plot in a same hight/width square
    '"""
    # Python 2 / VPython code. `arg` may be a generator: materialize it.
    if not(hasattr(arg, '__getitem__')) and hasattr(arg, '__iter__'):
        arg = list(arg)
    vs = sf.vs_()
    color = tuple(color) # color argment may be list/vector
    if isinstance(arg,list) or isinstance(arg,tuple) or isinstance(
            arg,type(sf.sc.array([0,]))):
        from octnOp import ClOctonion
        # Scalar complex values become (re, im) pairs; octonions take their
        # first three imaginary components as 3D coordinates.
        if not(hasattr(arg[0],'__len__')) and isinstance(arg[0], complex):
            arg = [ (x.real, x.imag) for x in arg]
        elif not(hasattr(arg[0],'__len__')) and isinstance(arg[0], ClOctonion):
            arg = [ x[1:4] for x in arg]
        if len(arg[0])==2:
            # --- 2D branch ---------------------------------------------
            import visual.graph as vg
            global __obj2dDisplayGeneratedStt
            maxX = max([abs(elm[0]) for elm in arg])
            maxY = max([abs(elm[1]) for elm in arg])
            print "maxX:",maxX, " maxY:",maxY
            # Reuse a single gdisplay so repeated calls overlay their plots.
            if (__obj2dDisplayGeneratedStt == None):
                if xyRate == True: # 11.01.16 to
                    # Scale the window to the data's aspect ratio.
                    maxAt = max(maxX, maxY)
                    __obj2dDisplayGeneratedStt = vg.gdisplay(
                            width=600*maxX/maxAt,height=600*maxY/maxAt)
                else:
                    __obj2dDisplayGeneratedStt = vg.gdisplay(
                            width=600,height=600)
                #__bl2dDisplayGeneratedStt = True
            grphAt = vg.gcurve(color = color)
            for i in range(len(arg)):
                assert len(arg[i])==2, "unexpeted length data:"+str(arg[i])
                grphAt.plot(pos = arg[i])
            #return __obj2dDisplayGeneratedStt
            #import pdb; pdb.set_trace()
            #print "debug:",grphAt.gcurve.pos
            # plot start mark: a small square around the first point.
            grphSqAt = vg.gcurve(color = color)
            pos0At = grphAt.gcurve.pos[0,:][:2]
            rateAt = 50
            for x,y in sf.mitr([-maxX/rateAt, maxX/rateAt]
                             , [-maxY/rateAt, maxY/rateAt]):
                grphSqAt.plot(pos = pos0At+[x,y])
            grphSqAt.plot(pos = pos0At+[-maxX/rateAt,-maxY/rateAt])
            return grphAt # 09.02.04 to animate graph
        elif len(arg[0])==3:
            # --- 3D branch ---------------------------------------------
            vs.scene.forward=(-1,+1,-1)
            vs.scene.up=(0,0,1)
            c = vs.curve( color = color )
            # Track per-axis extent while appending the curve points.
            maxX, maxY, maxZ = 0,0,0
            for i in range(len(arg)):
                if maxX < abs(arg[i][0]):
                    maxX = abs(arg[i][0])
                if maxY < abs(arg[i][1]):
                    maxY = abs(arg[i][1])
                if maxZ < abs(arg[i][2]):
                    maxZ = abs(arg[i][2])
                c.append( arg[i] )
            #print c.pos
            print "maxX:",maxX, " maxY:",maxY, " maxZ:",maxZ
            maxAt = max(maxX,maxY,maxZ)
            c.radius = maxAt/radiusRate
            # Mark the trajectory's start point with a sphere.
            vs.sphere(pos = arg[0], radius = 3*c.radius, color = color)
            if blAxes == True:
                # draw axise (x=red, y=green, z=cyan)
                vs.curve( pos=[(0,0,0), (maxAt,0,0)]
                        , color=(1,0,0)
                        , radius = maxAt/100 )
                vs.curve( pos=[(0,0,0), (0,maxAt,0)]
                        , color=(0,1,0)
                        , radius = maxAt/100 )
                vs.curve( pos=[(0,0,0), (0,0,maxAt)]
                        , color=(0,1,1)
                        , radius = maxAt/100 )
            #return vs.scene
            return c # 09.02.04 to animate graph
    else:
        assert False,"unexpeted data:"+str(arg)
__objGrDisplayGeneratedStt = None # to enable overlap plot
def plotGr(vctAg, start=(), end=None, N=50, color = sf.cyan):
    """' plot graph for a function or vector data
    If you call plotGr(..) a number of times, then the graphs were plotted
    in piles.
    start,end are domain parameters, which are used if vctAg type is
    function
    if you want to vanish the graph then do as below
        objAt=plotGr(..)
            .
            .
        objAt.visible = None
    usage:
        plotGr(sin)     # plot sin graph in a range from 0 to 1
        plotGr(sin,-3,3)    #plot sin in a range from -3 to 3
        plotGr(sin,[-3,-2,0,1])
                        # plot sequential line graph by
                        # [(-3,sin(-3),(-2,sin(-2),(0,sin(0),(1,sin(1)]
        plotGr([sin(x) for x in klsp(-3,3)]) # plot a sequence data
    '"""
    # Python 2 / VPython code. `vctAg` may be a generator: materialize it.
    if not(hasattr(vctAg, '__getitem__')) and hasattr(vctAg, '__iter__'):
        vctAg = list(vctAg)
    vs = sf.vs_()
    global __objGrDisplayGeneratedStt
    color = tuple(color) # color argment may be list/vector
    import visual.graph as vg
    # Reuse a single gdisplay so repeated calls overlay their plots.
    if __objGrDisplayGeneratedStt == None:
        __objGrDisplayGeneratedStt = vg.gdisplay()
    grphAt = vg.gcurve( color = color)
    #grphAt = vg.gcurve(gdisplay=dspAt, color = color)
    #import pdb; pdb.set_trace()
    if '__call__' in dir(vctAg):
        # vctAg is function
        if start != () and end == None and hasattr(start, '__iter__'):
            # `start` itself is the sequence of sample points.
            for x in start:
                grphAt.plot(pos = [x, float(vctAg(x))] )
        else:
            # Default domain is [0, 1]; swap if given in reverse order.
            if start == ():
                start = 0
            if end == None:
                end = 1
            assert start != end
            if start > end:
                start, end = end, start
            #assert start != end
            """'
            for x in arsq(start, N, float(end-start)/N):
                # 08.10.27 add float(..) cast to avoid below error
                # "No registered converter was able to produce a C++ rvalue"
                # at ;;n=64;plotGr([sf.sc.comb(n,i) for i in range(n)])
                grphAt.plot(pos = [x, float(vctAg(x))] )
            '"""
            for x in sf.klsp(start, end, N):
                # 09.12.03 to display end and avoid 0
                grphAt.plot(pos = [x, float(vctAg(x))] )
        #return grphAt
        return __objGrDisplayGeneratedStt
    else:
        # vctAg is sequence data.
        if (start != ()) or (end != None):
            #import pdb; pdb.set_trace()
            if start == ():
                start = 0
            if end == None:
                end = 1
            assert start != end
            if start > end:
                start, end = end, start
            N = len(vctAg)
            # NOTE(review): `enmasq` is not defined or imported in this
            # module (only `import sfFnctnsOp as sf` is visible) -- this
            # branch raises NameError; likely sf.enmasq was intended.
            for i, x in enmasq([start, N, (end - start)/N]):
                grphAt.plot(pos = [x, float(vctAg[i])] )
        else:
            # No domain given: plot against the index.
            for i in range(len(vctAg)):
                grphAt.plot(pos = [i, float(vctAg[i])] )
        #return grphAt
        return __objGrDisplayGeneratedStt
def plotDbl(sq0, sq1, region=None, N=50):
    """Plot two curves in one figure: *sq0* in the default color, *sq1* in red.

    sq0, sq1 -- either both sequences of y-values (plotted against their
                index), or both callables sampled at N points over *region*
                (defaults to [-1, 1]).
    Returns the display object from the second sf.plotGr(..) call.
    """
    if isinstance(sq0, (tuple, list, sf.np.ndarray)):
        # Sequence data: plot index vs. value for both series.
        sf.plotGr(sq0)
        return sf.plotGr(sq1, color=sf.red)
    # Function data: validate both arguments and the sampling region.
    assert callable(sq0), "at plotDbl(..), you set parameter sq0 that is not function"
    assert callable(sq1), "at plotDbl(..), you set parameter sq1 that is not function"
    if region is None:  # was `region==None`; identity test is the idiom
        region = [-1, 1]
    assert isinstance(region, (tuple, list, sf.np.ndarray)) and len(region) == 2, (
        "at plotDbl(..), region must be a length-2 sequence")
    sf.plotGr(sq0, region[0], region[1], N=N)
    return sf.plotGr(sq1, region[0], region[1], N=N, color=sf.red)
| [
"lobosKobayashi@gmail.com"
] | lobosKobayashi@gmail.com |
a22ecfc5db8d716f603611bc6d34e5846f57d403 | ddb76ba98767c0e042d1eacfa41591ef2ee8bce8 | /case/exchange/trade/test_navigation.py | 9310eead8212d2ff05c16356b42dfcc3df72e747 | [] | no_license | linzhiyang85/CryptoTasks | 710f9b7cec613056090dc2de6109adcf132f28bc | 7e0bfc8a09e32c9a4388ab9ea9b55f0b8dce757f | refs/heads/main | 2023-09-01T08:14:07.790208 | 2021-09-25T20:46:21 | 2021-09-25T20:46:21 | 410,161,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,276 | py | import pytest
import time
from page.exchange import ExchangePage
from page.trade import TradePage
class TestTrade:
    """UI test: navigating from the exchange market list to a trade page."""

    @pytest.mark.parametrize('source, target', [('CRO', 'USDC'), ('ATOM', 'CRO')])
    def test_navigate_to_trade_page(self, source, target, driver, settings):
        """Open the exchange, locate the source/target instrument, click it,
        and verify the trade-page URL and displayed instrument name.

        driver/settings are pytest fixtures (Selenium WebDriver and test
        configuration) -- presumably provided by conftest.py; confirm.
        """
        # initialize page object
        exchange_page = ExchangePage(driver)
        trade_page = TradePage(driver)
        # maximize
        exchange_page.maximize_window()
        # open initial page, accept cookie
        exchange_page.open(settings.get_start_url('exchange'))
        exchange_page.accept_cookie()
        # find target instrument
        exchange_page.click_market_menu(target)
        instrument = exchange_page.get_instrument(source, target)
        assert instrument is not None, f'Failed to find instrument for {source}/{target}'
        # click to open trade page
        exchange_page.open_instrument(instrument)
        exchange_page.wait_for_url_change(driver.current_url)
        # verification
        assert '/trade/' in driver.current_url and f'{source}_{target}' in driver.current_url, f"Failed to open trade page for {source}/{target}"
        assert f'{source}/{target}' == trade_page.get_instrument_name(), "Instrument name in trade page is not correct"
| [
"linzhiyang@aliyun.com"
] | linzhiyang@aliyun.com |
91efd913c270d343c4b45b6d1eb44d4aa58f912c | 35a6b6b5cabcf9fb39527bab020ef7c96265a026 | /p3.py | 5911e61bf240cc3e917c3377949ca16c9c46851d | [] | no_license | mepky/data-structure-and-algorithm | 9a1324142276e6966692c51734613f15234f5300 | 96f64e657f97e46fc2d32cca5294fa0f104d5d01 | refs/heads/master | 2020-03-24T08:57:41.692564 | 2020-02-10T12:40:13 | 2020-02-10T12:40:13 | 142,614,071 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | from collections import defaultdict
# Per test case: read a length n and a lowercase string s, find the minimum
# distance d between two occurrences of the same letter, and print n - d
# (or 0 if no letter repeats).
t=int(input())
for _ in range(t):
    l=defaultdict(int)  # NOTE(review): never used below
    n=int(input())
    d=2**20  # sentinel: larger than any possible distance
    t=0  # reused as "found a repeat" flag (safe: range(t) was already evaluated)
    s=input()
    a=[-1]*27  # last seen index per lowercase letter ('a'..'z')
    for i in range(n):
        if a[ord(s[i])-97]==-1:
            a[ord(s[i])-97]=i
        else:
            # Repeated letter: record distance to its previous occurrence.
            d=min(d,i-a[ord(s[i])-97])
            t=1
            a[ord(s[i])-97]=i
    if t==0:
        print(0)
    else:
        print(n-d)
| [
"noreply@github.com"
] | mepky.noreply@github.com |
69f71e98982e26c3a6a8f76756133b13cb028e93 | 2f7c81de79cfd34770051d9bda3ee3fd1a0c9477 | /solucion_usando_verlet.py | 7fccdfab5b9a2c0dd2c002a6bea23273981abe5f | [
"MIT"
] | permissive | TatiFlores/04Tarea | ae38959d6313c16b5ff628912d88be45ff27d54d | 2f219a4b02865ccac716c76dd5545248b8789f44 | refs/heads/master | 2020-05-29T12:15:21.408718 | 2015-10-22T02:14:26 | 2015-10-22T02:14:26 | 44,268,867 | 0 | 0 | null | 2015-10-14T18:45:56 | 2015-10-14T18:45:56 | null | UTF-8 | Python | false | false | 1,486 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from planeta import Planeta
import numpy as np
import matplotlib.pyplot as plt
condicion_inicial = [10, 0, 0, 0.4]
p = Planeta(condicion_inicial)
dt = 0.1
iteraciones = 60000
x = np.zeros(iteraciones)
y = np.zeros(iteraciones)
vx = np.zeros(iteraciones)
vy = np.zeros(iteraciones)
energia = np.zeros(iteraciones)
tiempo = np.zeros(iteraciones)
x[0] = p.y_actual[0]
y[0] = p.y_actual[1]
energia[0] = p.energia_total()
tiempo[0] = p.t_actual
vx[0] = p.y_actual[2]
vy[0] = p.y_actual[3]
#Verlet necesita una iteracion extra
p.avanza_rk4(dt)
x[1] = p.y_actual[0]
y[1] = p.y_actual[1]
energia[1] = p.energia_total()
tiempo[1] = p.t_actual
vx[1] = p.y_actual[2]
vy[1] = p.y_actual[3]
for i in range(2, iteraciones):
y_anterior = np.array([x[i-2], y[i-2], vx[i-2], vy[i-2]])
p.avanza_verlet(dt, y_anterior)
x[i] = p.y_actual[0]
y[i] = p.y_actual[1]
vx[i] = p.y_actual[2]
vy[i] = p.y_actual[3]
energia[i] =p.energia_total()
tiempo [i] = p.t_actual
plt.figure(1)
plt.clf()
plt.plot(x,y,color='green')
plt.xlabel('x[m]')
plt.ylabel('y[m]')
plt.grid(True)
plt.title(' 'u'Ó''rbita calculada con el m'u'é''todo de Verlet ')
plt.savefig('Orbita_verlet.eps')
plt.figure(2)
plt.clf()
plt.plot(tiempo,energia,'green')
plt.xlabel('Tiempo [s]')
plt.ylabel('Energ'u'í''a')
plt.grid(True)
plt.title(' Energ'u'í''a en funci'u'ó''n del tiempo, m'u'é''todo de Verlet ')
plt.savefig('Energia_verlet.eps')
plt.show()
| [
"tatiflores.4@gmail.com"
] | tatiflores.4@gmail.com |
265dc95d5597dd6704b8d1810358d09d0ea447cb | ea788d354226634a96fdba62d9f4a7587f6cb09f | /process_dynamixel_data.py | 609ced7ba71a0288cee3a616012e02ea0279529a | [] | no_license | DeinFreund/px4_measurements | 47f7cb2fd00017522df15b4ac6f5f97ee9c98ca6 | a53e5f781634a11c2b24d145304228c7afa66b9e | refs/heads/master | 2020-12-04T06:36:58.166716 | 2020-01-03T20:31:21 | 2020-01-03T20:31:21 | 231,660,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | #import rosbag
import matplotlib.pyplot as plt
import numpy as np
import sys
# Parse Dynamixel log lines from stdin and plot commanded vs. measured joint
# angles. Expected line format (14 whitespace-separated fields):
#   LOG <time_usec> <measured0> <cmd0> <measured1> <cmd1> ... <measured5> <cmd5>
dyn_time = []
measured_rows = []
cmd_rows = []
for line in sys.stdin:
    parts = line.split()  # split once per line instead of once per field
    try:
        if len(parts) == 14 and parts[0] == 'LOG':
            dyn_time.append(int(parts[1]) / 1e6)  # microseconds -> seconds
            cmd = []
            measured = []
            for i in range(6):
                cmd.append(float(parts[2 * i + 3]))
                measured.append(float(parts[2 * i + 2]))
            measured_rows.append(measured)
            cmd_rows.append(cmd)
    except (ValueError, IndexError):
        # Malformed numeric field: skip the line. (Was a bare `except: pass`,
        # which also swallowed KeyboardInterrupt and real bugs.)
        pass

# Build the (n, 6) arrays in one shot; np.concatenate per row is O(n^2).
angle_measured = np.array(measured_rows, dtype=float).reshape(-1, 6)
angle_cmd = np.array(cmd_rows, dtype=float).reshape(-1, 6)
dyn_time = np.array(dyn_time) - dyn_time[0]  # rebase time to the first sample

# Measured and commanded angles, one subplot per joint.
fig_new, ax_new = plt.subplots(6, 1, sharey=True, sharex=True)
fig_new.suptitle('Position tracking')
for i in range(6):
    ax_new[i].plot(dyn_time, angle_measured[:, i], '.', label='measured_angles')
    ax_new[i].plot(dyn_time, angle_cmd[:, i], '.', label='cmd angles')
    ax_new[i].legend(loc=0)

# Tracking error (command minus measurement), one subplot per joint.
fig, ax = plt.subplots(6, 1, sharey=True, sharex=True)
fig.suptitle('Position tracking')
for i in range(6):
    ax[i].plot(dyn_time, angle_cmd[:, i] - angle_measured[:, i], '.', label='cmd - measured_angles')
    ax[i].legend(loc=0)
plt.show()
| [
"flyck@ethz.ch"
] | flyck@ethz.ch |
4883732ef5574518d7a8eea9d3610c27c1229f76 | 72371fbbca5e1631844de3a3815be721ad37ad4f | /compilador/simbolo.py | 9abf35cbb0cc6a7ea53456333b78c3dfd9813edb | [] | no_license | gustavokida/compiladores_1 | 7c046386e8b92831e5b1b48afa301732cf29373b | cf50f20381c99784da1e76547f00abe0b51396aa | refs/heads/master | 2023-08-02T19:36:53.712753 | 2021-09-25T22:11:24 | 2021-09-25T22:11:24 | 404,526,949 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | class Simbolo:
def __init__(self, nome, tipo):
self.nome = nome
self.tipo = tipo
def getNome(self):
return self.nome
def setNome(self, nome):
self.nome = nome
def getTipo(self):
return self.tipo
def setTipo(self, tipo):
self.tipo = tipo | [
"45009122+gustavokida@users.noreply.github.com"
] | 45009122+gustavokida@users.noreply.github.com |
f4cc030b9c8573c816c10160ff087a8c68c9d808 | e00cf0bf72421ec31e4d3608c615aeeba5064731 | /wows/move.py | 3165d0d74b85208a58ea1b2ed7ee70fd489a053c | [] | no_license | lorne-luo/auto-wows | b4a84c7d99585c84a635fb5be11fd0f03a5f37fd | 992ad473f1d5a78686e1c4c939c6c218e72373d7 | refs/heads/master | 2020-12-30T00:52:17.497039 | 2020-02-25T11:10:30 | 2020-02-25T11:10:30 | 238,803,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,190 | py | import time
from random import randint
import pyautogui as pag
import settings as settings
from helper import search_template, get_map_image
class WOWS_Move(object):
    """Moves the player's ship via the in-game map using GUI automation."""

    def move_ship(self):
        """Open the map ('m'), pick a destination, and double-click near it
        four times with small random jitter, then close the map (Esc).

        The destination MOVE_TO is computed once (point-mirrored from the
        ship's detected map position, or the map center as a fallback) and
        cached in a module-level global for later calls.
        NOTE(review): `global MOVE_TO` is declared but no module-level
        MOVE_TO is visible in this file -- the first call raises NameError
        unless another module injects it; confirm.
        """
        global MOVE_TO
        pag.press('m', presses=1, interval=0.25)
        pag.sleep(1.5)
        if not MOVE_TO:
            # Locate our ship's icon on the map screenshot.
            map_image = get_map_image()
            self_loc = search_template(map_image, 'map_self_icon.bmp')
            print('self_loc', self_loc)
            if self_loc:
                # Mirror the ship's position through the map center
                # (note: self_loc is indexed (row, col), hence the swap).
                MOVE_TO = (settings.BATTLE_MAP_TOPLEFT[0] + settings.BATTLE_MAP_SIZE[0] - self_loc[1],
                           settings.BATTLE_MAP_TOPLEFT[1] + settings.BATTLE_MAP_SIZE[1] - self_loc[0])
            else:
                # Fallback: head for the map center.
                MOVE_TO = (settings.BATTLE_MAP_TOPLEFT[0] + settings.BATTLE_MAP_SIZE[0] / 2,
                           settings.BATTLE_MAP_TOPLEFT[1] + settings.BATTLE_MAP_SIZE[1] / 2)
        for i in range(4):
            # Jitter the click point so repeated orders are not identical.
            loc = (MOVE_TO[0] + randint(-50, 50),
                   MOVE_TO[1] + randint(-50, 50))
            pag.moveTo(loc)
            pag.click(clicks=2, interval=0.5, button='left')
            time.sleep(1)
        pag.press('esc')
        time.sleep(2)
| [
"dev@luotao.net"
] | dev@luotao.net |
672cf8f365696192c08ad050669fd61e20d0a34b | 504398cad76a23e1ad5f4eebf8f499b855d57771 | /manage.py | 2e47f39a342cb53f05ed73b51f03116c9fe19445 | [] | no_license | jvxtaposed/WebFrame | 75cd0c437eb94ed4c78e8e2dd47d0e6d21fa4421 | 4da3d76fb8ed79c8a0f32dccb28ca671688802cf | refs/heads/master | 2020-03-18T16:57:43.814453 | 2018-05-26T21:36:00 | 2018-05-26T21:36:00 | 134,996,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point: point Django at this project's
    # settings module, then dispatch the command-line arguments.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "WebFrame.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| [
"iliketreeslol@gmail.com"
] | iliketreeslol@gmail.com |
28279a0327096fede9a88609bda8fc917788902d | e022862c20d92931f6b119998792377d028d0137 | /multi-agent-irl/irl/mack/ncdail.py | f13bb1d098d528977b874cc1834e4af99ef3efb1 | [
"Apache-2.0"
] | permissive | renos/CoDAIL | 3519dface3720318c79bd9afa3cc0ae889b9ee05 | 92423c90166ac7c5b88637ca1ef0002f126918db | refs/heads/master | 2023-06-26T06:20:09.275801 | 2021-07-30T03:11:33 | 2021-07-30T03:11:33 | 384,311,940 | 0 | 0 | Apache-2.0 | 2021-07-09T03:36:48 | 2021-07-09T03:36:47 | null | UTF-8 | Python | false | false | 27,922 | py | import os.path as osp
import random
import time
import joblib
import numpy as np
import tensorflow as tf
from scipy.stats import pearsonr, spearmanr
from rl.acktr.utils import Scheduler, find_trainable_variables, discount_with_dones
from rl.acktr.utils import cat_entropy, mse, onehot, multionehot
from rl import logger
from rl.acktr import kfac
from rl.common import set_global_seeds, explained_variance
from irl.mack.kfac_discriminator_ncdail import Discriminator
# from irl.mack.kfac_discriminator_wgan import Discriminator
from irl.dataset import Dset
class Model(object):
def __init__(self, policy, ob_space, ac_space, nenvs, total_timesteps, nprocs=2, nsteps=200,
nstack=1, ent_coef=0.00, vf_coef=0.5, vf_fisher_coef=1.0, lr=0.25, max_grad_norm=0.5,
kfac_clip=0.001, lrschedule='linear', identical=None):
config = tf.ConfigProto(allow_soft_placement=True,
intra_op_parallelism_threads=nprocs,
inter_op_parallelism_threads=nprocs)
config.gpu_options.allow_growth = True
self.sess = sess = tf.Session(config=config)
nbatch = nenvs * nsteps
self.num_agents = num_agents = len(ob_space)
self.n_actions = [ac_space[k].n for k in range(self.num_agents)]
if identical is None:
identical = [False for _ in range(self.num_agents)]
scale = [1 for _ in range(num_agents)]
pointer = [i for i in range(num_agents)]
h = 0
for k in range(num_agents):
if identical[k]:
scale[h] += 1
else:
pointer[h] = k
h = k
pointer[h] = num_agents
A, ADV, R, PG_LR = [], [], [], []
for k in range(num_agents):
if identical[k]:
A.append(A[-1])
ADV.append(ADV[-1])
R.append(R[-1])
PG_LR.append(PG_LR[-1])
else:
A.append(tf.placeholder(tf.int32, [nbatch * scale[k]]))
ADV.append(tf.placeholder(tf.float32, [nbatch * scale[k]]))
R.append(tf.placeholder(tf.float32, [nbatch * scale[k]]))
PG_LR.append(tf.placeholder(tf.float32, []))
# A = [tf.placeholder(tf.int32, [nbatch]) for _ in range(num_agents)]
# ADV = [tf.placeholder(tf.float32, [nbatch]) for _ in range(num_agents)]
# R = [tf.placeholder(tf.float32, [nbatch]) for _ in range(num_agents)]
# PG_LR = [tf.placeholder(tf.float32, []) for _ in range(num_agents)]
# VF_LR = [tf.placeholder(tf.float32, []) for _ in range(num_agents)]
pg_loss, entropy, vf_loss, train_loss = [], [], [], []
self.model = step_model = []
self.model2 = train_model = []
self.pg_fisher = pg_fisher_loss = []
self.logits = logits = []
sample_net = []
self.vf_fisher = vf_fisher_loss = []
self.joint_fisher = joint_fisher_loss = []
self.lld = lld = []
for k in range(num_agents):
if identical[k]:
step_model.append(step_model[-1])
train_model.append(train_model[-1])
else:
step_model.append(policy(sess, ob_space[k], ac_space[k], ob_space, ac_space,
nenvs, 1, nstack, reuse=False, name='%d' % k))
train_model.append(policy(sess, ob_space[k], ac_space[k], ob_space, ac_space,
nenvs * scale[k], nsteps, nstack, reuse=True, name='%d' % k))
logpac = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=train_model[k].pi, labels=A[k])
lld.append(tf.reduce_mean(logpac))
logits.append(train_model[k].pi)
pg_loss.append(tf.reduce_mean(ADV[k] * logpac))
entropy.append(tf.reduce_mean(cat_entropy(train_model[k].pi)))
pg_loss[k] = pg_loss[k] - ent_coef * entropy[k]
vf_loss.append(tf.reduce_mean(mse(tf.squeeze(train_model[k].vf), R[k])))
train_loss.append(pg_loss[k] + vf_coef * vf_loss[k])
pg_fisher_loss.append(-tf.reduce_mean(logpac))
sample_net.append(train_model[k].vf + tf.random_normal(tf.shape(train_model[k].vf)))
vf_fisher_loss.append(-vf_fisher_coef * tf.reduce_mean(
tf.pow(train_model[k].vf - tf.stop_gradient(sample_net[k]), 2)))
joint_fisher_loss.append(pg_fisher_loss[k] + vf_fisher_loss[k])
self.policy_params = []
self.value_params = []
for k in range(num_agents):
if identical[k]:
self.policy_params.append(self.policy_params[-1])
self.value_params.append(self.value_params[-1])
else:
self.policy_params.append(find_trainable_variables("policy_%d" % k))
self.value_params.append(find_trainable_variables("value_%d" % k))
self.params = params = [a + b for a, b in zip(self.policy_params, self.value_params)]
params_flat = []
for k in range(num_agents):
params_flat.extend(params[k])
self.grads_check = grads = [
tf.gradients(train_loss[k], params[k]) for k in range(num_agents)
]
clone_grads = [
tf.gradients(lld[k], params[k]) for k in range(num_agents)
]
self.optim = optim = []
self.clones = clones = []
update_stats_op = []
train_op, clone_op, q_runner = [], [], []
for k in range(num_agents):
if identical[k]:
optim.append(optim[-1])
train_op.append(train_op[-1])
q_runner.append(q_runner[-1])
clones.append(clones[-1])
clone_op.append(clone_op[-1])
else:
with tf.variable_scope('optim_%d' % k):
optim.append(kfac.KfacOptimizer(
learning_rate=PG_LR[k], clip_kl=kfac_clip,
momentum=0.9, kfac_update=1, epsilon=0.01,
stats_decay=0.99, async=0, cold_iter=10,
max_grad_norm=max_grad_norm)
)
update_stats_op.append(optim[k].compute_and_apply_stats(joint_fisher_loss, var_list=params[k]))
train_op_, q_runner_ = optim[k].apply_gradients(list(zip(grads[k], params[k])))
train_op.append(train_op_)
q_runner.append(q_runner_)
with tf.variable_scope('clone_%d' % k):
clones.append(kfac.KfacOptimizer(
learning_rate=PG_LR[k], clip_kl=kfac_clip,
momentum=0.9, kfac_update=1, epsilon=0.01,
stats_decay=0.99, async=0, cold_iter=10,
max_grad_norm=max_grad_norm)
)
update_stats_op.append(clones[k].compute_and_apply_stats(
pg_fisher_loss[k], var_list=self.policy_params[k]))
clone_op_, q_runner_ = clones[k].apply_gradients(list(zip(clone_grads[k], self.policy_params[k])))
clone_op.append(clone_op_)
update_stats_op = tf.group(*update_stats_op)
train_ops = train_op
clone_ops = clone_op
train_op = tf.group(*train_op)
clone_op = tf.group(*clone_op)
self.q_runner = q_runner
self.lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)
self.clone_lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)
def train(obs, states, rewards, masks, actions, values):
advs = [rewards[k] - values[k] for k in range(num_agents)]
for step in range(len(obs)):
cur_lr = self.lr.value()
ob = np.concatenate(obs, axis=1)
td_map = {}
for k in range(num_agents):
if identical[k]:
continue
new_map = {}
if num_agents > 1:
action_v = []
for j in range(k, pointer[k]):
action_v.append(np.concatenate([multionehot(actions[i], self.n_actions[i])
for i in range(num_agents) if i != k], axis=1))
action_v = np.concatenate(action_v, axis=0)
new_map.update({train_model[k].A_v: action_v})
td_map.update({train_model[k].A_v: action_v})
new_map.update({
train_model[k].X: np.concatenate([obs[j] for j in range(k, pointer[k])], axis=0),
train_model[k].X_v: np.concatenate([ob.copy() for j in range(k, pointer[k])], axis=0),
A[k]: np.concatenate([actions[j] for j in range(k, pointer[k])], axis=0),
ADV[k]: np.concatenate([advs[j] for j in range(k, pointer[k])], axis=0),
R[k]: np.concatenate([rewards[j] for j in range(k, pointer[k])], axis=0),
PG_LR[k]: cur_lr / float(scale[k])
})
sess.run(train_ops[k], feed_dict=new_map)
td_map.update(new_map)
if states[k] != []:
td_map[train_model[k].S] = states
td_map[train_model[k].M] = masks
policy_loss, value_loss, policy_entropy = sess.run(
[pg_loss, vf_loss, entropy],
td_map
)
return policy_loss, value_loss, policy_entropy
def clone(obs, actions):
td_map = {}
cur_lr = self.clone_lr.value()
for k in range(num_agents):
if identical[k]:
continue
new_map = {}
new_map.update({
train_model[k].X: np.concatenate([obs[j] for j in range(k, pointer[k])], axis=0),
A[k]: np.concatenate([actions[j] for j in range(k, pointer[k])], axis=0),
PG_LR[k]: cur_lr / float(scale[k])
})
sess.run(clone_ops[k], feed_dict=new_map)
td_map.update(new_map)
lld_loss = sess.run([lld], td_map)
return lld_loss
def save(save_path):
ps = sess.run(params_flat)
joblib.dump(ps, save_path)
def load(load_path):
loaded_params = joblib.load(load_path)
restores = []
for p, loaded_p in zip(params_flat, loaded_params):
restores.append(p.assign(loaded_p))
sess.run(restores)
self.train = train
self.clone = clone
self.save = save
self.load = load
self.train_model = train_model
self.step_model = step_model
def step(ob, av, *_args, **_kwargs):
a, v, s = [], [], []
obs = np.concatenate(ob, axis=1)
for k in range(num_agents):
a_v = np.concatenate([multionehot(av[i], self.n_actions[i])
for i in range(num_agents) if i != k], axis=1)
a_, v_, s_ = step_model[k].step(ob[k], obs, a_v)
a.append(a_)
v.append(v_)
s.append(s_)
return a, v, s
self.step = step
def value(obs, av):
v = []
ob = np.concatenate(obs, axis=1)
for k in range(num_agents):
a_v = np.concatenate([multionehot(av[i], self.n_actions[i])
for i in range(num_agents) if i != k], axis=1)
v_ = step_model[k].value(ob, a_v)
v.append(v_)
return v
self.value = value
self.initial_state = [step_model[k].initial_state for k in range(num_agents)]
class Runner(object):
def __init__(self, env, model, discriminator, nsteps, nstack, gamma, lam, disc_type):
self.env = env
self.model = model
self.discriminator = discriminator
self.disc_type = disc_type
self.num_agents = len(env.observation_space)
self.nenv = nenv = env.num_envs
self.batch_ob_shape = [
(nenv * nsteps, nstack * env.observation_space[k].shape[0]) for k in range(self.num_agents)
]
self.obs = [
np.zeros((nenv, nstack * env.observation_space[k].shape[0])) for k in range(self.num_agents)
]
self.actions = [np.zeros((nenv, )) for _ in range(self.num_agents)]
obs = env.reset()
self.update_obs(obs)
self.gamma = gamma
self.lam = lam
self.nsteps = nsteps
self.states = model.initial_state
self.n_actions = [env.action_space[k].n for k in range(self.num_agents)]
self.dones = [np.array([False for _ in range(nenv)]) for k in range(self.num_agents)]
def update_obs(self, obs):
# TODO: Potentially useful for stacking.
self.obs = obs
# for k in range(self.num_agents):
# ob = np.roll(self.obs[k], shift=-1, axis=1)
# ob[:, -1] = obs[:, 0]
# self.obs[k] = ob
# self.obs = [np.roll(ob, shift=-1, axis=3) for ob in self.obs]
# self.obs[:, :, :, -1] = obs[:, :, :, 0]
def run(self):
# mb_episode_r = [[] for _ in range(self.num_agents)]
mb_obs = [[] for _ in range(self.num_agents)]
mb_true_rewards = [[] for _ in range(self.num_agents)]
mb_rewards = [[] for _ in range(self.num_agents)]
mb_actions = [[] for _ in range(self.num_agents)]
mb_values = [[] for _ in range(self.num_agents)]
mb_dones = [[] for _ in range(self.num_agents)]
mb_masks = [[] for _ in range(self.num_agents)]
mb_states = self.states
for n in range(self.nsteps):
actions, values, states = self.model.step(self.obs, self.actions)
self.actions = actions
if self.disc_type == 'decentralized':
mul = [multionehot(self.actions[k], self.n_actions[k]) for k in range(self.num_agents)]
rewards = [np.squeeze(self.discriminator[k].get_reward(
self.obs[k], np.concatenate(mul, axis=1)))
for k in range(self.num_agents)]
elif self.disc_type == 'decentralized-all':
mul = [multionehot(self.actions[k], self.n_actions[k]) for k in range(self.num_agents)]
rewards = [np.squeeze(self.discriminator[k].get_reward(
np.concatenate(self.obs, axis=1), np.concatenate(mul, axis=1)))
for k in range(self.num_agents)]
else:
assert False
for k in range(self.num_agents):
mb_obs[k].append(np.copy(self.obs[k]))
mb_actions[k].append(actions[k])
mb_values[k].append(values[k])
mb_dones[k].append(self.dones[k])
mb_rewards[k].append(rewards[k])
actions_list = []
for i in range(self.nenv):
actions_list.append([onehot(actions[k][i], self.n_actions[k]) for k in range(self.num_agents)])
obs, true_rewards, dones, _ = self.env.step(actions_list)
self.states = states
self.dones = dones
for k in range(self.num_agents):
for ni, done in enumerate(dones[k]):
if done:
self.obs[k][ni] = self.obs[k][ni] * 0.0
self.update_obs(obs)
for k in range(self.num_agents):
mb_true_rewards[k].append(true_rewards[k])
for k in range(self.num_agents):
mb_dones[k].append(self.dones[k])
# batch of steps to batch of rollouts
# print(mb_rewards[0])
for k in range(self.num_agents):
# mb_episode_r[k] = np.sum(mb_rewards[k]) / np.shape(mb_rewards[k])[-1]
mb_obs[k] = np.asarray(mb_obs[k], dtype=np.float32).swapaxes(1, 0).reshape(self.batch_ob_shape[k])
mb_true_rewards[k] = np.asarray(mb_true_rewards[k], dtype=np.float32).swapaxes(1, 0)
mb_rewards[k] = np.asarray(mb_rewards[k], dtype=np.float32).swapaxes(1, 0)
mb_actions[k] = np.asarray(mb_actions[k], dtype=np.int32).swapaxes(1, 0)
mb_values[k] = np.asarray(mb_values[k], dtype=np.float32).swapaxes(1, 0)
mb_dones[k] = np.asarray(mb_dones[k], dtype=np.bool).swapaxes(1, 0)
mb_masks[k] = mb_dones[k][:, :-1]
mb_dones[k] = mb_dones[k][:, 1:]
# last_values = self.model.value(self.obs, self.actions)
#
# mb_advs = [np.zeros_like(mb_rewards[k]) for k in range(self.num_agents)]
# mb_returns = [[] for _ in range(self.num_agents)]
#
# lastgaelam = 0.0
# for k in range(self.num_agents):
# for t in reversed(range(self.nsteps)):
# if t == self.nsteps - 1:
# nextnonterminal = 1.0 - self.dones[k]
# nextvalues = last_values[k]
# else:
# nextnonterminal = 1.0 - mb_dones[k][:, t + 1]
# nextvalues = mb_values[k][:, t + 1]
# delta = mb_rewards[k][:, t] + self.gamma * nextvalues * nextnonterminal - mb_values[k][:, t]
# mb_advs[k][:, t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
# mb_returns[k] = mb_advs[k] + mb_values[k]
# mb_returns[k] = mb_returns[k].flatten()
# mb_masks[k] = mb_masks[k].flatten()
# mb_values[k] = mb_values[k].flatten()
# mb_actions[k] = mb_actions[k].flatten()
mb_returns = [np.zeros_like(mb_rewards[k]) for k in range(self.num_agents)]
mb_true_returns = [np.zeros_like(mb_rewards[k]) for k in range(self.num_agents)]
last_values = self.model.value(self.obs, self.actions)
# discount/bootstrap off value fn
for k in range(self.num_agents):
for n, (rewards, true_rewards, dones, value) in enumerate(zip(mb_rewards[k], mb_true_rewards[k], mb_dones[k], last_values[k].tolist())):
rewards = rewards.tolist()
dones = dones.tolist()
true_rewards = true_rewards.tolist()
if dones[-1] == 0:
rewards = discount_with_dones(rewards + [value], dones + [0], self.gamma)[:-1]
true_rewards = discount_with_dones(true_rewards + [value], dones + [0], self.gamma)[:-1]
else:
rewards = discount_with_dones(rewards, dones, self.gamma)
true_rewards = discount_with_dones(true_rewards, dones, self.gamma)
mb_returns[k][n] = rewards
mb_true_returns[k][n] = true_rewards
for k in range(self.num_agents):
mb_returns[k] = mb_returns[k].flatten()
mb_masks[k] = mb_masks[k].flatten()
mb_values[k] = mb_values[k].flatten()
mb_actions[k] = mb_actions[k].flatten()
mh_actions = [multionehot(mb_actions[k], self.n_actions[k]) for k in range(self.num_agents)]
mb_all_obs = np.concatenate(mb_obs, axis=1)
mh_all_actions = np.concatenate(mh_actions, axis=1)
return mb_obs, mb_states, mb_returns, mb_masks, mb_actions,\
mb_values, mb_all_obs, mh_actions, mh_all_actions, mb_rewards, mb_true_rewards, mb_true_returns#, mb_episode_r
def learn(policy, expert, env, env_id, seed, total_timesteps=int(40e6), gamma=0.99, lam=0.95, log_interval=1, nprocs=32,
nsteps=20, nstack=1, ent_coef=0.01, vf_coef=0.5, vf_fisher_coef=1.0, lr=0.25, max_grad_norm=0.5,
kfac_clip=0.001, save_interval=50, lrschedule='linear', dis_lr=0.001, disc_type='decentralized',
bc_iters=500, identical=None, d_iters=1):
tf.reset_default_graph()
set_global_seeds(seed)
buffer = None
nenvs = env.num_envs
ob_space = env.observation_space
ac_space = env.action_space
num_agents = (len(ob_space))
make_model = lambda: Model(policy, ob_space, ac_space, nenvs, total_timesteps, nprocs=nprocs, nsteps=nsteps,
nstack=nstack, ent_coef=ent_coef, vf_coef=vf_coef, vf_fisher_coef=vf_fisher_coef,
lr=lr, max_grad_norm=max_grad_norm, kfac_clip=kfac_clip,
lrschedule=lrschedule, identical=identical)
if save_interval and logger.get_dir():
import cloudpickle
with open(osp.join(logger.get_dir(), 'make_model.pkl'), 'wb') as fh:
fh.write(cloudpickle.dumps(make_model))
model = make_model()
if disc_type == 'decentralized':
discriminator = [
Discriminator(model.sess, ob_space, ac_space, nstack, k, disc_type=disc_type,
scope="Discriminator_%d" % k, # gp_coef=gp_coef,
total_steps=total_timesteps // (nprocs * nsteps),
lr_rate=dis_lr) for k in range(num_agents)
]
elif disc_type == 'dentralized-all':
discriminator = [
Discriminator(model.sess, ob_space, ac_space, nstack, k, disc_type=disc_type,
scope="Discriminator_%d" % k, # gp_coef=gp_coef,
total_steps=total_timesteps // (nprocs * nsteps),
lr_rate=dis_lr) for k in range(num_agents)
]
else:
assert False
tf.global_variables_initializer().run(session=model.sess)
runner = Runner(env, model, discriminator, nsteps=nsteps, nstack=nstack, gamma=gamma, lam=lam, disc_type=disc_type)
nbatch = nenvs * nsteps
tstart = time.time()
coord = tf.train.Coordinator()
# enqueue_threads = [q_runner.create_threads(model.sess, coord=coord, start=True) for q_runner in model.q_runner]
for _ in range(bc_iters):
e_obs, e_actions, _, _ = expert.get_next_batch(nenvs * nsteps)
e_a = [np.argmax(e_actions[k], axis=1) for k in range(len(e_actions))]
lld_loss = model.clone(e_obs, e_a)
# print(lld_loss)
for update in range(1, total_timesteps // nbatch + 1):
obs, states, rewards, masks, actions, values, all_obs,\
mh_actions, mh_all_actions, mh_rewards, mh_true_rewards, mh_true_returns = runner.run()#, mh_episode_r = runner.run()
# d_iters = 1
g_loss, e_loss = np.zeros((num_agents, d_iters)), np.zeros((num_agents, d_iters))
idx = 0
idxs = np.arange(len(all_obs))
random.shuffle(idxs)
all_obs = all_obs[idxs]
mh_actions = [mh_actions[k][idxs] for k in range(num_agents)]
mh_obs = [obs[k][idxs] for k in range(num_agents)]
mh_values = [values[k][idxs] for k in range(num_agents)]
if buffer:
buffer.update(mh_obs, mh_actions, None, all_obs, mh_values)
else:
buffer = Dset(mh_obs, mh_actions, None, all_obs, mh_values, randomize=True, num_agents=num_agents)
d_minibatch = nenvs * nsteps
for d_iter in range(d_iters):
e_obs, e_actions, e_all_obs, _ = expert.get_next_batch(d_minibatch)
g_obs, g_actions, g_all_obs, _ = buffer.get_next_batch(batch_size=d_minibatch)
if disc_type == 'decentralized':
for k in range(num_agents):
g_loss[k, d_iter], e_loss[k, d_iter], _, _ = discriminator[k].train(
g_obs[k],
np.concatenate(g_actions, axis=1),
e_obs[k],
np.concatenate(e_actions, axis=1)
)
elif disc_type == 'decentralized-all':
for k in range(num_agents):
g_loss[k, d_iter], e_loss[k, d_iter], _, _ = discriminator[k].train(
g_all_obs,
np.concatenate(g_actions, axis=1),
e_all_obs,
np.concatenate(e_actions, axis=1))
else:
assert False
idx += 1
if update > 10:
policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values)
model.old_obs = obs
nseconds = time.time() - tstart
fps = int((update * nbatch) / nseconds)
if update % log_interval == 0 or update == 1:
ev = [explained_variance(values[k], rewards[k]) for k in range(model.num_agents)]
logger.record_tabular("nupdates", update)
logger.record_tabular("total_timesteps", update * nbatch)
logger.record_tabular("fps", fps)
for k in range(model.num_agents):
logger.record_tabular("explained_variance %d" % k, float(ev[k]))
# logger.record_tabular("episode_reward %d" % k, float(mh_episode_r[k]))
if update > 10:
logger.record_tabular("policy_entropy %d" % k, float(policy_entropy[k]))
logger.record_tabular("policy_loss %d" % k, float(policy_loss[k]))
logger.record_tabular("value_loss %d" % k, float(value_loss[k]))
try:
logger.record_tabular('pearson %d' % k, float(
pearsonr(rewards[k].flatten(), mh_true_returns[k].flatten())[0]))
logger.record_tabular('reward %d' % k, float(np.mean(rewards[k])))
logger.record_tabular('spearman %d' % k, float(
spearmanr(rewards[k].flatten(), mh_true_returns[k].flatten())[0]))
except:
pass
# logger.record_tabular("episode_sum_reward", float(np.sum(mh_episode_r[k])))
if update > 10 and env_id == 'simple_tag':
try:
logger.record_tabular('in_pearson_0_2', float(
pearsonr(rewards[0].flatten(), rewards[2].flatten())[0]))
logger.record_tabular('in_pearson_1_2', float(
pearsonr(rewards[1].flatten(), rewards[2].flatten())[0]))
logger.record_tabular('in_spearman_0_2', float(
spearmanr(rewards[0].flatten(), rewards[2].flatten())[0]))
logger.record_tabular('in_spearman_1_2', float(
spearmanr(rewards[1].flatten(), rewards[2].flatten())[0]))
except:
pass
g_loss_m = np.mean(g_loss, axis=1)
e_loss_m = np.mean(e_loss, axis=1)
# g_loss_gp_m = np.mean(g_loss_gp, axis=1)
# e_loss_gp_m = np.mean(e_loss_gp, axis=1)
for k in range(num_agents):
logger.record_tabular("g_loss %d" % k, g_loss_m[k])
logger.record_tabular("e_loss %d" % k, e_loss_m[k])
# logger.record_tabular("g_loss_gp %d" % k, g_loss_gp_m[k])
# logger.record_tabular("e_loss_gp %d" % k, e_loss_gp_m[k])
logger.dump_tabular()
if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir():
savepath = osp.join(logger.get_dir(), 'm_%.5i' % update)
print('Saving to', savepath)
model.save(savepath)
if disc_type == 'decentralized':
for k in range(num_agents):
savepath = osp.join(logger.get_dir(), 'd_%d_%.5i' % (k, update))
discriminator[k].save(savepath)
elif disc_type == 'decentralized-all':
for k in range(num_agents):
savepath = osp.join(logger.get_dir(), 'd_%d_%.5i' % (k, update))
discriminator[k].save(savepath)
coord.request_stop()
# coord.join(enqueue_threads)
env.close()
| [
"ericliuof97@gmail.com"
] | ericliuof97@gmail.com |
8405cbf135eef4a8ea7864cace75bd9e40f91ebf | adbc667076cc5d35f2411239c1d717558df7f9a2 | /ecom/blog/migrations/0001_initial.py | 29cf764f4f6b1bfbc605d0254ade79b4f5b094be | [] | no_license | satyam2912/E-commerce-django | a455f9943c3e04de4b74a4101010fbccf4b10bc4 | 455d28ce6a1ec502bdccfc4c430c5f86a812127b | refs/heads/master | 2023-03-01T17:22:11.923843 | 2021-02-14T11:42:23 | 2021-02-14T11:42:23 | 326,479,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | # Generated by Django 2.2.12 on 2020-09-29 16:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blogpost',
fields=[
('post_id', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=50)),
('head0', models.CharField(default='', max_length=500)),
('chead0', models.CharField(default='', max_length=5000)),
('head1', models.CharField(default='', max_length=500)),
('chead1', models.CharField(default='', max_length=5000)),
('head2', models.CharField(default='', max_length=500)),
('chead2', models.CharField(default='', max_length=5000)),
('publish_date', models.DateField()),
('thumbnail', models.ImageField(default='', upload_to='shop/images')),
],
),
]
| [
"pandeysatyam1996@gmail.com"
] | pandeysatyam1996@gmail.com |
8b564330a2dd2c16aace1926c913fcca01a19d48 | fd19e9dc496d7af22cbdd8cd8d185d4e99583c18 | /blog/urls.py | 15b95e2762e9645f4f966fa434d380810ee61a13 | [] | no_license | averlor/Blog_django | aa5e652e6970cf5aac6c8b155134685076bc9a25 | 53c324be0e2ca3b4fe5e4cc245fec361375d57de | refs/heads/master | 2020-04-08T07:39:36.313018 | 2018-11-26T15:39:34 | 2018-11-26T15:39:34 | 159,146,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.post_list, name='post_list'),
url(r'^post/(?P<pk>\d+)/$', views.post_detail, name='post_detail'),
url(r'^post/new/$', views.post_new, name='post_new'),
url(r'^post/(?P<pk>\d+)/edit/$',views.post_edit,name='post_edit'),
] | [
"vabramanc@gmail.com"
] | vabramanc@gmail.com |
35fd3b5566f1aaafda384b69fbcda78b39a3b231 | 334a0efacc8abaa26d92d4a06ef1ba8bb5b7b213 | /clockString2image.py | ab8a88dd1535e142436f310ecce5b87b73ee8cce | [] | no_license | gunman-vagabond/gunclock-tensorflow-flask | 84c39e2ca387f4b9bdf13b6ebf65c77af8f32563 | d6a0af8672d2c6583b385c54248364fd555119a2 | refs/heads/master | 2022-12-11T06:14:32.194994 | 2020-02-11T03:04:05 | 2020-02-11T03:04:05 | 236,270,374 | 0 | 0 | null | 2022-09-23T22:35:25 | 2020-01-26T05:18:08 | Python | UTF-8 | Python | false | false | 447 | py | import os,csv
import sys
import numpy as np
from PIL import Image,ImageDraw,ImageFont
def clockString2image(clockString):
text = clockString
text_split = text.split("\n")
clocksize = len(text_split) - 1
img = Image.new("RGB", (clocksize*12, clocksize*12), (255,255,255))
draw = ImageDraw.Draw(img)
for i in range(clocksize):
draw.text((0,i*12),text_split[i],(0,0,0))
img = img.resize((80,80))
return img
| [
"noreply@github.com"
] | gunman-vagabond.noreply@github.com |
8ef65c6fea59304140275f2c14c1aed2bd814595 | be26f8fa36ea4e52ef94555fc69a104e88d3fd16 | /models/Juan's Models/old scripts/gen_logits_and_embeddings.py | 2427ab44a4892dcff7b042188c28007e2df740d6 | [] | no_license | davidday99/twitter-data-collector | 599eeed1bf0fd995da28053d3547f3b95ddb2385 | bf7d203c380dc727d30f1e17b0297f1c011642b0 | refs/heads/master | 2022-06-02T09:41:26.940830 | 2020-05-06T19:20:47 | 2020-05-06T19:20:47 | 254,766,551 | 3 | 3 | null | 2020-04-27T08:35:19 | 2020-04-11T00:59:04 | Python | UTF-8 | Python | false | false | 13,350 | py | import random
import os
import pandas as pd
import numpy as np
from transformers import BertTokenizer, BertForSequenceClassification, BertModel, BertPreTrainedModel, BertConfig
from transformers import AdamW, BertConfig, get_linear_schedule_with_warmup
from keras.preprocessing.sequence import pad_sequences
import torch
from torch import nn, optim
from torch.utils.data import TensorDataset, Subset, DataLoader, RandomSampler, SequentialSampler, TensorDataset
import io
from sklearn.model_selection import train_test_split
df_train = pd.read_csv('data/profile_data_train.csv')
df_test = pd.read_csv('data/profile_data_test.csv')
df_train = df_train.drop('Unnamed: 0', axis=1).reset_index(drop=True)
df_test = df_test.drop('Unnamed: 0', axis=1).reset_index(drop=True)
# Split train dataframe into 8 stratified chunks
X_train = df_train.drop('age_group', axis=1)
Y_train = df_train.age_group.values.tolist()
X_test = df_test.drop('age_group', axis=1)
Y_test = df_test.age_group.values.tolist()
# First split: 50-50
X_1, X_2, Y_1, Y_2 = train_test_split(X_train, Y_train, stratify=Y_train, test_size=0.5, random_state=42)
# Second split: 25-25-25-25
X_1, X_3, Y_1, Y_3 = train_test_split(X_1, Y_1, stratify=Y_1, test_size=0.5, random_state=42)
X_2, X_4, Y_2, Y_4 = train_test_split(X_2, Y_2, stratify=Y_2, test_size=0.5, random_state=42)
# Third split: 12.5 x 8
X_1, X_5, Y_1, Y_5 = train_test_split(X_1, Y_1, stratify=Y_1, test_size=0.5, random_state=42)
X_2, X_6, Y_2, Y_6 = train_test_split(X_2, Y_2, stratify=Y_2, test_size=0.5, random_state=42)
X_3, X_7, Y_3, Y_7 = train_test_split(X_3, Y_3, stratify=Y_3, test_size=0.5, random_state=42)
X_4, X_8, Y_4, Y_8 = train_test_split(X_4, Y_4, stratify=Y_4, test_size=0.5, random_state=42)
X_1 = X_1.reset_index(drop=True)
X_2 = X_2.reset_index(drop=True)
X_3 = X_3.reset_index(drop=True)
X_4 = X_4.reset_index(drop=True)
X_5 = X_5.reset_index(drop=True)
X_6 = X_6.reset_index(drop=True)
X_7 = X_7.reset_index(drop=True)
X_8 = X_8.reset_index(drop=True)
X_splits = [X_1, X_2, X_3, X_4, X_5, X_6, X_7, X_8]
Y_splits = [Y_1, Y_2, Y_3, Y_4, Y_5, Y_6, Y_7, Y_8]
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# Compute accuracy given logits and true labels
def accuracy(logits, labels):
preds = np.argmax(logits, axis=1).flatten()
labels = labels.flatten()
return np.sum(preds == labels) / len(labels)
# Train a model over one epoch
def train_one_epoch(model, train_dataloader, optimizer, scheduler):
print("*************STARTING EPOCH*************")
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
total_loss = 0
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Epoch
model.train()
for step, batch in enumerate(train_dataloader):
if step % 100 == 0:
print("Batch " + str(step) + ' of ' + str(len(train_dataloader)))
batch_input_ids = batch[0].to(device)
batch_input_masks = batch[1].to(device)
batch_labels = batch[2].to(device)
model.zero_grad()
outputs = model(batch_input_ids, token_type_ids=None, attention_mask=batch_input_masks, labels=batch_labels)
loss = outputs[0]
total_loss += loss.item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
scheduler.step()
avg_train_loss = total_loss / len(train_dataloader)
print("Done training epoch. Average training loss: " + str(avg_train_loss))
return avg_train_loss
# Evaluate a model's accuracy and return predictions, true labels, embeddings
def eval_profiles(model, test_dataloader):
    """Evaluate `model` on `test_dataloader` without gradient tracking.

    Returns three parallel lists, one entry per batch:
    per-batch logits (numpy), per-batch label arrays (numpy), and the last
    hidden-state layer of each batch (numpy). Prints the mean per-batch
    accuracy. NOTE(review): an empty dataloader would divide by zero in the
    final accuracy print.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.eval()
    predictions, true_labels, embedding_layers = [], [], []
    eval_accuracy, eval_steps = 0, 0
    for step, batch in enumerate(test_dataloader):
        batch = tuple(t.to(device) for t in batch)
        if step % 100 == 0:
            print("batch " + str(step) + ' of ' + str(len(test_dataloader)))
        batch_input_ids, batch_input_mask, batch_labels = batch
        with torch.no_grad():
            outputs = model(batch_input_ids, token_type_ids=None, attention_mask=batch_input_mask)
        logits = outputs[0]
        logits = logits.detach().cpu().numpy()
        labels = batch_labels.to('cpu').numpy()
        # outputs[1] holds the hidden states (model was created with
        # output_hidden_states=True); [-1] is the final layer.
        embed = outputs[1][-1].detach().cpu().numpy()
        predictions.append(logits)
        true_labels.append(labels)
        embedding_layers.append(embed)
        eval_accuracy += accuracy(logits, labels)
        eval_steps += 1
    print("Calculated accuracy on eval set: " + str(eval_accuracy / eval_steps))
    return predictions, true_labels, embedding_layers
# Assign initial training logits: add one zero-filled column per class so the
# holdout loop below can fill them in row-by-row via .loc assignment.
df_train = df_train.assign(Logit0=np.zeros(df_train.shape[0]))
df_train = df_train.assign(Logit1=np.zeros(df_train.shape[0]))
df_train = df_train.assign(Logit2=np.zeros(df_train.shape[0]))
df_train = df_train.assign(Logit3=np.zeros(df_train.shape[0]))
# Training hyperparameters shared by every fold and by the final model.
epochs = 7
batch_size = 8
# Out-of-fold feature generation: for each of the 8 folds, train a fresh BERT
# classifier on the other 7 folds and record its logits + [CLS] embeddings for
# the held-out fold, so every training row gets predictions from a model that
# never saw it.
for holdout_idx in range(8):
    print(
        "************************************************************************************************************")
    print("************************************GENERATING LOGITS FOR HOLDOUT " + str(
        holdout_idx + 1) + " OF 8************************************")
    print(
        "************************************************************************************************************\n")
    X_temp, Y_temp = [], []
    X_holdout, Y_holdout = [], []
    # Generate temporary X and Y for training and holdout
    for idx in range(8):
        if idx != holdout_idx:
            X_temp = X_temp + X_splits[idx].tweets_text.values.tolist()
            Y_temp = Y_temp + Y_splits[idx]
        else:
            X_holdout = X_splits[idx].tweets_text.values.tolist()
            Y_holdout = Y_splits[idx]
    # Tokenize and pad inputs (BERT max sequence length of 512 tokens).
    input_ids, holdout_input_ids = [], []
    for tweet in X_temp:
        encoded = tokenizer.encode(tweet, add_special_tokens=True, max_length=512)
        input_ids.append(encoded)
    input_ids = pad_sequences(input_ids, maxlen=512, dtype='long', value=0, padding='post', truncating='post')
    for tweet in X_holdout:
        encoded = tokenizer.encode(tweet, add_special_tokens=True, max_length=512)
        holdout_input_ids.append(encoded)
    holdout_input_ids = pad_sequences(holdout_input_ids, maxlen=512, dtype='long', value=0, padding='post',
                                      truncating='post')
    # Attention masks to ignore padded tokens (1 for real tokens, 0 for padding).
    attention_masks, holdout_attention_masks = [], []
    for tweet in input_ids:
        mask = [int(token_id > 0) for token_id in tweet]
        attention_masks.append(mask)
    for tweet in holdout_input_ids:
        mask = [int(token_id > 0) for token_id in tweet]
        holdout_attention_masks.append(mask)
    # Prep torch data: training uses random sampling, holdout keeps file order
    # so outputs line up with X_splits[holdout_idx] rows.
    train_inputs = torch.tensor(input_ids)
    train_labels = torch.tensor(Y_temp)
    train_masks = torch.tensor(attention_masks)
    holdout_inputs = torch.tensor(holdout_input_ids)
    holdout_labels = torch.tensor(Y_holdout)
    holdout_masks = torch.tensor(holdout_attention_masks)
    train_data = TensorDataset(train_inputs, train_masks, train_labels)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
    holdout_data = TensorDataset(holdout_inputs, holdout_masks, holdout_labels)
    holdout_dataloader = DataLoader(holdout_data, sampler=None, batch_size=batch_size)
    # Load empty model (fresh pretrained weights each fold; hidden states are
    # needed for the [CLS] embedding features).
    model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=4, output_attentions=False,
                                                          output_hidden_states=True)
    model.cuda()  # NOTE(review): assumes a CUDA device is present — crashes on CPU-only hosts.
    optimizer = AdamW(model.parameters(), lr=2e-5, eps=1e-8)
    total_steps = len(train_dataloader) * epochs
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)
    # Train over epochs
    losses = []
    for epoch in range(epochs):
        print("Epoch " + str(epoch + 1))
        loss = train_one_epoch(model, train_dataloader, optimizer, scheduler)
        losses.append(loss)
    predictions, true_labels, embedding_layers = eval_profiles(model, holdout_dataloader)
    # Assign logits and embeddings to dataframe: flatten the per-batch lists
    # back into one array per holdout row.
    logits = [item for sublist in predictions for item in sublist]
    logits = np.array(logits)
    embeddings = np.array([item for sublist in embedding_layers for item in sublist])  # N x 512 x 768
    embeddings = embeddings[:, 0, :]  # Embedding of [CLS] token represents sentence, N x 768
    X_splits[holdout_idx] = X_splits[holdout_idx].reset_index(drop=True)
    # Write each row's logits and 768-dim embedding into df_train, matched by
    # Twitter handle.
    for idx, row in X_splits[holdout_idx].iterrows():
        handle = row['handle']
        df_train.loc[df_train['handle'] == handle, 'Logit0'] = logits[idx][0]
        df_train.loc[df_train['handle'] == handle, 'Logit1'] = logits[idx][1]
        df_train.loc[df_train['handle'] == handle, 'Logit2'] = logits[idx][2]
        df_train.loc[df_train['handle'] == handle, 'Logit3'] = logits[idx][3]
        for i in range(768):
            feat_name = 'embed' + str(i)
            df_train.loc[df_train['handle'] == handle, feat_name] = embeddings[idx][i]
    del model  # Free cuda memory, prevent information leakage
# Save generated training features
df_train.to_csv('train.csv')
print("Successfully saved training features:")
print(df_train.head())
# Final model: train on the full training set, generate logits/embeddings for
# the test set, and persist the fine-tuned model to disk.
input_ids = []
for tweet in X_train.tweets_text.values.tolist():
    encoded = tokenizer.encode(tweet, add_special_tokens=True, max_length=512)
    input_ids.append(encoded)
input_ids = pad_sequences(input_ids, maxlen=512, dtype='long', value=0, padding='post', truncating='post')
# Attention masks: 1 for real tokens, 0 for padding.
attention_masks = []
for tweet in input_ids:
    mask = [int(token_id > 0) for token_id in tweet]
    attention_masks.append(mask)
train_inputs = torch.tensor(input_ids)
train_labels = torch.tensor(Y_train)
train_masks = torch.tensor(attention_masks)
train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
# Load empty model
model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=4, output_attentions=False,
                                                      output_hidden_states=True)
model.cuda()  # NOTE(review): assumes a CUDA device is present.
optimizer = AdamW(model.parameters(), lr=2e-5, eps=1e-8)
total_steps = len(train_dataloader) * epochs
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)
# Train over epochs
losses = []
for epoch in range(epochs):
    print("Epoch " + str(epoch + 1))
    loss = train_one_epoch(model, train_dataloader, optimizer, scheduler)
    losses.append(loss)
# Tokenize/pad/mask the test set exactly like the training set; keep file
# order (sampler=None) so outputs line up with X_test rows.
test_input_ids = []
for tweet in X_test.tweets_text.values.tolist():
    encoded = tokenizer.encode(tweet, add_special_tokens=True, max_length=512)
    test_input_ids.append(encoded)
test_input_ids = pad_sequences(test_input_ids, maxlen=512, dtype='long', value=0, padding='post', truncating='post')
test_attention_masks = []
for tweet in test_input_ids:
    mask = [int(token_id > 0) for token_id in tweet]
    test_attention_masks.append(mask)
test_inputs = torch.tensor(test_input_ids)
test_labels = torch.tensor(Y_test)
test_masks = torch.tensor(test_attention_masks)
test_data = TensorDataset(test_inputs, test_masks, test_labels)
test_dataloader = DataLoader(test_data, sampler=None, batch_size=batch_size)
# Make predictions for test set and get logits
predictions, true_labels, embedding_layers = eval_profiles(model, test_dataloader)
logits = [item for sublist in predictions for item in sublist]
logits = np.array(logits)
embeddings = np.array([item for sublist in embedding_layers for item in sublist])  # N x 512 x 768
embeddings = embeddings[:, 0, :]  # Embedding of [CLS] token represents sentence
# Assign test logits: one zero-initialized column per class.
df_test = df_test.assign(Logit0=np.zeros(df_test.shape[0]))
df_test = df_test.assign(Logit1=np.zeros(df_test.shape[0]))
df_test = df_test.assign(Logit2=np.zeros(df_test.shape[0]))
df_test = df_test.assign(Logit3=np.zeros(df_test.shape[0]))
# Save logits to df, matched by Twitter handle.
X_test = X_test.reset_index(drop=True)
for idx, row in X_test.iterrows():
    handle = row['handle']
    df_test.loc[df_test['handle'] == handle, 'Logit0'] = logits[idx][0]
    df_test.loc[df_test['handle'] == handle, 'Logit1'] = logits[idx][1]
    df_test.loc[df_test['handle'] == handle, 'Logit2'] = logits[idx][2]
    df_test.loc[df_test['handle'] == handle, 'Logit3'] = logits[idx][3]
    for i in range(768):
        feat_name = 'embed' + str(i)
        df_test.loc[df_test['handle'] == handle, feat_name] = embeddings[idx][i]
df_test.to_csv('test.csv')
# NOTE(review): message says "training features" but this saves TEST features.
print("Successfully saved training features:")
print(df_test.head())
# Save fine-tuned model (handles DataParallel wrapping via .module).
output_dir = './bert_finetuned/'
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
print("Saving model to %s" % output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
| [
"juanpaez@utexas.edu"
] | juanpaez@utexas.edu |
5771ab25a8f03b02ce6256a0612f9f2a7e8269a9 | 81a02bce72a7db755eb813cb9619c423bb06684d | /core/migrations/0004_auto_20190314_2308.py | e3e1c39da39e49c4d011f278cb2ddf2d92819f71 | [
"MIT"
] | permissive | tsnaf/semita | 5d6f88ea982b5296649cfe65e4689fad45ac1641 | 23cc7954c9bcdf5607592b8d3bb264f69098d5a8 | refs/heads/master | 2020-08-22T16:59:13.381126 | 2019-09-06T00:12:52 | 2019-09-06T00:12:52 | 216,441,977 | 1 | 0 | MIT | 2019-10-20T23:36:21 | 2019-10-20T23:36:21 | null | UTF-8 | Python | false | false | 1,706 | py | # Generated by Django 2.1.7 on 2019-03-14 23:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Dashboard model and make three ForeignKeys nullable with
    explicit related_names (contact.organisation, grant.fund,
    grant.organisation)."""

    dependencies = [("core", "0003_grant_attachment")]

    operations = [
        # New Dashboard model with an auto PK and a single optional name field.
        migrations.CreateModel(
            name="Dashboard",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("dash", models.CharField(max_length=50, null=True)),
            ],
        ),
        # Contact -> Organisation: now nullable, reverse accessor "contactorgslist".
        migrations.AlterField(
            model_name="contact",
            name="organisation",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="contactorgslist",
                to="core.Organisation",
            ),
        ),
        # Grant -> Fund: now nullable, reverse accessor "grantfundslist".
        migrations.AlterField(
            model_name="grant",
            name="fund",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="grantfundslist",
                to="core.Fund",
            ),
        ),
        # Grant -> Organisation: now nullable, reverse accessor "grantorgslist".
        migrations.AlterField(
            model_name="grant",
            name="organisation",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="grantorgslist",
                to="core.Organisation",
            ),
        ),
    ]
| [
"hello@rae.li"
] | hello@rae.li |
7ce4e5c6f137596f23f8186d9b680431e5a52ceb | ad37bbf3b1b0d4d6ad6bf0c127bd92669a8f3029 | /node_modules/@web3-js/websocket/build/config.gypi | 83aac1135a412270945de7c7f9ac49c1d0349e62 | [
"Apache-2.0"
] | permissive | william-rittmeyer/private_ethereum_blockchain | 9b7bef440186430c0fd293220543abc41a63b4e7 | 079a21a3d9f41784934e5abc7d59024e9fcfd706 | refs/heads/master | 2022-05-23T22:25:26.886789 | 2020-05-02T00:33:33 | 2020-05-02T00:33:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,459 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"gas_version": "2.27",
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt64l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "64",
"llvm_version": 0,
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 64,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_large_pages": "false",
"node_use_openssl": "true",
"node_use_pch": "false",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "so.64",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_typed_array_max_size_in_heap": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "/home/sensorweb1/.cache/node-gyp/10.19.0",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"ham_it_up": "",
"legacy_bundling": "",
"sign_git_tag": "",
"user_agent": "npm/6.13.4 node/v10.19.0 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"allow_same_version": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "",
"prefer_online": "",
"noproxy": "",
"force": "",
"only": "",
"read_only": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"timing": "",
"userconfig": "/home/sensorweb1/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"preid": "",
"tmp": "/tmp",
"depth": "Infinity",
"package_lock_only": "",
"save_dev": "",
"usage": "",
"metrics_registry": "https://registry.npmjs.org/",
"otp": "",
"package_lock": "true",
"progress": "true",
"https_proxy": "",
"save_prod": "",
"audit": "true",
"cidr": "",
"onload_script": "",
"sso_type": "oauth",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"dry_run": "",
"format_package_lock": "true",
"prefix": "/usr",
"scope": "",
"browser": "",
"cache_lock_wait": "10000",
"ignore_prepublish": "",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/sensorweb1/.npm",
"send_metrics": "",
"global_style": "",
"ignore_scripts": "",
"version": "",
"local_address": "",
"viewer": "man",
"node_gyp": "/usr/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"audit_level": "low",
"prefer_offline": "",
"color": "true",
"sign_git_commit": "",
"fetch_retry_mintimeout": "10000",
"maxsockets": "50",
"offline": "",
"sso_poll_frequency": "500",
"umask": "0002",
"fund": "true",
"fetch_retry_maxtimeout": "60000",
"logs_max": "10",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"also": "",
"save": "true",
"unicode": "true",
"before": "",
"long": "",
"production": "",
"searchlimit": "20",
"unsafe_perm": "true",
"update_notifier": "true",
"auth_type": "legacy",
"node_version": "10.19.0",
"tag": "latest",
"git_tag_version": "true",
"commit_hooks": "true",
"script_shell": "",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/usr/etc/npmrc",
"init_module": "/home/sensorweb1/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/etc/npmignore",
"cache_lock_retries": "10",
"searchstaleness": "900",
"node_options": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
| [
"wittspace77@gmail.com"
] | wittspace77@gmail.com |
fe11ab8369162212f8ea26d7d1324131d3d39039 | 7c285bc226eb1424a7b9dae154301e92af08e2ee | /.c9/metadata/environment/products/models.py | 2d3c42af567c10672fc988634cbb7a3ce5766a4a | [] | no_license | JShad30/ecommerce | f0755d06e2790a9456b3b90f6e8cd7bb9e3f5f51 | 1634618b00dee14400948d4d06321d02a999a5c4 | refs/heads/master | 2020-06-20T03:40:41.073735 | 2019-07-15T10:45:30 | 2019-07-15T10:45:30 | 196,979,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | {"filter":false,"title":"models.py","tooltip":"/products/models.py","ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":10,"column":24},"end":{"row":10,"column":24},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":{"row":45,"mode":"ace/mode/python"}},"hash":"e0b274e4a9a212f98c452e16d3b65848db0d56f5","undoManager":{"mark":-1,"position":-1,"stack":[]},"timestamp":1563055655208} | [
"ubuntu@ip-172-31-80-73.ec2.internal"
] | ubuntu@ip-172-31-80-73.ec2.internal |
cb015eac44936cc54ab38c8fc85c628d7da24a26 | d99a84bfcd2709bb04da73979c3ec5ea7e68cef9 | /data/abilities/move.py | 98965503339949433cd4314a3fdf56969155d212 | [] | no_license | nikolr/PythonIsometricJRPG | 151591cac17682807a202f6d4c47e316451b5078 | d6c631fd06fb2b9a858cb9e36ac8e0ee0cee1334 | refs/heads/master | 2023-08-26T12:37:31.237974 | 2021-10-04T15:38:41 | 2021-10-04T15:38:41 | 408,345,232 | 0 | 0 | null | 2021-09-27T09:42:28 | 2021-09-20T07:16:03 | Python | UTF-8 | Python | false | false | 1,057 | py | from data.abilities.ability import Ability
class Move(Ability):
    """Basic movement ability: advances the user's sprite one square and
    spends action points, ending the turn when none remain."""

    def __init__(self, name: str, potency: int, ap_cost: int, targeting_type, range: int, user=None):
        super().__init__(name, potency, ap_cost, targeting_type, range, user=user)
        self.description = "Move 1 square forward"

    def activate(self):
        # Only spend AP if the sprite actually moved (move_a_square() is
        # presumably False when blocked — confirm against sprite code).
        if self.user.sprite.move_a_square() == True:
            self.user.scene.current_character.action_points = self.user.scene.current_character.action_points - self.ap_cost
            if self.user.scene.current_character.action_points > 0:
                # AP left: stay in the current character's turn.
                self.user.scene.state_machine.change_state(self.user.scene.turn_state)
            else:
                # AP exhausted: refill AP, advance the turn queue, hand
                # control to the next character.
                print("Next turn")
                self.user.scene.current_character.action_points = self.user.scene.current_character.base_action_points
                # NOTE(review): this branch uses self.group_manager and
                # self.state_machine, while the branch above uses
                # self.user.scene.state_machine — confirm both attributes
                # exist on Ability and refer to the same objects.
                self.group_manager.determine_turn_queue()
                self.user.scene.current_character = self.group_manager.get_next_character()
                self.state_machine.change_state(self.user.scene.turn_state)
| [
"nikolai.rantimo@gmail.com"
] | nikolai.rantimo@gmail.com |
70c2e83deb862bcaf5c7853c8fc59b7e6fda372a | 2c4739888c53871524eb883f884470932c93542a | /PyPoll/Main/main.py | 14dffa7ea8bf1fea7e838735332869024f228e15 | [] | no_license | ncastal/python-challenge | c3171530437cd499f40f27cdb0f66b8a336ec62a | cc4eee24dbbf411c067252e4ad5b04672f9d77bc | refs/heads/master | 2020-09-16T14:27:59.102292 | 2019-12-06T03:14:58 | 2019-12-06T03:14:58 | 223,798,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,865 | py | import os #importing libraries
import csv
# Tally election votes from a CSV (columns: voter id, county, candidate),
# print a summary, and write the same summary to a text file.
os.chdir(os.path.dirname(os.path.abspath(__file__)))  # change working directory to the one containing this script
election_csv = os.path.join("..","Resources","election_data.csv")  # input: election_data.csv
election_result = os.path.join("..","Output","election_result.txt")  # output: election result text file
print("Election Results")
print("-------------------------")
with open(election_csv, newline="") as csvfile:  # reading election_data.csv
    csvreader = csv.reader(csvfile, delimiter=",")
    csv_header = next(csvfile)  # skipping header
    total_votes=0  # running count of all ballots
    # Per-candidate counters (candidate names are hard-coded to this dataset).
    khan_votes=0
    correy_votes=0
    li_votes=0
    otooley_votes=0
    for row in csvreader:
        total_votes=total_votes+1  # counting total votes
        if row[2]=="Khan":  # row[2] is the candidate column
            khan_votes=khan_votes+1
        if row[2]=="Correy":
            correy_votes=correy_votes+1
        if row[2]=="Li":
            li_votes=li_votes+1
        if row[2]=="O'Tooley":
            otooley_votes=otooley_votes+1
# Map candidate -> vote count so the winner can be found with max().
canidates={"Khan":khan_votes,"Correy":correy_votes,"Li":li_votes,"O'Tooley":otooley_votes}
# Percentage of the vote each candidate received, rounded to 2 decimals.
percent_khan = round((khan_votes/total_votes)*100,2)
percent_correy = round((correy_votes/total_votes)*100,2)
percent_li = round((li_votes/total_votes)*100,2)
percent_otooley=round((otooley_votes/total_votes)*100,2)
# Print the results to the console.
print(f"Total votes: {total_votes}")
print("-------------------------")
print(f"Khan: {percent_khan}% ({khan_votes})")
print(f"Correy: {percent_correy}% ({correy_votes})")
print(f"Li: {percent_li}% ({li_votes})")
print(f"O'Tooley: {percent_otooley}% ({otooley_votes})")
print("-------------------------")
winner = max(canidates, key=canidates.get)  # candidate with the most votes
print(f"Winner: {winner}")
print("-------------------------")
with open(election_result, 'w') as writer:  # writing the results to a text file
    writer.writelines('Election Results\n')
    writer.writelines(f"Total votes: {total_votes}\n")
    writer.writelines("-------------------------\n")
    writer.writelines(f"Khan: {percent_khan}% ({khan_votes})\n")
    writer.writelines(f"Correy: {percent_correy}% ({correy_votes})\n")
    writer.writelines(f"Li: {percent_li}% ({li_votes})\n")
    writer.writelines(f"O'Tooley: {percent_otooley}% ({otooley_votes})\n")
    writer.writelines("-------------------------\n")
    writer.writelines(f"Winner: {winner}\n")
    writer.writelines("-------------------------\n")
"nick.cast89@gmail.com"
] | nick.cast89@gmail.com |
327de0fb6195fa9d70bb2f59a1b649c60f9ad8da | 31900bdf5648061a3093230711c5394e20b90436 | /usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/additions/porn/cliphunter.py | f0ad44408f3a495045054598cafc29b1ceb97fb7 | [] | no_license | linuxbox10/enigma2-plugin-extensions-mediaportal | aa6f14ecfc42ce91e22c487070541459a1ab820c | e6b388918c186442718e7200e03c83d0db260831 | refs/heads/master | 2021-05-01T18:50:50.332850 | 2018-02-10T11:33:48 | 2018-02-10T11:33:48 | 121,009,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,746 | py | # -*- coding: utf-8 -*-
###############################################################################################
#
# MediaPortal for Dreambox OS
#
# Coded by MediaPortal Team (c) 2013-2018
#
# This plugin is open source but it is NOT free software.
#
# This plugin may only be distributed to and executed on hardware which
# is licensed by Dream Property GmbH. This includes commercial distribution.
# In other words:
# It's NOT allowed to distribute any parts of this plugin or its source code in ANY way
# to hardware which is NOT licensed by Dream Property GmbH.
# It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way
# on hardware which is NOT licensed by Dream Property GmbH.
#
# This applies to the source code as a whole as well as to parts of it, unless
# explicitely stated otherwise.
#
# If you want to use or modify the code or parts of it,
# you have to keep OUR license and inform us about the modifications, but it may NOT be
# commercially distributed other than under the conditions noted above.
#
# As an exception regarding execution on hardware, you are permitted to execute this plugin on VU+ hardware
# which is licensed by satco europe GmbH, if the VTi image is used on that hardware.
#
# As an exception regarding modifcations, you are NOT permitted to remove
# any copy protections implemented in this plugin or change them for means of disabling
# or working around the copy protections, unless the change has been explicitly permitted
# by the original authors. Also decompiling and modification of the closed source
# parts is NOT permitted.
#
# Advertising with this plugin is NOT allowed.
# For other uses, permission from the authors is necessary.
#
###############################################################################################
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'
default_cover = "file://%s/cliphunter.png" % (config.mediaportal.iconcachepath.value + "logos")
class cliphunterGenreScreen(MPScreen):
    """Top-level category screen: scrapes the site's category page and adds
    fixed entries (search, newest, top-N lists, pornstars)."""

    def __init__(self, session):
        MPScreen.__init__(self, session, skin='MP_PluginDescr', default_cover=default_cover)

        # Remote-control key bindings.
        self["actions"] = ActionMap(["MP_Actions"], {
            "ok" : self.keyOK,
            "0" : self.closeAll,
            "cancel" : self.keyCancel,
            "up" : self.keyUp,
            "down" : self.keyDown,
            "right" : self.keyRight,
            "left" : self.keyLeft
        }, -1)

        self['title'] = Label("cliphunter.com")
        self['ContentTitle'] = Label("Genre:")

        self.keyLocked = True  # block input until the genre list is loaded
        self.suchString = ''
        self.genreliste = []
        self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
        self['liste'] = self.ml

        self.onLayoutFinish.append(self.layoutFinished)

    def layoutFinished(self):
        # Fetch the categories page; genreData parses the response.
        url = "http://www.cliphunter.com/categories/"
        getPage(url, agent=agent).addCallback(self.genreData).addErrback(self.dataError)

    def genreData(self, data):
        """Parse the category links out of the HTML and build the menu."""
        Cats = re.findall(' <a href="(/categories/.*?)" title="(.*?)">.*?<img src="(.*?)"/>', data, re.S)
        if Cats:
            for (Url, Title, Image) in Cats:
                Url = 'http://www.cliphunter.com%s/' % Url.replace(' ','%20')
                if not Title == "All":  # "All" is added manually as "Newest" below
                    self.genreliste.append((Title, Url, Image))
        self.genreliste.sort()
        # Static entries prepended in reverse order, so "--- Search ---" ends up first.
        self.genreliste.insert(0, ("Pornstars", 'http://www.cliphunter.com/pornstars/top/overview/', default_cover))
        self.genreliste.insert(0, ("Top Year", 'http://www.cliphunter.com/popular/ratings/year/', default_cover))
        self.genreliste.insert(0, ("Top Month", 'http://www.cliphunter.com/popular/ratings/month/', default_cover))
        self.genreliste.insert(0, ("Top Week", 'http://www.cliphunter.com/popular/ratings/week/', default_cover))
        self.genreliste.insert(0, ("Top Yesterday", 'http://www.cliphunter.com/popular/ratings/yesterday/', default_cover))
        self.genreliste.insert(0, ("Top Today", 'http://www.cliphunter.com/popular/ratings/today/', default_cover))
        self.genreliste.insert(0, ("Hall of Fame", 'http://www.cliphunter.com/popular/ratings/all/', default_cover))
        self.genreliste.insert(0, ("Newest", 'http://www.cliphunter.com/categories/All/', default_cover))
        self.genreliste.insert(0, ("--- Search ---", "callSuchen", default_cover))
        self.ml.setList(map(self._defaultlistcenter, self.genreliste))
        self.ml.moveToIndex(0)
        self.keyLocked = False
        self.showInfos()

    def showInfos(self):
        # Show the cover image of the highlighted entry.
        Image = self['liste'].getCurrent()[0][2]
        CoverHelper(self['coverArt']).getCover(Image)

    def keyOK(self):
        """Open the selected entry: search dialog, pornstar list, or film list."""
        if self.keyLocked:
            return
        Name = self['liste'].getCurrent()[0][0]
        Link = self['liste'].getCurrent()[0][1]
        if Name == "--- Search ---":
            self.suchen()  # provided by MPScreen; results arrive via SuchenCallback
        elif Name == "Pornstars":
            self.session.open(cliphunterPornstarScreen, Link, Name)
        else:
            self.session.open(cliphunterFilmScreen, Link, Name)

    def SuchenCallback(self, callback = None, entry = None):
        """Callback from the search dialog; opens the film list with the query."""
        if callback is not None and len(callback):
            self.suchString = callback.replace(' ', '%20')  # URL-encode spaces
            Link = '%s' % (self.suchString)
            Name = "--- Search ---"
            self.session.open(cliphunterFilmScreen, Link, Name)
class cliphunterPornstarScreen(MPScreen, ThumbsHelper):
    """Paginated pornstar overview screen; selecting an entry opens that
    performer's movie list."""

    def __init__(self, session, Link, Name):
        self.Link = Link  # base URL; the page number is appended in loadPage
        self.Name = Name
        MPScreen.__init__(self, session, skin='MP_PluginDescr', default_cover=default_cover)
        ThumbsHelper.__init__(self)

        # Remote-control key bindings, including paging and thumbnail view.
        self["actions"] = ActionMap(["MP_Actions"], {
            "ok" : self.keyOK,
            "0" : self.closeAll,
            "cancel" : self.keyCancel,
            "5" : self.keyShowThumb,
            "up" : self.keyUp,
            "down" : self.keyDown,
            "right" : self.keyRight,
            "left" : self.keyLeft,
            "nextBouquet" : self.keyPageUp,
            "prevBouquet" : self.keyPageDown,
            "green" : self.keyPageNumber
        }, -1)

        self['title'] = Label("cliphunter.com")
        self['ContentTitle'] = Label("Genre: %s" % self.Name)
        self['F2'] = Label(_("Page"))
        self['Page'] = Label(_("Page:"))

        self.keyLocked = True
        self.page = 1
        self.lastpage = 1
        self.genreliste = []
        self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
        self['liste'] = self.ml

        self.onLayoutFinish.append(self.loadPage)

    def loadPage(self):
        """Fetch the current result page asynchronously."""
        self.keyLocked = True
        self.genreliste = []
        url = "%s%s" % (self.Link, str(self.page))
        getPage(url, agent=agent).addCallback(self.genreData).addErrback(self.dataError)

    def genreData(self, data):
        """Parse pornstar entries (link, thumbnail, name) from the photo grid."""
        self.getLastPage(data, '', 'maxPages="(.*?)"')  # update pagination from the page source
        Parse = re.search('photoGrid">(.*?)class="clearfix">', data, re.S)
        Cats = re.findall('href="(.*?)">.*?src=\'(.*?)\'/>.*?<span>(.*?)</span>', Parse.group(1), re.S)
        if Cats:
            for (Url, Image, Title) in Cats:
                Url = "http://www.cliphunter.com" + Url + "/movies/"
                self.genreliste.append((Title.title(), Url, Image))
        self.ml.setList(map(self._defaultlistleft, self.genreliste))
        self.ml.moveToIndex(0)
        self.keyLocked = False
        # Preload thumbnails for the thumbnail overview ("5" key).
        self.th_ThumbsQuery(self.genreliste, 0, 1, 2, None, None, self.page, int(self.lastpage), mode=1)
        self.showInfos()

    def showInfos(self):
        # Show name and cover of the highlighted performer.
        Title = self['liste'].getCurrent()[0][0]
        Image = self['liste'].getCurrent()[0][2]
        self['name'].setText(Title)
        CoverHelper(self['coverArt']).getCover(Image)

    def keyOK(self):
        # Open the selected performer's movie list.
        if self.keyLocked:
            return
        Name = self['liste'].getCurrent()[0][0]
        Link = self['liste'].getCurrent()[0][1]
        self.session.open(cliphunterFilmScreen, Link, Name)
class cliphunterFilmScreen(MPScreen, ThumbsHelper):
    """Paginated movie list screen; selecting a movie resolves its obfuscated
    stream URL and opens the player."""

    def __init__(self, session, Link, Name):
        self.Link = Link  # category/search base URL or search term
        self.Name = Name  # genre title, or "--- Search ---" for search results
        MPScreen.__init__(self, session, skin='MP_PluginDescr', default_cover=default_cover)
        ThumbsHelper.__init__(self)

        # Remote-control key bindings, including paging and thumbnail view.
        self["actions"] = ActionMap(["MP_Actions"], {
            "ok" : self.keyOK,
            "0" : self.closeAll,
            "cancel" : self.keyCancel,
            "5" : self.keyShowThumb,
            "up" : self.keyUp,
            "down" : self.keyDown,
            "right" : self.keyRight,
            "left" : self.keyLeft,
            "nextBouquet" : self.keyPageUp,
            "prevBouquet" : self.keyPageDown,
            "green" : self.keyPageNumber
        }, -1)

        self['title'] = Label("cliphunter.com")
        self['ContentTitle'] = Label("Genre: %s" % self.Name)
        self['F2'] = Label(_("Page"))
        self['Page'] = Label(_("Page:"))

        self.keyLocked = True
        self.page = 1
        self.lastpage = 1
        self.filmliste = []
        self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
        self['liste'] = self.ml

        self.onLayoutFinish.append(self.loadPage)

    def loadPage(self):
        """Fetch the current page; search mode uses a different URL scheme."""
        self.keyLocked = True
        self['name'].setText(_('Please wait...'))
        self.filmliste = []
        if re.match(".*?Search", self.Name):
            url = "http://www.cliphunter.com/search/%s/%s" % (self.Link, str(self.page))
        else:
            url = "%s%s" % (self.Link, str(self.page))
        getPage(url, agent=agent).addCallback(self.loadData).addErrback(self.dataError)

    def loadData(self, data):
        """Parse movie entries (link, thumbnail, runtime, title) from the HTML."""
        self.getLastPage(data, '', 'maxPages="(.*?)"')  # update pagination
        Movies = re.findall('class="t"\shref="(/w/\d+/(.*?))".*?class="i"\ssrc="(.*?)".*?class="tr">(.*?)</div>.*?class="vttl.*?">(.*?)</a>', data, re.S)
        if Movies:
            for (Url, TitleUrl, Image, Runtime, Title) in Movies:
                Url = "http://www.cliphunter.com" + Url
                # The URL slug is used as the display title (underscores -> spaces).
                self.filmliste.append((TitleUrl.replace('_',' '), Url, Image, Runtime))
        if len(self.filmliste) == 0:
            self.filmliste.append((_('No videos found!'), '', None, ''))
        self.ml.setList(map(self._defaultlistleft, self.filmliste))
        self.ml.moveToIndex(0)
        self.keyLocked = False
        # Preload thumbnails for the thumbnail overview ("5" key).
        self.th_ThumbsQuery(self.filmliste, 0, 1, 2, None, None, self.page, int(self.lastpage), mode=1)
        self.showInfos()

    def showInfos(self):
        # Show title, runtime and cover of the highlighted movie.
        title = self['liste'].getCurrent()[0][0]
        url = self['liste'].getCurrent()[0][1]
        pic = self['liste'].getCurrent()[0][2]
        runtime = self['liste'].getCurrent()[0][3]
        self['handlung'].setText("Runtime: %s" % runtime)
        self['name'].setText(title)
        CoverHelper(self['coverArt']).getCover(pic)

    def keyOK(self):
        # Fetch the selected movie's page to extract the stream URL.
        if self.keyLocked:
            return
        Link = self['liste'].getCurrent()[0][1]
        self.keyLocked = True
        getPage(Link, agent=agent).addCallback(self.getVideoPage).addErrback(self.dataError)

    def getVideoPage(self, data):
        """De-obfuscate the stream URL embedded in the page and start playback.

        The site encodes the URL with a character-substitution cipher; the
        table below maps each obfuscated character back to the real one.
        """
        url = re.findall('"url":"(.*?)"}', data, re.S)
        if url:
            url = url[-1]  # the last match is presumably the highest quality — TODO confirm
            url = url.replace('\u0026', '.')
            translation_table = {
                'a': 'h', 'd': 'e', 'e': 'v', 'f': 'o', 'g': 'f', 'i': 'd', 'l': 'n',
                'm': 'a', 'n': 'm', 'p': 'u', 'q': 't', 'r': 's', 'v': 'p', 'x': 'r',
                'y': 'l', 'z': 'i',
                '$': ':', '&': '.', '(': '=', '^': '&', '=': '/',
            }
            # Characters not in the table pass through unchanged.
            url = ''.join(translation_table.get(c, c) for c in url)
            self.keyLocked = False
            Title = self['liste'].getCurrent()[0][0]
            self.session.open(SimplePlayer, [(Title, url)], showPlaylist=False, ltype='cliphunter')
"jaysmith940@hotmail.co.uk"
] | jaysmith940@hotmail.co.uk |
ac05dc636b481b86f4960bbd377201c4bffdcfe9 | 569498d8a61dd6cdaa515165415d3a72c5dbf0c5 | /part 2/050.py | a014890e32dbfcda0d6f27e272044b4dc8cee371 | [] | no_license | baikzzi/python | 068e0b05ea13ab30a3798e97a25f9ea48084c36b | 112b4565d8bc09eb08b9c152eaf2dd6dd0ba8b35 | refs/heads/master | 2023-03-04T20:45:03.028018 | 2021-02-15T01:53:04 | 2021-02-15T01:53:04 | 338,942,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | class MyClass:
var = '안녕하세요'
def sayHello(self):
param1 = '안녕'
self.param2 = '하이'
print(param1) #'안녕'이 출력됨
print(self.var) #'안녕하세요'가 출력됨
obj = MyClass()
print(obj.var) #'안녕하세요'가 출력됨
obj.sayHello()
#obj.param1 | [
"qorwlghks120@gmail.com"
] | qorwlghks120@gmail.com |
afc1a666a4f46b1ed0c0e581248921cb5c0ed709 | 3a9ab7e571e992b7af50c23673cb9b31971a8868 | /python essentials/indexing and slicing/indexing.py | 45ac5d5eedadc0db9495a51a432e1fd02c3abf73 | [] | no_license | chenkeyu1997/python | 6e3f3997aa62d245699e1b9066c201a2b015c434 | 9a638242b06735cfa48ad074909842452b104651 | refs/heads/master | 2022-12-18T11:07:10.126932 | 2020-09-21T06:00:05 | 2020-09-21T06:00:05 | 296,557,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | s='hello world'
print(s[0])
"""负向索引,负几则表示倒数第几个元素"""
print(s[-2])
try:
print(s[123])
except IndexError:
print("string index out of range")
| [
"cky123"
] | cky123 |
a66199c42e47bad003812f72f14cdc6b3c5af967 | 54b52c70ebf3b3c17c72f7c5cc8219a060c1ffdf | /main/migrations/0001_initial.py | d78f2b40b82a4ca0837be9975207f8df6bde8129 | [] | no_license | arslan77/learnobot | e892d990736bd4a882ba6786cfbe00dc00acd326 | 7646cde351231a138b6e30529690f76b3989837e | refs/heads/master | 2020-03-29T10:40:46.554285 | 2018-09-24T03:53:48 | 2018-09-24T03:53:48 | 149,817,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,236 | py | # Generated by Django 2.1.1 on 2018-09-20 23:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('Course', '0004_auto_20180921_0417'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='MyCourse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('course_completed', models.BooleanField(default=False)),
('percentage', models.IntegerField()),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Course.Course')),
('current_course_work', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Course.CourseWork')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='MyQuiz',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('marks', models.IntegerField(blank=True, null=True)),
('course_work', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Course.CourseWork')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='MyQuizDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('selected_option', models.CharField(blank=True, max_length=5, null=True)),
('right_option', models.CharField(blank=True, max_length=5, null=True)),
('is_right', models.BooleanField(blank=True, default=False, null=True)),
('myQuiz', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.MyQuiz')),
],
),
]
| [
"arslanarshad07@gmail.com"
] | arslanarshad07@gmail.com |
36303e57f19d8642e572535bcda07112eb1b0a31 | e1266f257c741395be3f9f3fe02c34a652c3612e | /scrapyP1/scrapyP1/settings.py | 3b8e8c21ffcf3c53286d11e9b94ccd902342b438 | [] | no_license | zhaocc1106/Web-spider | 94ffadcaadeb38fbdc1da8f5841a5a623565b236 | ed0de975c22c2d7235dfc668b99ea946ad99a6c5 | refs/heads/master | 2020-04-09T04:01:49.756553 | 2018-12-02T02:51:51 | 2018-12-02T02:51:51 | 160,007,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,091 | py | # -*- coding: utf-8 -*-
# Scrapy settings for scrapyP1 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'scrapyP1'
SPIDER_MODULES = ['scrapyP1.spiders']
NEWSPIDER_MODULE = 'scrapyP1.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'scrapyP1 (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'scrapyP1.middlewares.Scrapyp1SpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'scrapyP1.middlewares.Scrapyp1DownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'scrapyP1.pipelines.Scrapyp1Pipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"450959507@qq.com"
] | 450959507@qq.com |
a6ae38ab912151500b6230c13145389b39b83dc1 | 01677f99acef3c457b49804990321a7db10f35f7 | /LaneCV.py | c11a6e30953006fb4eb6b4b0e2e619d2383e0909 | [] | no_license | pranitdemiri/Cap | d56569b75b17bcba23d13e4177d4f457965a82cd | 5eb2111b9e0d17ab3ec85e939f8986309210972e | refs/heads/master | 2023-01-06T17:43:58.960119 | 2020-10-21T21:20:39 | 2020-10-21T21:20:39 | 306,150,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,321 | py | import cv2
import numpy as np
import logging
import math
_SHOW_IMAGE = False
class HandCodedLaneFollower(object):
def __init__(self, car=None):
logging.info('Creating a HandCodedLaneFollower...')
self.car = car
self.curr_steering_angle = 90
def follow_lane(self, frame):
# Main entry point of the lane follower
show_image("orig", frame)
lane_lines, frame = detect_lane(frame)
final_frame = self.steer(frame, lane_lines)
return final_frame
def steer(self, frame, lane_lines):
logging.debug('steering...')
if len(lane_lines) == 0:
logging.error('No lane lines detected, nothing to do.')
return frame
new_steering_angle = compute_steering_angle(frame, lane_lines)
self.curr_steering_angle = stabilize_steering_angle(self.curr_steering_angle, new_steering_angle,
len(lane_lines))
if self.car is not None:
self.car.front_wheels.turn(self.curr_steering_angle)
curr_heading_image = display_heading_line(frame, self.curr_steering_angle)
show_image("heading", curr_heading_image)
return curr_heading_image
############################
# Frame processing steps
############################
def detect_lane(frame):
logging.debug('detecting lane lines...')
edges = detect_edges(frame)
show_image('edges', edges)
cropped_edges = region_of_interest(edges)
show_image('edges cropped', cropped_edges)
line_segments = detect_line_segments(cropped_edges)
line_segment_image = display_lines(frame, line_segments)
show_image("line segments", line_segment_image)
lane_lines = average_slope_intercept(frame, line_segments)
lane_lines_image = display_lines(frame, lane_lines)
show_image("lane lines", lane_lines_image)
return lane_lines, lane_lines_image
#OPEN CV STARTS HERE
def detect_edges(frame):
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
show_image("hsv", hsv)
lower_blue = np.array([30, 40, 0])
upper_blue = np.array([150, 255, 255])
mask = cv2.inRange(hsv, lower_blue, upper_blue)
show_image("blue mask", mask)
# detect edges
edges = cv2.Canny(mask, 200, 400)
return edges
def detect_edges_old(frame):
# filter for blue lane lines
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
show_image("hsv", hsv)
for i in range(16):
lower_blue = np.array([30, 16 * i, 0])
upper_blue = np.array([150, 255, 255])
mask = cv2.inRange(hsv, lower_blue, upper_blue)
show_image("blue mask Sat=%s" % (16 * i), mask)
edges = cv2.Canny(mask, 200, 400)
return edges
#this is for ROI
def region_of_interest(canny):
height, width = canny.shape
mask = np.zeros_like(canny)
polygon = np.array([[
(0, height * 1 / 2),
(width, height * 1 / 2),
(width, height),
(0, height),
]], np.int32)
cv2.fillPoly(mask, polygon, 255)
show_image("mask", mask)
masked_image = cv2.bitwise_and(canny, mask)
return masked_image
def detect_line_segments(cropped_edges):
rho = 1 # precision in pixel, i.e. 1 pixel
angle = np.pi / 180 # degree in radian, i.e. 1 degree
min_threshold = 10 # minimal of votes
line_segments = cv2.HoughLinesP(cropped_edges, rho, angle, min_threshold, np.array([]), minLineLength=8,
maxLineGap=4)
if line_segments is not None:
for line_segment in line_segments:
logging.debug('detected line_segment:')
logging.debug("%s of length %s" % (line_segment, length_of_line_segment(line_segment[0])))
return line_segments
#This is for the left and right lane
def average_slope_intercept(frame, line_segments):
lane_lines = []
if line_segments is None:
logging.info('No line_segment segments detected')
return lane_lines
height, width, _ = frame.shape
left_fit = []
right_fit = []
boundary = 1 / 3
left_region_boundary = width * (1 - boundary) # left lane line segment should be on left 2/3 of the screen
right_region_boundary = width * boundary # right lane line segment should be on left 2/3 of the screen
for line_segment in line_segments:
for x1, y1, x2, y2 in line_segment:
if x1 == x2:
logging.info('skipping vertical line segment (slope=inf): %s' % line_segment)
continue
fit = np.polyfit((x1, x2), (y1, y2), 1)
slope = fit[0]
intercept = fit[1]
if slope < 0:
if x1 < left_region_boundary and x2 < left_region_boundary:
left_fit.append((slope, intercept))
else:
if x1 > right_region_boundary and x2 > right_region_boundary:
right_fit.append((slope, intercept))
left_fit_average = np.average(left_fit, axis=0)
if len(left_fit) > 0:
lane_lines.append(make_points(frame, left_fit_average))
right_fit_average = np.average(right_fit, axis=0)
if len(right_fit) > 0:
lane_lines.append(make_points(frame, right_fit_average))
logging.debug('lane lines: %s' % lane_lines) # [[[316, 720, 484, 432]], [[1009, 720, 718, 432]]]
return lane_lines
def compute_steering_angle(frame, lane_lines):
if len(lane_lines) == 0:
logging.info('No lane lines detected, do nothing')
return -90
height, width, _ = frame.shape
if len(lane_lines) == 1:
logging.debug('Only detected one lane line, just follow it. %s' % lane_lines[0])
x1, _, x2, _ = lane_lines[0][0]
x_offset = x2 - x1
else:
_, _, left_x2, _ = lane_lines[0][0]
_, _, right_x2, _ = lane_lines[1][0]
camera_mid_offset_percent = 0.02
mid = int(width / 2 * (1 + camera_mid_offset_percent))
x_offset = (left_x2 + right_x2) / 2 - mid
y_offset = int(height / 2)
angle_to_mid_radian = math.atan(x_offset / y_offset) # angle (in radian) to center vertical line
angle_to_mid_deg = int(angle_to_mid_radian * 180.0 / math.pi) # angle (in degrees) to center vertical line
steering_angle = angle_to_mid_deg + 90 # this is the steering angle needed by picar front wheel
logging.debug('new steering angle: %s' % steering_angle)
return steering_angle
def stabilize_steering_angle(curr_steering_angle, new_steering_angle, num_of_lane_lines,
max_angle_deviation_two_lines=5, max_angle_deviation_one_lane=1):
if num_of_lane_lines == 2:
# if both lane lines detected, then we can deviate more
max_angle_deviation = max_angle_deviation_two_lines
else:
# if only one lane detected, don't deviate too much
max_angle_deviation = max_angle_deviation_one_lane
angle_deviation = new_steering_angle - curr_steering_angle
if abs(angle_deviation) > max_angle_deviation:
stabilized_steering_angle = int(curr_steering_angle
+ max_angle_deviation * angle_deviation / abs(angle_deviation))
else:
stabilized_steering_angle = new_steering_angle
logging.info('Proposed angle: %s, stabilized angle: %s' % (new_steering_angle, stabilized_steering_angle))
return stabilized_steering_angle
############################
# Utility Functions
############################
def display_lines(frame, lines, line_color=(0, 255, 0), line_width=10):
line_image = np.zeros_like(frame)
if lines is not None:
for line in lines:
for x1, y1, x2, y2 in line:
cv2.line(line_image, (x1, y1), (x2, y2), line_color, line_width)
line_image = cv2.addWeighted(frame, 0.8, line_image, 1, 1)
return line_image
def display_heading_line(frame, steering_angle, line_color=(0, 0, 255), line_width=5, ):
heading_image = np.zeros_like(frame)
height, width, _ = frame.shape
steering_angle_radian = steering_angle / 180.0 * math.pi
x1 = int(width / 2)
y1 = height
x2 = int(x1 - height / 2 / math.tan(steering_angle_radian))
y2 = int(height / 2)
cv2.line(heading_image, (x1, y1), (x2, y2), line_color, line_width)
heading_image = cv2.addWeighted(frame, 0.8, heading_image, 1, 1)
return heading_image
def length_of_line_segment(line):
x1, y1, x2, y2 = line
return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
def show_image(title, frame, show=_SHOW_IMAGE):
if show:
cv2.imshow(title, frame)
def make_points(frame, line):
height, width, _ = frame.shape
slope, intercept = line
y1 = height # bottom of the frame
y2 = int(y1 * 1 / 2) # make points from middle of the frame down
# bound the coordinates within the frame
x1 = max(-width, min(2 * width, int((y1 - intercept) / slope)))
x2 = max(-width, min(2 * width, int((y2 - intercept) / slope)))
return [[x1, y1, x2, y2]]
############################
# Test Functions
############################
def test_photo(file):
land_follower = HandCodedLaneFollower()
frame = cv2.imread(file)
combo_image = land_follower.follow_lane(frame)
show_image('final', combo_image, True)
cv2.waitKey(0)
cv2.destroyAllWindows()
def test_video(video_file):
lane_follower = HandCodedLaneFollower()
cap = cv2.VideoCapture(video_file + '.avi')
# skip first second of video.
for i in range(3):
_, frame = cap.read()
video_type = cv2.VideoWriter_fourcc(*'XVID')
video_overlay = cv2.VideoWriter("%s_overlay.avi" % (video_file), video_type, 20.0, (320, 240))
try:
i = 0
while cap.isOpened():
_, frame = cap.read()
print('frame %s' % i)
combo_image = lane_follower.follow_lane(frame)
cv2.imwrite("%s_%03d_%03d.png" % (video_file, i, lane_follower.curr_steering_angle), frame)
cv2.imwrite("%s_overlay_%03d.png" % (video_file, i), combo_image)
video_overlay.write(combo_image)
cv2.imshow("Road with Lane line", combo_image)
i += 1
if cv2.waitKey(1) & 0xFF == ord('q'):
break
finally:
cap.release()
video_overlay.release()
cv2.destroyAllWindows()
| [
"pranitpokhrel@gmail.com"
] | pranitpokhrel@gmail.com |
aa51cfc869fb53ffd850f220d3edca3c6f687c19 | bb9f832674635e264e1950d041c03acdec7bd83e | /Day 27/main.py | d2dc8b3910956424adeeeaec0b72a63f4f5c1ce5 | [] | no_license | guilhermeaugusto9/100daysofpython | 4e8030a2ae63b5d210b72844e2dccd5cfcf95ee3 | 8100ac0c80ae6af8fb4af49fe5973b0b3575ee0d | refs/heads/master | 2023-08-11T16:22:22.681346 | 2021-10-11T14:40:34 | 2021-10-11T14:40:34 | 415,956,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,284 | py | from tkinter import *
# Creating a new window and configurations
window = Tk()
window.title("Widget Examples")
window.minsize(width=500, height=500)
# Labels
label = Label(text="This is old text")
label.config(text="This is new text")
label.pack()
# Buttons
def action():
print("Do something")
# calls action() when pressed
button = Button(text="Click Me", command=action)
button.pack()
# Entries
entry = Entry(width=30)
# Add some text to begin with
entry.insert(END, string="Some text to begin with.")
# Gets text in entry
print(entry.get())
entry.pack()
# Text
text = Text(height=5, width=30)
# Puts cursor in textbox.
text.focus()
# Adds some text to begin with.
text.insert(END, "Example of multi-line text entry.")
# Get's current value in textbox at line 1, character 0
print(text.get("1.0", END))
text.pack()
# Spinbox
def spinbox_used():
# gets the current value in spinbox.
print(spinbox.get())
spinbox = Spinbox(from_=0, to=10, width=5, command=spinbox_used)
spinbox.pack()
# Scale
# Called with current scale value.
def scale_used(value):
print(value)
scale = Scale(from_=0, to=100, command=scale_used)
scale.pack()
# Checkbutton
def checkbutton_used():
# Prints 1 if On button checked, otherwise 0.
print(checked_state.get())
# variable to hold on to checked state, 0 is off, 1 is on.
checked_state = IntVar()
checkbutton = Checkbutton(
text="Is On?", variable=checked_state, command=checkbutton_used)
checked_state.get()
checkbutton.pack()
# Radiobutton
def radio_used():
print(radio_state.get())
# Variable to hold on to which radio button value is checked.
radio_state = IntVar()
radiobutton1 = Radiobutton(text="Option1", value=1,
variable=radio_state, command=radio_used)
radiobutton2 = Radiobutton(text="Option2", value=2,
variable=radio_state, command=radio_used)
radiobutton1.pack()
radiobutton2.pack()
# Listbox
def listbox_used(event):
# Gets current selection from listbox
print(listbox.get(listbox.curselection()))
listbox = Listbox(height=4)
fruits = ["Apple", "Pear", "Orange", "Banana"]
for item in fruits:
listbox.insert(fruits.index(item), item)
listbox.bind("<<ListboxSelect>>", listbox_used)
listbox.pack()
window.mainloop()
| [
"guilherme.augusto9@outlook.com"
] | guilherme.augusto9@outlook.com |
2a1904b74022c083eb628ee0359acd8f7fb3c450 | 4b93aa80436d4683d0254c4bd4b1e95c41e8c6ce | /ZenPacks/community/zenAppProfiler/ProfileSets.py | 5e8074b104b9d14da519239077c4f3a688f2f899 | [] | no_license | j053ph4/ZenPacks.community.zenAppProfiler | 984d5aa04766cc6209ee76a055ceab3373baa0bd | e41d030d67b5df8e41a40f180fa1f57656f20a03 | refs/heads/master | 2021-01-22T22:39:16.448979 | 2011-10-20T20:39:36 | 2011-10-20T20:39:36 | 2,608,336 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,993 | py | import re
import os
import string
import Globals
from Products.ZenModel.ZenPackable import ZenPackable
from ProfileData import ProfileData
class ProfileSets(ZenPackable):
''' Class containing logic for evaluating rule outcomes.
'''
def __init__(self,dmd):
self.dmd = dmd
self.data = ProfileData(self.dmd)
def evalSets(self,sets,matchAll):
""" compute union or intersection of a set of sets
matchAll of True == intersection, union otherwise
"""
resultSet = None
for i in range(len(sets)):
setI = sets[i]
if resultSet == None:
resultSet = setI
for j in range(i):
setJ = sets[j]
if matchAll == True:
matchSet = setI.intersection(setJ)
resultSet = resultSet.intersection(matchSet)
else:
matchSet = setI.union(setJ)
resultSet = resultSet.union(matchSet)
return resultSet
def evalRulesets(self,rulesets,matchAll):
""" evaluate multiple rulesets
"""
sets = []
for ruleset in rulesets:
rset = self.evalRuleset(ruleset)
sets.append(rset)
return self.evalSets(sets,matchAll)
def evalRuleset(self,ruleset,easy=False):
""" evaluate all rules in a ruleset, return set of matching devices
"""
sets = [] # array containing sets of rule-matched devices
if ruleset != None:
rules = ruleset.rules()
for rule in rules:
results = []
if rule.enabled == True:
if easy == True:
results = self.evalRuleSimple(rule)
else:
results = self.evalRule(rule)
#if len(results) > 0:
sets.append(results)
if len(sets) > 0:
return self.evalSets(sets,ruleset.matchAll)
else:
"returning sets"
return sets
def evalRuleSimple(self,rule):
""" faster testing assuming that matches are already built
"""
ruleMatches = []
ruleMatches += rule.getRulePotentialMatches()
ruleMatches += rule.getRuleCurrentMatches()
return set(ruleMatches)
def evalRule(self,rule):
""" evaluate a rule, return set of matching devices
"""
if rule.ruleKey == 'Ruleset':
ruleSet = self.dmd.Profiles.findRuleset(rule.ruleValue)
return self.evalRuleset(ruleSet,easy=True)
ruleMatches = set()
for device in self.dmd.Devices.getSubDevices():
if self.data.evalRuleOnDevice(rule,device) == True:
ruleMatches.add(device)
return ruleMatches
def evalRuleComponents(self,rule,devices,getAll=True):
""" evaluate a rule, return set of matching components
"""
components = []
if rule.ruleKey != 'System' and rule.ruleKey != 'Group' and rule.ruleKey != 'Ruleset' and rule.ruleKey != 'Location' and rule.ruleKey != 'Device':
for device in devices:
components += self.data.evalRuleWithObjects(rule,device)
if rule.ruleKey == 'Ruleset':
rs = self.dmd.Profiles.findRuleset(rule.ruleValue)
if getAll == True:
components += self.getRulesetComponents(rs,devices)
else:
components += self.getRulesetFilteredComponents(rs,devices)
#print "found",len(components),"components on rule",rule.ruleKey,rule.ruleValue,"for",len(devices),"devices"
rule.ruleComponents = components
return components
def getRulesetComponents(self,ruleset,devices):
print "components on ruleset",ruleset.id,"for",len(devices),"devices"
components = []
for rule in ruleset.rules():
if rule.ruleKey != 'System' and rule.ruleKey != 'Group' and rule.ruleKey != 'Location' and rule.ruleKey != 'Device':
comps = self.evalRuleComponents(rule,devices)
components += comps
#print "found",len(components),"components"
return components
def getRulesetFilteredComponents(self,ruleset,devices):
#print "components on ruleset",ruleset.id,"for",len(devices),"devices"
componentsets = []
for rule in ruleset.rules():
if rule.ruleKey != 'System' and rule.ruleKey != 'Group' and rule.ruleKey != 'Location' and rule.ruleKey != 'Device':
comps = self.evalRuleComponents(rule,devices,False)
if len(comps) > 0:
componentsets.append(set(comps))
rulesetcomponents = self.evalSets(componentsets,ruleset.matchAll)
#print "set of rs components",len(rulesetcomponents)
if rulesetcomponents != None:
return rulesetcomponents
else:
return []
| [
"janderson@atxg.com"
] | janderson@atxg.com |
f71f2963e3eed53924dacf78596b44a5360a72d6 | ea7555f1a0ae52a3f97268add832ba1e45d23c8e | /plate_tracker.py | e0573c66651ed579341fc45b01fe37f2c9464008 | [] | no_license | alexjfreij/Projects | bae27c580b29e9d40aa2071ac79afbc35d97838f | 71ba51929e62ff2ae0d3de4910bb32ce500b372f | refs/heads/master | 2021-05-25T21:17:02.814498 | 2020-04-07T22:19:10 | 2020-04-07T22:19:10 | 253,923,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,545 | py | # Program to read a jpg files and convert it to one file
# with data that has a time stamp from the file and convert
# the content of the file into a character string of license plate
# and date and time to process
#
#
import os, datetime, time
from datetime import datetime
from os.path import getmtime
import argparse, csv, struct, array, sys, operator, binascii, keyword, os, string
import subprocess, re, codecs
from subprocess import Popen, PIPE
from shutil import copyfile
#entries = os.listdir('plates/')
#for entry in entries:
# print(entry)
# detect car coming in park
#
# Take a snapshot
#
file="arabic.jpg"
file="alex.jpg"
# get time and date
#******************
str1 = time.ctime(os.path.getctime(file)) # retrieve Date and Time
datetime_object = datetime.strptime(str1, '%a %b %d %H:%M:%S %Y')
# print (datetime_object)
line = datetime_object.strftime("%m/%d/%Y %H:%M:%S \n \n" ) # Date format change to 06/07/2013
#print (datetime_object.strftime)
#image_lp = open(file, 'rb')
#get license plates
#******************
command ="alpr.exe -c eu -n 1 " + file +" >> text.txt"
subprocess.call(command, shell=True)
#with open('text.txt', 'r') as f: # load file
# lines = f.read().splitlines() # read lines
# lines = lines[10:10]
#print (lines)
f = open('text.txt','r')
lines =f.read()
lines = lines[24:33]
f.close()
command ="del text.txt"
subprocess.call(command, shell=True)
fw = open("tracker.txt", "a")
fw.write(lines+line)
fw.close() | [
"noreply@github.com"
] | alexjfreij.noreply@github.com |
4296ad62182dde60033292fbec716ff5b8b146d6 | fe45c03ac8ee55122e89b4ee2e541cd196c43b93 | /Coffee/apps.py | a1e1644ad55807044d97515b49cbaabd4c882566 | [] | no_license | ssemtner/CheckYoSelf | 593a23b25d36ce7e5550822364b713ec3cfa7a51 | 36b497df3cd87af03b77476d393b56d3ba436edc | refs/heads/main | 2023-02-16T09:23:26.179680 | 2021-01-13T18:23:03 | 2021-01-13T18:23:03 | 312,876,268 | 1 | 1 | null | 2020-12-10T02:25:52 | 2020-11-14T18:22:38 | Python | UTF-8 | Python | false | false | 87 | py | from django.apps import AppConfig
class CoffeeConfig(AppConfig):
name = 'Coffee'
| [
"sjsemtner@gmail.com"
] | sjsemtner@gmail.com |
eaafa5a0d6f534e474290d094f533956580c7495 | 689fcced10cc920c263e4d85bed5a51f85c76abb | /aragwas_server/aragwas/settings/dev.py | 8c4d30842e367920ea352a2e0638d1fd37694449 | [
"MIT"
] | permissive | 1001genomes/AraGWAS | ddb10ea3e476c8cee31e75f9db6dc2bd79f7f487 | be02c0480bf18228b07853740e63f249fe31d7e5 | refs/heads/master | 2022-12-29T00:12:59.936918 | 2020-08-31T16:32:04 | 2020-08-31T16:32:04 | 82,693,787 | 13 | 9 | MIT | 2022-12-06T20:20:08 | 2017-02-21T15:11:31 | Vue | UTF-8 | Python | false | false | 554 | py | """
Development settings using sqlite3 and DEBUG = TRUE
"""
import os
# Load defaults in order to then add/override with dev-only settings
from .defaults import *
DEBUG = True
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
DATACITE_REST_URL='https://mds.test.datacite.org/'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
| [
"uemit.seren@gmail.com"
] | uemit.seren@gmail.com |
b1bd3b08a695c7d52e2546ee8ac9436a091b47ac | 22a7e337eca6a244205b48e4d06cca67f2b5033a | /SimpleCode/PY_CookBook/chapter8/ceshi_yield.py | 06b50f2eafeb4ee2b61554155e44228699ac437a | [] | no_license | chen19901225/SimplePyCode | 48f373f66e486276ed6603b7d0d8e388bd236a6c | 8276c660e7663688d3d381391a77a50f90e61afa | refs/heads/master | 2021-01-01T19:34:57.697967 | 2014-11-12T10:05:14 | 2014-11-12T10:05:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py |
def echo(value=None):
print "Execution starts when 'next()' is called for the first time."
try:
while True:
try:
value=(yield value)
except Exception,e:
value=e
finally:
print "Don't forget to clean up when 'close()' is called."
geneator=echo(1)
print geneator.next()
print geneator.next()
print geneator.send(2)
| [
"guoning.leng@o2omobi.com"
] | guoning.leng@o2omobi.com |
91e05bb48354f85dc8b7543296aeacca210c3e50 | 2fcef29373541c8707b4047c444cfd567aef03a4 | /src/bttracker/_version.py | 805186d291b945c1733ebe1b3ee68f7d3081fdab | [] | no_license | manjuladangalla/BtTracker | a6c94e752abfca89cad6b47e246847aec7203b3b | 9641d7f6ac62c91986c1fd83879c4852bf208448 | refs/heads/master | 2022-12-03T15:39:31.276673 | 2020-08-07T03:35:02 | 2020-08-07T03:35:02 | 285,728,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,442 | py |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These placeholder strings are substituted by git during git-archive
    # export.  'setup.py versioneer' greps for the variable names, so each
    # assignment must remain on its own line; _version.py just calls
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Plain attribute holder for the Versioneer configuration parameters."""
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These literal values are baked in when 'setup.py versioneer' generates
    # _version.py for this project.
    cfg = VersioneerConfig()
    for attr, value in (
        ("VCS", "git"),
        ("style", "pep440"),
        ("tag_prefix", ""),
        ("parentdir_prefix", "bttracker-"),
        ("versionfile_source", "src/bttracker/_version.py"),
        ("verbose", False),
    ):
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Signal that a particular version-discovery strategy does not apply."""
# Template store for the long-form _version.py text, keyed by VCS name;
# populated by versioneer's setup machinery, not used at runtime here.
LONG_VERSION_PY = {}
# Registry of VCS handler callables, filled in by @register_vcs_handler:
# HANDLERS[vcs][method] -> function.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        # Create the per-VCS sub-dict on first use, then register f under
        # the method name; the function itself is returned unchanged.
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s)."""
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        try:
            dispcmd = str([candidate] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [candidate] + args, cwd=cwd, env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None))
            break
        except EnvironmentError:
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                # This candidate is not installed; try the next one.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(err)
            return None, None
    if process is None:
        # Every candidate raised ENOENT, so nothing could be launched.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            # Everything after the prefix is taken as the version string.
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract expanded git-archive keyword values from *versionfile_abs*.

    The file may be a git-archive export in which the keyword assignments
    were substituted; read them textually so setup.py never has to import
    the module itself.  Returns a (possibly empty) dict.
    """
    keywords = {}
    wanted = (("git_refnames =", "refnames"),
              ("git_full =", "full"),
              ("git_date =", "date"))
    try:
        with open(versionfile_abs, "r") as fobj:
            for line in fobj:
                for marker, key in wanted:
                    if line.strip().startswith(marker):
                        mo = re.search(r'=\s*"(.*)"', line)
                        if mo:
                            keywords[key] = mo.group(1)
    except EnvironmentError:
        # Missing/unreadable file: behave as if no keywords were present.
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # Normalize git's "%ci" almost-ISO-8601 output ("YYYY-MM-DD HH:MM:SS
        # +ZZZZ") into a compliant timestamp.  "%cI" would emit this form
        # directly, but it only exists from git-2.2.0 on and detecting the
        # git version is not worth the trouble.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {ref.strip() for ref in refnames.strip("()").split(",")}
    # git >= 1.8.3 lists tags as "tag: foo-1.0" instead of plain "foo-1.0";
    # prefer those explicitly marked entries when present.
    TAG = "tag: "
    tags = {ref[len(TAG):] for ref in refs if ref.startswith(TAG)}
    if not tags:
        # Older git (or genuinely no tags): fall back to "any ref containing
        # a digit", which filters out branch names like "release", "master"
        # and "HEAD" that git's %d expansion mixes in.
        tags = {ref for ref in refs if re.search(r'\d', ref)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        version = ref[len(tag_prefix):]
        if verbose:
            print("picking %s" % version)
        return {"version": version,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False, "error": None,
                "date": date}
    # No usable tag: report the fallback version but keep the full hex id.
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "1.0",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, error, dirty,
    closest-tag, distance, date.  Raises NotThisMethod when *root* is not
    under git control or git itself cannot be run.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # With shell=False Windows will not resolve "git" by itself, so try
        # both the .cmd shim and the plain executable.
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        # Strip the suffix so the TAG-NUM-gHEX parsing below stays simple.
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    # NOTE(review): if 'git show' fails, run_command(...)[0] is None and
    # .strip() would raise; presumably unreachable because the checkout was
    # verified by 'rev-parse --git-dir' above -- confirm.
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return the separator to start a PEP 440 local-version segment.

    Returns "." when the closest tag already contains a "+" (a local
    segment already exists), otherwise "+".

    Bug fix: the previous ``pieces.get("closest-tag", "")`` only defended
    against a *missing* key, but callers store ``None`` under
    "closest-tag" for untagged checkouts, so ``"+" in None`` raised
    TypeError.  Treat a ``None`` value like an empty string as well.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build TAG[+DISTANCE.gHEX[.dirty]] (PEP 440 local version identifier).

    A build that is tagged and then dirtied renders as TAG+0.gHEX.dirty.

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
        return rendered
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        # Same separator rule as plus_or_dot(): "." if a local segment
        # already exists in the tag, otherwise "+".
        sep = "." if "+" in tag else "+"
        rendered += "%s%d.g%s" % (sep, pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty.  Note that .dev0 sorts backwards (a dirty
    tree appears "older" than the corresponding clean one), but you
    shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            # Same separator rule as plus_or_dot().
            separator = "." if "+" in tag else "+"
            rendered += "%sg%s" % (separator, pieces["short"])
        return rendered
    # exception #1
    rendered = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered + "+g%s" % pieces["short"]
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        suffix = ""
        if pieces["distance"] or pieces["dirty"]:
            suffix = ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                suffix += ".dev0"
        return tag + suffix
    # exception #1: untagged trees always carry the .post marker
    fallback = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        fallback += ".dev0"
    return fallback
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"]:
            out += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        out = pieces["short"]
    return out + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        out = pieces["short"]
    if pieces["dirty"]:
        out += "-dirty"
    return out
def render(pieces, style):
    """Render *pieces* into the requested version style."""
    if pieces["error"]:
        # Parsing failed earlier; surface the error instead of a version.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so.

    Tries three strategies in order: expanded git-archive keywords,
    'git describe' on a checked-out tree, then the unpacked-tarball parent
    directory name.  Always returns a dict; never raises.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # Strategy 1: keywords substituted by 'git archive'.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # Frozen interpreters (py2exe etc.) may not define __file__ at all.
        # NOTE(review): the "1.0" fallback differs from stock versioneer's
        # "0+unknown"; presumably customized on purpose -- confirm.
        return {"version": "1.0", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    # Strategy 2: ask git directly via 'git describe'.
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # Strategy 3: infer the version from an unpacked tarball's dirname.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {"version": "1.0", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| [
"noreply@github.com"
] | manjuladangalla.noreply@github.com |
a5b7936e3d25a8f5c8f9deb635771dc4d7de386f | 898f25bb22cd08c191b461934e27c2c6b5bcff42 | /finalcnn.py | 9e0d18cfce01691895ebb723d8b5e5fe2e8a0c03 | [] | no_license | alkadafare01/multiclasscnnmodel | ca4e042d91d3b59774b472d53c6f2622a32e1159 | 2cf12defd0d3c81dc28ee1c06380bf7d6969bdad | refs/heads/main | 2023-04-19T00:12:26.681393 | 2021-05-07T12:36:49 | 2021-05-07T12:36:49 | 322,548,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,957 | py | from keras.callbacks import EarlyStopping, CSVLogger, ModelCheckpoint
from keras.models import Sequential
from keras.layers import Conv2D, Dropout
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
# Initialising the CNN
#from tensorflow_core.python.client import session
import pathlib
session = 'simpleNASNet'  # prefix used below for log-file and checkpoint names
# Build a small 3-block CNN for 64x64 RGB images with 3 output classes.
classifier = Sequential()
# Step 1 - Convolution
classifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu',kernel_initializer='he_uniform', padding='same'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Dropout(0.2))
# Adding a second convolutional layer
# NOTE(review): input_shape on a non-first layer is redundant (only the
# first layer of a Sequential model needs it); presumably harmless --
# confirm against the Keras version in use.
classifier.add(Conv2D(64, (3, 3), input_shape = (64, 64, 3), activation = 'relu',kernel_initializer='he_uniform', padding='same'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Dropout(0.2))
#Adding a third convolutional layer
classifier.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
classifier.add(MaxPooling2D((2, 2)))
classifier.add(Dropout(0.2))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection
classifier.add(Dense(units = 128, activation = 'relu', kernel_initializer='he_uniform'))
classifier.add(Dropout(0.2))
# Three output units with softmax: one probability per class.
classifier.add(Dense(units = 3, activation = 'softmax'))
# Compiling the CNN
classifier.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
# Part 2 - Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator
# Training images are rescaled to [0, 1] and lightly augmented.
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)
# Test images are only rescaled -- no augmentation.
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('data/train',
                                                 target_size = (64, 64),
                                                 batch_size = 32,
                                                 class_mode = 'categorical')
test_set = test_datagen.flow_from_directory('data/test',
                                            target_size = (64, 64),
                                            batch_size = 32,
                                            class_mode = 'categorical')
logfile = session + '-train' + '.log'
csv_logger = CSVLogger(logfile, append=True)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1, mode='auto')
best_model_filename=session+'-weights.{epoch:02d}-{val_loss:.2f}.h5'
best_model = ModelCheckpoint(best_model_filename, monitor='val_acc', verbose=1, save_best_only=True)
# this is the augmentation configuration we will use for training
# NOTE(review): the three callbacks above are only wired into the
# commented-out fit call below; the live fit_generator call does not pass
# them, so no CSV logging, early stopping or checkpointing happens.
##classifier.fit_generator(
 #   generator=training_set,
  #  epochs=10,
    #verbose=1,
    #validation_data=test_set,
    #callbacks=[best_model, csv_logger, early_stopping])##
# 'model' holds the Keras History object returned by training, not a model.
model = classifier.fit_generator(training_set,
                         steps_per_epoch = 1000,
                         epochs = 3,
                         validation_data = test_set,
                         validation_steps = 32)
classifier.save("model.h5")
print("Saved model to disk")
# Part 3 - Making new predictions
import numpy as np
from keras.preprocessing import image
# Load one held-out image and preprocess it the same way as training data.
test_image = image.load_img('data/val/person1_bacteria_1.jpeg', target_size = (64, 64))
#test_image = image.load_img('inputImage.jpg', target_size = (64, 64))
test_image = image.img_to_array(test_image)
# Bug fix: the generators above rescale pixels by 1./255, but prediction
# previously ran on raw 0-255 values; normalize so the input matches the
# distribution the network was trained on.
test_image = test_image / 255.0
test_image = np.expand_dims(test_image, axis = 0)
result = classifier.predict(test_image)  # shape (1, 3): one softmax row
print(result)
# Bug fix: the old branches indexed result[1][0][0] / result[0][1][0] /
# result[0][0][1], which raise IndexError on a (1, 3) array, and compared
# softmax probabilities to exactly 1, which essentially never holds.
# Take the arg-max class instead and translate it back through the
# generator's class_indices mapping (label -> index), inverted here.
index_to_label = {v: k for k, v in training_set.class_indices.items()}
prediction = index_to_label[int(np.argmax(result[0]))]
# NOTE(review): labels come from the data/train subdirectory names;
# confirm they match the 'Normal' / 'Pneumonia' / 'Covid' strings the old
# code printed.
print(prediction)
"pinki.dafare@gmail.com"
] | pinki.dafare@gmail.com |
6884574422f998a6b1d4e95b2c63df25a2b77db1 | ddc7c07a4273aab5289a44fde5230856404a771b | /practica_02/p02e04.py | 6a44df4ae2e37f90f2693033f98b3c0245ae6e05 | [] | no_license | juan81mg/python2020 | 58736dd993461ab4b2952ccf4438df6e87523f54 | e9f8d740569727d95c30ec306ada2774570dd04f | refs/heads/master | 2021-03-16T20:19:56.648054 | 2020-06-22T16:49:27 | 2020-06-22T16:49:27 | 246,938,731 | 0 | 0 | null | 2020-05-25T19:55:44 | 2020-03-12T22:04:13 | Python | UTF-8 | Python | false | false | 597 | py | from random import shuffle
# Each entry is [statement, expected answer]; answers are lowercase 'si'/'no'.
preguntas = [['Buenos Aires limita con Santiago del Estero', 'no'], ['Jujuy limita con Bolivia', 'si'], ['San Juan limita con Misiones', 'no']]
puntaje = 0  # running score
shuffle(preguntas)  # ask the questions in random order
print('+++++ Juego de Preguntas +++++\n')
for p in preguntas:
    print('>>> pregunta <<<\n', p[0])
    res = input('respuesta (si/no):')
    # Robustness fix: accept answers regardless of case and stray spaces
    # ("Si", " NO ") instead of requiring an exact lowercase match.
    if res.strip().lower() == p[1]:
        print('--->>> respuesta correcta\n')
        puntaje += 1
    else:
        print('--->>> respuesta incorrecta\n')
print('su puntaje es >>>>>', puntaje)
"juan81mg@gmail.com"
] | juan81mg@gmail.com |
00ea6f281f439dcb4972df7de507c087190b305f | fd8bbeed2fe5de26cce3630bab9ba477b371b3aa | /csv_to_android_table_layout.py | 047c6e145a029d6498d844bbfd513cd120d8fff5 | [] | no_license | himanshugarg/scripts | b905820a1a1e6ddeb0870d183b9ceb8b98c666fe | 88140d9784b2fd236556f766fccd266f36b02da2 | refs/heads/master | 2023-09-02T17:20:17.220500 | 2021-11-21T02:33:32 | 2021-11-21T02:33:32 | 121,906,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,426 | py | import csv
import sys
import os
import pdb  # NOTE(review): imported but unused; presumably a debugging leftover
# Python 2 script: reads a CSV file (argv[1]) and prints an Android
# TableLayout XML document to stdout, one TableRow per CSV row.  The first
# CSV row is rendered bold as a header.
# NOTE(review): CSV field values are interpolated into XML without
# escaping, so fields containing &, < or " produce invalid XML -- confirm
# whether inputs are trusted.
print """<?xml version="1.0" encoding="utf-8"?>
<TableLayout
    xmlns:android="http://schemas.android.com/apk/res/android"
    android:layout_width="match_parent"
    android:layout_height="match_parent"
    android:shrinkColumns="*"
    android:stretchColumns="*">"""
#extract table id prefix from csv file name
table_name = os.path.splitext(os.path.basename(sys.argv[1]))[0]
with open(sys.argv[1], 'rb') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',', quotechar='"')
    rownum = 0
    for row in csvreader:
        # Row ids follow the pattern <table>_row_<n>.
        print """
    <TableRow
        android:id="@+id/{0}_row_{1}"
        android:layout_height="wrap_content"
        android:layout_width="match_parent">""".format(table_name, rownum)
        colnum = 0
        for field in row:
            if rownum == 0:
                # Header row: bold text style.
                print """
        <TextView
            android:textStyle="bold"
            android:id="@+id/{0}_row_{1}_col_{2}"
            android:text="{3}" />""".format(table_name, rownum, colnum, field.strip())
            else:
                print """
        <TextView
            android:id="@+id/{0}_row_{1}_col_{2}"
            android:text="{3}" />""".format(table_name, rownum, colnum, field.strip())
            colnum = colnum+1
        print """
    </TableRow>"""
        rownum = rownum+1
print """
</TableLayout>"""
| [
"noreply@github.com"
] | himanshugarg.noreply@github.com |
1954811c048f96bc8ce065432708a723ac852065 | 24d9ab3789aa0f7c3c6fffdf876315abb8b3f0ee | /app.py | ede3560a2c5a86c85ad5d62f44c4022c88dd421f | [] | no_license | winocas/project3 | cc876362988c4f418a9c4bc98a3527d667be51c6 | 8e55cffc815eaef38c586aebefc6aec17ecbb351 | refs/heads/master | 2023-03-29T05:29:54.804122 | 2021-03-30T12:28:05 | 2021-03-30T12:28:05 | 352,922,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | # from flask import Flask, url_for, redirect
# app = Flask(__name__)
# @app.route('/')
# def home():
# return redirect(url_for('main'))
# @app.route('/service')
# def main():
# return '서비스'
# if __name__=='__main__':
# app.run(debug=True)
from flask import Flask, render_template, redirect, request, url_for
import corona_data
# Flask application instance; the route decorators below register views on it.
app = Flask(__name__)
@app.route('/')
def index():
    """Render the landing page with the overall corona statistics."""
    tot = corona_data.get_tot_coro()
    grand_total = corona_data.get_total_coro()
    return render_template('index.html', data1=tot, data2=grand_total)
@app.route('/city')
def region():
    """Render the per-city breakdown page."""
    return render_template('region.html', data=corona_data.get_city_coro())
@app.route('/coro/')
@app.route('/coro/<city>')
def inputTest(city=None):
    """Detail page for one city; *city* is None on the bare /coro/ route."""
    return render_template('main.html', city=city, data=corona_data.get_city_coro())
@app.route('/cityinfo',methods=['POST'])
def calculate(city=None):
    """Accept the submitted city form and redirect to its detail page."""
    data = corona_data.get_city_coro()  # fetched as before; result unused here
    # The route only accepts POST, so the else-arm is effectively dead,
    # but the original fallback to None is preserved.
    chosen = request.form['city'] if request.method == 'POST' else None
    return redirect(url_for('inputTest', city=chosen))
if __name__ == '__main__':
    # Run Flask's built-in development server when executed directly.
    app.run()
"sec021122@gmail.com"
] | sec021122@gmail.com |
2dd168e0be879c0702c0c7d22fff6db2edb3e519 | 528add9808b43905fb2f71c94fdf20d374b69878 | /usersApp/models.py | 8445c225db2dc6d46af37df5e1a28df75e4342fc | [] | no_license | paulsmalze/singleModelOrm | 5695804a492e1be06650bc0598779df9288c5dac | 30080a33a2a44ba13d595a7af136f5d9fd8ff12e | refs/heads/main | 2023-04-25T12:20:55.718554 | 2021-05-08T15:43:25 | 2021-05-08T15:43:25 | 365,535,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | from django.db import models
# Create your models here.
class User(models.Model):
    """A single user record; Django adds an auto-increment ``id`` key."""
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    # Unique constraint makes the email a natural lookup key.
    email_address = models.EmailField(unique=True)
    age = models.IntegerField()
    # auto_now_add: set once when the row is first inserted.
    created_at = models.DateTimeField(auto_now_add=True)
    # auto_now: refreshed on every save().
    updated_at = models.DateTimeField(auto_now=True)
| [
"waffwegs@yahoo.com"
] | waffwegs@yahoo.com |
5e7d45027e713ff5923387f455b14bddd9a31ef3 | 2ae53bf6063c0bb5c227e17c049c0c5963861d7f | /setup.py | 73d9782ed28756e09d357999e1812ddbc45eda21 | [] | no_license | PsychoDramatic/openshift_django | 207e5757e3479ce1baaa6196a4c9175afd089e3a | 0d76a07d3b9559429f3cd8031114949e3f141c62 | refs/heads/master | 2021-01-16T23:11:25.913630 | 2014-03-01T15:33:54 | 2014-03-01T15:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | from setuptools import setup
# Packaging metadata for the OpenShift Django deployment.  The author/url
# fields are placeholder values copied from the distutils examples.
setup(name='MyDjangoApp',
      version='1.0',
      description='Measuredly website',
      author='Yourname',
      author_email='your_email@somewhere.com',
      url='http://www.python.org/sigs/distutils-sig/',
      # NOTE(review): Django 1.6 is long end-of-life; presumably pinned for
      # the OpenShift v2 platform this targeted -- confirm before upgrading.
      install_requires=['django==1.6','django-crispy-forms'],
      )
| [
"steve@mocarski.com"
] | steve@mocarski.com |
452d72297d0a3b0666a56a4a72ddda202448eb36 | c2c0f1f565a285146a30dcab99e08e8353e03e54 | /geocoding_cache/backup.py | a74de831b589d1e9793dc0a64c2e9ade43388c34 | [] | no_license | adamhammes/geocoding-cache | f522f9be6078ae32a62cf9dd427b94489662285e | 110d6607af73d3ed064e06f432e6da33af2afb80 | refs/heads/master | 2023-06-14T04:42:25.109278 | 2021-07-07T17:47:20 | 2021-07-07T17:47:20 | 274,026,001 | 0 | 0 | null | 2021-03-20T04:25:04 | 2020-06-22T02:55:40 | Python | UTF-8 | Python | false | false | 657 | py | import datetime
import sqlite3
import tempfile
import boto3
def backup_db(source: sqlite3.Connection):
    """Snapshot *source* into a temp file and upload it to S3.

    Backups rotate by day of month (01.sqlite3 .. 31.sqlite3), so at most
    ~31 snapshots are kept before old ones start being overwritten.
    """
    print("Backing up the database")
    s3_client = boto3.client("s3")

    day_of_month = datetime.datetime.today().day
    object_name = f"geocoding_cache/{day_of_month:02}.sqlite3"

    with tempfile.NamedTemporaryFile() as destination_file:
        dest = sqlite3.connect(destination_file.name)
        try:
            print("Making a copy of the database...")
            source.backup(dest)
        finally:
            # Bug fix: always release the destination handle, even when
            # backup() raises; previously a failure leaked the connection
            # and could keep the temp file locked.
            dest.close()

        print("Uploading to s3...")
        s3_client.upload_file(destination_file.name, "kijiji-apartments", object_name)
        print("...done")
"ahammes@cortexmedia.ca"
] | ahammes@cortexmedia.ca |
575b513edcb70131289f04f5a2a9725d843d874e | 0875d9c4ed9bec3794aee6435eb49243ab84ac05 | /Machine Learning/IntroduccionML/Algoritmos/IntroVectorSR.py | 9716ba998a6b438171a84f7cd323db2249efb065 | [] | no_license | AngelSosaGonzalez/IntroduccionMachineLearning | b49ba29a717bd2c3bbd6c93615d3ef3bc5e1561d | d9e13efe5703e6a6375a971c87fd000ba98024c7 | refs/heads/main | 2023-03-05T14:08:23.066293 | 2021-02-20T04:31:34 | 2021-02-20T04:31:34 | 327,723,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,221 | py | """ Vectore de soporte de regresion: En este proyecto veremos en la marcha el concepto de como se puede aplicar este algoritmo de ML,
en este proyecto aplicaremos los conociemiento basicos aprendidos en ML, de los antiguos proyecto que estan en este recopilatorio,
te recomiendo revisar los proyecto que hablan sobre regrecion (Lineal, Multiple, Polinomial), antes de comenzar quiero aclarar que este proyecto se basa (o copia mas bien) del curso de
Machine Learning del canal de: AprendeIA con Ligdi Gonzalez, fuente: https://www.youtube.com/watch?v=zvB0cshd0TM&list=PLJjOveEiVE4Dk48EI7I-67PEleEC5nxc3&index=24 """
# Import the modules needed for the project
# NumPy for array handling
import numpy
# matplotlib for plotting
import matplotlib.pyplot as plt
# The scikit-learn datasets module (Boston housing)
from sklearn import datasets
# Helper that splits the data into training and test subsets
from sklearn.model_selection import train_test_split
# The algorithm under study: Support Vector Regression (SVR)
from sklearn.svm import SVR
# Load the dataset into a variable so we can manipulate it
# NOTE(review): load_boston was removed from scikit-learn 1.2+; this script
# presumably targets an older release -- confirm.
BostonDatos = datasets.load_boston()
# NOTE: print the dataset if you want to verify the data looks right
# As in "IntroRegresionPoli.py", select the feature we want to use:
# column 5 is the average number of rooms per dwelling
X_VR = BostonDatos.data[:, numpy.newaxis, 5]
# Target labels (house prices)
Y_VR = BostonDatos.target
# Plot the raw data with matplotlib
plt.scatter(X_VR, Y_VR)  # scatter produces a dispersion plot
plt.show()
# Split the data into training and test sets
X_Entrena, X_Prueba, Y_Entrena, Y_Prueba = train_test_split(X_VR, Y_VR, test_size = 0.2)  # test_size is the held-out fraction
""" Invocamos a nuestro algoritmo
Atributos de la funcion de nuestro algoritmo:
- Kernel: Especificamos el tipo de datos a utilizar en nuestro algoritmo, como vimos en la grafica de dispercion nuestros datos
son de tipo lineal, por lo que tenemos que especificarle a nuestro algoritmo que tipo de datos usamos
- C: Parámetro de regularización. La fuerza de la regularización es inversamente proporcional a C.
Debe ser estrictamente positiva. La penalización es una penalización l2 al cuadrado.
- epsilon: Epsilon en el modelo epsilon-SVR. Especifica el tubo de épsilon dentro del cual no se
asocia ninguna penalización en la función de pérdida de entrenamiento con puntos predichos dentro de una distancia epsilon desde el valor real.
Todo esto lo puedes leer en la documentacion de la funcion: https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVR.html """
AlgoSVR = SVR(kernel='linear', C=1.0, epsilon=0.2)
# Train the algorithm on the training split
AlgoSVR.fit(X_Entrena, Y_Entrena)
# Inspect the score the model reports
print(AlgoSVR.score(X_Entrena, Y_Entrena))  # on the training data
print(AlgoSVR.score(X_Prueba, Y_Prueba))  # on the test data
# The result is quite low; plot to see which data the model captures.
# First run a prediction on the test split
Y_Prediccion = AlgoSVR.predict(X_Prueba)
# Plot as in the polynomial-regression project
plt.scatter(X_Prueba, Y_Prueba)
plt.plot(X_Prueba, Y_Prediccion, color = 'red', linewidth = 2)  # line plot of the fitted values
plt.show()
""" Ahora gracias a la grafica veremos que solamente se dibuja una linea esto porque en la creacion del algoritmo en el atributo del Kernel
seleccionamos lineal, por lo tanto solo se dibujara una linea y los datos que agarra la linea son lo que usaremos para la prediccion """
# To compare, try the algorithm again with its default parameters only.
# First delete the old model variable (to avoid keeping both in memory)
del AlgoSVR
# Re-create the algorithm with defaults
AlgoSVR = SVR()
# Train it
AlgoSVR.fit(X_Entrena, Y_Entrena)
# Inspect the scores again
print(AlgoSVR.score(X_Entrena, Y_Entrena))  # on the training data
print(AlgoSVR.score(X_Prueba, Y_Prueba))  # on the test data
# Run a prediction
Y_Prediccion = AlgoSVR.predict(X_Prueba)
# Plot to see how this model compares
plt.scatter(X_Prueba, Y_Prueba)
plt.plot(X_Prueba, Y_Prediccion, color = 'red', linewidth = 2)
plt.show()
""" Comparando los resultados veremos que el algorito aumento el score, no mucho, como digo podemos mejorarlo cambiando parametros,
pero como usamos un kernel lineal una linea no agarra todos los datos necesarios para tener una buena prediccion """
"angelsosagonz@gmail.com"
] | angelsosagonz@gmail.com |
12e05ceaac7c5c4174fb21ada9bdbb1e70c90c54 | ffb05b145989e01da075e2a607fb291955251f46 | /pypers/oxford/non_cooperative.py | 6c7b293967ae50f89ebf7f90ccccdc8e62ba6d40 | [] | no_license | micheles/papers | a5e7f2fa0cf305cd3f8face7c7ecc0db70ce7cc7 | be9070f8b7e8192b84a102444b1238266bdc55a0 | refs/heads/master | 2023-06-07T16:46:46.306040 | 2018-07-14T04:17:51 | 2018-07-14T04:17:51 | 32,264,461 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | # non_cooperative.py
class B1(object):
    """Cooperative base: forwards surplus keyword args up the MRO."""
    def __init__(self, **kw):
        print "B1.__init__"
        super(B1, self).__init__(**kw)
class B2(object):
    """Cooperative base: forwards surplus keyword args up the MRO."""
    def __init__(self, **kw):
        print "B2.__init__"
        super(B2, self).__init__(**kw)
| [
"michele.simionato@gmail.com"
] | michele.simionato@gmail.com |
94f2093636ae67fdc8ec2d5431c2b52cbd51d7c2 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.0_rd=0.5_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=25/params.py | 1e06a0ee411f4cd8e4e96c1df8f010d7336d6730 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.041500',
'max_util': '3.0',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.5',
'res_nmb': '4',
'res_weight': '0.06',
'scheduler': 'GSN-EDF',
'trial': 25,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.