| column | dtype |
|---|---|
| blob_id | string (40–40 chars) |
| directory_id | string (40–40 chars) |
| path | string (3–281 chars) |
| content_id | string (40–40 chars) |
| detected_licenses | list (0–57 items) |
| license_type | string (2 classes) |
| repo_name | string (6–116 chars) |
| snapshot_id | string (40–40 chars) |
| revision_id | string (40–40 chars) |
| branch_name | string (313 classes) |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 (18.2k–668M, nullable ⌀) |
| star_events_count | int64 (0–102k) |
| fork_events_count | int64 (0–38.2k) |
| gha_license_id | string (17 classes) |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string (107 classes) |
| src_encoding | string (20 classes) |
| language | string (1 class) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (4–6.02M) |
| extension | string (78 classes) |
| content | string (2–6.02M chars) |
| authors | list (1–1 items) |
| author | string (0–175 chars) |
c7571fd6f80cb52e31b43fa0fa9746d3faafb0c1
|
de817cc84baa1ca5cef3ceaff56dc235b00073d9
|
/dokdo.py
|
dfbc8aad66a041c9a7e72135de16c2b1fb75b035
|
[
"MIT"
] |
permissive
|
song9446/Dokdo-HTML-template-compiler-python3
|
32023dd38f57b091a6d4a8288e07ddb8663c892e
|
2d26aa7d84c0c7606ae5140126691d6f1a6e930e
|
refs/heads/master
| 2020-04-09T01:31:23.923621
| 2018-12-01T06:36:39
| 2018-12-01T06:36:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,558
|
py
|
#!/usr/bin/python3
from string import Template
import lxml.html
from lxml import etree
import copy
import re
import os
import sass
VERSION = "0.1"
variable_pattern = re.compile(r"\{\{\{([^}]+)\}\}\}")
def dom2str(element):
return lxml.html.tostring(element, encoding=str)
def dom2innerstr(element):
text = lxml.html.tostring(element, encoding=str)
return text[text.find(">")+1:text.rfind("<")]
def replace(text, rule, replacer):
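    # collect matches first, then splice replacements in reverse so earlier offsets stay valid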
matches = [(match.start(), match.end(), match.groups()[0].strip()) for match in re.finditer(rule, text)]
matches.reverse()
characters = list(text)
for start, end, variable in matches:
characters[start:end] = replacer(variable)
return "".join(characters)
def compile(path, variables={}, innerhtmls=[], isroot=True, statics=None):
    if statics is None: statics = {}  # fresh dict per top-level call so styles don't leak across compiles
    # 1. build tree
with open(path) as f:
text = f.read()
# 1.1. replace variable
    text = replace(text, variable_pattern, lambda x: variables[x])
if text.strip().startswith("<!DOCTYPE") or text.strip().startswith("<html"):
roots = (lxml.html.fromstring(text),)
else:
roots = lxml.html.fragments_fromstring(text)
    # 2. extract styles & statics
styles = [root for root in roots if root.tag == "style"] + \
[style.drop_tree() or style for root in roots for style in root.xpath(".//style")]
for style in styles:
        if style.get("type") == "text/scss": style.text = sass.compile(string=style.text)
poststatics = [root for root in roots if root.tag == "static" and "post" in root.attrib] + \
[static.drop_tree() or static for root in roots for static in root.xpath(".//static") if "post" in static.attrib]
prestatics = [root for root in roots if root.tag == "static" and "pre" in root.attrib] + \
[static.drop_tree() or static for root in roots for static in root.xpath(".//static") if "pre" in static.attrib]
roots = list(filter(lambda x: x.tag not in ("style", "static"), roots))
if path not in statics: statics[path] = (styles, poststatics, prestatics)
# 3. replace imports
for imp in (imp for root in roots for imp in root.xpath("//import")):
ipath = os.path.join(os.path.dirname(path), imp.get("path"))
importing_roots = compile(ipath, variables=imp.attrib, innerhtmls=imp, isroot=False, statics=statics)
if len(importing_roots) == 1:
importing_roots[0].attrib.update(imp.attrib)
if imp in roots:
imp_index = roots.index(imp)
roots = list(filter(lambda x: x!=imp, roots))
for i, root in enumerate(importing_roots):
roots.insert(imp_index + i, root)
else:
imp_parent = imp.getparent()
imp_index = imp_parent.index(imp)
imp.drop_tree()
for i, root in enumerate(importing_roots):
imp_parent.insert(imp_index + i, root)
# 4. replace innerhtmls
innerhtml_map = {innerhtml.get("id", i):innerhtml for i, innerhtml in enumerate(innerhtmls)}
target_innerhtmls = [innerhtml for root in roots for innerhtml in root.xpath(".//innerhtml")]
for i, target_innerhtml in enumerate(target_innerhtmls):
id_ = target_innerhtml.get("id", i)
if id_ in innerhtml_map:
innerhtml_map[id_].attrib.update(target_innerhtml.attrib)
target_innerhtml.getparent().replace(target_innerhtml, innerhtml_map[id_])
else: target_innerhtml.drop_tree()
# 5. if this is a root: put statics and return string
if isroot:
head = roots[0].xpath("//head")[0]
body = roots[0].xpath("//body")[0]
etree.SubElement(head, "style").text = "".join((sass.compile(string=dom2innerstr(style)) if style.get("type", "text/css") == "text/scss" else dom2innerstr(style)) \
for i in statics for style in statics[i][0])
for i in statics:
for poststatic in statics[i][1]: body.append(poststatic)
for prestatic in statics[i][2]: head.append(prestatic)
return "".join(dom2str(root) for root in roots)
else: return roots
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser(usage="usage: %prog [options] filename",
version="%prog {}".format(VERSION))
parser.add_option("-c", "--src", dest="source",
help="source html path", metavar="SRC")
parser.add_option("-o", "--out",
action="store_false", dest="out", default="a.html",
help="destination of output", metavar="OUT")
parser.add_option("-C", "--srcdir", dest="sourcedir",
help="source dir path(it filters html files automatically)", default="src", metavar="SRCDIR")
parser.add_option("-O", "--outdir", dest="outdir", default="build",
help="out dir path", metavar="OUTDIR")
    (option, tags) = parser.parse_args()
    if tags:
        print(compile(tags[0]))
    else:
        if option.source:
            with open(option.out, "w") as f:
                f.write(compile(option.source))
elif option.sourcedir:
compilables = [os.path.join(d, f) for (d, _, fs) in os.walk(option.sourcedir) for f in fs if f.endswith(".html")]
if not os.path.exists(option.outdir):
os.makedirs(option.outdir)
for source in compilables:
with open(os.path.join(option.outdir, os.path.basename(source)), "w") as f:
f.write(compile(source))
|
[
"song9446@unist.ac.kr"
] |
song9446@unist.ac.kr
|
acaaff5ac222121f65916b2c51dba801a44b99f3
|
37496577a9fa05bf949bd018fca17f0b6d546ecd
|
/client/pdo/client/scripts/AuctionTestCLI.py
|
4a1e9c064ad1516c800d154a615e56b89dbcc513
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"Zlib",
"MIT",
"CC-BY-4.0"
] |
permissive
|
EugeneYYY/private-data-objects
|
cce9250648252f4baf92e0007c9584ac82d46401
|
d96033bbfa9bd3fe72a549487e8e5c83c7c580ca
|
refs/heads/master
| 2020-03-15T07:11:36.278038
| 2018-05-01T21:04:26
| 2018-05-01T22:40:35
| 132,023,932
| 0
| 0
| null | 2018-05-03T16:45:45
| 2018-05-03T16:45:44
| null |
UTF-8
|
Python
| false
| false
| 20,659
|
py
|
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
import argparse
import random
from string import Template
import logging
logger = logging.getLogger(__name__)
import pprint
pp = pprint.PrettyPrinter(indent=4)
import pdo.common.crypto as pcrypto
from pdo.client.SchemeExpression import SchemeExpression
from pdo.common.keys import ServiceKeys
from pdo.contract import ContractCode
from pdo.contract import ContractState
from pdo.contract import Contract
from pdo.contract import register_contract
from pdo.contract import add_enclave_to_contract
from pdo.service_client.enclave import EnclaveServiceClient
from pdo.service_client.provisioning import ProvisioningServiceClient
enclave_services_by_url = {}
enclave_services = {}
participant_keys = {}
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def GetEnclaveServiceByURL(url) :
    global enclave_services_by_url, enclave_services
if url not in enclave_services_by_url :
eservice = EnclaveServiceClient(url)
enclave_services_by_url[url] = eservice
enclave_services[eservice.enclave_id] = eservice
return enclave_services_by_url[url]
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def GetKeysForIdentity(config, identity) :
key_config = config['Key']
global participant_keys
if identity not in participant_keys :
#keypath = key_config['SearchPath']
#keyfile = Template(key_config['KeyFileTemplate']).substitute({'identity' : identity })
#participant_keys[identity] = ServiceKeys.read_from_file(keyfile, keypath)
participant_keys[identity] = ServiceKeys.create_service_keys()
return participant_keys[identity]
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def SendMessageAsIdentity(config, contract, invoker_keys, message, fmt = 'python', wait=False) :
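    # evaluate the update on a randomly chosen provisioned enclave, then submit the resulting transaction to the ledger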
ledger_config = config.get('Sawtooth')
contract_config = config.get('Contract')
try :
logger.info('send message %s to contract %s', message, contract.contract_code.name)
enclave_id = random.choice(contract.provisioned_enclaves)
enclave_service = enclave_services[enclave_id]
request = contract.create_update_request(invoker_keys, enclave_service, message)
response = request.evaluate()
logger.debug('result: %s, ', response.result)
except Exception as e :
logger.error('method invocation failed for message %s: %s', message, str(e))
sys.exit(-1)
try :
if wait :
response.submit_update_transaction(ledger_config, wait=30)
else :
response.submit_update_transaction(ledger_config)
contract.set_state(response.encrypted_state)
data_dir = contract_config['DataDirectory']
contract.contract_state.save_to_cache(data_dir=data_dir)
except Exception as e:
logger.error('transaction submission failed for message %s; %s', message, str(e))
sys.exit(-1)
expression = SchemeExpression.ParseExpression(response.result)
if fmt == 'scheme' :
return expression
elif fmt == 'python' :
return expression.value
else :
raise ValueError('unknown format {}'.format(fmt))
# -----------------------------------------------------------------
# -----------------------------------------------------------------
def CreateAndRegisterContract(config, contract_info, creator_keys) :
ledger_config = config.get('Sawtooth')
contract_config = config.get('Contract')
contract_creator_id = creator_keys.identity
contract_name = contract_info['Name']
source_file = contract_info['Source']
search_path = contract_config['SourceSearchPath']
contract_code = ContractCode.create_from_scheme_file(contract_name, source_file, search_path = search_path)
# --------------------------------------------------
logger.info('register the contract')
# --------------------------------------------------
pservice_urls = contract_info.get("ProvisioningServices")
provisioning_services = list(map(lambda url : ProvisioningServiceClient(url), pservice_urls))
provisioning_service_keys = list(map(lambda svc : svc.identity, provisioning_services))
contract_id = register_contract(ledger_config, creator_keys, contract_code, provisioning_service_keys)
logger.info('registered the contract as %s', contract_id)
contract_state = ContractState.create_new_state(contract_id)
contract = Contract(contract_code, contract_state, contract_id, contract_creator_id)
# --------------------------------------------------
logger.info('provision enclaves')
# --------------------------------------------------
eservice_urls = contract_info.get("EnclaveServices")
enclave_services = list(map(lambda url : GetEnclaveServiceByURL(url), eservice_urls))
for eservice in enclave_services :
secret_list = []
for pservice in provisioning_services :
message = pcrypto.string_to_byte_array(eservice.enclave_id + contract_id)
signature = creator_keys.sign(message)
secret = pservice.get_secret(eservice.enclave_id, contract_id, creator_keys.verifying_key, signature)
secret_list.append(secret)
secretinfo = eservice.verify_secrets(contract_id, contract_creator_id, secret_list)
encrypted_state_encryption_key = secretinfo['encrypted_state_encryption_key']
signature = secretinfo['signature']
txnid = add_enclave_to_contract(
ledger_config,
creator_keys,
contract_id,
eservice.enclave_id,
secret_list,
encrypted_state_encryption_key,
signature)
contract.set_state_encryption_key(eservice.enclave_id, encrypted_state_encryption_key)
# --------------------------------------------------
logger.info('create the initial contract state')
# --------------------------------------------------
eservice = random.choice(enclave_services)
initialize_request = contract.create_initialize_request(creator_keys, eservice)
initialize_response = initialize_request.evaluate()
contract.set_state(initialize_response.encrypted_state)
logger.info('initial state created')
# --------------------------------------------------
logger.info('save the initial state in the ledger')
# --------------------------------------------------
txnid = initialize_response.submit_initialize_transaction(ledger_config, wait=30)
return contract
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def CreateAssetContract(config) :
asset_config = config['AssetContract']
contract_config = config['Contract']
asset_creator_identity = asset_config['Creator']
asset_creator_keys = GetKeysForIdentity(config, asset_creator_identity)
contract = CreateAndRegisterContract(config, asset_config, asset_creator_keys)
data_dir = contract_config['DataDirectory']
contract.save_to_file(asset_config['Name'], data_dir = data_dir)
contract.contract_state.save_to_cache(data_dir = data_dir)
return contract
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def CreateAuctionContract(config) :
auction_config = config['AuctionContract']
contract_config = config['Contract']
auction_creator_identity = auction_config['Creator']
auction_creator_keys = GetKeysForIdentity(config, auction_creator_identity)
contract = CreateAndRegisterContract(config, auction_config, auction_creator_keys)
data_dir = contract_config['DataDirectory']
contract.save_to_file(auction_config['Name'], data_dir = data_dir)
contract.contract_state.save_to_cache(data_dir = data_dir)
return contract
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def CreateRandomAsset(config, asset_contract, invoker_keys, assetname, value = None) :
params = {}
params['asset'] = "asset_" + assetname
params['value'] = random.randint(0, 100) if value is None else value
message = Template("'(create \"${asset}\" ${value})").substitute(params)
logger.info('create asset %s with value %s', params['asset'], params['value'])
result = SendMessageAsIdentity(config, asset_contract, invoker_keys, message)
if result is None :
raise Exception('failed to create random asset')
return params['asset']
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def EscrowAsset(config, asset_contract, invoker_keys, asset, pubkey) :
## ( ((key "auction") (value 5) (owner "<ownerid>")) "<signature>" )
# first pass... escrow the asset and push the transaction
message = "'(escrow \"{0}\" \"{1}\")".format(asset, pubkey)
result = SendMessageAsIdentity(config, asset_contract, invoker_keys, message)
# get the escrow attestation for handoff to the auction
message = "'(escrow-attestation \"{0}\")".format(asset)
result = SendMessageAsIdentity(config, asset_contract, invoker_keys, message, fmt='scheme')
return (str(result.nth(0)), str(result.nth(1)), str(result.nth(2)))
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def CancelBid(config, auction_contract, asset_contract, invoker_keys) :
try :
message = "'(cancel-bid)"
result = SendMessageAsIdentity(config, auction_contract, invoker_keys, message)
message = "'(cancel-attestation)"
result = SendMessageAsIdentity(config, auction_contract, invoker_keys, message, fmt='scheme')
        ## should be: (((key "offered") (value X) (owner "<ownerid>")) (dependencies) "<signature>")
assetkey = dict(result.nth(0).value)['key']
dependencies = str(result.nth(1))
signature = str(result.nth(2))
message = "'(disburse \"{0}\" {1} {2})".format(assetkey, dependencies, signature)
result = SendMessageAsIdentity(config, asset_contract, invoker_keys, message)
except :
pass
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def LocalMain(config) :
asset_config = config['AssetContract']
auction_config = config['AuctionContract']
user_config = config['Participants']
auction_keys = GetKeysForIdentity(config, auction_config['Creator'])
asset_keys = GetKeysForIdentity(config, asset_config['Creator'])
# create the asset contract
asset_contract = CreateAssetContract(config)
asset_contract_pubkey = SendMessageAsIdentity(config, asset_contract, asset_keys, "'(get-public-signing-key)", fmt='python')
# ---------- create the asset to use for the auction, minimum bid is 10 ----------
auction_asset = CreateRandomAsset(config, asset_contract, auction_keys, 'auction', value = 10)
# ---------- create the assets for each of the identities ----------
assetmap = {}
for identity in user_config['Asset'] :
user_keys = GetKeysForIdentity(config, identity)
assetmap[identity] = CreateRandomAsset(config, asset_contract, user_keys, identity)
# ---------- create and initialize the auction contract ----------
auction_contract = CreateAuctionContract(config)
auction_contract_pubkey = SendMessageAsIdentity(config, auction_contract, auction_keys, "'(get-public-signing-key)", fmt='python')
message = "'(initialize \"{0}\")".format(asset_contract_pubkey)
result = SendMessageAsIdentity(config, auction_contract, auction_keys, message, wait=True)
# ---------- escrow the auction asset and prime the auction----------
(ecounter, edependencies, esignature) = EscrowAsset(
config, asset_contract, auction_keys, auction_asset, str(auction_contract_pubkey))
message = "'(prime-auction* {0} {1} {2})".format(ecounter, edependencies, esignature)
result = SendMessageAsIdentity(config, auction_contract, auction_keys, message)
# ---------- submit bids ----------
for identity in user_config['Auction'] :
asset = assetmap[identity]
user_keys = GetKeysForIdentity(config, identity)
(ecounter, edependencies, esignature) = EscrowAsset(
config, asset_contract, user_keys, asset, auction_contract_pubkey)
message = "'(submit-bid* {0} {1} {2})".format(ecounter, edependencies, esignature)
result = SendMessageAsIdentity(config, auction_contract, user_keys, message)
## =================================================================
# we have to wait for the transactions to commit before we continue
#WaitForStateCommit(lwc, PrivateContractTransaction, asset_contract.ContractID, asset_contract.State.ComputeHash())
#WaitForStateCommit(lwc, PrivateContractTransaction, auction_contract.ContractID, auction_contract.State.ComputeHash())
## =================================================================
# ---------- get the max bid ----------
message = "'(max-bid)"
result = SendMessageAsIdentity(config, auction_contract, auction_keys, message)
logger.info("maximum bid: %s", str(result))
# ---------- close the bidding and transfer the assets ----------
message = "'(close-bidding)"
result = SendMessageAsIdentity(config, auction_contract, auction_keys, message)
message = "'(exchange-attestation)"
result = SendMessageAsIdentity(config, auction_contract, auction_keys, message, fmt='scheme')
    ## should be: (((key "offered") (value X) (owner "<ownerid>")) ((key "bid") (value X) (owner "<ownerid>")) dep sig)
logger.debug("closed bidding with result: %s", str(result))
offered = dict(result.nth(0).value)
maxbid = dict(result.nth(1).value)
dependencies = str(result.nth(2))
signature = str(result.nth(3))
logger.info('exchange ownership of keys %s and %s', offered['key'], maxbid['key'])
message = "'(exchange-ownership \"{0}\" \"{1}\" {2} {3})".format(offered['key'], maxbid['key'], dependencies, signature)
result = SendMessageAsIdentity(config, asset_contract, auction_keys, message)
# ---------- cancel the remaining bids ----------
for identity in user_config['Auction'] :
logger.info("attempt to cancel bid for %s", identity)
user_keys = GetKeysForIdentity(config, identity)
CancelBid(config, auction_contract, asset_contract, user_keys)
# ---------- dump the final state of the contract ----------
result = SendMessageAsIdentity(config, asset_contract, asset_keys, "'(get-state)", fmt='python', wait=True)
pp.pprint(result)
print("auction contract id = {0}".format(auction_contract.contract_id))
print("asset contract id = {0}".format(asset_contract.contract_id))
sys.exit(0)
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## DO NOT MODIFY BELOW THIS LINE
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## -----------------------------------------------------------------
ContractHost = os.environ.get("HOSTNAME", "localhost")
ContractHome = os.environ.get("CONTRACTHOME") or os.path.realpath("/opt/pdo")
ContractEtc = os.environ.get("CONTRACTETC") or os.path.join(ContractHome, "etc")
ContractKeys = os.environ.get("CONTRACTKEYS") or os.path.join(ContractHome, "keys")
ContractLogs = os.environ.get("CONTRACTLOGS") or os.path.join(ContractHome, "logs")
ContractData = os.environ.get("CONTRACTDATA") or os.path.join(ContractHome, "data")
ScriptBase = os.path.splitext(os.path.basename(sys.argv[0]))[0]
config_map = {
'base' : ScriptBase,
'data' : ContractData,
'etc' : ContractEtc,
'home' : ContractHome,
'host' : ContractHost,
'keys' : ContractKeys,
'logs' : ContractLogs
}
# -----------------------------------------------------------------
# -----------------------------------------------------------------
def Main() :
import pdo.common.config as pconfig
import pdo.common.logger as plogger
# parse out the configuration file first
conffiles = [ 'auction-test.toml' ]
confpaths = [ ".", "./etc", ContractEtc ]
parser = argparse.ArgumentParser()
parser.add_argument('--config', help='configuration file', nargs = '+')
parser.add_argument('--config-dir', help='configuration file', nargs = '+')
parser.add_argument('--logfile', help='Name of the log file, __screen__ for standard output', type=str)
parser.add_argument('--loglevel', help='Logging level', type=str)
parser.add_argument('--ledger', help='URL for the Sawtooth ledger', type=str)
parser.add_argument('--asset-contract', help='Name of the asset contract', default="integer-key", type = str)
parser.add_argument('--asset-identity', help='Identity to use for the asset contract', default="ikey-contract", type=str)
parser.add_argument('--auction-contract', help='Name of the auction contract', default="auction", type = str)
parser.add_argument('--auction-identity', help='Identity to use for the auction contract', default="auc-contract", type=str)
parser.add_argument('--key-dir', help='Directories to search for key files', nargs='+')
parser.add_argument('--contract-dir', help='Directories to search for contract files', nargs='+')
options = parser.parse_args()
# first process the options necessary to load the default configuration
if options.config :
conffiles = options.config
if options.config_dir :
confpaths = options.config_dir
global config_map
config_map['assetidentity'] = options.asset_identity
config_map['assetcontract'] = options.asset_contract
config_map['auctionidentity'] = options.auction_identity
config_map['auctioncontract'] = options.auction_contract
try :
config = pconfig.parse_configuration_files(conffiles, confpaths, config_map)
except pconfig.ConfigurationException as e :
logger.error(str(e))
sys.exit(-1)
# set up the logging configuration
if config.get('Logging') is None :
config['Logging'] = {
'LogFile' : '__screen__',
'LogLevel' : 'INFO'
}
if options.logfile :
config['Logging']['LogFile'] = options.logfile
if options.loglevel :
config['Logging']['LogLevel'] = options.loglevel.upper()
plogger.setup_loggers(config.get('Logging', {}))
# set up the ledger configuration
if config.get('Sawtooth') is None :
config['Sawtooth'] = {
'LedgerURL' : 'http://localhost:8008',
}
if options.ledger :
config['Sawtooth']['LedgerURL'] = options.ledger
# set up the key search paths
if config.get('Key') is None :
config['Key'] = {
'SearchPath' : ['.', './keys', ContractKeys]
}
if options.key_dir :
config['Key']['SearchPath'] = options.key_dir
# set up the data paths
if config.get('Contract') is None :
config['Contract'] = {
'SourceSearchPath' : [ '.', './contract', os.path.join(ContractHome, 'contracts') ]
}
if options.contract_dir :
config['Contract']['SourceSearchPath'] = options.contract_dir
# GO!
LocalMain(config)
## -----------------------------------------------------------------
## Entry points
## -----------------------------------------------------------------
Main()
|
[
"byron.marohn@intel.com"
] |
byron.marohn@intel.com
|
af0407d686f5be807f2d3d4b938ec56483a3f89e
|
d6b0bc433b260b5d519d73087d5df46aa516fcdd
|
/biobb_adapters/pycompss/biobb_amber/pmemd/pmemd_mdrun.py
|
e94945a6809b7c30cc12c1d92b7e2ea6151423f4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bioexcel/biobb_adapters
|
b5442fe953b90be4e66faf3460b4a88a40e6d448
|
3daa84ba83a7951add017dd0f05dc361aa99dfe5
|
refs/heads/master
| 2023-08-14T08:46:39.323257
| 2023-08-02T09:05:21
| 2023-08-02T09:05:21
| 157,351,268
| 0
| 2
|
Apache-2.0
| 2023-04-01T14:56:43
| 2018-11-13T09:07:36
|
Common Workflow Language
|
UTF-8
|
Python
| false
| false
| 3,420
|
py
|
# Python
import os
import sys
import traceback
# Pycompss
from pycompss.api.task import task
from pycompss.api.parameter import FILE_IN, FILE_OUT
from pycompss.api.multinode import multinode
from pycompss.api.constraint import constraint
# Adapters commons pycompss
from biobb_adapters.pycompss.biobb_commons import task_config
# Wrapped Biobb
from biobb_amber.pmemd.pmemd_mdrun import PmemdMDRun # Importing class instead of module to avoid name collision
task_time_out = int(os.environ.get('TASK_TIME_OUT', 0))
computing_nodes = str(os.environ.get('TASK_COMPUTING_NODES', "1"))
computing_units = str(os.environ.get('TASK_COMPUTING_UNITS', "1"))
gpu_units = str(os.environ.get('TASK_GPU_UNITS', "0"))
@constraint(processors=[{'processorType':'CPU', 'computingUnits':computing_units}, {'processorType':'GPU', 'computingUnits':gpu_units}])
@multinode(computing_nodes=computing_nodes)
@task(input_top_path=FILE_IN, input_crd_path=FILE_IN, output_log_path=FILE_OUT, output_traj_path=FILE_OUT, output_rst_path=FILE_OUT, input_mdin_path=FILE_IN, input_cpin_path=FILE_IN, input_ref_path=FILE_IN, output_cpout_path=FILE_OUT, output_cprst_path=FILE_OUT, output_mdinfo_path=FILE_OUT,
on_failure="IGNORE", time_out=task_time_out)
def _pmemdmdrun(input_top_path, input_crd_path, output_log_path, output_traj_path, output_rst_path, input_mdin_path, input_cpin_path, input_ref_path, output_cpout_path, output_cprst_path, output_mdinfo_path, properties, **kwargs):
task_config.config_multinode(properties)
try:
PmemdMDRun(input_top_path=input_top_path, input_crd_path=input_crd_path, output_log_path=output_log_path, output_traj_path=output_traj_path, output_rst_path=output_rst_path, input_mdin_path=input_mdin_path, input_cpin_path=input_cpin_path, input_ref_path=input_ref_path, output_cpout_path=output_cpout_path, output_cprst_path=output_cprst_path, output_mdinfo_path=output_mdinfo_path, properties=properties, **kwargs).launch()
except Exception as e:
traceback.print_exc()
raise e
finally:
sys.stdout.flush()
sys.stderr.flush()
def pmemd_mdrun(input_top_path, input_crd_path, output_log_path, output_traj_path, output_rst_path, input_mdin_path=None, input_cpin_path=None, input_ref_path=None, output_cpout_path=None, output_cprst_path=None, output_mdinfo_path=None, properties=None, **kwargs):
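    # skip the task when every requested output file already exists and is non-empty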
if (output_log_path is None or (os.path.exists(output_log_path) and os.stat(output_log_path).st_size > 0)) and \
(output_traj_path is None or (os.path.exists(output_traj_path) and os.stat(output_traj_path).st_size > 0)) and \
(output_rst_path is None or (os.path.exists(output_rst_path) and os.stat(output_rst_path).st_size > 0)) and \
(output_cpout_path is None or (os.path.exists(output_cpout_path) and os.stat(output_cpout_path).st_size > 0)) and \
(output_cprst_path is None or (os.path.exists(output_cprst_path) and os.stat(output_cprst_path).st_size > 0)) and \
(output_mdinfo_path is None or (os.path.exists(output_mdinfo_path) and os.stat(output_mdinfo_path).st_size > 0)) and \
True:
print("WARN: Task PmemdMDRun already executed.")
else:
_pmemdmdrun( input_top_path, input_crd_path, output_log_path, output_traj_path, output_rst_path, input_mdin_path, input_cpin_path, input_ref_path, output_cpout_path, output_cprst_path, output_mdinfo_path, properties, **kwargs)
|
[
"andriopau@gmail.com"
] |
andriopau@gmail.com
|
7ced0a5bfb9b3e5397190462506fd668a94e38af
|
a4185782266d2e596ff264af76776b82f9a3adf8
|
/2015/17_1.py
|
9c1440f546923ffc04173e18138eb3d52c77bae3
|
[] |
no_license
|
PavithraP/advent
|
04f2cfc268e3b8c84ac26dbb9bf300036a7502e3
|
9d9247c3add95263f4db1982d1f96d9f8e8e35ca
|
refs/heads/master
| 2021-01-10T16:02:47.754326
| 2016-12-14T13:50:27
| 2016-12-14T13:50:27
| 47,602,508
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
import math
cont = [11,30,47,31,32,36,3,1,5,3,32,36,15,11,46,26,28,1,19,3]
no = 0
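# each i in range(2**20) is a bitmask selecting a subset of the 20 containers; count subsets summing to 150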
for i in range(int(math.pow(2,20))):
num = i
count = 0
val = 0
    while num > 0:
        if num % 2 == 1:
            val += cont[count]
        num //= 2  # floor division; "/" would yield a float under Python 3
        count += 1
    if val == 150:
        no += 1
print(no)
|
[
"pavithra.p@sanctumnetworks.com"
] |
pavithra.p@sanctumnetworks.com
|
a3111a79779a9ea0cab3118b5d7b33943dbded16
|
98fe6781483ec7ff2a8016916edb2611d5c2e64c
|
/other/text_analysis_report.py
|
9a852707872523ccce57b5824953e76709b213d4
|
[] |
no_license
|
catris25/review_rating_prediction
|
124262d3baed594d812cb1459c3b95cb6a718312
|
fc296a58e39943d2021263e456dbfdd8b972308a
|
refs/heads/master
| 2021-01-16T17:49:47.367954
| 2018-08-14T05:35:44
| 2018-08-14T05:35:44
| 100,015,914
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,208
|
py
|
import numpy as np
import pandas as pd
import re, math
from collections import Counter
import matplotlib.pyplot as plt
from nltk.tokenize import sent_tokenize, word_tokenize
# from nltk.tokenize import RegexpTokenizer, PunktSentenceTokenizer, TweetTokenizer
# REMOVE ALL PUNCTUATIONS AND THEN TOKENIZE THE TEXT
def tokenize_df(df):
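    # count sentences on the raw review text, then count words after stripping non-letters and lowercasing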
df_token = []
for review in df['reviewText']:
temp = review
sent_length = len(sent_tokenize(temp))
temp = re.sub("[^a-zA-Z']", " ", str(review))
temp = temp.replace("'", "")
temp = temp.lower()
word_length = len(word_tokenize(temp))
df_token.append({'reviewText': temp, 'word':word_length, 'sentence':sent_length})
df_token = pd.DataFrame(df_token)
return df_token
input_file='/home/lia/Documents/the_project/dataset/to_use/current/top_30.csv'
# input_file = '/home/lia/Documents/the_project/dataset/to_use/helpfulness/samples/30percent/6.csv'
df = pd.read_csv(input_file)
new_df = tokenize_df(df)
print(new_df.describe())
print(new_df.head(10))
# data = new_df['word']
#
# plt.hist(data, bins=200)
# plt.show()
# def outliers_z_score(ys):
# threshold = 3
#
# mean_y = np.mean(ys)
# stdev_y = np.std(ys)
# z_scores = [(y - mean_y) / stdev_y for y in ys]
# return np.where(np.abs(z_scores) > threshold)
#
# oz = outliers_z_score(data)
# print(oz)
# print('Number of words {}'.format (Counter(new_df['word'])))
# print('Number of sentences {}'.format (Counter(new_df['sentence'])))
# labels, values = zip(*Counter(data).items())
#
# indexes = np.arange(len(labels))
# width = 1
#
# plt.bar(indexes, values, width)
# plt.xticks(indexes + width * 0.5, labels,rotation = "vertical")
# plt.show()
# for w in new_df['word']:
# if w<=10:
# print(w)
too_long = df.loc[new_df['word'] >= 1000, 'reviewText']
too_short = df.loc[new_df['word'] <= 10, 'reviewText']
print('too long:', len(too_long))
print('too short:', len(too_short))
df['word'] = new_df['word']
del_id = too_long.index.append(too_short.index)
temp_df = df.drop(del_id)
print(temp_df.head(10))
#
# temp_df.to_csv('/home/lia/Documents/the_project/dataset/top_10_movies/top_10_clean.csv')
|
[
"blue.star95@outlook.com"
] |
blue.star95@outlook.com
|
d827d71d9c05c7c9a359841ae13e780b7c1620e1
|
0e0bd9d0082bf71918db9f6c92c2cefd32fd23bd
|
/guild/commands/runs_import.py
|
354c23dc47578e9820036cf0779f49107bcd69fb
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
christabella/guildai
|
b911d9758296503c431b571dc4696a3690f44b3d
|
10d34eb9aa02aa4a374c340e75b5d44d9f3d8a25
|
refs/heads/master
| 2022-12-17T18:34:45.766299
| 2020-08-31T12:42:25
| 2020-08-31T12:42:25
| 294,189,964
| 0
| 0
|
Apache-2.0
| 2020-09-09T18:02:13
| 2020-09-09T18:02:12
| null |
UTF-8
|
Python
| false
| false
| 2,500
|
py
|
# Copyright 2017-2020 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import click
from guild import click_util
from . import runs_support
def _ac_archive(**_kw):
return click_util.completion_dir()
def import_params(fn):
click_util.append_params(
fn,
[
runs_support.runs_arg,
click.Argument(("archive",)),
click.Option(
("-m", "--move"),
help="Move imported runs rather than copy.",
is_flag=True,
),
click.Option(
("--copy-resources",),
help="Copy resources for each imported run.",
is_flag=True,
),
runs_support.all_filters,
click.Option(
("-y", "--yes"), help="Do not prompt before importing.", is_flag=True
),
],
)
assert fn.__click_params__[-1].name == "runs", fn.__click_params__
fn.__click_params__[-1].autocompletion = _ac_archive
return fn
@click.command("import")
@import_params
@click.pass_context
@click_util.use_args
@click_util.render_doc
def import_runs(ctx, args):
"""Import one or more runs from `ARCHIVE`.
`ARCHIVE` must be a directory that contains exported runs. Archive
directories can be created using ``guild export``.
You may use ``guild runs list --archive ARCHIVE`` to view runs in
`ARCHIVE`.
By default, resources are NOT copied with each imported run, but
their links are maintained. To copy resources, use
`--copy-resources`.
**WARNING**: Use `--copy-resources` with care as each imported run
will contain a separate copy of each resource!
{{ runs_support.runs_arg }}
If a `RUN` argument is not specified, ``:`` is assumed (all runs
are selected).
{{ runs_support.all_filters }}
"""
from . import runs_impl
runs_impl.import_(args, ctx)
|
[
"g@rre.tt"
] |
g@rre.tt
|
5e92281f35cff75f5d8fd68958f6faad390bb658
|
1711a28e01e40c0164be23536ff109c428f3dd8c
|
/SUMO_compound_mdtraj_analysis.py
|
6d5a65145a08e70043aae6c8b2f867f060261593
|
[] |
no_license
|
sunhuaiyu/mdtraj
|
adafd4b4408b688f23fed659e8fbaefd4ff1bd42
|
d626841025e9f9411e988cee6631edcbf171499d
|
refs/heads/master
| 2020-05-07T20:28:33.381621
| 2019-05-02T00:00:02
| 2019-05-02T00:00:02
| 180,862,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,277
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import mdtraj as md
from matplotlib.gridspec import GridSpec  # needed by atom_pair_dist3 below
from glob import glob
from multiprocessing import Pool
def SUMO_ligand_dist(tr):
#coordinates for the Cgamma of SUMO1_F36, SUMO2_F31, or SUMO3_F31:
select_str = '(resname==PHE and (resid==15 or resid==30 or resid==17)) and (name==CG)'
atom_ix = tr.topology.select(select_str)[0]
a = tr.xyz[:, atom_ix]
# ligand all atom coordinatess:
lig = tr.atom_slice(tr.topology.select('chainid==1'))
# ligand center of mass:
b = md.compute_center_of_mass(lig)
# distance between K37/K32_CA and ligand center of mass:
return (((a - b) ** 2).sum(1)) ** 0.5
# load a trajectory from an HDF5 file (*.h5), downsampling runs longer than 10000 frames
def name2traj(file_name):
tr = md.load(file_name)
if tr.n_frames > 10000:
tr = tr[::10]
return tr
# given trajectory file name in HDF5 format, plot SUMO_ligand_dist
def plot_dist(traj_name):
plt.plot(SUMO_ligand_dist(name2traj(traj_name)), linewidth=1)
plt.ylim(0, 4.5)
title = traj_name.split('.')[0]
plt.title(title)
plt.savefig(title + '.jpg', dpi=600)
plt.close()
# calculate fraction of frames where the distance is less than a cut-off
compound = ['PHG00686', 'SEW05414', 'HTS12268', 'BTB13496']
compound2traj_name = {i: glob('SUMO1_2uyz_{}_F*_5000ns.h5'.format(i)) for i in compound}
traj_files = sum(compound2traj_name.values(), [])  # flatten the per-compound file lists
# traj_dict contains all loaded trajectories
# dist_dict contains all calculated distances;
# accelerate calculation with multiprocessing
def D(file_name):
tr = name2traj(file_name)
d = SUMO_ligand_dist(tr)
return [tr, d]
DD = Pool(48).map(D, traj_files)
traj_dict = {i[0]:i[1][0] for i in zip(traj_files, DD)}
dist_dict = {i[0]:i[1][1] for i in zip(traj_files, DD)}
# distance (nm) threshold
T = 0.7
# calculate the fraction of trajectories with compound at SIM-binding site
for cp in compound:
all_dist = np.array([dist_dict[i] for i in compound2traj_name[cp]]).ravel()
bound_frames, total_frames = sum(all_dist < T), len(all_dist)
fraction = bound_frames/total_frames
print(cp, round(fraction, 3), total_frames//1000)
# plotting: stack all distance plot together for each compound
for cp in compound:
n = len(compound2traj_name[cp])
fig, axs = plt.subplots(nrows=n, ncols=1, sharex=True)
fig.set_figheight(n)
fig.set_figwidth(4)
axs[0].set_title(cp)
for i in np.arange(n):
dc = dist_dict['SUMO1_2uyz_{0}_F{1}_5000ns.h5'.format(cp, i+1)]
bound = dc < T
unbound = np.invert(bound)
length = dc.shape[0]
axs[i].plot(np.arange(length)[unbound], dc[unbound],
'C1.', markersize=0.5, alpha=0.6)
axs[i].plot(np.arange(length)[bound], dc[bound],
'C0.', markersize=0.5, alpha=0.6)
axs[i].set_ylim(0, 4.5)
fig.subplots_adjust(hspace=0)
fig.savefig('SUMO1_2uyz_{}_dist_all_traj.jpg'.format(cp),
dpi=600, bbox_inches='tight')
# extract a centroid frame from each traj ending with significant binding;
# for each compound, superpose all centroids along the SIM-binding pocket
# and save as one pdb file
centroids = {cp:[] for cp in compound}
for cp in compound:
n = len(compound2traj_name[cp])
for i in np.arange(n):
file_name = 'SUMO1_2uyz_{0}_F{1}_5000ns.h5'.format(cp, i+1)
dc = dist_dict[file_name]
bound = dc < T
if sum(bound) > 1000:
tr = traj_dict[file_name][bound]
protein_atoms = tr.topology.select('residue 32 to 56')
compound_atoms = tr.topology.select('chainid==1')
atoms_ix = np.concatenate((protein_atoms, compound_atoms))
tr.superpose(tr, frame=0, atom_indices=atoms_ix)
m = np.empty((tr.n_frames, tr.n_frames)) # rmsd matrix
for i in range(tr.n_frames):
m[i] = md.rmsd(tr, tr, i, atom_indices=atoms_ix)
#compute the centroid frame: the one closest to mean frame
centroid_ix = np.exp(-m/m.std()).sum(1).argmax()
centroids[cp].append(tr[centroid_ix])
print(file_name)
centroids_tr = md.join(centroids[cp])
centroids_tr.superpose(centroids_tr, frame=0, atom_indices=protein_atoms)
centroids_tr.save_pdb('SUMO1_2uyz_{}_bound_centroids.pdb'.format(cp))
# compute RMSD among bound_centroids
from scipy.spatial.distance import squareform
for cp in compound:
tr = md.load('SUMO1_2uyz_{}_bound_centroids.pdb'.format(cp))
    m = np.array([md.rmsd(tr, tr, i, atom_indices=protein_atoms) for i in range(len(tr))])
m = squareform(m, checks=False)
print(cp, min(m), max(m))
# compute atomic distances
T = 0.7
tr2uyz = md.join([md.load('SUMO1_2uyz_{}_400ns.h5'.format(i+1)) for i in range(12)])
cp = 'PHG00686'
d = [dist_dict['SUMO1_2uyz_{0}_F{1}_5000ns.h5'.format(cp, i+1)] for i in range(12)]
tr1cp = md.join([traj_dict['SUMO1_2uyz_{0}_F{1}_5000ns.h5'.format(cp, i+1)][d[i] < T] for i in range(12)])
def atom_pair_dist3(cp, pair='F36CG_R54CZ'):
top = tr2uyz[0].topology
s = pair.split('_')
pair_ix = top.select_pairs('residue=={0} and name=={1}'.format(s[0][1:3], s[0][3:]),
'residue=={0} and name=={1}'.format(s[1][1:3], s[1][3:]))
dist2uyz = md.compute_distances(tr2uyz, atom_pairs=pair_ix, periodic=False)
dist1cp = md.compute_distances(tr1cp, atom_pairs=pair_ix, periodic=False)
fig = plt.figure(figsize=(10, 4.8))
gs = GridSpec(1, 2, width_ratios=[2, 1])
ax0, ax1 = plt.subplot(gs[0]), plt.subplot(gs[1])
ax0.plot(dist2uyz, 'C1.', markersize=1)
ax0.plot(dist1cp, 'C0.', markersize=1, alpha=0.5)
ax0.tick_params(labelsize=15)
ax1.hist(dist2uyz, color='C1', bins=100, linewidth=1,
orientation='horizontal')
ax1.hist(dist1cp, color='C0', alpha=0.6, bins=100, linewidth=1,
orientation='horizontal')
ax1.tick_params(labelsize=15)
ax1.legend(['no compound', 'with {}'.format(cp)], fontsize=15, frameon=0)
fig.tight_layout()
fig.savefig('SUMO1_2uyz_{0}_dist_{1}.jpg'.format(cp, pair), dpi=600)
|
[
"noreply@github.com"
] |
noreply@github.com
|
ce7c48f9f8686e922f04be56fd4bf8ab959eb8de
|
d9d516490b35d4589787dd1c2f02e1cb39967ae4
|
/021 Jogo da adivinhação.py
|
f27f947c56eeb6ea3fe7e4a0cacdc82c2896aca5
|
[] |
no_license
|
Emerson53na/exercicios-python-3
|
e3ec9e88e9d413ee9dee432a2c120447a22a3f3d
|
8f0349a94aca822722c02084c6e3d13cd8c27051
|
refs/heads/master
| 2021-05-19T09:31:31.686547
| 2020-04-22T23:54:41
| 2020-04-22T23:54:41
| 251,631,178
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
from random import choice
print('=-'*20, '\nI will think of a number from 0 to 5. Try to guess it...')
print('=-'*20)
num = int(input('Which number did I think of? '))
lista = [0,1,2,3,4,5]
cpu = choice(lista)
if cpu == num:
    print('The number chosen was: {}\n\033[32mCongratulations, you won!\033[m'.format(cpu))
else:
    print('The number chosen was: {}\n\033[31mYou got it wrong!\033[m'.format(cpu))
|
[
"noreply@github.com"
] |
noreply@github.com
|
171783a41f6cc03ffad67745ac99b75219895fad
|
c37de1b37ea7f6e5d0e4b6715be6f6da342cba9a
|
/examples/vasp/wallet.py
|
794836a62040bbfc7b35e797ac4dca07f265240e
|
[
"Apache-2.0"
] |
permissive
|
fil-blue/client-sdk-python
|
6389d6b40c1af1587b23ecef96a4db5af66e34dd
|
2105e7362a35e69298de0896e17331006374de57
|
refs/heads/master
| 2023-02-15T02:54:40.512655
| 2021-01-05T23:42:40
| 2021-01-05T23:42:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,300
|
py
|
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
from dataclasses import dataclass, field
from http import server
from diem import (
identifier,
jsonrpc,
diem_types,
stdlib,
testnet,
utils,
LocalAccount,
offchain,
)
import logging, threading, typing
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class User:
name: str
    subaddresses: typing.List[bytes] = field(default_factory=lambda: [])
def kyc_data(self) -> offchain.KycDataObject:
return offchain.individual_kyc_data(
given_name=self.name,
surname=f"surname-{self.name}",
address=offchain.AddressObject(city="San Francisco"),
)
def additional_kyc_data(self) -> str:
return f"{self.name}'s secret"
class ActionResult(str):
def merge(self, ret: str) -> "ActionResult":
if ret == ActionResult.SEND_REQUEST_SUCCESS:
return self
return self + ", " + ret
# the following ActionResult is created for testing purpose to indicate specific task is executed
ActionResult.PASS = ActionResult("pass")
ActionResult.REJECT = ActionResult("reject")
ActionResult.SOFT_MATCH = ActionResult("soft_match")
ActionResult.SENT_ADDITIONAL_KYC_DATA = ActionResult("sent_additional_kyc_data")
ActionResult.TXN_EXECUTED = ActionResult("transaction_executed")
ActionResult.SEND_REQUEST_SUCCESS = ActionResult("send_request_success")
BgResult = typing.Union[ActionResult, typing.Tuple[offchain.Action, ActionResult]]
@dataclass
class WalletApp:
"""WalletApp is an example of custodial wallet application"""
@staticmethod
def generate(name: str, client: jsonrpc.Client) -> "WalletApp":
"""generate a WalletApp running on testnet"""
offchain_service_port = offchain.http_server.get_available_port()
account = testnet.gen_vasp_account(client, f"http://localhost:{offchain_service_port}")
w = WalletApp(
name=name,
jsonrpc_client=client,
parent_vasp=account,
offchain_service_port=offchain_service_port,
)
w.add_child_vasp()
return w
name: str
jsonrpc_client: jsonrpc.Client
parent_vasp: LocalAccount
offchain_service_port: int
hrp: str = field(default=identifier.TDM)
saved_commands: typing.Dict[str, offchain.Command] = field(default_factory=lambda: {})
child_vasps: typing.List[LocalAccount] = field(default_factory=lambda: [])
users: typing.Dict[str, User] = field(default_factory=lambda: {})
evaluate_kyc_data_result: typing.Dict[str, ActionResult] = field(default_factory=lambda: {})
manual_review_result: typing.Dict[str, ActionResult] = field(default_factory=lambda: {})
task_queue: typing.List[typing.Callable[["WalletApp"], BgResult]] = field(default_factory=lambda: [])
locks: typing.Dict[str, threading.Lock] = field(default_factory=lambda: {})
def __post_init__(self) -> None:
self.compliance_key = self.parent_vasp.compliance_key
self.offchain_client = offchain.Client(self.parent_vasp.account_address, self.jsonrpc_client, self.hrp)
# --------------------- end user interaction --------------------------
def pay(
self,
user_name: str,
intent_id: str,
desc: typing.Optional[str] = None,
original_payment_reference_id: typing.Optional[str] = None,
    ) -> str:
"""make payment from given user account to intent_id"""
intent = identifier.decode_intent(intent_id, self.hrp)
command = offchain.PaymentCommand.init(
self.gen_user_account_id(user_name),
self.users[user_name].kyc_data(),
intent.account_id,
intent.amount,
intent.currency_code,
original_payment_reference_id=original_payment_reference_id,
description=desc,
)
self.save_command(command)
return command.reference_id()
def gen_intent_id(
self,
user_name: str,
amount: int,
currency: typing.Optional[str] = testnet.TEST_CURRENCY_CODE,
) -> str:
account_id = self.gen_user_account_id(user_name)
return identifier.encode_intent(account_id, currency, amount)
# --------------------- offchain integration --------------------------
def process_inbound_request(
self, x_request_id: str, request_sender_address: str, request_bytes: bytes
) -> typing.Tuple[int, bytes]:
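        # returns (http_status_code, signed JWS response body); offchain errors are reported as HTTP 400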
inbound_command = None
try:
inbound_command = self.offchain_client.process_inbound_request(request_sender_address, request_bytes)
self.save_command(inbound_command)
resp = offchain.reply_request(inbound_command.cid)
code = 200
except offchain.Error as e:
logger.exception(e)
resp = offchain.reply_request(inbound_command.cid if inbound_command else None, e.obj)
code = 400
return (code, offchain.jws.serialize(resp, self.compliance_key.sign))
def run_once_background_job(
self,
) -> BgResult:
if len(self.task_queue) == 0:
return None
task = self.task_queue[0]
ret = task(self)
self.task_queue.remove(task)
return ret
# --------------------- admin --------------------------
def start_server(self) -> server.HTTPServer:
return offchain.http_server.start_local(self.offchain_service_port, self.process_inbound_request)
    def add_child_vasp(self) -> None:
self.child_vasps.append(testnet.gen_child_vasp(self.jsonrpc_client, self.parent_vasp))
def add_user(self, name) -> None:
self.users[name] = User(name)
def vasp_balance(self, currency: str = testnet.TEST_CURRENCY_CODE) -> int:
balance = 0
for vasp in [self.parent_vasp] + self.child_vasps:
balance += utils.balance(self.jsonrpc_client.get_account(vasp.account_address), currency)
return balance
def clear_data(self) -> None:
self.evaluate_kyc_data_result = {}
self.manual_review_result = {}
self.users = {}
self.saved_commands = {}
self.task_queue = []
self.locks = {}
# -------- offchain business actions ---------------
def _send_additional_kyc_data(
self, command: offchain.Command
) -> typing.Tuple[ActionResult, offchain.PaymentCommand]:
command = typing.cast(offchain.PaymentCommand, command)
account_id = command.my_actor_obj().address
_, subaddress = identifier.decode_account(account_id, self.hrp)
user = self._find_user_by_subaddress(subaddress)
new_cmd = command.new_command(additional_kyc_data=user.additional_kyc_data())
return (ActionResult.SENT_ADDITIONAL_KYC_DATA, new_cmd)
def _submit_travel_rule_txn(
self,
command: offchain.Command,
) -> ActionResult:
command = typing.cast(offchain.PaymentCommand, command)
child_vasp = self._find_child_vasp(command.sender_account_address(self.hrp))
testnet.exec_txn(
self.jsonrpc_client,
child_vasp,
stdlib.encode_peer_to_peer_with_metadata_script(
currency=utils.currency_code(command.payment.action.currency),
payee=command.receiver_account_address(self.hrp),
amount=command.payment.action.amount,
metadata=command.travel_rule_metadata(self.hrp),
metadata_signature=bytes.fromhex(command.payment.recipient_signature),
),
)
return ActionResult.TXN_EXECUTED
def _evaluate_kyc_data(self, command: offchain.Command) -> typing.Tuple[ActionResult, offchain.PaymentCommand]:
command = typing.cast(offchain.PaymentCommand, command)
op_kyc_data = command.opponent_actor_obj().kyc_data
ret = self.evaluate_kyc_data_result.get(op_kyc_data.given_name, ActionResult.PASS)
if ret == ActionResult.SOFT_MATCH:
return (ret, command.new_command(status=offchain.Status.soft_match))
return (ret, self._kyc_data_result("evaluate key data", ret, command))
def _manual_review(self, command: offchain.Command) -> typing.Tuple[ActionResult, offchain.PaymentCommand]:
command = typing.cast(offchain.PaymentCommand, command)
op_kyc_data = command.opponent_actor_obj().kyc_data
ret = self.manual_review_result.get(op_kyc_data.given_name, ActionResult.PASS)
return (ret, self._kyc_data_result("review", ret, command))
def _kyc_data_result(
self, action: str, ret: ActionResult, command: offchain.PaymentCommand
) -> offchain.PaymentCommand:
if ret == ActionResult.PASS:
if command.is_receiver():
return self._send_kyc_data_and_receipient_signature(command)
return command.new_command(status=offchain.Status.ready_for_settlement)
return command.new_command(
status=offchain.Status.abort,
abort_code=offchain.AbortCode.reject_kyc_data,
abort_message=f"{action}: {ret}",
)
def _send_kyc_data_and_receipient_signature(
self,
command: offchain.PaymentCommand,
) -> offchain.PaymentCommand:
sig_msg = command.travel_rule_metadata_signature_message(self.hrp)
subaddress = command.receiver_subaddress(self.hrp)
user = self._find_user_by_subaddress(subaddress)
return command.new_command(
recipient_signature=self.compliance_key.sign(sig_msg).hex(),
kyc_data=user.kyc_data(),
status=offchain.Status.ready_for_settlement,
)
# ---------------------- offchain Command ---------------------------
def _send_request(self, command: offchain.PaymentCommand) -> ActionResult:
self.offchain_client.send_command(command, self.compliance_key.sign)
self._enqueue_follow_up_action(command)
return ActionResult.SEND_REQUEST_SUCCESS
def _enqueue_follow_up_action(self, command: offchain.PaymentCommand) -> None:
if command.follow_up_action():
self.task_queue.append(lambda app: app._offchain_business_action(command.reference_id()))
def _offchain_business_action(self, ref_id: str) -> BgResult:
command = self.saved_commands.get(ref_id)
action = command.follow_up_action()
if action == offchain.Action.SUBMIT_TXN:
return (action, self._submit_travel_rule_txn(command))
actions = {
offchain.Action.EVALUATE_KYC_DATA: self._evaluate_kyc_data,
offchain.Action.CLEAR_SOFT_MATCH: self._send_additional_kyc_data,
offchain.Action.REVIEW_KYC_DATA: self._manual_review,
}
ret, new_command = actions[action](command)
self.save_command(new_command)
# return action and action result for test
return (action, ret)
# ---------------------- commands ---------------------------
def save_command(self, command: offchain.Command) -> None:
"""save command locks prior command by reference id, validate and save new command.
in a production implementation, the lock should be database / distributed lock to ensure
atomic process(read and write) command by the reference id.
"""
lock = self.lock(command.reference_id())
if not lock.acquire(blocking=False):
msg = f"command(reference_id={command.reference_id()}) is locked"
raise offchain.command_error(offchain.ErrorCode.conflict, msg)
try:
prior = self.saved_commands.get(command.reference_id())
if command == prior:
return
command.validate(prior)
self.saved_commands[command.reference_id()] = command
if command.is_inbound():
self._enqueue_follow_up_action(command)
else: # outbound
self.task_queue.append(lambda app: app._send_request(command))
finally:
lock.release()
def lock(self, ref_id: str) -> threading.Lock:
return self.locks.setdefault(ref_id, threading.Lock())
# ---------------------- users ---------------------------
def _find_user_by_subaddress(self, subaddress: bytes) -> User:
for u in self.users.values():
if subaddress in u.subaddresses:
return u
raise ValueError(f"could not find user by subaddress: {subaddress.hex()}, {self.name}")
def gen_user_account_id(self, user_name: str) -> str:
subaddress = identifier.gen_subaddress()
self.users[user_name].subaddresses.append(subaddress)
return identifier.encode_account(self._available_child_vasp().account_address, subaddress, self.hrp)
# ---------------------- child vasps ---------------------------
def _available_child_vasp(self) -> LocalAccount:
return self.child_vasps[0]
def _find_child_vasp(self, address: diem_types.AccountAddress) -> LocalAccount:
for vasp in self.child_vasps:
if vasp.account_address == address:
return vasp
raise ValueError(f"could not find child vasp by address: {address.to_hex()}")
|
[
"ilx@fb.com"
] |
ilx@fb.com
|
c17cbfb454897e208edc74fb6406665a5bd37389
|
1debb684db5f2434de3793751afc45edcb2d584f
|
/apps/gtask/templatetags/datetime_tags.py
|
701d99da0ef2467c96ac5c4250f7b89bba8ee4e1
|
[] |
no_license
|
rosscdh/SuperDMon
|
2524aaa1429ce82558723ad5ea8833698380fb85
|
d0e6dd2f9d2237320b19b53b9be37c888f8c40ff
|
refs/heads/master
| 2016-09-05T13:33:55.294196
| 2012-02-07T14:52:34
| 2012-02-07T14:52:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
from datetime import datetime
from django import template
register = template.Library()
@register.filter("timestamp")
def timestamp(value):
try:
return datetime.fromtimestamp(value)
    except (TypeError, ValueError):
return datetime.now()
|
[
"ross.crawford@sedo.com"
] |
ross.crawford@sedo.com
|
993148bc8da60f6cde60e4ddcf631c383dadd161
|
2a42392cf93deaccb39b357411c0b49abec0a132
|
/classcode/anim_and_sound/anim.py
|
840cb919d1038dfaea799ab71a28e4ca7a054444
|
[] |
no_license
|
AKilgore/CS112-Spring2012
|
89aa573b19f1c92055e4832d87c6e5fa0588bccf
|
9fe50b80d71b4dee92101b993c1f58265eb40ee2
|
refs/heads/master
| 2020-12-24T19:27:58.448474
| 2012-04-30T07:23:40
| 2012-04-30T07:23:40
| 3,266,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,213
|
py
|
#!/usr/bin/env python
import pygame
class AnimationFrames(object):
def __init__(self, frames, loops=-1):
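        # frames: iterable of (duration, data) pairs; _times holds each frame's cumulative end time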
self._times = []
self._data = []
total = 0
for t, data in frames:
total += t
self._times.append(total)
self._data.append(data)
self.end = total
self.loops = loops
def get(self, time):
        if self.loops == -1 or time < self.loops * self.end:
time %= self.end
if time > self.end:
return self._data[-1]
idx = 0
        while self._times[idx] < time:
idx += 1
return self._data[idx]
class Animation(object):
def __init__(self, spritesheet, frames):
if not isinstance(frames, AnimationFrames):
frames = AnimationFrames(frames)
self.spritesheet = spritesheet
self.frames = frames
self.time = 0
self.update(0)
    def get_frame_data(self, t):
        return self.frames.get(t)
def update(self, dt):
self.time += dt
self.x, self.y = self.get_frame_data(self.time)
def get_current_frame(self):
return self.spritesheet.get(self.x, self.y)
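# Minimal usage sketch (hypothetical frame data; the spritesheet object is
# assumed to expose get(x, y), as required by get_current_frame above):
#   frames = AnimationFrames([(0.1, (0, 0)), (0.1, (1, 0)), (0.2, (2, 0))], loops=2)
#   anim = Animation(spritesheet, frames)
#   anim.update(dt)                     # advance by dt seconds each tick
#   image = anim.get_current_frame()    # blit this surface to the screen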
|
[
"mak11@hampshire.edu"
] |
mak11@hampshire.edu
|
0a231f8117213d6f61ad97b649f38245442e0a0c
|
afd3464dd2c290b7db5fe379d4374183ea6bd0c3
|
/catkin_ws/build/pick_objects/catkin_generated/pkg.develspace.context.pc.py
|
fd44bd91cf37e798cac9f2a7cf2459aba475bc25
|
[
"MIT"
] |
permissive
|
thatting/thomas-hatting-home-service-robot
|
7d0750367e5b5bfa48ab697a8fd7796b1338a662
|
481490eec2d61303e694593f8f018858c82eaac3
|
refs/heads/master
| 2020-03-22T18:27:40.537755
| 2019-02-14T13:13:14
| 2019-02-14T13:13:14
| 140,461,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "pick_objects"
PROJECT_SPACE_DIR = "/home/nvidia/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
|
[
"thomashatting@gmail.com"
] |
thomashatting@gmail.com
|
58d23eb63af6add22016b753d43de7f6521fbfb1
|
279e26d880c2470d0b60fe55b52f36024ecb28b5
|
/address.py
|
f65092bd69fcdb218f7a868194846dc937236b2d
|
[] |
no_license
|
khang-le/unit5-05
|
0167d40d8070d5889c948a90f13d06ea53581690
|
c9b4afb6f1361dca227d915c7630ff7e5fe3b1cf
|
refs/heads/master
| 2020-09-22T03:51:35.589393
| 2019-11-30T16:27:46
| 2019-11-30T16:27:46
| 225,039,279
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,797
|
py
|
#!/usr/bin/env python3
# Created by : Khang Le
# Created on : September 2019
# This program prints out your full address, using default function parameters
def full_address(first_name, last_name, street_address, city, province,
postal_code, apt_number=None):
# return full address format
full_address = first_name
if apt_number is not None:
full_address = ("\n" + full_address + " " + last_name + "" +
street_address + "" + city + " " +
province + " " + postal_code + " " + apt_number)
    else:
full_address = ("\n" + full_address + " " + last_name + "" +
street_address + "" + city + " " +
province + " " + postal_code)
return full_address.upper()
def main():
    # get user information
apt_number = None
first_name = input("Enter your first name: ")
last_name = input("Enter your last name: ") + "\n"
street_address = input("Enter your address: ") + "\n"
question = input("Do you have an ap.number? (y/n): ")
if question.upper() == "Y" or question.upper() == "YES":
apt_number = input("Enter your apt.number here: ") + "\n"
city = input("Enter your current city: ")
province = input("Enter your current province: ") + " "
postal_code = input("Enter your postal code: ")
if apt_number is not None:
address = full_address(first_name, last_name, street_address,
city, province, postal_code, apt_number)
else:
address = full_address(first_name, last_name, street_address,
city, province, postal_code)
print(("Your shipping informations: {}").format(address))
if __name__ == "__main__":
main()
|
[
"nguyen.khang.le@mths.ca"
] |
nguyen.khang.le@mths.ca
|
b8c045ccf9fbfd0be6b2357b5c866a6f5f8c45fb
|
1426511b59ad3e00a3e037ba3377e41828ae4680
|
/ca_unemp/serializers.py
|
eab56c3ab66c86dedab623a052c2279bdcf95514
|
[] |
no_license
|
hillarykhan/ca-unemp-api
|
4776ed104a026c2d39c44dbbfca60d27f57c50a4
|
7b27c4aebdfe72bb0282fc28abb60ede9e6f0813
|
refs/heads/main
| 2023-08-24T00:58:15.603062
| 2021-10-27T04:41:13
| 2021-10-27T04:41:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
from rest_framework import serializers
from .models import Unemployment
class StatSerializer(serializers.ModelSerializer):
class Meta:
model = Unemployment
fields = '__all__'
|
[
"khan.hillary@gmail.com"
] |
khan.hillary@gmail.com
|
2cc9faf3e8e17c9e733a3ce6a37951dfcd9caabb
|
5602c3572852f8574dff7173fd19c32c48520b28
|
/rigify/rigs/basic/raw_copy.py
|
2ebbe13382bfcfe90dd4692ae3038b58086e1ad6
|
[] |
no_license
|
Dancingbubble/blender-addons
|
58be022f1d8f712ca83acdbd765336e74074a14d
|
a6ee5b0e6f6a945c33b6159fd0536d548b23ccb6
|
refs/heads/master
| 2023-02-19T22:19:53.125675
| 2021-01-01T20:54:21
| 2021-01-01T20:54:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,521
|
py
|
#====================== BEGIN GPL LICENSE BLOCK ======================
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#======================= END GPL LICENSE BLOCK ========================
# <pep8 compliant>
import bpy
from ...utils.naming import strip_org, strip_prefix, choose_derived_bone, is_control_bone
from ...utils.mechanism import copy_custom_properties_with_ui
from ...utils.widgets import layout_widget_dropdown, create_registered_widget
from ...base_rig import BaseRig
from ...base_generate import SubstitutionRig
from itertools import repeat
'''
Due to T80764, bone name handling for 'limbs.raw_copy' was hard-coded in generate.py
class Rig(SubstitutionRig):
""" A raw copy rig, preserving the metarig bone as is, without the ORG prefix. """
def substitute(self):
# Strip the ORG prefix during the rig instantiation phase
new_name = strip_org(self.base_bone)
new_name = self.generator.rename_org_bone(self.base_bone, new_name)
return [ self.instantiate_rig(InstanceRig, new_name) ]
'''
class RelinkConstraintsMixin:
""" Utilities for constraint relinking. """
def relink_bone_constraints(self, bone_name):
if self.params.relink_constraints:
for con in self.get_bone(bone_name).constraints:
self.relink_single_constraint(con)
relink_unmarked_constraints = False
def relink_single_constraint(self, con):
if self.params.relink_constraints:
parts = con.name.split('@')
if len(parts) > 1:
self.relink_constraint(con, parts[1:])
elif self.relink_unmarked_constraints:
self.relink_constraint(con, [''])
def relink_move_constraints(self, from_bone, to_bone, *, prefix=''):
if self.params.relink_constraints:
src = self.get_bone(from_bone).constraints
dest = self.get_bone(to_bone).constraints
for con in list(src):
if con.name.startswith(prefix):
dest.copy(con)
src.remove(con)
def relink_bone_parent(self, bone_name):
if self.params.relink_constraints:
self.generator.disable_auto_parent(bone_name)
parent_spec = self.params.parent_bone
if parent_spec:
old_parent = self.get_bone_parent(bone_name)
new_parent = self.find_relink_target(parent_spec, old_parent or '') or None
self.set_bone_parent(bone_name, new_parent)
return new_parent
def relink_constraint(self, con, specs):
if con.type == 'ARMATURE':
if len(specs) == 1:
specs = repeat(specs[0])
elif len(specs) != len(con.targets):
self.raise_error("Constraint {} actually has {} targets", con.name, len(con.targets))
for tgt, spec in zip(con.targets, specs):
if tgt.target == self.obj:
tgt.subtarget = self.find_relink_target(spec, tgt.subtarget)
elif hasattr(con, 'subtarget'):
if len(specs) > 1:
self.raise_error("Only the Armature constraint can have multiple '@' targets: {}", con.name)
if con.target == self.obj:
con.subtarget = self.find_relink_target(specs[0], con.subtarget)
def find_relink_target(self, spec, old_target):
if spec == '':
return old_target
elif spec in {'CTRL', 'DEF', 'MCH'}:
result = choose_derived_bone(self.generator, old_target, spec.lower())
if not result:
result = choose_derived_bone(self.generator, old_target, spec.lower(), by_owner=False)
if not result:
self.raise_error("Cannot find derived {} bone of bone '{}' for relinking", spec, old_target)
return result
else:
if spec not in self.obj.pose.bones:
self.raise_error("Cannot find bone '{}' for relinking", spec)
return spec
@classmethod
def add_relink_constraints_params(self, params):
params.relink_constraints = bpy.props.BoolProperty(
name = "Relink Constraints",
default = False,
description = "For constraints with names formed like 'base@bonename', use the part after '@' as the new subtarget after all bones are created. Use '@CTRL', '@DEF' or '@MCH' to simply replace the prefix"
)
params.parent_bone = bpy.props.StringProperty(
name = "Parent",
default = "",
description = "Replace the parent with a different bone after all bones are created. Using simply CTRL, DEF or MCH will replace the prefix instead"
)
@classmethod
def add_relink_constraints_ui(self, layout, params):
r = layout.row()
r.prop(params, "relink_constraints")
if params.relink_constraints:
r = layout.row()
r.prop(params, "parent_bone")
layout.label(text="Constraint names have special meanings.", icon='ERROR')
class Rig(BaseRig, RelinkConstraintsMixin):
def find_org_bones(self, pose_bone):
return pose_bone.name
def initialize(self):
self.relink = self.params.relink_constraints
def parent_bones(self):
self.relink_bone_parent(self.bones.org)
def configure_bones(self):
org = self.bones.org
if is_control_bone(org):
copy_custom_properties_with_ui(self, org, org, ui_controls=[org])
def rig_bones(self):
self.relink_bone_constraints(self.bones.org)
def generate_widgets(self):
org = self.bones.org
widget = self.params.optional_widget_type
if widget and is_control_bone(org):
create_registered_widget(self.obj, org, widget)
@classmethod
def add_parameters(self, params):
self.add_relink_constraints_params(params)
params.optional_widget_type = bpy.props.StringProperty(
name = "Widget Type",
default = '',
description = "Choose the type of the widget to create"
)
@classmethod
def parameters_ui(self, layout, params):
col = layout.column()
col.label(text='This rig type does not add the ORG prefix.')
col.label(text='Manually add ORG, MCH or DEF as needed.')
self.add_relink_constraints_ui(layout, params)
pbone = bpy.context.active_pose_bone
if pbone and is_control_bone(pbone.name):
layout_widget_dropdown(layout, params, "optional_widget_type")
#add_parameters = InstanceRig.add_parameters
#parameters_ui = InstanceRig.parameters_ui
def create_sample(obj):
""" Create a sample metarig for this rig type.
"""
# generated by rigify.utils.write_metarig
bpy.ops.object.mode_set(mode='EDIT')
arm = obj.data
bones = {}
bone = arm.edit_bones.new('DEF-bone')
bone.head[:] = 0.0000, 0.0000, 0.0000
bone.tail[:] = 0.0000, 0.0000, 0.2000
bone.roll = 0.0000
bone.use_connect = False
bones['DEF-bone'] = bone.name
bpy.ops.object.mode_set(mode='OBJECT')
pbone = obj.pose.bones[bones['DEF-bone']]
pbone.rigify_type = 'basic.raw_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
bpy.ops.object.mode_set(mode='EDIT')
for bone in arm.edit_bones:
bone.select = False
bone.select_head = False
bone.select_tail = False
for b in bones:
bone = arm.edit_bones[bones[b]]
bone.select = True
bone.select_head = True
bone.select_tail = True
arm.edit_bones.active = bone
return bones
|
[
"angavrilov@gmail.com"
] |
angavrilov@gmail.com
|
5c6efe87ee9b93f8027bf4a15335244acf89f525
|
ae2f3356ab79b77090f8eb927f692c23ee070278
|
/SMA_SES_DES.py
|
6ae165e143cee8c29eb017cdeffa048c74e8509c
|
[
"MIT"
] |
permissive
|
ImPHX13/Demand-Forecasting
|
5cfdbfdd712dc23834f702e347b39bcdf23d1d3d
|
078e58fed6fdd59e8fbae69e8f54d01e784d4be7
|
refs/heads/master
| 2022-11-25T22:07:23.255490
| 2022-11-18T04:26:40
| 2022-11-18T04:26:40
| 265,248,188
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,881
|
py
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima_model import ARIMA
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
#Import dataset
df = pd.read_csv('data.csv',parse_dates=True, dayfirst=True)
df['Date'] = pd.to_datetime(df['Date'], format='%d/%m/%y')
print(df.dtypes)
df.head()
df=df.set_index('Date')
df.index
#Create a time series
ts=df['Quantity']
ts.head()
#Rolling mean and standard deviation calculation to check for stationarity
rolling_mean = ts.rolling(window = 5).mean()
rolling_std = ts.rolling(window = 5).std()
plt.plot(ts, color = 'blue', label = 'Original')
plt.plot(rolling_mean, color = 'red', label = 'Rolling Mean')
plt.plot(rolling_std, color = 'black', label = 'Rolling Std')
plt.legend(loc = 'best')
plt.title('Rolling Mean & Rolling Standard Deviation')
plt.show()
#ADF test for checking stationarity of timeseries
result = adfuller(ts)
print('ADF Statistic: {}'.format(result[0]))
print('p-value: {}'.format(result[1]))
print('Critical Values:')
for key, value in result[4].items():
print('\t{}: {}'.format(key, value))
#Timeseries log transformation
ts_log = np.log(ts)
plt.plot(ts_log)
result = adfuller(ts_log)
print('ADF Statistic: {}'.format(result[0]))
print('p-value: {}'.format(result[1]))
print('Critical Values:')
for key, value in result[4].items():
print('\t{}: {}'.format(key, value))
#Function for ADF test
def get_stationarity(timeseries):
rolling_mean = timeseries.rolling(window=5).mean()
rolling_std = timeseries.rolling(window=5).std()
original = plt.plot(timeseries, color='blue', label='Original')
mean = plt.plot(rolling_mean, color='red', label='Rolling Mean')
std = plt.plot(rolling_std, color='black', label='Rolling Std')
plt.legend(loc='best')
plt.title('Rolling Mean & Standard Deviation')
plt.show(block=False)
result = adfuller(timeseries)
print('ADF Statistic: {}'.format(result[0]))
print('p-value: {}'.format(result[1]))
print('Critical Values:')
for key, value in result[4].items():
print('\t{}: {}'.format(key, value))
rolling_mean = ts_log.rolling(window=5).mean()
ts_log_minus_mean = ts_log - rolling_mean
ts_log_minus_mean.dropna(inplace=True)
get_stationarity(ts_log_minus_mean)
#Exponential Decay
rolling_mean_exp_decay = ts_log.ewm(halflife=5, min_periods=0, adjust=True).mean()
ts_log_exp_decay = ts_log - rolling_mean_exp_decay
ts_log_exp_decay.dropna(inplace=True)
get_stationarity(ts_log_exp_decay)
#Timeseries log shifted to make it stationary
ts_log_shift = ts_log - ts_log.shift()
ts_log_shift.dropna(inplace=True)
get_stationarity(ts_log_shift)
#Timeseries log differenced to make it stationary
ts_log_diff = ts_log - ts_log.shift()
plt.plot(ts_log_diff)
ts_log_diff.dropna(inplace=True)
get_stationarity(ts_log_diff)
#Seasonal Decomposition to check for seasonality and trends
from statsmodels.tsa.seasonal import seasonal_decompose
decomposition = seasonal_decompose(ts_log,freq=7)
trend = decomposition.trend
seasonal = decomposition.seasonal
residual = decomposition.resid
plt.subplot(411)
plt.plot(ts_log, label='Original')
plt.legend(loc='best')
plt.subplot(412)
plt.plot(trend, label='Trend')
plt.legend(loc='best')
plt.subplot(413)
plt.plot(seasonal,label='Seasonality')
plt.legend(loc='best')
plt.subplot(414)
plt.plot(residual, label='Residuals')
plt.legend(loc='best')
plt.tight_layout()
ts_log_decompose = residual
ts_log_decompose.dropna(inplace=True)
get_stationarity(ts_log_decompose)
#ACF and PACF plots to find p and q values
from statsmodels.tsa.stattools import acf, pacf
lag_acf = acf(ts_log_diff, nlags=20)
lag_pacf = pacf(ts_log_diff, nlags=20, method='ols')
plt.subplot(121)
plt.plot(lag_acf)
plt.axhline(y=0,linestyle='--',color='gray')
plt.axhline(y=-1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
plt.axhline(y=1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
plt.title('Autocorrelation Function')
plt.subplot(122)
plt.plot(lag_pacf)
plt.axhline(y=0,linestyle='--',color='gray')
plt.axhline(y=-1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
plt.axhline(y=1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
plt.title('Partial Autocorrelation Function')
plt.tight_layout()
#Gridsearch for ideal p,q parameters based on lowest AIC value
import statsmodels.api as sm
resDiff = sm.tsa.arma_order_select_ic(ts_log, max_ar=7, max_ma=7, ic='aic', trend='c')
print('ARMA(p,q) =',resDiff['aic_min_order'],'is the best.')
#Fitting ARIMA model from the obtained (p,d,q) values
from statsmodels.tsa.arima_model import ARIMA
model = ARIMA(ts_log, order=(1, 1, 0))
results_ARIMA = model.fit(disp=-1)
plt.plot(ts_log_diff)
plt.plot(results_ARIMA.fittedvalues, color='red')
predictions_ARIMA_diff = pd.Series(results_ARIMA.fittedvalues, copy=True)
print(predictions_ARIMA_diff.head())
#Bring back the predictions to original scale
predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()
print(predictions_ARIMA_diff_cumsum.head())
predictions_ARIMA_log = pd.Series(ts_log.iloc[0], index=ts_log.index)  # .ix was removed from pandas; use .iloc
predictions_ARIMA_log = predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum,fill_value=0)
predictions_ARIMA_log.head()
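# Note on the back-transform: the model was fitted on first differences of the
# log series, so the fitted values are predicted log-changes. The cumulative
# sum rebuilds log-level changes relative to the first observation, adding the
# initial log value restores the log series, and np.exp() below returns the
# predictions to the original scale.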
#Plot of Actual vs Forecasted values
predictions_ARIMA = np.exp(predictions_ARIMA_log)
plt.plot(ts)
plt.plot(predictions_ARIMA)
plt.title('ARIMA MAPE: %.4f'% np.mean(np.abs(predictions_ARIMA-ts)/np.abs(ts)))
#RMSE and MAPE calculations
mape = np.mean(np.abs(predictions_ARIMA - ts)/np.abs(ts))
rmse = np.mean((predictions_ARIMA - ts)**2)**.5
print(mape)
print(rmse)
#Summary of ARIMA model
results_ARIMA.summary()
|
[
"noreply@github.com"
] |
noreply@github.com
|
73b6a55d16f9a0ddb2370537646877ecaa9d332e
|
464b6f3a8e3662ecc357735b17c5fe859aa9f3e3
|
/StanCode-Projects/searching_name_system/babygraphics.py
|
2ee9308af259ad456f5b8b65ffae016860eaec6b
|
[
"MIT"
] |
permissive
|
jennywei1995/sc-projects
|
a840f1fcb6e691999a6b8ac31a53c8a5b0f260b8
|
ec192434a967d68fee4f772ae907e5ef5fa556d2
|
refs/heads/main
| 2022-12-30T13:06:44.186249
| 2020-10-20T07:56:43
| 2020-10-20T07:56:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,451
|
py
|
"""
File: babygraphics.py
Name: Jenny Wei
-----------------
SC101 Baby Names Project
Adapted from Nick Parlante's Baby Names assignment by
Jerry Liao.
-----------------
This file will create a canvas and enable the user to
search for baby names' ranks over the decades.
Once the user searches for a name, the corresponding rank in each year
is looked up and added to the canvas; lines are also drawn to connect
each year's rank into a run chart.
"""
import tkinter
import babynames
import babygraphicsgui as gui
FILENAMES = [
'data/full/baby-1900.txt', 'data/full/baby-1910.txt',
'data/full/baby-1920.txt', 'data/full/baby-1930.txt',
'data/full/baby-1940.txt', 'data/full/baby-1950.txt',
'data/full/baby-1960.txt', 'data/full/baby-1970.txt',
'data/full/baby-1980.txt', 'data/full/baby-1990.txt',
'data/full/baby-2000.txt', 'data/full/baby-2010.txt'
]
CANVAS_WIDTH = 1000
CANVAS_HEIGHT = 600
YEARS = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]
GRAPH_MARGIN_SIZE = 20
COLORS = ['red', 'purple', 'green', 'blue']
TEXT_DX = 2
LINE_WIDTH = 2
MAX_RANK = 1000
def get_x_coordinate(width, year_index):
"""
Given the width of the canvas and the index of the current year
in the YEARS list, returns the x coordinate of the vertical
line associated with that year.
Input:
width (int): The width of the canvas
year_index (int): The index of the current year in the YEARS list
Returns:
x_coordinate (int): The x coordinate of the vertical line associated
with the specified year.
"""
x_range = (width - (GRAPH_MARGIN_SIZE * 2)) / len(YEARS)
x_coordinate = int(GRAPH_MARGIN_SIZE + (x_range * year_index))
return x_coordinate
def draw_fixed_lines(canvas):
"""
Erases all existing information on the given canvas and then
draws the fixed background lines on it.
Input:
canvas (Tkinter Canvas): The canvas on which we are drawing.
Returns:
This function does not return any value.
"""
canvas.delete('all') # delete all existing lines from the canvas
# to draw the peripheral line
canvas.create_line(GRAPH_MARGIN_SIZE, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE,
CANVAS_WIDTH - GRAPH_MARGIN_SIZE, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE)
canvas.create_line(GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE, CANVAS_WIDTH - GRAPH_MARGIN_SIZE,
GRAPH_MARGIN_SIZE)
canvas.create_line(GRAPH_MARGIN_SIZE, 0, GRAPH_MARGIN_SIZE, CANVAS_HEIGHT)
    # to draw the lines that evenly divide the canvas according to how many years are provided
for i in range(len(YEARS)):
line_x = get_x_coordinate(CANVAS_WIDTH, i)
canvas.create_line(line_x, 0, line_x, CANVAS_HEIGHT)
canvas.create_text(line_x + TEXT_DX, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, text=YEARS[i], anchor=tkinter.NW)
def draw_names(canvas, name_data, lookup_names):
"""
Given a dict of baby name data and a list of name, plots
the historical trend of those names onto the canvas.
Input:
canvas (Tkinter Canvas): The canvas on which we are drawing.
name_data (dict): Dictionary holding baby name data
lookup_names (List[str]): A list of names whose data you want to plot
Returns:
This function does not return any value.
"""
draw_fixed_lines(canvas) # draw the fixed background grid
    # once the user clicks enter, the data will be shown
y_position = ((CANVAS_HEIGHT - (GRAPH_MARGIN_SIZE * 2)) / (MAX_RANK-1))
for i in range(len(lookup_names)):
# to determine the color of data's text and line
if i <= len(COLORS)-1:
color = COLORS[i]
else:
            # once all the given colors are used, cycle back to the first color
color = COLORS[int((i % len(COLORS)))]
        # to find the dict of the name that contains its rank over the years
baby_dic = name_data[lookup_names[i]]
# to create a year list to check if the year matches the constant year list
new_year_lst = []
for year, rank in baby_dic.items():
new_year_lst.append(year)
        # if the name's data doesn't exist in the given file
for k in range(len(YEARS)):
# assign these names' rank as 1001
if f'{YEARS[k]}' not in new_year_lst:
baby_dic[f'{YEARS[k]}'] = '1001'
# a list that will contain the y value
y_list = []
for j in range(len(YEARS)):
for year in baby_dic:
# to find the rank of the given name in specific year
rank = baby_dic[year]
# to add the text of name and its rank of a specific year to the canvas
line_x = get_x_coordinate(CANVAS_WIDTH, j)
if int(year) == YEARS[j]:
if int(rank) > MAX_RANK:
new_rank = '*'
else:
new_rank = rank
canvas.create_text(line_x + TEXT_DX,
int(y_position * int(rank) + GRAPH_MARGIN_SIZE),
text=f'{lookup_names[i]} {new_rank}', anchor=tkinter.SW, fill=color)
y_list.append(int(y_position * int(rank) + GRAPH_MARGIN_SIZE))
# to draw the line that connects each year's rank data on the canvas
for j in range(len(YEARS) - 1):
line_x = get_x_coordinate(CANVAS_WIDTH, j)
line_x1 = get_x_coordinate(CANVAS_WIDTH, j + 1)
line_y = y_list[j]
line_y1 = y_list[j + 1]
canvas.create_line(line_x, line_y, line_x1, line_y1, width=LINE_WIDTH, fill=color)
# main() code is provided, feel free to read through it but DO NOT MODIFY
def main():
# Load data
name_data = babynames.read_files(FILENAMES)
# Create the window and the canvas
top = tkinter.Tk()
top.wm_title('Baby Names')
canvas = gui.make_gui(top, CANVAS_WIDTH, CANVAS_HEIGHT, name_data, draw_names, babynames.search_names)
# Call draw_fixed_lines() once at startup so we have the lines
# even before the user types anything.
draw_fixed_lines(canvas)
# This line starts the graphical loop that is responsible for
# processing user interactions and plotting data
top.mainloop()
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
848a890e8baab9228465b85ff2aaf300a3bd3890
|
59835adaceb26614d0aa51cf8dda2be5be79bcfb
|
/run_menu.py
|
91721ab25d52dd5a240b4d7c8ac9c851985b7866
|
[] |
no_license
|
Farah-H/python_menu
|
b438e11d649729611ec4aa8ca3a8c9bd0106c3b6
|
7401eb938a71c03a89da30667ebda4d59f75d4ac
|
refs/heads/master
| 2023-01-07T03:28:06.934944
| 2020-11-08T19:43:54
| 2020-11-08T19:43:54
| 310,585,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
from waitstaff_class import Waitstaff
# This part of the program will actually execute taking an order, saving it, and printing it back to the user
#instantiating the waitstaff class
jenny = Waitstaff()
all_orders = [] # a list to store all orders in (could be extended by writing this to CSV output)
# prompting the user for which part of the menu they would like to see
category = input('Would you like to see our starters, mains, desserts or drinks? Please enter "nothing" if you do not want to see the menu.').lower()
# if they are done with (or don't want to read) the menu, they can start to place an order
if input('Are you ready to make an order?').lower() == 'yes':
this_order = jenny.get_order()
print(this_order)
print(jenny.print_order(this_order))
else:
jenny.display_menu(category)
|
[
"61236001+farahmh@users.noreply.github.com"
] |
61236001+farahmh@users.noreply.github.com
|
fa4650d4a8f4d6e62f671e455d2f45eaa553ced4
|
d9b0e4be5b29c6bdb806eeb2b6df340aa26d1152
|
/payloads/shop2.py
|
016c15fba07f2ae5dd9a4a3ca6bbe7da515a824f
|
[
"MIT"
] |
permissive
|
opoudel/sculptbf-bot
|
ba5a4fb3550ffd51620d38d5171413cb89fbe136
|
3d9307bc4506844c8a693db68217d37fe2e76130
|
refs/heads/master
| 2020-12-02T11:34:19.539736
| 2017-07-21T15:39:20
| 2017-07-21T15:39:20
| 96,653,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,652
|
py
|
# -*- coding: utf-8 -*-
import json
def shop(recipient_id):
return json.dumps({
"recipient": {
"id": recipient_id
},
"message": {
"attachment": {
"type": "template",
"payload": {
"template_type": "list",
"top_element_style": "compact",
"elements": [
{
"title": "Lypo - Spheric Vitamin C",
"image_url": "https://sculptbf-bot.herokuapp.com/static/lypo.png",
"subtitle": "Price: $48",
"buttons": [
{
"type": "web_url",
"title": "Buy",
"url": "http://sculptbf.co.nz/index.php/product/lypo-spheric-vit-c/",
"webview_height_ratio": "tall"
}
]
},
{
"title": "ASAP Moisturizer Sun Screen 50+",
"image_url": "https://sculptbf-bot.herokuapp.com/static/asap.png",
"subtitle": "Price: $65",
"buttons": [
{
"type": "web_url",
"title": "Buy",
"url": "http://sculptbf.co.nz/index.php/product/asap-moisturizer-sun-screen-50/",
"webview_height_ratio": "tall"
}
]
},
{
"title": "Cosmedix Purity Clean",
"image_url": "https://sculptbf-bot.herokuapp.com/static/cosmedix.png",
"subtitle": "Price: Not in Stock!!",
"buttons": [
{
"type": "web_url",
"title": "Buy",
"url": "http://sculptbf.co.nz/index.php/product/cosmedix-purity-clean/",
"webview_height_ratio": "tall"
}
]
},
{
"title": "Skin Medica TNS Essential Serum",
"image_url": "https://sculptbf-bot.herokuapp.com/static/skin_medica.png",
"subtitle": "Price: $250",
"buttons": [
{
"type": "web_url",
"title": "Buy",
"url": "http://sculptbf.co.nz/index.php/product/skin-medica-tns-essential-serum/",
"webview_height_ratio": "tall"
}
]
}
],
"buttons": [
{
"title": "View More",
"type": "postback",
"payload": "MORE_SHOPPING_3"
}
]
}
}
}
})
|
[
"opoudel@me.com"
] |
opoudel@me.com
|
7be70ac3312c262cb16fc7fdd8dcb45124a48f14
|
d2b2023261ccdcaf560a2e7b0bab13ecdedacfc9
|
/03/fullbackup.py
|
00cb6631683557864d36d5b2b9b06ca824c29799
|
[] |
no_license
|
lilyef2000/lesson
|
a9d96ffc19f68fa3f044f240de6496b6d69394f6
|
2a5abb00b9bbb8bb36602ea6e1e8c464accc0759
|
refs/heads/master
| 2021-01-10T08:41:14.524421
| 2016-01-01T18:04:04
| 2016-01-01T18:04:04
| 46,460,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 879
|
py
|
#!/usr/bin/python
import sys,os,time,logger
source_file = sys.argv[1]
formated_source_file = source_file.split('/')[-1]
backup_dir = '/home/Administrator/lesson/backup/'
backup_to_file = '''%s%s_%s.tgz'''% (backup_dir,formated_source_file,time.strftime("%Y%m%d%H%M%S",time.localtime()))
def run_backup(runtime='now',exclude_file_name='None'):
if len(sys.argv) == 4:
print '--------exclude file mode--------'
if sys.argv[2] == '-X':
exclude_file_name = sys.argv[3]
backup_cmd = "tar -cvzfX %s %s %s " %(backup_to_file,exclude_file_name,source_file)
else:
print '--------Normal mode:--------'
backup_cmd = "tar -cvzf %s %s |wc -l" %(backup_to_file,source_file)
run_command = os.system(backup_cmd)
if run_command == 0:
logger.record_log('Full Backup','Success','N/A','test')
else:
logger.record_log('Full Backup','Failure','N/A','test')
run_backup()
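# Usage sketch (paths are illustrative; backup_dir above is hard-coded):
#   ./fullbackup.py /etc                 -> normal mode
#   ./fullbackup.py /etc -X exclude.txt  -> exclude-file mode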
|
[
"lilyef2000@gmail.com"
] |
lilyef2000@gmail.com
|
994164e610d278fe042d18fcfb17557acddd8a41
|
47a496e0c7ea9adf35c006d193a88357006a370e
|
/algorithm/TopicB2/TreePagoda.py
|
fcce3df7058ac52a7a5bc94496f5eb20ed821fda
|
[] |
no_license
|
Curious-chen/curriculum-design
|
01ea5aff12c3097f7283571befd7bcfe68149817
|
036f78a62b15ec8e5c8e1013d124f726fd2bebe4
|
refs/heads/master
| 2020-12-06T14:19:29.026158
| 2020-01-08T06:30:50
| 2020-01-08T06:30:50
| 232,483,805
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,727
|
py
|
"""
将正整数排成等边三角形(也叫数塔),三角形的底边有个数,
下图给出了的一个例子。从三角形顶点出发通过一系列相邻整数(在图中用正方形表示),
如何使得到达底边时的总和最大
"""
import numpy as np
"""
https://www.jianshu.com/p/2a7f5cac0d58
"""
"""
动态规划
dp[i][j] = max(dp[i+1][j],dp[i+1][j+1])+date[i][j]
"""
"""
(1) 初始化距离数组dp,令距离dp的最后一行复制树塔的最后一行的值
(2) 从树塔倒数第二行开始,自底向上计算
(3) 判断x点的左右孩子的大小,对应的距离dp = 左右孩子中的较大值加上树塔对应位置值
(4) 重复2、3步骤,直到计算完树塔顶端
"""
class TreePagoda(object):
def __init__(self, pagoda):
self.pagoda = np.array(pagoda)
        # initialize each node's distance to the bottom of the pagoda
dp = self.pagoda.copy()
dp[:-1, :] = 0
self.dp = dp
        # next coordinate on the optimal path
self.next = dict()
def run(self):
index = len(self.pagoda) - 1
for j, value in enumerate(self.pagoda[-1]):
yield self.getIndex(index, j), 0, value
        for i in range(len(self.pagoda) - 2, -1, -1):  # bottom-up pass to find the optimal values
layer = self.pagoda[i]
for j in range(len(layer)):
if layer[j] == 0:
break
self.find(i, j)
yield self.getIndex(i, j), self.getIndex(*self.next[(i, j)]), self.dp[i, j]
def getIndex(self, i, j):
return int(i * (i + 1) / 2 + j)
def find(self, i, j):
if self.dp[i + 1, j] > self.dp[i + 1, j + 1]:
self.dp[i, j] = self.dp[i + 1, j] + self.pagoda[i, j]
self.next[(i, j)] = (i + 1, j)
else:
self.dp[i, j] = self.dp[i + 1, j + 1] + self.pagoda[i, j]
self.next[(i, j)] = (i + 1, j + 1)
def createdPath(self):
cu = (0, 0)
yield self.getIndex(*cu)
while True:
cu = self.next[cu]
yield self.getIndex(*cu)
if cu[0] == len(self.pagoda) - 1:
break
def Test():
treePagoda = np.array(((9, 0, 0, 0, 0),
(12, 15, 0, 0, 0),
(10, 6, 8, 0, 0),
(2, 18, 9, 5, 0),
(19, 7, 10, 4, 16)))
t = TreePagoda(treePagoda)
y = t.run()
for i in range(15):
x = next(y)
print(x)
t.createdPath()
print(t.dp)
for i in t.createdPath():
print(i)
if __name__ == '__main__':
Test()
|
[
"noreply@github.com"
] |
noreply@github.com
|
d047e999cc18d2f81e6f7afc24a22551af5b8e21
|
c96f923cba05f4bfefafa24c02818cc98e8caa14
|
/sum.py
|
86724be39016ee75ac99bd413acdd8c139cca37c
|
[] |
no_license
|
saviorseelf/test
|
0178865ff0fbafe37ee286301669876ecb5e7ae6
|
7438be19b185e16a92a1c3e72cad402b987edc01
|
refs/heads/master
| 2021-05-30T11:11:31.744015
| 2016-01-21T19:15:18
| 2016-01-21T19:15:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
from threading import Thread
i = 0
def add():
global i
for j in range(0,1000000):
i += 1
def sub():
global i
for j in range(0,1000000):
i -= 1
def main():
thread1 = Thread(target = add, args = (),)
thread2 = Thread(target = sub, args = (),)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
print i
main()
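# Note: the printed result is generally nonzero and varies between runs.
# "i += 1" is a read-modify-write sequence, not an atomic operation, so the
# two threads interleave and updates get lost. Guarding each update with a
# shared threading.Lock would make the final value deterministically 0.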
|
[
"andershanssen92@gmail.com"
] |
andershanssen92@gmail.com
|
c82afac573bf870007f2a26a2677f45d8e51d99c
|
04ae1836b9bc9d73d244f91b8f7fbf1bbc58ff29
|
/1233/solution.py
|
c47461e1a3ab14eb3051ffb577ac9f8ff8d4de5e
|
[] |
no_license
|
zhangruochi/leetcode
|
6f739fde222c298bae1c68236d980bd29c33b1c6
|
cefa2f08667de4d2973274de3ff29a31a7d25eda
|
refs/heads/master
| 2022-07-16T23:40:20.458105
| 2022-06-02T18:25:35
| 2022-06-02T18:25:35
| 78,989,941
| 14
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,365
|
py
|
from typing import List


class Node():
    def __init__(self, str_):
        self.str_ = str_
    def __eq__(self, other):
        return self.str_ == other.str_
    def __repr__(self):
        return self.str_
    def __hash__(self):
        return hash(self.str_)
    def __call__(self, str_):
        return Node(str_)
class Solution:
def removeSubfolders(self, folder: List[str]) -> List[str]:
trie = {}
res = []
        def transform(f):
            return list(map(Node, f.strip("/").split("/")))
        folder = list(map(transform, folder))
for f in folder:
trie_pointer = trie
for char in f:
trie_pointer = trie_pointer.setdefault(char, {})
trie_pointer["#"] = "#"
def combine(path):
return "/"+"/".join([str(node) for node in path])
def dfs(trie, path):
nonlocal res
if "#" in trie:
res.append(combine(path))
return
for char in trie:
path.append(char)
dfs(trie[char],path)
path.pop()
dfs(trie, [])
return res
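# Illustrative call (output order assumes Python 3.7+ dict insertion order):
#   Solution().removeSubfolders(["/a", "/a/b", "/c/d", "/c/d/e", "/c/f"])
# returns ["/a", "/c/d", "/c/f"]; the dfs stops at the first "#" marker on a
# path, so any folder nested under an already-kept folder is pruned.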
|
[
"zrc720@gmail.com"
] |
zrc720@gmail.com
|
82405e9839e46249f460ed4e84143cc38d8ef32b
|
55e31bc59b435ccfb60da178d560dedd6248b593
|
/resources/store.py
|
b1c85be12f356d4d78b91c85e8bebff15149a086
|
[] |
no_license
|
kenHsieh25053/flaskapi
|
a456c2ae28127ba422582693949fcb79bff71977
|
da1130585fc722910db3c503946ee5d3b8d66591
|
refs/heads/master
| 2020-03-10T22:44:36.581563
| 2018-04-22T09:07:09
| 2018-04-22T09:07:09
| 129,625,792
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
from flask_restful import Resource
from models.store import StoreModel
class Store(Resource):
def get(self, name):
store = StoreModel.find_by_name(name)
if store:
return store.json()
return {'message': 'Store not found'}, 404
def post(self, name):
if StoreModel.find_by_name(name):
            return {'message': 'A store with name {} already exists.'.format(name)}, 400
store = StoreModel(name)
try:
store.save_to_db()
        except Exception:
            return {'message': 'An error occurred while creating the store'}, 500
return store.json(), 201
def delete(self, name):
store = StoreModel.find_by_name(name)
if store:
store.delete_from_db()
return {'message': 'Store deleted'}
class StoreList(Resource):
def get(self):
return {'stores': [store.json() for store in StoreModel.query.all()]}
|
[
"kw1984@livemail.tw"
] |
kw1984@livemail.tw
|
e9413bfa3cd627adaf3cf6bb968577c84e905767
|
2b84bd7cdcfe9c921fa60fefa2ee1257df33ce38
|
/utils/email_util.py
|
713ad3955e46c9b1c3cf07d310a5c6f928855407
|
[] |
no_license
|
webclinic017/market_monitor
|
f9cfa4a8443b81830abd9e5900509c7dfdab3e37
|
9a8a9b6181e1ab4f5d3dad32641ac941c5e4fabf
|
refs/heads/main
| 2023-07-28T00:39:34.481170
| 2021-09-15T12:39:38
| 2021-09-15T12:39:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,525
|
py
|
# Copyright (c) 2015 Shiye Inc.
# All rights reserved.
#
# Author: zsx <zhaoshouxin@shiyejinrong.com>
# Date: 2019-03-07
import smtplib
from email.mime.text import MIMEText
from docs.config.email_cfg.config import mail_info as m
class SchedulerError(RuntimeError):
def __init__(self, time):
self.time = time
class EmailUtil(object):
def __init__(self):
self.__mail_host = m.mail_host
self.__mail_user = m.mail_user
self.__mail_pass = m.mail_pass
self.__mail_to = m.mail_to
def send_email(self, email_title, email_content):
if email_content is None or len(email_content) == 0:
return
email_struct = MIMEText(email_content, _subtype="plain",
_charset="gb2312")
email_struct["Subject"] = email_title
email_struct["From"] = "".join(["市场预警", "<", self.__mail_user, ">"])
email_struct["To"] = ";".join(self.__mail_to)
# server = smtplib.SMTP()
#linux
        server = smtplib.SMTP_SSL(self.__mail_host, 465)  # connects on construction; no extra connect() needed
server.login(self.__mail_user, self.__mail_pass)
server.sendmail(
email_struct["From"], self.__mail_to, email_struct.as_string())
server.close()
def send_email(err_info, email_title="市场预警测试邮件"):
email_content = err_info
email_util = EmailUtil()
email_util.send_email(email_title, email_content)
if __name__ == '__main__':
send_email("2222")
|
[
"1125191117@qq.com"
] |
1125191117@qq.com
|
b55e30d6f12b49a52c2c808328cfba62b35668cb
|
71711bd2c11a3c0cbbc99bcfa78384d005e07828
|
/puct_mcts/datasets.py
|
f2aa99600a387a45d927073b70ec24d3e7ff95c7
|
[
"BSD-3-Clause"
] |
permissive
|
kastnerkyle/exploring_species_counterpoint
|
9365b2485cd227e375521f769ba1bfbd62c7b629
|
dda762463e64036adeba7efd46c51daaaf906019
|
refs/heads/master
| 2021-09-13T10:55:03.096300
| 2018-04-28T19:00:21
| 2018-04-28T19:00:21
| 103,225,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14
|
py
|
../datasets.py
|
[
"kastnerkyle@gmail.com"
] |
kastnerkyle@gmail.com
|
85dbdd459b8e5552ad1d55043b0a1f5779b84c91
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_20926.py
|
194a6671b01c6bb8bdc4a0d1f301faf7b48d8ed5
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32
|
py
|
# Modifying sys.path
PYTHONPATH
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
aa300723ff8030d337ad1c65d8905af0053a9077
|
760578355ed00ce758591b9a0b4929a3105de530
|
/query/protocols/Gamespy.py
|
ed4437c4d9ffc4c0f2cfdf96f9a4022703cf0062
|
[
"MIT"
] |
permissive
|
SanSource/GameQuery
|
6c385e7607d7ad7fca0782ef8eea839f838268a7
|
b10845bffc872e9ce3d3d5d4016fd1905b3b8b0c
|
refs/heads/master
| 2020-12-29T16:07:39.179677
| 2017-08-20T22:59:33
| 2017-08-20T22:59:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,802
|
py
|
from ..connection import BaseUDP
from ..helpers import async_raise_on_timeout
from ..parser.helpers import QueryBytes
class Gamespy1(BaseUDP):
@async_raise_on_timeout
async def get_info(self):
reader, writer = await self._connection.connect()
query = QueryBytes()
query.append(b'\\info\\', None)
writer.write(query.buffer)
return self.parse_info(QueryBytes(await reader.readline()))
def parse_info(self, response):
list_info = list()
list_split = response.buffer[1:].split(b'\\')
list_info = list(zip(list_split[::2], list_split[1::2]))
return list_info
class Gamespy2(BaseUDP):
@async_raise_on_timeout
async def get_info(self):
reader, writer = await self._connection.connect()
query = QueryBytes()
query.append(b'\xFE\xFD\x00\x43\x4F\x52\x59\xFF\x00\x00', None)
writer.write(query.buffer)
return self.parse_info(QueryBytes(await reader.readline()))
def parse_info(self, response):
# if response[0:5] != b'\x00CORY':
# list_commands = response[5:].split(b'\x00\x00\x00')[0].split(b'\x00')
list_info = list()
list_split = response.buffer[5:].split(b'\x00\x00\x00')[0].split(b'\x00')
list_info = list(zip(list_split[::2], list_split[1::2]))
return list_info
class Gamespy3(BaseUDP):
is_challenge = False
@async_raise_on_timeout
async def get_info(self):
reader, writer = await self._connection.connect()
timestamp = b'\x04\x05\x06\x07' # timestamp
query = QueryBytes()
query.append(b'\xFE\xFD\x09', None)
query.append(timestamp, None)
if self.is_challenge:
writer.write(query.buffer)
response = QueryBytes(await reader.readline())
if response.buffer[:5] != b'\x09' + timestamp:
raise Exception() # fixme
            challenge_int = int(response.buffer[5:-1]).to_bytes(4, 'big', signed=True)
            query.append(challenge_int, None)
query.append(b'\xFF\x00\x00\x01', None)
query.set(b'\x00', QueryBytes.BIG_TYPE_BYTE, 1, offset=2)
writer.write(query.buffer)
return self.parse_info(QueryBytes(await reader.readline()))
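    # The challenge handshake above: the server answers the 0x09 probe with an
    # ASCII integer, which is re-encoded as a 4-byte big-endian signed value
    # and echoed back in the info query so the server accepts the request.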
def parse_info(self, response):
# if response[0] != 0x00 or response[1:5] != timestamp or response[15] != 0x00:
# list_commands = response
# list_commands.remove('p1073741829') # fix for Unreal Tournament 3 because he have invalid data ?
list_info = list()
list_split = response.buffer[16:-2].split(b'\x00\x00\x01')[0].split(b'\x00')
list_info = list(zip(list_split[::2], list_split[1::2]))
return list_info
class Gamespy4(Gamespy3):
is_challenge = True
|
[
"patryk.sondej@gmail.com"
] |
patryk.sondej@gmail.com
|
1c990786b09382998bcbe64210b2d6960dcbb44f
|
6691d0c71ddb92422fddb5d5994b660ee88a2435
|
/SDP_Assignments/Game_of_life/game_of_life_vishnu/GolLogic.py
|
0bbc877b4cafbc1f8997045ede5e4138c3d71dd9
|
[] |
no_license
|
dadi-vardhan/SDP
|
fb1b2e49c014d769add0e6244ca302e4b6939de5
|
f692837c2cda68d8b16d57727d4b727acf545bf2
|
refs/heads/master
| 2023-03-13T10:28:49.060533
| 2021-03-08T16:45:38
| 2021-03-08T16:45:38
| 310,674,824
| 0
| 1
| null | 2020-11-23T09:06:31
| 2020-11-06T18:23:23
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,224
|
py
|
import time
import numpy as np
import matplotlib.pyplot as plt
class Logic(object):
def __init__(self, console):
self.state = console.state
def neighbour_cell_count(self):
        '''
        Counts the live neighbours of every interior cell of the
        current state and returns the resulting array.
        Parameters: none
        Returns: cell [ndarray]
        '''
state = self.state
cell = (state[0:-2,0:-2] + state[0:-2,1:-1] + state[0:-2,2:] +
state[1:-1,0:-2] + state[1:-1,2:] + state[2:,0:-2] +
state[2:,1:-1] + state[2:,2:])
return cell
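    # Note on the slicing above: each of the eight shifted slices is the
    # interior of the grid viewed from one neighbouring offset, so summing
    # them yields the live-neighbour count of every interior cell in a single
    # vectorised expression; the one-cell border acts as implicit padding.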
def cell_propogation_rules(self):
        '''
        Applies the cell-propagation rules (birth and survival)
        to the current state.
        Parameters: none
        Returns : state
        '''
cell = self.neighbour_cell_count()
state = self.state
cell_birth = (cell == 3) & (state[1:-1,1:-1] == 0)
survive = ((cell == 2) | (cell == 3)) & (state[1:-1,1:-1] == 1)
state[...] = 0
state[1:-1,1:-1][cell_birth | survive] = 1
total_cell_birth = np.sum(cell_birth)
self.total_cell_birth = total_cell_birth
total_cell_survived = np.sum(survive)
self.total_cell_survived = total_cell_survived
return state
|
[
"vishnu.dadi@smail.h-brs.de"
] |
vishnu.dadi@smail.h-brs.de
|
4fcd5f7a94f65e8208038c8f3ad8ad80fbf84495
|
0e531fa04060ca129a1c3323c7c403a373e6c00d
|
/pca2tracks.py
|
144f71e0fac0d9de660813931366a2c86113f2fa
|
[] |
no_license
|
zhipenglu/xist_structure
|
6b71f4f718991d22d00d5b0fc8008b6e97581b62
|
0dfb910d0b303fc94d421c66bb2e484b8e72297e
|
refs/heads/master
| 2020-04-22T23:17:33.781817
| 2019-02-14T18:22:33
| 2019-02-14T18:22:33
| 170,736,269
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,819
|
py
|
"""
pca2tracks.py
This script converts the PCA analysis results for RIP/CLIP enrichment to a
minimal number of tracks for display on IGV. This approach provides more useful
information than the heatmap. The input file is *pca_array.pc.txt, and output
are the first few tracks that explain the most variance (e.g. *pc1.bedgraph).
Input format:
Interval NAME MEAN PC1 PC2 ...
hsXIST_0_100 hsXIST_0_100 value value value ...
Example:
cd /Users/lu/Documents/chang/eCLIP/fripsum
python ~/Documents/scripts/pca2tracks.py \
frip_gap_hsXIST_geometric_100nt_pca_array.pc.txt 7 \
frip_gap_hsXIST_geometric_100nt_pca_array
For PCA results at the gene level, the matrix needs to be transposed first:
python ~/Documents/scripts/pca2tracks.py \
frip_gap_hsXIST_geometric_100nt_pca_gene.pc.txt 7 array \
frip_gap_hsXIST_geometric_100nt_pca_gene
"""
import sys
if len(sys.argv) < 5:
print "Usage: python pca2tracks.py pca_file track_number dim output_prefix"
print "dim: gene or array"
sys.exit()
pcafile = open(sys.argv[1], 'r')
ntracks = int(sys.argv[2])
dimension = sys.argv[3]
outputprefix = sys.argv[4]
pcadata = pcafile.readlines()[1:] #input as a list, remove the header line
pcamatrix = [line.strip('\n').split() for line in pcadata]
meanbedgraph = open(outputprefix + "_mean.bedgraph", 'w') #output mean bedgraph
meanout = ''
for row in pcamatrix: meanout += ('\t'.join(row[0].split('_') + row[2:3]) +'\n')
meanbedgraph.write(meanout)
meanbedgraph.close()
for i in range(ntracks): #output major principal component tracks
pctrack = open(outputprefix + '_pc' + str(i+1) + '.bedgraph', 'w')
pctrackout = ''
for row in pcamatrix:
pctrackout += ('\t'.join(row[0].split('_') + row[3+i:4+i]) + '\n')
pctrack.write(pctrackout)
pctrack.close()
pcafile.close()
|
[
"noreply@github.com"
] |
noreply@github.com
|
eb66be29af0e15d10254c571bd6fd7164a88478f
|
3b0a27a6fbaed8a3cba81a70f0142e99b8ce60c7
|
/blender/io_import_sprites/export_scripts.py
|
8a16ddcd39c4a45f817cbd941ce7ef358f390af0
|
[] |
no_license
|
sangohan/flumpkit
|
43b263bdf8076c5e02234b1ccd644370a93ec2d0
|
017a3f94b9363b719a6a502a4c42e66bfc305223
|
refs/heads/master
| 2021-01-16T20:31:51.898801
| 2013-08-07T18:03:58
| 2013-08-07T18:03:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,996
|
py
|
## Author: Daniel Gerson
##GPL 3.0 unless otherwise specified.
import bpy
from bpy.types import Operator
from bpy.types import Menu, Panel
import mathutils
import math
import os
import collections
import json
import re
from bpy.props import (StringProperty,
BoolProperty,
EnumProperty,
IntProperty,
FloatProperty,
CollectionProperty,
)
from bpy_extras.object_utils import AddObjectHelper, object_data_add
from bpy_extras.image_utils import load_image
from mathutils import Vector
from mathutils import Quaternion
#the from part represents directory and filenames
#the import part represents a class or method name etc
from bl_ui.space_view3d_toolbar import View3DPanel
print("LOADING: import_scripts.py!!!!")
from io_import_sprites.common import (
SpritesFunctions,
FlumpProps
)
class EXPORT_OT_flump_to_json(Operator, SpritesFunctions):
bl_idname = "export_sprites.flump_to_json"
bl_label = "Export Json"
bl_options = {'REGISTER', 'UNDO'}
props = bpy.props.PointerProperty(type=FlumpProps)
def execute(self, context):
## self.props = bpy.context.scene.FlumpProps
self.export_to_json(context)
return {'FINISHED'}
#inverts y axis
def transform_point(self, x, y, width, height):
return (x, height - y)
def transform_location(self, x, y):
return (x, -y)
#take transform of plane and convert into pivot
def get_pivot(self, arm_name, bone_name, obj, width, height):
#use relative
#TODO, find by armature name
if not bpy.data.armatures[0].bones[bone_name].use_relative_parent:
tx = width /2.0
ox = -obj.location.x +tx
oy = -obj.location.y
return self.transform_point(ox, oy, width, height)
tx = width /2.0
ox = -obj.location.x +tx
oy = -obj.location.y + (height /2.0)
return self.transform_point(ox, oy, width, height)
def export_to_json(self, context):
#~ jsonFile = get_json_file();
#~ print(jsonFile)
props = bpy.context.scene.FlumpProps
jsonFile = props.flump_library
json_data=open(jsonFile)
data = json.load(json_data)
json_data.close()
#we now have the file in data.
#now create a new movies area
movies = []
data['movies'] = movies
data['frameRate'] = bpy.context.scene.render.fps
movie = {}
movies.append(movie)
movie['id'] = props.movie_id
movie['layers'] = []
#get layers
armature_name = 'Armature'
bpy.context.scene.objects.active = bpy.context.scene.objects[armature_name] #context now armature
arm = bpy.context.scene.objects.active
ob_act = bpy.context.scene.objects.active.animation_data.action
curves = ob_act.fcurves
bone_keys = bpy.context.object.pose.bones.keys() #some of these bones are layers
layers = (b for b in bone_keys if 'flump_layer' in bpy.context.object.pose.bones[b])
#Assumes one symbol per layer
symbols = {}
for child in arm.children:
symbols[child.parent_bone] = child #object, not name
layer_frames ={}
#loop through curves, add keyframes to ALL bones that are influenced by this bone
for curve_id, curve in enumerate(curves) :
obj_name =re.search(r'"(\w+)"', curve.data_path).group(1)
if obj_name not in layer_frames:
layer_frames[obj_name] = []
for key in curve.keyframe_points :
frame, value = key.co
#add frame to ALL objects that share obj_name TODO (parents)
layer_frames[obj_name].append(int(frame))
# do something with curve_id, frame and value
## self.report({'INFO'}, 'EXTRACT {0},{1},{2}'.format(curve_id, frame, value))
layer_frames[obj_name] = sorted(list(set(layer_frames[obj_name])))
#add parents keyframes to child
for bone in bpy.data.armatures[0].bones[:]:
parents = [p.name for p in bone.parent_recursive]
for parent in parents:
layer_frames[bone.name].extend(layer_frames[parent])
layer_frames[bone.name] = sorted(list(set(layer_frames[bone.name])))
sequence_length = int(bpy.context.scene.frame_end)
layer_zdict = {}
#loop through layer_frames
for bone_name in layers:
frames = layer_frames[bone_name]
#add json layer
json_layer = {}
json_layer['name'] = bone_name
json_keyframes = []
json_layer['keyframes'] = json_keyframes
zdepth = None
keyframe_container = {}
#old way, straight
for i in range(len(frames)):
nextframe = sequence_length
if (i+1 < len(frames)):
nextframe = frames[i+1]
json_frame, loc_z = self.create_keyframe(frames[i], bone_name,
armature_name, symbols)
keyframe_container[frames[i]] = json_frame
#fit to curve
constants = (sequence_length, armature_name, bone_name, symbols)
for i in range(len(frames)):
nextframe = sequence_length
if (i+1 < len(frames)):
nextframe = frames[i+1]
self.fit_to_curve(frames[i], nextframe,
keyframe_container, constants)
#sort, add duration, add json to final list,
#rotation hack
frames = sorted(list(set(keyframe_container.keys())))
rot_adjust = 0
for i in range(len(frames)):
nextframe = sequence_length
if (i+1 < len(frames)):
nextframe = frames[i+1]
json_frame = keyframe_container[frames[i]]
json_keyframes.append(json_frame)
json_frame['duration'] = nextframe - frames[i]
#rotation hack (fixes smooth >360 flips, dislikes long transitions).
json_frame['skew'][0] += rot_adjust
json_frame['skew'][1] += rot_adjust
            if nextframe != sequence_length:
rotation1 = json_frame['skew'][0]
rotation2 = keyframe_container[frames[i+1]]['skew'][0] + rot_adjust
if rotation1 - rotation2 > math.pi:
rot_adjust += 2*math.pi
if rotation1 - rotation2 < -math.pi:
rot_adjust -= 2*math.pi
            #find z depth order (useful to do this at the same time)
loc, rotQ, scale = self.get_bone_transform(0, bone_name)
if zdepth is None: #only run on first keyframe
zdepth = loc[2]
if zdepth not in layer_zdict:
layer_zdict[zdepth] = []
layer_zdict[zdepth].append(json_layer)
#add json layers in correct zdepth order, as determined by first keyframe.
for z in sorted(list(layer_zdict.keys())): #not thread safe ;-)
for item in layer_zdict[z]:
movie['layers'].append(item) #json_layer
## self.report({'INFO'}, 'EXTRACT {0},{1},{2}'.format(loc,rotQ.to_euler(),scale))
with open(jsonFile, 'w') as outfile:
json.dump(data, outfile)
return
#adds keyframes to match linear to curve.
def fit_to_curve(self, start_frame, end_frame,
keyframe_container, constants):
#extract constants
sequence_length, armature_name, bone_name, symbols = constants
for i in range(start_frame, end_frame):
transform_start = None
transform_end = None
#generate start keyframe
if start_frame not in keyframe_container:
json_start, transform_start = self.create_keyframe(start_frame, bone_name,
armature_name, symbols)
keyframe_container[start_frame] = json_start
else: #TODO redundant sometimes.
transform_start = self.get_bone_transform(start_frame, bone_name)
#generate end keyframe
if end_frame not in keyframe_container:
json_end, transform_end = self.create_keyframe(end_frame, bone_name,
armature_name, symbols)
keyframe_container[end_frame] = json_end
else: #TODO redundant sometimes.
transform_end = self.get_bone_transform(end_frame, bone_name)
#get transform of frame i
transform_i = self.get_bone_transform(i, bone_name)
#interpolate start and end transforms at i
percent = (i - start_frame)/ (end_frame - start_frame)
loc = transform_start[0] + (transform_end[0] - transform_start[0]) * percent
rz_start = transform_start[1].to_euler().z
rz_end = transform_end[1].to_euler().z
rz = rz_start + (rz_end - rz_start) * percent
scale = transform_start[2] + (transform_end[2] - transform_start[2]) * percent
#test
match = True
if (abs(loc[0] - transform_i[0][0]) > 1): match = 1
if (abs(loc[1] - transform_i[0][1]) > 1): match = 2
ri = transform_i[1].to_euler().z
angle_diff = ((ri - rz)/math.pi*180) % 360
if (angle_diff > 1 and angle_diff < 359): match = 3
#TODO scale
## if match is not True:
## self.report({'INFO'}, 'match {0}'.format(angle_diff))
if match is True: #matches where it is supposed to be
continue
mid_frame = int((start_frame + end_frame)/2)
if mid_frame in [start_frame, end_frame]: return
#recursion
self.fit_to_curve(start_frame, mid_frame,
keyframe_container, constants)
self.fit_to_curve(mid_frame, end_frame,
keyframe_container, constants)
return
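    # fit_to_curve works by bisection: if the linearly interpolated transform
    # at any frame between two keyframes deviates from the actual bone
    # transform (location by more than one unit, rotation by more than one
    # degree), the interval is split at its midpoint and both halves are
    # refined recursively until every frame matches.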
def get_bone_transform(self, frame, bone_name):
bpy.context.scene.frame_set(frame)
pose_bone = bpy.context.object.pose.bones[bone_name]
obj = pose_bone.id_data
matrix = obj.matrix_world * pose_bone.matrix
## loc, rotQ, scale = matrix.decompose()
return matrix.decompose()
def create_keyframe(self, frame, bone_name, armature_name, symbols):
json_frame = {}
json_frame['index'] = frame
#store frame values
loc, rotQ, scale = self.get_bone_transform(frame, bone_name)
#bounding box
local_coords = symbols[bone_name].bound_box[:]
coords = [p[:] for p in local_coords]
width = coords[0][0] * -2
height = coords[0][1] * -2
x, y = self.transform_location(loc[0], loc[1])
json_frame['loc'] =[x, y]
angle = -rotQ.to_euler().z #* math.pi / 180
json_frame['skew'] = [angle, angle]
json_frame['scale'] = [scale[0], scale[1]]
json_frame['pivot'] = self.get_pivot(armature_name, bone_name, symbols[bone_name],
width, height)
json_frame['ref'] = symbols[bone_name].name
return json_frame, (loc, rotQ, scale)
|
[
"daniel@mambo.co.za"
] |
daniel@mambo.co.za
|
3259d0615171353e16e44fb0506a5558587028c0
|
d037002f9d2b383ef84686bbb9843dac8ee4bed7
|
/tutorials/Trash/Distributed-DRL/torch/sac_test/utils/environment.py
|
c86069ea34cea9e7eb5b64d4846270b3babd3d96
|
[
"MIT"
] |
permissive
|
ICSL-hanyang/Code_With_RL
|
4edb23ca24c246bb8ec75fcf445d3c68d6c40b6d
|
1378996e6bf6da0a96e9c59f1163a635c20b3c06
|
refs/heads/main
| 2023-08-15T18:37:57.689950
| 2021-10-18T07:31:59
| 2021-10-18T07:31:59
| 392,944,467
| 0
| 0
| null | 2021-08-05T07:20:57
| 2021-08-05T07:20:56
| null |
UTF-8
|
Python
| false
| false
| 971
|
py
|
import gym
class Environment:
def __init__(self,env_name):
self.env = gym.make(env_name)
self.state_dim = self.env.observation_space.shape[0]
self._max_episode_steps = self.env._max_episode_steps
self.can_run = False
self.state = None
if type(self.env.action_space) == gym.spaces.box.Box : #Continuous
self.action_dim = self.env.action_space.shape[0]
self.is_discrete = False
else :
self.action_dim = self.env.action_space.n
self.is_discrete = True
def reset(self):
assert not self.can_run
self.can_run = True
self.state = self.env.reset()
return self.state
def step(self,action):
assert self.can_run
next_state, reward, done, info = self.env.step(action)
self.state = next_state
        if done:
self.can_run = False
return next_state, reward, done, info
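# Minimal usage sketch (the env name is illustrative):
#   env = Environment("CartPole-v1")
#   state = env.reset()
#   action = env.env.action_space.sample()
#   next_state, reward, done, info = env.step(action)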
|
[
"nzy1414117007@gmail.com"
] |
nzy1414117007@gmail.com
|
28849c5633fc880b6e4043d6ee95027eb192b0fe
|
d77b363dd92fd61ff0f1fc75ffb9836dea201524
|
/main.py
|
f44ecfc93f61f7c047c6480e45d49d15fc1f7556
|
[] |
no_license
|
BalticPinguin/ArgonMD
|
6309ac8cf2aceb115f2615c81b62eaeacb5bf286
|
ddb723e7b34ec8b150acf187aaff3d61df9c0f08
|
refs/heads/master
| 2016-09-03T07:39:16.201770
| 2015-08-10T19:42:32
| 2015-08-10T19:42:32
| 39,498,512
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,318
|
py
|
#!/usr/bin/python3
import physics as ph
import sys
def frange(start, stop, step): #imitate range, but with floats.
r = start
i=0
while r <= stop:
yield r
i+=1
r =start + i*step
def main(argv):
    assert len(argv)==1, "temperature is the only allowed input parameter!"
N=256 #number of particles
#N=32 #number of particles
#length in angstroem, integer required
#L=10 #size of box; needs to be cubic for the method to be working
T=float(argv[0])
dt=10 #10 ps per step
t=3e3 #3 fs of simulation time.
alpha=0.02
L=21 #--> density of rho=1.8 g/cm^3
#L=10.5 #--> density of rho=1.8 g/cm^3
#time-step (in ps)
#t=30e3
output="box.dat"
output2="pairDist.dat"
#now, start simulation
#particle,mass=ph.testBox(N,L, T)
#particle,mass=ph.testForce(N,L, T)
#particle,mass=ph.seed_fcc(N,L,T)
particle,mass=ph.seed_small(N,L,T)
#particle,mass=ph.seed(N,L, T)
force=ph.update_force(particle,L) #get forces
#ph.print_conf(particle,output, output2,0, L)
for time in frange(dt,t,dt):
force,particle=ph.propagate(force,particle,L, dt,mass, alpha,T)
if time >2e3: # don't waste time, printing dumb data.
ph.print_conf(particle,output, output2, time, L)
if __name__=="__main__":
main(sys.argv[1:])
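# Usage sketch (the temperature value is illustrative):
#   python3 main.py 85
# runs the simulation at the given temperature and writes box.dat and
# pairDist.dat as configured above.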
|
[
"tobias.moehle@uni-rostock.de"
] |
tobias.moehle@uni-rostock.de
|
b71909c9661e6baf2be15d0e61a3055456d35d1a
|
290b4c7ca63a975b38e55018cc38bd2766e14639
|
/ORC_app/jni-build/jni/include/tensorflow/tensorflow.bzl
|
bb0e46adddd64bf4473131cda060e9cc6eee198f
|
[
"MIT"
] |
permissive
|
luoabd/EMNIST-ORC
|
1233c373abcc3ed237c2ec86491b29c0b9223894
|
8c2d633a9b4d5214e908550812f6a2489ba9eb72
|
refs/heads/master
| 2022-12-27T14:03:55.046933
| 2020-01-16T15:20:04
| 2020-01-16T15:20:04
| 234,325,497
| 0
| 1
|
MIT
| 2022-12-11T13:32:42
| 2020-01-16T13:25:23
|
C++
|
UTF-8
|
Python
| false
| false
| 20,524
|
bzl
|
# -*- Python -*-
# Parse the bazel version string from `native.bazel_version`.
def _parse_bazel_version(bazel_version):
# Remove commit from version.
version = bazel_version.split(" ", 1)[0]
# Split into (release, date) parts and only return the release
# as a tuple of integers.
parts = version.split('-', 1)
# Turn "release" into a tuple of integers
version_tuple = ()
for number in parts[0].split('.'):
version_tuple += (int(number),)
return version_tuple
# Check that a specific bazel version is being used.
def check_version(bazel_version):
if "bazel_version" in dir(native) and native.bazel_version:
current_bazel_version = _parse_bazel_version(native.bazel_version)
minimum_bazel_version = _parse_bazel_version(bazel_version)
if minimum_bazel_version > current_bazel_version:
fail("\nCurrent Bazel version is {}, expected at least {}\n".format(
native.bazel_version, bazel_version))
pass
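# Example (illustration only): check_version("0.1.4") fails the build when the
# running Bazel release is older than 0.1.4.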
load("//tensorflow/core:platform/default/build_config_root.bzl",
     "tf_cuda_tests_tags")
# List of proto files for android builds
def tf_android_core_proto_sources():
return ["//tensorflow/core:" + p
for p in tf_android_core_proto_sources_relative()]
# As tf_android_core_proto_sources, but paths relative to
# //third_party/tensorflow/core.
def tf_android_core_proto_sources_relative():
return [
"example/example.proto",
"example/feature.proto",
"framework/allocation_description.proto",
"framework/attr_value.proto",
"framework/device_attributes.proto",
"framework/function.proto",
"framework/graph.proto",
"framework/kernel_def.proto",
"framework/log_memory.proto",
"framework/op_def.proto",
"framework/step_stats.proto",
"framework/summary.proto",
"framework/tensor.proto",
"framework/tensor_description.proto",
"framework/tensor_shape.proto",
"framework/tensor_slice.proto",
"framework/types.proto",
"framework/versions.proto",
"lib/core/error_codes.proto",
"protobuf/config.proto",
"protobuf/saver.proto",
"util/memmapped_file_system.proto",
"util/saved_tensor_slice.proto",
"util/test_log.proto",
]
# Returns the list of pb.h headers that are generated for
# tf_android_core_proto_sources().
def tf_android_core_proto_headers():
return ["//tensorflow/core/" + p.replace(".proto", ".pb.h")
for p in tf_android_core_proto_sources_relative()]
def if_cuda(a, b=[]):
return select({
"//third_party/gpus/cuda:cuda_crosstool_condition": a,
"//conditions:default": b,
})
def if_android_arm(a, b=[]):
return select({
"//tensorflow:android_arm": a,
"//conditions:default": b,
})
# Return the options to use for a C++ library or binary build.
# Uses the ":optmode" config_setting to pick the options.
def tf_copts():
return (["-fno-exceptions", "-DEIGEN_AVOID_STL_ARRAY",] +
if_cuda(["-DGOOGLE_CUDA=1"]) +
if_android_arm(["-mfpu=neon"]) +
select({"//tensorflow:android": [
"-std=c++11",
"-DMIN_LOG_LEVEL=0",
"-DTF_LEAN_BINARY",
"-O2",
],
"//tensorflow:darwin": [],
"//conditions:default": ["-pthread"]}))
# Given a list of "op_lib_names" (a list of files in the ops directory
# without their .cc extensions), generate a library for that file.
def tf_gen_op_libs(op_lib_names):
# Make library out of each op so it can also be used to generate wrappers
# for various languages.
for n in op_lib_names:
native.cc_library(name=n + "_op_lib",
copts=tf_copts(),
srcs=["ops/" + n + ".cc"],
deps=(["//tensorflow/core:framework"]),
visibility=["//visibility:public"],
alwayslink=1,
linkstatic=1,)
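# For example (illustration only): tf_gen_op_libs(op_lib_names=["array_ops"])
# defines a cc_library named "array_ops_op_lib" built from ops/array_ops.cc.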
def tf_gen_op_wrapper_cc(name, out_ops_file, pkg=""):
# Construct an op generator binary for these ops.
tool = out_ops_file + "_gen_cc"
native.cc_binary(
name = tool,
copts = tf_copts(),
linkopts = ["-lm"],
linkstatic = 1, # Faster to link this one-time-use binary dynamically
deps = (["//tensorflow/cc:cc_op_gen_main",
pkg + ":" + name + "_op_lib"])
)
# Run the op generator.
if name == "sendrecv_ops":
include_internal = "1"
else:
include_internal = "0"
native.genrule(
name=name + "_genrule",
outs=[out_ops_file + ".h", out_ops_file + ".cc"],
tools=[":" + tool],
cmd=("$(location :" + tool + ") $(location :" + out_ops_file + ".h) " +
"$(location :" + out_ops_file + ".cc) " + include_internal))
# Given a list of "op_lib_names" (a list of files in the ops directory
# without their .cc extensions), generate individual C++ .cc and .h
# files for each of the ops files mentioned, and then generate a
# single cc_library called "name" that combines all the
# generated C++ code.
#
# For example, for:
# tf_gen_op_wrappers_cc("tf_ops_lib", [ "array_ops", "math_ops" ])
#
#
# This will ultimately generate ops/* files and a library like:
#
# cc_library(name = "tf_ops_lib",
# srcs = [ "ops/array_ops.cc",
# "ops/math_ops.cc" ],
# hdrs = [ "ops/array_ops.h",
# "ops/math_ops.h" ],
# deps = [ ... ])
def tf_gen_op_wrappers_cc(name,
op_lib_names=[],
other_srcs=[],
other_hdrs=[],
pkg=""):
subsrcs = other_srcs
subhdrs = other_hdrs
for n in op_lib_names:
tf_gen_op_wrapper_cc(n, "ops/" + n, pkg=pkg)
subsrcs += ["ops/" + n + ".cc"]
subhdrs += ["ops/" + n + ".h"]
native.cc_library(name=name,
srcs=subsrcs,
hdrs=subhdrs,
deps=["//tensorflow/core:core_cpu"],
copts=tf_copts(),
alwayslink=1,)
# Invoke this rule in .../tensorflow/python to build the wrapper library.
def tf_gen_op_wrapper_py(name, out=None, hidden=[], visibility=None, deps=[],
require_shape_functions=False):
# Construct a cc_binary containing the specified ops.
tool_name = "gen_" + name + "_py_wrappers_cc"
if not deps:
deps = ["//tensorflow/core:" + name + "_op_lib"]
native.cc_binary(
name = tool_name,
linkopts = ["-lm"],
copts = tf_copts(),
linkstatic = 1, # Faster to link this one-time-use binary dynamically
deps = (["//tensorflow/core:framework",
"//tensorflow/python:python_op_gen_main"] + deps),
visibility = ["//tensorflow:internal"],
)
# Invoke the previous cc_binary to generate a python file.
if not out:
out = "ops/gen_" + name + ".py"
native.genrule(
name=name + "_pygenrule",
outs=[out],
tools=[tool_name],
cmd=("$(location " + tool_name + ") " + ",".join(hidden)
+ " " + ("1" if require_shape_functions else "0") + " > $@"))
# Make a py_library out of the generated python file.
native.py_library(name=name,
srcs=[out],
srcs_version="PY2AND3",
visibility=visibility,
deps=[
"//tensorflow/python:framework_for_generated_wrappers",
],)
# Define a bazel macro that creates cc_test for tensorflow.
# TODO(opensource): we need to enable this to work around the hidden symbol
# __cudaRegisterFatBinary error. Need more investigations.
def tf_cc_test(name, deps, linkstatic=0, tags=[], data=[], size="medium",
suffix="", args=None):
name = name.replace(".cc", "")
native.cc_test(name="%s%s" % (name.replace("/", "_"), suffix),
size=size,
srcs=["%s.cc" % (name)],
args=args,
copts=tf_copts(),
data=data,
deps=deps,
linkopts=["-lpthread", "-lm"],
linkstatic=linkstatic,
tags=tags,)
def tf_cuda_cc_test(name, deps, tags=[], data=[], size="medium"):
tf_cc_test(name=name,
deps=deps,
tags=tags + ["manual"],
data=data,
size=size)
tf_cc_test(name=name,
suffix="_gpu",
deps=deps + if_cuda(["//tensorflow/core:gpu_runtime"]),
linkstatic=if_cuda(1, 0),
tags=tags + tf_cuda_tests_tags(),
data=data,
size=size)
# Create a cc_test for each of the tensorflow tests listed in "tests"
def tf_cc_tests(tests, deps, linkstatic=0, tags=[], size="medium", args=None):
for t in tests:
tf_cc_test(t, deps, linkstatic, tags=tags, size=size, args=args)
def tf_cuda_cc_tests(tests, deps, tags=[], size="medium"):
for t in tests:
tf_cuda_cc_test(t, deps, tags=tags, size=size)
# Build defs for TensorFlow kernels
# When this target is built using --config=cuda, a cc_library is built
# that passes -DGOOGLE_CUDA=1 and '-x cuda', linking in additional
# libraries needed by GPU kernels.
def tf_gpu_kernel_library(srcs, copts=[], cuda_copts=[], deps=[], hdrs=[],
**kwargs):
cuda_copts = ["-x", "cuda", "-DGOOGLE_CUDA=1",
"-nvcc_options=relaxed-constexpr", "-nvcc_options=ftz=true",
"--gcudacc_flag=-ftz=true"] + cuda_copts
native.cc_library(
srcs = srcs,
hdrs = hdrs,
copts = copts + if_cuda(cuda_copts),
deps = deps + if_cuda([
"//tensorflow/core:cuda",
"//tensorflow/core:gpu_lib",
]),
alwayslink=1,
**kwargs)
def tf_cuda_library(deps=None, cuda_deps=None, copts=None, **kwargs):
"""Generate a cc_library with a conditional set of CUDA dependencies.
When the library is built with --config=cuda:
- both deps and cuda_deps are used as dependencies
- the gcudacc runtime is added as a dependency (if necessary)
- The library additionally passes -DGOOGLE_CUDA=1 to the list of copts
Args:
- cuda_deps: BUILD dependencies which will be linked if and only if:
'--config=cuda' is passed to the bazel command line.
- deps: dependencies which will always be linked.
- copts: copts always passed to the cc_library.
- kwargs: Any other argument to cc_library.
"""
if not deps:
deps = []
if not cuda_deps:
cuda_deps = []
if not copts:
copts = []
native.cc_library(
deps = deps + if_cuda(cuda_deps + ["//tensorflow/core:cuda"]),
copts = copts + if_cuda(["-DGOOGLE_CUDA=1"]),
**kwargs)
def tf_kernel_library(name, prefix=None, srcs=None, gpu_srcs=None, hdrs=None,
deps=None, alwayslink=1, **kwargs):
"""A rule to build a TensorFlow OpKernel.
May either specify srcs/hdrs or prefix. Similar to tf_cuda_library,
but with alwayslink=1 by default. If prefix is specified:
* prefix*.cc (except *.cu.cc) is added to srcs
* prefix*.h (except *.cu.h) is added to hdrs
* prefix*.cu.cc and prefix*.h (including *.cu.h) are added to gpu_srcs.
With the exception that test files are excluded.
For example, with prefix = "cast_op",
* srcs = ["cast_op.cc"]
* hdrs = ["cast_op.h"]
* gpu_srcs = ["cast_op_gpu.cu.cc", "cast_op.h"]
* "cast_op_test.cc" is excluded
With prefix = "cwise_op"
* srcs = ["cwise_op_abs.cc", ..., "cwise_op_tanh.cc"],
* hdrs = ["cwise_ops.h", "cwise_ops_common.h"],
* gpu_srcs = ["cwise_op_gpu_abs.cu.cc", ..., "cwise_op_gpu_tanh.cu.cc",
"cwise_ops.h", "cwise_ops_common.h", "cwise_ops_gpu_common.cu.h"]
* "cwise_ops_test.cc" is excluded
"""
if not srcs:
srcs = []
if not hdrs:
hdrs = []
if not deps:
deps = []
if prefix:
if native.glob([prefix + "*.cu.cc"], exclude = ["*test*"]):
if not gpu_srcs:
gpu_srcs = []
gpu_srcs = gpu_srcs + native.glob([prefix + "*.cu.cc", prefix + "*.h"],
exclude = ["*test*"])
srcs = srcs + native.glob([prefix + "*.cc"],
exclude = ["*test*", "*.cu.cc"])
hdrs = hdrs + native.glob([prefix + "*.h"], exclude = ["*test*", "*.cu.h"])
cuda_deps = ["//tensorflow/core:gpu_lib"]
if gpu_srcs:
tf_gpu_kernel_library(
name = name + "_gpu",
srcs = gpu_srcs,
deps = deps,
**kwargs)
cuda_deps.extend([":" + name + "_gpu"])
tf_cuda_library(
name = name,
srcs = srcs,
hdrs = hdrs,
copts = tf_copts(),
cuda_deps = cuda_deps,
linkstatic = 1, # Needed since alwayslink is broken in bazel b/27630669
alwayslink = alwayslink,
deps = deps,
**kwargs)
def tf_kernel_libraries(name, prefixes, deps=None, **kwargs):
"""Makes one target per prefix, and one target that includes them all."""
for p in prefixes:
tf_kernel_library(name=p, prefix=p, deps=deps, **kwargs)
native.cc_library(name=name, deps=[":" + p for p in prefixes])
# Bazel rules for building swig files.
def _py_wrap_cc_impl(ctx):
srcs = ctx.files.srcs
if len(srcs) != 1:
fail("Exactly one SWIG source file label must be specified.", "srcs")
module_name = ctx.attr.module_name
cc_out = ctx.outputs.cc_out
py_out = ctx.outputs.py_out
src = ctx.files.srcs[0]
args = ["-c++", "-python"]
args += ["-module", module_name]
args += ["-l" + f.path for f in ctx.files.swig_includes]
cc_include_dirs = set()
cc_includes = set()
for dep in ctx.attr.deps:
cc_include_dirs += [h.dirname for h in dep.cc.transitive_headers]
cc_includes += dep.cc.transitive_headers
args += ["-I" + x for x in cc_include_dirs]
args += ["-I" + ctx.label.workspace_root]
args += ["-o", cc_out.path]
args += ["-outdir", py_out.dirname]
args += [src.path]
outputs = [cc_out, py_out]
ctx.action(executable=ctx.executable.swig_binary,
arguments=args,
mnemonic="PythonSwig",
inputs=sorted(set([src]) + cc_includes + ctx.files.swig_includes +
ctx.attr.swig_deps.files),
outputs=outputs,
progress_message="SWIGing {input}".format(input=src.path))
return struct(files=set(outputs))
_py_wrap_cc = rule(attrs={
"srcs": attr.label_list(mandatory=True,
allow_files=True,),
"swig_includes": attr.label_list(cfg=DATA_CFG,
allow_files=True,),
"deps": attr.label_list(allow_files=True,
providers=["cc"],),
"swig_deps": attr.label(default=Label(
"//tensorflow:swig")), # swig_templates
"module_name": attr.string(mandatory=True),
"py_module_name": attr.string(mandatory=True),
"swig_binary": attr.label(default=Label("//tensorflow:swig"),
cfg=HOST_CFG,
executable=True,
allow_files=True,),
},
outputs={
"cc_out": "%{module_name}.cc",
"py_out": "%{py_module_name}.py",
},
implementation=_py_wrap_cc_impl,)
# Bazel rule for collecting the header files that a target depends on.
def _transitive_hdrs_impl(ctx):
outputs = set()
for dep in ctx.attr.deps:
outputs += dep.cc.transitive_headers
return struct(files=outputs)
_transitive_hdrs = rule(attrs={
"deps": attr.label_list(allow_files=True,
providers=["cc"]),
},
implementation=_transitive_hdrs_impl,)
def transitive_hdrs(name, deps=[], **kwargs):
_transitive_hdrs(name=name + "_gather",
deps=deps)
native.filegroup(name=name,
srcs=[":" + name + "_gather"])
# Create a header only library that includes all the headers exported by
# the libraries in deps.
def cc_header_only_library(name, deps=[], **kwargs):
_transitive_hdrs(name=name + "_gather",
deps=deps)
native.cc_library(name=name,
hdrs=[":" + name + "_gather"],
**kwargs)
def tf_custom_op_library_additional_deps():
return [
"//google/protobuf",
"//third_party/eigen3",
"//tensorflow/core:framework_headers_lib",
]
# Helper to build a dynamic library (.so) from the sources containing
# implementations of custom ops and kernels.
def tf_custom_op_library(name, srcs=[], gpu_srcs=[], deps=[]):
cuda_deps = [
"//tensorflow/core:stream_executor_headers_lib",
"//third_party/gpus/cuda:cudart_static",
]
deps = deps + tf_custom_op_library_additional_deps()
if gpu_srcs:
basename = name.split(".")[0]
cuda_copts = ["-x", "cuda", "-DGOOGLE_CUDA=1",
"-nvcc_options=relaxed-constexpr", "-nvcc_options=ftz=true",
"--gcudacc_flag=-ftz=true"]
native.cc_library(
name = basename + "_gpu",
srcs = gpu_srcs,
copts = if_cuda(cuda_copts),
deps = deps + if_cuda(cuda_deps))
cuda_deps.extend([":" + basename + "_gpu"])
native.cc_binary(name=name,
srcs=srcs,
deps=deps + if_cuda(cuda_deps),
linkshared=1,
linkopts = select({
"//conditions:default": [
"-lm",
],
"//tensorflow:darwin": [],
}),
)
def tf_extension_linkopts():
return [] # No extension link opts
def tf_extension_copts():
return [] # No extension c opts
def tf_py_wrap_cc(name, srcs, swig_includes=[], deps=[], copts=[], **kwargs):
module_name = name.split("/")[-1]
# Convert a rule name such as foo/bar/baz to foo/bar/_baz.so
# and use that as the name for the rule producing the .so file.
cc_library_name = "/".join(name.split("/")[:-1] + ["_" + module_name + ".so"])
extra_deps = []
_py_wrap_cc(name=name + "_py_wrap",
srcs=srcs,
swig_includes=swig_includes,
deps=deps + extra_deps,
module_name=module_name,
py_module_name=name)
native.cc_binary(
name=cc_library_name,
srcs=[module_name + ".cc"],
copts=(copts + ["-Wno-self-assign", "-Wno-write-strings"]
+ tf_extension_copts()),
linkopts=tf_extension_linkopts(),
linkstatic=1,
linkshared=1,
deps=deps + extra_deps)
native.py_library(name=name,
srcs=[":" + name + ".py"],
srcs_version="PY2AND3",
data=[":" + cc_library_name])
def tf_py_test(name, srcs, size="medium", data=[], main=None, args=[],
tags=[], shard_count=1, additional_deps=[]):
native.py_test(
name=name,
size=size,
srcs=srcs,
main=main,
args=args,
tags=tags,
visibility=["//tensorflow:internal"],
shard_count=shard_count,
data=data,
deps=[
"//tensorflow/python:extra_py_tests_deps",
"//tensorflow/python:kernel_tests/gradient_checker",
] + additional_deps,
srcs_version="PY2AND3")
def cuda_py_test(name, srcs, size="medium", data=[], main=None, args=[],
shard_count=1, additional_deps=[]):
test_tags = tf_cuda_tests_tags()
tf_py_test(name=name,
size=size,
srcs=srcs,
data=data,
main=main,
args=args,
tags=test_tags,
shard_count=shard_count,
additional_deps=additional_deps)
def py_tests(name,
srcs,
size="medium",
additional_deps=[],
data=[],
tags=[],
shard_count=1,
prefix=""):
for src in srcs:
test_name = src.split("/")[-1].split(".")[0]
if prefix:
test_name = "%s_%s" % (prefix, test_name)
tf_py_test(name=test_name,
size=size,
srcs=[src],
main=src,
tags=tags,
shard_count=shard_count,
data=data,
additional_deps=additional_deps)
def cuda_py_tests(name, srcs, size="medium", additional_deps=[], data=[], shard_count=1):
test_tags = tf_cuda_tests_tags()
py_tests(name=name, size=size, srcs=srcs, additional_deps=additional_deps,
data=data, tags=test_tags, shard_count=shard_count)
|
[
"abdellah.lahnaoui@gmail.com"
] |
abdellah.lahnaoui@gmail.com
|
cdc9c0fe13be7945a2a837c9dfa2b6ee764b8977
|
8b881e5a11a4b69362edf70929570964644aab75
|
/src/ai/AlphaBetaOwnSeeds.py
|
eb7539b7632553ff9285607c9d3a507bb67ba13b
|
[] |
no_license
|
BpGameHackSoc/kalahai
|
18b84bf528c6e5e12e2ac0b0abb3052fec4b81c8
|
abc2ce1aa4c766fd1cadb62bf3bf4d92b9fe5f56
|
refs/heads/master
| 2021-09-02T10:16:27.051245
| 2017-12-23T10:23:52
| 2017-12-23T10:25:30
| 110,739,312
| 2
| 1
| null | 2017-11-19T14:39:11
| 2017-11-14T20:10:11
|
Python
|
UTF-8
|
Python
| false
| false
| 760
|
py
|
import numpy as np
from . import AlphaBeta
from model import Side
class AlphaBetaOwnSeeds(AlphaBeta.AlphaBeta):
def evaluate(self,state):
south_holes = state.board.get_holes(Side.SOUTH)
north_holes = state.board.get_holes(Side.NORTH)
south_store = state.board.get_store(Side.SOUTH)
north_store = state.board.get_store(Side.NORTH)
        val = (self.keepable_seeds(south_holes) - self.keepable_seeds(north_holes)) * 0.25
val += south_store - north_store
return val
    def keepable_seeds(self, buckets):
        # Clip each hole's seed count by a descending cap (size-1 down to 0)
        # and sum the clipped counts.
        size = len(buckets)
        clipper = np.array(range(size, 0, -1)) - np.ones(size)
        return np.sum(np.clip(buckets, None, clipper))
def move(self, state):
return super().move(state)
|
[
"gergely.halacsy@gmail.com"
] |
gergely.halacsy@gmail.com
|
d930901a91772e4d664bb3b770867aa984a3e77f
|
08aadcd04337ee45b01e6bd7f5cc9d87cd433bfd
|
/basic_projects/2D lists and nested loops.py
|
3098b812d793bcbe6b1a711c279db30a87fbdf59
|
[] |
no_license
|
AnthonyPerugini/Training_projects
|
eb12acc36f0c2562ea9da6ca76221ea32bd73d38
|
bf4d8027740abedbcce296675a7484fae5e1095f
|
refs/heads/master
| 2021-03-10T04:43:14.473043
| 2020-03-16T18:31:32
| 2020-03-16T18:31:32
| 246,419,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 137
|
py
|
number_grid = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[0]
]
for row in number_grid:
for col in row:
print(col)
|
[
"Anthony.r.perugini@gmail.com"
] |
Anthony.r.perugini@gmail.com
|
fcafef610287029b1a2c87cfaac8bd9b6790c9b0
|
285f136156a925b05b5d51f3a4021813a455b971
|
/backend/handlers/__init__.py
|
73a855f9a4321fcd162797bf6c8a09cc1dbcc598
|
[
"Apache-2.0"
] |
permissive
|
kubikvid/weather-this-day
|
41185aacbbdcf65578576bf6f5974d00a00a3275
|
ada662f191ee122190168265d3d50e925ef26630
|
refs/heads/master
| 2020-05-21T21:24:21.959135
| 2019-05-13T01:42:14
| 2019-05-13T01:42:14
| 186,151,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
# Copyright (c) 2019. Lorem ipsum dolor sit amet, consectetur adipiscing elit.
# Morbi non lorem porttitor neque feugiat blandit. Ut vitae ipsum eget quam lacinia accumsan.
# Etiam sed turpis ac ipsum condimentum fringilla. Maecenas magna.
# Proin dapibus sapien vel ante. Aliquam erat volutpat. Pellentesque sagittis ligula eget metus.
# Vestibulum commodo. Ut rhoncus gravida arcu.
from handlers import history
|
[
"moonquiz@ya.ru"
] |
moonquiz@ya.ru
|
fe5b26c41e27f960c84721814d918ba912d334fe
|
2aee7676daad10456a34fe23ce952966c05718ff
|
/regular_expression/q3.py
|
0b17a5f0733331c189c670e8f860c6394bec5ba8
|
[] |
no_license
|
sharonsabu/pythondjango2021
|
405b45bc08717301315016d7ccb9b4a03c631475
|
1dfb60b92296bc85248bad029a3fd370745623a6
|
refs/heads/master
| 2023-04-18T19:39:40.378956
| 2021-05-02T05:48:56
| 2021-05-02T05:48:56
| 333,471,566
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
from re import *
pattern="a{2,3}" #checks for min 2 and max 3 no of "a"
matcher=finditer(pattern,"aaaacaabbaaab")
for match in matcher:
print(match.start())
print(match.group())
|
[
"sharonsabu100@gmail.com"
] |
sharonsabu100@gmail.com
|
a77ed31a71760f495bdfed54cbe1295c506714c3
|
6dd65ba20f60ee02e5d449d1bbe61865a993ab3b
|
/Monthly_Bussiest_Route.py
|
9d0eb6636f660d57d29a3156c505f071b3bb0262
|
[] |
no_license
|
subhanshugpt07/Aviation_Big_Data_2017
|
a065aa52afaa287d489b88cd89c2df3544521fb0
|
fd5d68c6f9dfa92853ba67e2cfceda8d15f602bb
|
refs/heads/master
| 2021-07-03T08:20:12.739348
| 2017-09-24T23:21:44
| 2017-09-24T23:21:44
| 104,683,057
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,243
|
py
|
import csv

from pyspark import SparkContext
from pyspark.sql import HiveContext, Row, SQLContext
from pyspark.sql.functions import *
from pyspark.sql.types import *
def parseCSV(idx, part):
if idx==0:
part.next()
for p in csv.reader(part):
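        # p[14] and p[23] are the ORIGIN and DEST airport codes; ordering the
        # pair lexicographically collapses A->B and B->A into a single ROUTE.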
if p[14] < p[23]:
if p[0] == '2014':
yield Row(YEAR = p[0],
MONTH = int(p[2]),
ORIGIN=p[14],
ORIGIN_AIRPORT_ID = p[11],
DEST = p[23],
DEST_AIRPORT_ID = p[20],
ROUTE = (p[14],p[23]))
elif p[0] == '2015':
yield Row(YEAR = p[0],
MONTH = int(p[2])+12,
ORIGIN=p[14],
ORIGIN_AIRPORT_ID = p[11],
DEST = p[23],
DEST_AIRPORT_ID = p[20],
ROUTE = (p[14],p[23]))
elif p[0] == '2016':
yield Row(YEAR = p[0],
MONTH = int(p[2])+24,
ORIGIN=p[14],
ORIGIN_AIRPORT_ID = p[11],
DEST = p[23],
DEST_AIRPORT_ID = p[20],
ROUTE = (p[14],p[23]))
else:
pass
else:
            if p[0] == '2014':
                yield Row(YEAR = p[0],
                    MONTH = int(p[2]),
                    ORIGIN = p[23],
                    ORIGIN_AIRPORT_ID = p[20],
                    DEST = p[14],
                    DEST_AIRPORT_ID = p[11],
                    ROUTE = (p[23],p[14]))
            elif p[0] == '2015':
                yield Row(YEAR = p[0],
                    MONTH = int(p[2])+12,
                    ORIGIN = p[23],
                    ORIGIN_AIRPORT_ID = p[20],
                    DEST = p[14],
                    DEST_AIRPORT_ID = p[11],
                    ROUTE = (p[23],p[14]))
            elif p[0] == '2016':
                yield Row(YEAR = p[0],
                    MONTH = int(p[2])+24,
                    ORIGIN = p[23],
                    ORIGIN_AIRPORT_ID = p[20],
                    DEST = p[14],
                    DEST_AIRPORT_ID = p[11],
                    ROUTE = (p[23],p[14]))
            else:
                pass
def main(sc):
spark = HiveContext(sc)
sqlContext = HiveContext(sc)
print "holaaaaa"
rows = sc.textFile('../lmf445/Flight_Project/Data/864625436_T_ONTIME_2*.csv').mapPartitionsWithIndex(parseCSV)
df = sqlContext.createDataFrame(rows)
busiest_route_month_pivot = \
df.select('ORIGIN_AIRPORT_ID', 'ROUTE', 'MONTH') \
.groupBy('ROUTE').pivot('MONTH').count()
busiest_route_month_pivot.toPandas().to_csv('Output/MonthlyRoutes.csv')
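    # The pivot yields one row per ROUTE and one count column per MONTH value
    # (1-12 for 2014, 13-24 for 2015, 25-36 for 2016).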
if __name__ == "__main__":
sc = SparkContext()
main(sc)
# In[ ]:
|
[
"sg4595@nyu.edu"
] |
sg4595@nyu.edu
|
a254ecc9342fa1c6acec1d6dd7d1b9ee994945ee
|
8ae3e86fd736b65825a8c810560a73d17da74575
|
/solrdataimport/dataload/cqlbuilder.py
|
ee377653b8dbfc43530f90d778226878ff1f73fd
|
[
"Apache-2.0"
] |
permissive
|
pisceanfoot/solrdataimport
|
68d12e6ab96f7ed856e8187806981af8635920d6
|
a7f97cda5eb4ff569e67e5636a9217e9fe1a5fb5
|
refs/heads/master
| 2021-01-10T06:17:16.154994
| 2018-03-17T07:14:09
| 2018-03-17T07:14:09
| 49,885,709
| 2
| 1
|
Apache-2.0
| 2018-03-17T07:14:10
| 2016-01-18T15:29:32
|
Python
|
UTF-8
|
Python
| false
| false
| 2,135
|
py
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals, \
with_statement
import logging
from solrdataimport.cass.cassClient import CassandraClient
from solrdataimport.cass.cassSchema import CassSchema
logger = logging.getLogger(__name__)
class CqlBuilder(object):
@classmethod
def buildCacheKey(cls, cql, params):
return cql + '_'.join(map(str, params))
@classmethod
def buildCql(cls, fullDataImport, table, table_key, rowKey=None):
cql = 'select * from {0}'.format(table)
appendKey = []
if not fullDataImport and table_key:
appendKey = table_key
if rowKey:
for key in rowKey:
appendKey.append(key)
if appendKey:
key = ' = ? and '.join(appendKey)
cql = cql + ' where ' + key + ' = ?;'
return cql
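    # For example (illustration only, assuming a table "users" keyed by "id"):
    #   buildCql(False, "users", ["id"]) -> 'select * from users where id = ?;'
    #   buildCql(True, "users", ["id"])  -> 'select * from users'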
@classmethod
def buildParam(cls, fullDataImport, table, table_key, row=None, rowKey=None, **kwargs):
if fullDataImport:
return None
params = []
if table_key:
for x in table_key:
if x not in kwargs:
                    raise Exception('key %s not found in param' % x)
column_type = cls.__fetchFieldType(table, x)
params.append(CassandraClient.wrapper(column_type, kwargs.pop(x)))
if row and rowKey:
for key in rowKey:
fetchKey = rowKey[key].lower()
column_type = cls.__fetchFieldType(table, key)
params.append(CassandraClient.wrapper(column_type, row[fetchKey]))
return params
@classmethod
def __fetchFieldType(cls, table, field):
        logger.debug('fetch field type for table "%s" field "%s"', table, field)
schema = CassSchema.load(table)
field_name_lower = field.lower()
if field_name_lower in schema:
return schema[field_name_lower]
else:
logger.error('field "%s" not in table "%s"', field, table)
            raise Exception('field "%s" not in table "%s"' % (field, table))
|
[
"pisceanfoot@gmail.com"
] |
pisceanfoot@gmail.com
|
07bf5e876ec76acc417629cf2befc0a819977d2d
|
4a4d727cab138c5a3bf3bfb05d48084ba06bd5d4
|
/Python master/MODULO 7 - API/primeiro_api.py
|
1bd2268ba3f4021a784fbde76a9f426ed37e5ca4
|
[] |
no_license
|
RoniNunes/python
|
9e9d61e69deab02ee9e9955a5e95c7e6ef610e7a
|
52f6b068f469fc63907b84f67e6005f9b7964442
|
refs/heads/master
| 2023-06-16T11:37:35.046750
| 2021-07-02T11:37:32
| 2021-07-02T11:37:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,621
|
py
|
from flask import Flask, jsonify, request
app = Flask(__name__)
postagens = [
{
'titulo': 'Api com Flask',
'autor': 'Roni nunes'
},
{
'titulo': 'Voce ja usou o Selenium?',
'autor': 'Roni nunes'
},
{
'titulo': 'Como instalar o python',
'autor': 'Roni nunes'
}
]
nova_postagem = [
{
'titulo': 'Nova postagem com Flask',
'autor': 'Roni nunes'
}]
@app.route('/postagens', methods=['GET'])
def obter_todas_postagens():
return jsonify(postagens), 200
@app.route('/postagens/<int:postagem_id>', methods=['GET'])
def obter_postagens_por_id(postagem_id): # We pass the ID that we want to query.
return jsonify(postagens[postagem_id]), 200
@app.route('/postagens', methods=['POST'])
def criar_postagem():
postagem = request.get_json()
postagens.append(postagem)
return jsonify({'mensagem': 'Recurso criado com sucesso'}), 200
@app.route('/postagens/<int:postagem_id>', methods=['PUT'])
def atualizar_postagem(postagem_id): # We pass the ID of the post to update.
resultado = request.get_json()
postagens[postagem_id].update(resultado)
return jsonify(postagens[postagem_id]), 200
@app.route('/postagens/<int:postagem_id>', methods=['DELETE'])
def excluir_postagem(postagem_id): # We pass the ID of the post to delete.
postagem = postagens[postagem_id]
del postagens[postagem_id]
return jsonify({'mensagem': 'A postagem foi excluida com sucesso'}), 200
if __name__ == '__main__':
app.run(port=5000, host='localhost',debug=True)
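# Example requests against the running server (illustration only):
#   curl http://localhost:5000/postagens
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"titulo": "Novo post", "autor": "Roni nunes"}' \
#        http://localhost:5000/postagens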
|
[
"noreply@github.com"
] |
noreply@github.com
|
7fd9141000ee1b4be8b4f5dd9b969abf33c9eac9
|
d5dbae52bbfded54436a665f614a2793029371ea
|
/models/model2csv.py
|
64ec1c92bead5bf32dd3801e0c4c0df6e534c482
|
[
"Apache-2.0"
] |
permissive
|
bmarggraff/allie
|
88b97acffebe2c1876b379d478b293bfb9edfefb
|
2e2f8780f0a42229b582703455e9ce1d42cf9f96
|
refs/heads/master
| 2022-11-28T02:27:55.100030
| 2020-08-07T19:55:46
| 2020-08-07T19:55:46
| 285,911,411
| 1
| 0
| null | 2020-08-07T20:03:08
| 2020-08-07T20:03:07
| null |
UTF-8
|
Python
| false
| false
| 2,962
|
py
|
'''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | |
| . . | ___ __| | ___| |___
| |\/| |/ _ \ / _` |/ _ \ / __|
| | | | (_) | (_| | __/ \__ \
\_| |_/\___/ \__,_|\___|_|___/
Creates an excel sheet of all currently trained models with their model performances;
useful to summarize all modeling sessions quickly; outputs to current directory.
Usage: python3 model2csv.py
'''
import os, json
import pandas as pd
def id_folder():
curdir=os.getcwd()
directories=['audio_models', 'text_models', 'image_models', 'video_models', 'csv_models']
metrics_list=list()
model_names=list()
for i in range(len(directories)):
try:
os.chdir(curdir)
os.chdir(directories[i])
listdir=os.listdir()
folders=list()
for j in range(len(listdir)):
if listdir[j].find('.') < 0:
folders.append(listdir[j])
curdir2=os.getcwd()
for j in range(len(folders)):
os.chdir(curdir2)
os.chdir(folders[j])
os.chdir('model')
listdir2=os.listdir()
jsonfile=folders[j]+'.json'
for k in range(len(listdir2)):
if listdir2[k] == jsonfile:
g=json.load(open(jsonfile))
metrics_=g['metrics']
metrics_list.append(metrics_)
model_names.append(jsonfile[0:-5])
except:
pass
# print(directories[i])
# print('does not exist...')
return metrics_list, model_names
metrics_list, model_names=id_folder()
accuracies=list()
roc_curve=list()
for i in range(len(model_names)):
accuracies.append(metrics_list[i]['accuracy'])
roc_curve.append(metrics_list[i]['roc_auc'])
data={'model names': model_names,
'accuracies': accuracies,
'roc_auc': roc_curve}
print(model_names)
print(accuracies)
print(roc_curve)
df=pd.DataFrame.from_dict(data)
df.to_csv('models.csv')
|
[
"noreply@github.com"
] |
noreply@github.com
|
31b58b74e967def34fcd7730cc4170cb953bf04e
|
d23ddee7237f138d003b44d859d12a9f8385cfce
|
/app.py
|
acc6da3dca2bd6c32be0c58631cb41c1bbe758e2
|
[] |
no_license
|
Kelby-Wilson/sqlalchemy_challenge
|
a6497bde709e8edf838949b75cf1e2a7fa011074
|
3b8ba0e3a1ac237ae319532eba892445b5be4912
|
refs/heads/master
| 2022-12-03T17:29:22.003559
| 2020-08-26T16:52:57
| 2020-08-26T16:52:57
| 262,371,228
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,189
|
py
|
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
from flask import Flask, jsonify
import datetime as dt
# Relative Date
###
# Database Setup
###
engine = create_engine("sqlite:///hawaii.sqlite", connect_args={'check_same_thread': False}, echo=True)
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
###
# Flask Setup
###
app = Flask(__name__)
###
# Flask Routes
###
@app.route("/")
def welcome():
"""List all available api routes."""
return"""<html>
<h1>List of all available Honolulu, HI API routes</h1>
<ul>
<br>
<li>
Return a list of precipitations from last year:
<br>
<a href="/api/v1.0/precipitation">/api/v1.0/precipitation</a>
</li>
<br>
<li>
Return a JSON list of stations from the dataset:
<br>
<a href="/api/v1.0/stations">/api/v1.0/stations</a>
</li>
<br>
<li>
Return a JSON list of Temperature Observations (tobs) for the previous year:
<br>
<a href="/api/v1.0/tobs">/api/v1.0/tobs</a>
</li>
<br>
<li>
Return a JSON list of tmin, tmax, tavg for the dates greater than or equal to the date provided:
<br>Replace <start> with a date in Year-Month-Day format.
<br>
<a href="/api/v1.0/2017-01-01">/api/v1.0/2017-01-01</a>
</li>
<br>
<li>
Return a JSON list of tmin, tmax, tavg for the dates in range of start date and end date inclusive:
<br>
Replace <start> and <end> with a date in Year-Month-Day format.
<br>
<br>
<a href="/api/v1.0/2017-01-01/2017-01-07">/api/v1.0/2017-01-01/2017-01-07</a>
</li>
<br>
</ul>
</html>
"""
@app.route("/api/v1.0/precipitation")
def precipitation():
# Docstring
"""Return a list of precipitations from last year"""
    # Design a query to retrieve the last 12 months of precipitation data
max_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
# Get the first element of the tuple
max_date = max_date[0]
    # Calculate the date 1 year ago from the most recent date in the data
    # (366 days so that the first day of the range is included)
year_ago = dt.datetime.strptime(max_date, "%Y-%m-%d") - dt.timedelta(days=366)
# Perform a query to retrieve the data and precipitation scores
results_precipitation = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= year_ago).all()
# Convert list of tuples into normal list
precipitation_dict = dict(results_precipitation)
return jsonify(precipitation_dict)
@app.route("/api/v1.0/stations")
def stations():
# Docstring
"""Return a JSON list of stations from the dataset."""
# Query stations
results_stations = session.query(Measurement.station).group_by(Measurement.station).all()
# Convert list of tuples into normal list
stations_list = list(np.ravel(results_stations))
return jsonify(stations_list)
@app.route("/api/v1.0/tobs")
def tobs():
# Docstring
"""Return a JSON list of Temperature Observations (tobs) for the previous year."""
    # Design a query to retrieve the last 12 months of temperature observations
max_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
# Get the first element of the tuple
max_date = max_date[0]
    # Calculate the date 1 year ago from the most recent date in the data
    # (366 days so that the first day of the range is included)
year_ago = dt.datetime.strptime(max_date, "%Y-%m-%d") - dt.timedelta(days=366)
# Query tobs
results_tobs = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date >= year_ago).all()
# Convert list of tuples into normal list
tobs_list = list(results_tobs)
return jsonify(tobs_list)
@app.route("/api/v1.0/<start>")
def start(start=None):
# Docstring
"""Return a JSON list of tmin, tmax, tavg for the dates greater than or equal to the date provided"""
from_start = session.query(Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start).group_by(Measurement.date).all()
from_start_list=list(from_start)
return jsonify(from_start_list)
@app.route("/api/v1.0/<start>/<end>")
def start_end(start=None, end=None):
# Docstring
"""Return a JSON list of tmin, tmax, tavg for the dates in range of start date and end date inclusive"""
between_dates = session.query(Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start).filter(Measurement.date <= end).group_by(Measurement.date).all()
between_dates_list=list(between_dates)
return jsonify(between_dates_list)
if __name__ == '__main__':
app.run(debug=True)
|
[
"noreply@github.com"
] |
noreply@github.com
|
800613bb979e2a651e7833167d3b6536f748963a
|
699add6df73ad158b8ebeb5f9de4aada5820f205
|
/facebook/app/posts/models/comments.py
|
51bab010f0aef4c5c779bd1f65e15e568916fbfe
|
[] |
no_license
|
ricagome/Api-Facebook-Clone
|
4f035ad280e6cb48d375fd87a9f62eecce67eb51
|
fae5c0b2e388239e2e32a3fbf52aa7cfd48a7cbb
|
refs/heads/main
| 2023-08-17T12:34:33.379017
| 2021-10-05T21:23:32
| 2021-10-05T21:23:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
"""Comment model."""
# Django
from django.db import models
# Utilities
from app.utils.models import FbModel
class Comment(FbModel):
"""Comment model."""
user = models.ForeignKey('users.User', on_delete=models.CASCADE)
profile = models.ForeignKey('users.Profile', on_delete=models.CASCADE)
post = models.ForeignKey('posts.Post', on_delete=models.CASCADE)
text = models.TextField(help_text='write a comment', max_length=250)
reactions = models.IntegerField(default=0)
def __str__(self):
"""Return username, post title and comment."""
return '@{} has commented {} on {}'.format(
self.user.username,
self.text, self.post)
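# For example (hypothetical data), str(comment) renders as:
#   '@alice has commented Nice post! on My first post'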
|
[
"juliancamilohermida@hotmail.com"
] |
juliancamilohermida@hotmail.com
|
9c398ed840e6c2bc5aa61edeb589e34f35fb1ef5
|
c36d43dc3ebb5ab987bda1cd7329a6fab58af45b
|
/semnet/interp/evaluator.py
|
5575ddf4edf6448507606f8f00ce119d1381ded7
|
[] |
no_license
|
patgrasso/semnet
|
e37cacfdab0903b0b5aed5ac010e071f24decb65
|
e5fd8912a1768f3f59dee937199feaa2158c925c
|
refs/heads/master
| 2021-01-12T06:38:10.173145
| 2016-12-31T00:04:35
| 2016-12-31T00:04:35
| 77,401,845
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
class Evaluator(object):
def __init__(self, env):
self.env = env
    def valueof(self, node, node_list):
        return self.env.get(node["word"])
|
[
"pgrasso@stevens.edu"
] |
pgrasso@stevens.edu
|
5483a62a0289eaf03b82b517c8e78dd11f7e8a9d
|
4a2f163e603f90d5b9a4b2a100d7bc7bc77d1c95
|
/predicting_biological_response/hemy_example.py
|
401b7f3d5dd2f883930c7bfdf5ca5cfa2b058519
|
[] |
no_license
|
tusonggao/data_cck
|
d781334bd1d425f6ecd613ebdb194835846e3adb
|
91d48589e8431fd00d70348dcb049c52fdcd2c7f
|
refs/heads/master
| 2020-04-09T03:59:09.931284
| 2020-01-26T15:54:14
| 2020-01-26T15:54:14
| 160,005,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
# https://blog.csdn.net/data_scientist/article/details/79036382
# https://blog.csdn.net/Gin077/article/details/84339790
# https://github.com/rushter/heamy
|
[
"tusonggao@163.com"
] |
tusonggao@163.com
|
91b43cda449292a11f4a69bb1dffb18b7872d0b9
|
32349a7406af3f6926e508dd4154a9042cd8a0b6
|
/DAA/Dynammic Programming/edit_distance.py
|
3fd4b208d55bfa264a9def65b8de2f5664df98c2
|
[] |
no_license
|
anumehaagrawal/LabWork-Sem-4
|
d78b95b61b2ec94d1ad143768200b739d40c2105
|
782430f67bb423b84749295a3fef61f241293032
|
refs/heads/master
| 2021-05-12T07:30:30.746342
| 2018-04-17T03:11:12
| 2018-04-17T03:11:12
| 117,244,508
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
def edit_distance(str1, str2, n, m):
    # dp_array[i][k]: edit distance between the first i chars of str1
    # and the first k chars of str2
    dp_array = [[0 for k in range(m + 1)] for i in range(n + 1)]
    for i in range(n + 1):
        for k in range(m + 1):
            if i == 0:
                dp_array[i][k] = k
            elif k == 0:
                dp_array[i][k] = i
            elif str1[i - 1] == str2[k - 1]:
                dp_array[i][k] = dp_array[i - 1][k - 1]
            else:
                dp_array[i][k] = 1 + min(dp_array[i][k - 1], dp_array[i - 1][k], dp_array[i - 1][k - 1])
    print(dp_array[n][m])
def main():
str1 = "hello"
str2 = "heeeee"
edit_distance(str1,str2,len(str1),len(str2))
main()
|
[
"anuzenith29@gmail.com"
] |
anuzenith29@gmail.com
|
e5679a098872822f28be752dec6bb6519196d5b7
|
8a5ab3d33e3b653c4c64305d81a85f6a4582d7ac
|
/PySide/QtCore/QTimer.py
|
5e91243992b9f324a3a089a65f93db3242e8a538
|
[
"Apache-2.0"
] |
permissive
|
sonictk/python-skeletons
|
be09526bf490856bb644fed6bf4e801194089f0d
|
49bc3fa51aacbc2c7f0c7ab86dfb61eefe02781d
|
refs/heads/master
| 2020-04-06T04:38:01.918589
| 2016-06-09T20:37:43
| 2016-06-09T20:37:43
| 56,334,503
| 0
| 0
| null | 2016-04-15T16:30:42
| 2016-04-15T16:30:42
| null |
UTF-8
|
Python
| false
| false
| 1,511
|
py
|
# encoding: utf-8
# module PySide.QtCore
# from /corp.blizzard.net/BFD/Deploy/Packages/Published/ThirdParty/Qt4.8.4/2015-05-15.163857/prebuilt/linux_x64_gcc41_python2.7_ucs4/PySide/QtCore.so
# by generator 1.138
# no doc
# no imports
from QObject import QObject
class QTimer(QObject):
# no doc
def interval(self, *args, **kwargs): # real signature unknown
pass
def isActive(self, *args, **kwargs): # real signature unknown
pass
def isSingleShot(self, *args, **kwargs): # real signature unknown
pass
def killTimer(self, *args, **kwargs): # real signature unknown
pass
def setInterval(self, *args, **kwargs): # real signature unknown
pass
def setSingleShot(self, *args, **kwargs): # real signature unknown
pass
def singleShot(self, *args, **kwargs): # real signature unknown
pass
def start(self, *args, **kwargs): # real signature unknown
pass
def startTimer(self, *args, **kwargs): # real signature unknown
pass
def stop(self, *args, **kwargs): # real signature unknown
pass
def timerEvent(self, *args, **kwargs): # real signature unknown
pass
def timerId(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *more): # real signature unknown; restored from __doc__
""" x.__init__(...) initializes x; see help(type(x)) for signature """
pass
staticMetaObject = None
timeout = None
__new__ = None
|
[
"yliangsiew@blizzard.com"
] |
yliangsiew@blizzard.com
|
88e7be6d96ec8e784aba5e12b0692d4c5beb1949
|
2db7597686f33a0d700f7082e15fa41f830a45f0
|
/Python/LeetCode2.0/DP/72.Edit Distance.py
|
b071302d4d3bdf3daf32936c19f8404f75c65131
|
[] |
no_license
|
Leahxuliu/Data-Structure-And-Algorithm
|
04e0fc80cd3bb742348fd521a62bc2126879a70e
|
56047a5058c6a20b356ab20e52eacb425ad45762
|
refs/heads/master
| 2021-07-12T23:54:17.785533
| 2021-05-17T02:04:41
| 2021-05-17T02:04:41
| 246,514,421
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,595
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @Time : 2020/05/09
'''
input: two words: str; the length of word is from 0 to inf
output: int; the number of modify steps
corner case:
one of the word is ‘’ → len(word2)
both words are ‘’ → 0
Method - DP
Steps:
build DP table; the size of table is (len(word1) + 1)* (len(word2) + 1)
dp[i][j]: the optimal solution when the size of word2 is i, the size of word1 is j
dp[i][j] = dp[i-1][j-1],                                  if word2[i - 1] == word1[j - 1]
         = min(dp[i][j-1], dp[i-1][j], dp[i-1][j-1]) + 1, otherwise
result is dp[len(word2)][len(word1)]
base case:
dp[0][j] = j
dp[i][0] = i
Time Complexity: O(NM), N is the length of word1 and M is the length of word2
Space Complexity: O(NM), DP table’s size
'''
# Common pitfall: keep track of which word indexes the rows and which the columns; and in word2[i - 1] == word1[j - 1], don't forget the "- 1"
class Solution:
def minDistance(self, word1: str, word2: str) -> int:
m = len(word1)
n = len(word2)
if m == 0:
return n
if n == 0:
return m
dp = [[0] * (m + 1) for _ in range(n + 1)]
for i in range(n + 1):
for j in range(m + 1):
if i == 0:
dp[i][j] = j
elif j == 0:
dp[i][j] = i
elif word2[i - 1] == word1[j - 1]:
dp[i][j] = dp[i - 1][j - 1]
else:
dp[i][j] = min(dp[i][j - 1], dp[i - 1][j], dp[i - 1][j - 1]) + 1
return dp[n][m]
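# For example: Solution().minDistance("horse", "ros") == 3
# (horse -> rorse -> rose -> ros).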
|
[
"58391184+Leahxuliu@users.noreply.github.com"
] |
58391184+Leahxuliu@users.noreply.github.com
|
20cb0d0b09a6ffefdcad9798b490f37d638c9fec
|
73ffeccb2b50320536e375c255c1a48f5dfa4493
|
/quantified_self_project/settings.py
|
080b42090d5b843aa1ce6c8b14cd8290e86b11be
|
[] |
no_license
|
justinetroyke/qs-django
|
0db7737b96d5deb1e3c6f81a25097b87a4da61c5
|
095524f8d0e8e83e702bfb02dbab8fb6bd650d17
|
refs/heads/master
| 2020-03-27T13:34:49.323224
| 2018-08-29T15:02:51
| 2018-08-29T15:02:51
| 146,617,327
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,391
|
py
|
"""
Django settings for quantified_self_project project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'zztez77xu&)++b!lnr+1yeis@sqced!id%6g-n%v6y3)64z9=9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'api',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'quantified_self_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'quantified_self_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny'
]
}
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
|
[
"jjtroyke@gmail.com"
] |
jjtroyke@gmail.com
|
5b8829efc99be0d97be1f033a445e8090d9021fe
|
7c0a5b40e86c876e72d3a635a60978dbf1c79c8b
|
/__init__.py
|
dbc8e29cabf9ba156d1b1396ed22dcd9204f2a28
|
[] |
no_license
|
BlenderCN-Org/selection_logic
|
a48e396f2ebfaf6f750bfa5871f33d49c69b15ba
|
7d240d626d699e5b41f1b45728730f41a360fc77
|
refs/heads/master
| 2020-05-23T21:20:40.599162
| 2018-10-10T17:35:25
| 2018-10-10T17:35:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 797
|
py
|
bl_info = {
"name": "Selection Logic",
"description": "Advanced selections based on logical conditions.",
"author": "Omar Ahmad",
"version": (1, 0),
"blender": (2, 79, 0),
"location": "View3D",
"warning": "",
"category": "Mesh"
}
import bpy
from . import ui
from . import operators
class SelectByExpressionOperator(bpy.types.Operator):
bl_idname = "mesh.select_by_expression"
bl_label = "Select By Expression"
def execute(self, context):
operators.selectVertices(context)
return {'FINISHED'}
def register():
ui.register()
operators.register()
bpy.utils.register_class(SelectByExpressionOperator)
def unregister():
ui.unregister()
operators.unregister()
bpy.utils.unregister_class(SelectByExpressionOperator)
|
[
"omar.squircleart@gmail.com"
] |
omar.squircleart@gmail.com
|
23cfee1ada500316d73bc8ad4983d16ddaefb85b
|
c71ad354837830987f17ab93ca3f7ceb6d405311
|
/khajuri/bin/pipeline_test.py
|
772f8726ae18693915e513ab87a0fac87cf3679f
|
[] |
no_license
|
zigvu/samosa
|
f353248a75fe7a83a8a59b375b104abec8d1d855
|
3962b3c7bab9d26bf871d257e15dd39c45ffaddd
|
refs/heads/master
| 2021-03-30T18:12:58.441901
| 2016-02-20T00:12:30
| 2016-02-20T00:12:30
| 50,481,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,390
|
py
|
#!/usr/bin/env python
import logging
import os
import glob
import argparse
import _init_paths
from khajuri.pipeline.run_pipeline import RunPipeline
from khajuri.multi.clip import Clip
from tools.files.file_utils import FileUtils
def parse_args():
"""Parse input arguments."""
    parser = argparse.ArgumentParser(description='Test a zigvu model on clips')
parser.add_argument('--clip_folder', dest='clip_folder',
help='Path to clips', required=True)
parser.add_argument('--output_path', dest='output_path',
help='Output folder path', required=True)
args = parser.parse_args()
return args
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
logging.debug('Start testing.')
args = parse_args()
runPipeline = RunPipeline()
allClipFiles = glob.glob("{}/*.mp4".format(args.clip_folder))
for clipFile in allClipFiles:
clipNumber = os.path.splitext(os.path.basename(clipFile))[0]
clipOutPath = os.path.join(args.output_path, clipNumber)
clip = Clip()
clip.clip_id = clipNumber
clip.clip_path = clipFile
clip.result_path = os.path.join(clipOutPath, 'clip.pkl')
runPipeline.clipdbQueue.put(clip)
logging.debug('RabbitToClip: process clip: {}'.format(clip.clip_id))
runPipeline.start()
runPipeline.join()
|
[
"eacharya@gmail.com"
] |
eacharya@gmail.com
|
d0e2832e8ee5e98f43faaa16e7637d13c046db78
|
29fc564df8ee16a2d140cbd150260e04f4ddc5c5
|
/0x0A-python-inheritance/10-square.py
|
6c8775c89972ea3d1271833f32c5db7e5717d8fe
|
[] |
no_license
|
ChristianAgha/holbertonschool-higher_level_programming
|
9359fdf4e3f30ed4422a0af59672ac5ff397d4a2
|
cce59b31aba3e2a09cb4bf76a6fcfeefa7ab5031
|
refs/heads/master
| 2021-01-20T07:15:31.258319
| 2017-09-27T05:53:25
| 2017-09-27T05:53:25
| 89,984,551
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,313
|
py
|
#!/usr/bin/python3
"""Geometry Module"""
class BaseGeometry:
"""class BaseGeometry"""
def area(self):
"""raises an Exception with the message area() is not implemented"""
raise Exception("area() is not implemented")
def integer_validator(self, name, value):
"""validates value"""
if type(value) is not int:
raise TypeError("{} must be an integer".format(name))
if value <= 0:
raise ValueError("{} must be greater than 0".format(name))
class Rectangle(BaseGeometry):
"""class Rectangle"""
    def __init__(self, width, height):
        """initialization: validate before assigning"""
        BaseGeometry.integer_validator(self, "width", width)
        BaseGeometry.integer_validator(self, "height", height)
        self.__width__ = width
        self.__height__ = height
def __str__(self):
"""for print"""
return("[Rectangle] {}/{}".format(self.__width__, self.__height__))
def area(self):
"""return area"""
return self.__width__ * self.__height__
class Square(Rectangle):
"""class Square"""
    def __init__(self, size):
        """initialization: Rectangle.__init__ validates size"""
        Rectangle.__init__(self, size, size)
        self.__size__ = size
def area(self):
"""return area"""
return self.__size__ ** 2
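# Usage sketch (illustration only):
#   s = Square(4)
#   print(s)         # [Rectangle] 4/4
#   print(s.area())  # 16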
|
[
"christianagha@gmail.com"
] |
christianagha@gmail.com
|
24c336f380a817f634b1f446450fdffa2ad476f9
|
7ace4c9742af543db1965afec55b115b38d70aea
|
/programs/classconsrtuctor.py
|
1d91c88cb6d14bb1fccabcfaaa58948dd4b781f5
|
[] |
no_license
|
abhis021/C-DAC
|
8a7472517fb9d664cdcf1d6b33146219da970943
|
cd002a5740f63aa6fd25b982a4c7f2942877f12d
|
refs/heads/main
| 2023-08-25T03:36:35.726671
| 2021-10-17T07:41:27
| 2021-10-17T07:41:27
| 416,581,367
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
class partyanimal:
x=0
name=' '
def __init__(self,name1):
self.name=name1
def party(self):
self.x=self.x+1
print(self.name,'party count',self.x)
an=partyanimal('sally')
an.party()
na=partyanimal('jim')
na.party()
na.party()
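# Expected output:
#   sally party count 1
#   jim party count 1
#   jim party count 2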
|
[
"abhisheku722@gmail.com"
] |
abhisheku722@gmail.com
|
2df9cffd7c706f44089b51dd1178e45e110bfbc7
|
8149d1030b5bc62cc82d5afedbe7486daedbf8c5
|
/[829][Consecutive Numbers Sum][Medium].py
|
4810671219d8327bd315d73d7fbaf90d1a403a40
|
[] |
no_license
|
guofei9987/leetcode_python
|
faef17bb59808197e32ed97e92e2222862e2ba8c
|
23703a6fb5028d982b3febc630e28f9bb65a82a6
|
refs/heads/master
| 2020-03-21T18:24:33.014579
| 2019-10-12T13:29:03
| 2019-10-12T13:29:03
| 138,889,760
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 55
|
py
|
# https://leetcode.com/problems/consecutive-numbers-sum
|
[
"guofei9987@foxmail.com"
] |
guofei9987@foxmail.com
|
9488c0f83f1e5752703d6f5e72ddae45c675c8e9
|
86095e9590db8bab47b95752b967d9dbb88647da
|
/client.py
|
6ee40913d22e329ed34554e2633080860679cf5e
|
[] |
no_license
|
jrestuccio/python-udp-filetransfer
|
3cb2e4ec5d0751d133e648fefc20db73755e75c4
|
0c9e4cf278279a0fb980749eb9a3a2a8ca5796e9
|
refs/heads/master
| 2020-12-03T08:13:27.254990
| 2014-05-05T11:41:08
| 2014-05-05T11:41:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,013
|
py
|
"""
:title: client.py
:author: Josephine Lim
:description: Client to download files from server
Summary of packet types:
1 = 0b0001 = read request = \x00\x00\x00\x01
2 = 0b0010 = read response = \x00\x00\x00\x02
4 = 0b0100 = open request = \x00\x00\x00\x04
8 = 0b1000 = open response = \x00\x00\x00\x08
9 = 0b1001 = close request = \x00\x00\x00\x09
"""
from socket import *
import sys
import select
import struct
import random
class Client(object):
NUM_BYTES_TO_READ = 1400 #Total bytes sent inc header will be <1500 to prevent fragmentation over Ethernet links
epoch_no = 0
handle_no = 0
def __init__(self):
"""Sets up UDP socket, obtains 5 values at command line:
Filename to be read from server
Filename under which received file is to be stored locally
IP address or hostname of server (localhost if client is run on same machine)
Port number of server
Probability of packet loss, p
"""
self.client_socket = socket(AF_INET, SOCK_DGRAM)
# Value for number of bytes socket can receive. ( For best match with hardware and network realities,
# the value should be a relatively small power of 2, for example, 4096)
self.buffer_ = 2048
self.file_read = self.get_file_read_arg()
self.local_filename = self.get_local_filename_arg()
self.ip = self.get_ip_arg()
self.port = self.get_port_arg()
self.p = self.get_p_arg()
self.address = (self.ip, self.port)
# Create file on local system with name provided, to write our received file to
self.file_write = open(self.local_filename, 'wb')
self.eof = False
def get_file_read_arg(self):
"""Gets the name of the file to receive from the command line.
Throws an error if it is empty or more than 100 characters."""
try:
arg = sys.argv[1]
file_read = str(arg)
except IndexError:
print "Please provide the name of the file that you wish to receive."
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0")
if (len(file_read) > 100):
print "Name of file must be equal to or less than 100 characters."
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0")
else:
return file_read
def get_local_filename_arg(self):
"""Gets the name under which received file is to be stored locally, from the command line.
Throws an error if it is empty."""
try:
arg = sys.argv[2]
local_filename = str(arg)
except IndexError:
print "Please provide the name under which the received file is to be stored locally."
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0")
else:
return local_filename
def get_ip_arg(self):
"""Gets the ip number or hostname of the server from the command line.
Throws an error if it is empty."""
try:
arg = sys.argv[3]
ip = str(arg)
except IndexError:
print "The IP address or hostname of the server must be provided."
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0")
else:
return ip
def get_port_arg(self):
"""Gets the port number of the server from the command line.
Throws an error if it is empty, not an integer, or not in the range of 1024 - 60000."""
try:
arg = sys.argv[4]
port = int(arg)
except ValueError:
print "Port must be a number only."
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0")
except IndexError:
print "Port number must be provided."
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0")
if any([port < 1024, port > 60000]):
print "Port must be between 1024 and 60000"
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0")
else:
return port
def get_p_arg(self):
"""Gets the probability of packet loss, p, from the command line.
Throws an error if it is empty, or not a float in the range of 0.0 - 1.0."""
try:
arg = sys.argv[5]
p = float(arg)
except IndexError:
print "The probability of packet loss, p, must be provided."
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0")
if (p < 0.0 or p > 1.0):
print "p value must be between 0.0 and 1.0 inclusive."
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0")
else:
return p
def recv_invalid_response(self, recv_data, invalid_type = ""):
"""When bit signature is invalid or wrong packet type is received,
discard packet and print error message."""
if (invalid_type == "bit_signature"):
print("Error: Packet received from outside our network (wrong bit signature)")
recv_data = ""
elif (invalid_type == "response_type"):
print("Error: Wrong response type in packet received.")
recv_data = ""
return
def send_open_request(self):
"""Sends an open-request packet to the server in binary.
Format of packet is:
4 bytes - bit signature - 0b1101
4 bytes - open request type - 0b0100
100 bytes - filename to be read as ASCII string
"""
print "Sending open request for file named ", self.file_read
send_data = struct.pack("!2I100s", 0b1101, 0b0100, self.file_read)
self.client_socket.sendto(send_data, self.address)
return
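    # For reference, the receiving side can recover these fields with the
    # mirror-image unpack (a sketch, not part of this client; it assumes the
    # same 108-byte layout described above):
    #   sig, req_type, name = struct.unpack("!2I100s", packet)
    #   name = name.rstrip("\x00")  # strip the padding added by "100s"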
def recv_open_response(self, recv_payload):
"""When client receives an (already-validated) open-response packet from the server,
it unpacks the payload and saves the received fields as instance variables if file found."""
        unpacked_payload = struct.unpack("!?Q2I", recv_payload)
        # Read status field. If set to False, ignore remaining fields and
        # generate an error msg (file not found) before exiting.
        # struct.unpack returns a tuple, so index it directly.
        status = unpacked_payload[0]
        if status == False:
            print "Error: File not found."
            sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0") if False else sys.exit()
        # If set to True, read remaining fields.
        elif status == True:
            print("File found.")
            self.file_length = unpacked_payload[1]
            self.epoch_no = unpacked_payload[2]
            self.handle_no = unpacked_payload[3]
return
def send_read_request(self, start_position):
"""Sends a read request packet to the server in binary.
Format of packet is:
4 bytes - bit signature - 0b1101
4 bytes - read request type - 0b0001
4 bytes - epoch number - provided by server in open response
4 bytes - handle number - provided by server in open response
4 bytes - start position of the block to be read from the file - incremented sequentially
4 bytes - number of bytes to read - 1400
"""
send_data = struct.pack("!6I", 0b1101, 0b0001, self.epoch_no, self.handle_no, start_position, self.NUM_BYTES_TO_READ)
self.client_socket.sendto(send_data, self.address)
return
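    # Server-side sketch (an assumption, mirroring the 24-byte layout above):
    #   sig, req_type, epoch, handle, start, nbytes = struct.unpack("!6I", packet)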
def recv_read_response(self, recv_payload):
"""When client receives an (already-validated) read-response packet from the server, it unpacks payload,
checks that epoch number and handle number are correct and status field is 'OK',
and appends file data received to the local file at the given start position."""
        # Only unpack the headers because we want to store the file data as binary.
        unpacked_payload = struct.unpack('!H3IQ', recv_payload[:22])
        status = unpacked_payload[0]
        epoch_no = unpacked_payload[1]
        handle_no = unpacked_payload[2]
        # Check that the file handle is the same, to make sure it is the same file request.
        if (self.epoch_no == epoch_no and self.handle_no == handle_no):
            start_position = unpacked_payload[3]
            num_bytes_been_read = unpacked_payload[4]
            # If we receive fewer bytes than the number we requested to read,
            # the end of file has been reached.
if (num_bytes_been_read < self.NUM_BYTES_TO_READ):
self.eof = True
data_to_write = recv_payload[22:]
            # If the status field says the response contains real data, append it to
            # the file. Otherwise react depending on the error code received.
            # Status 00 = OK
            # Status 01 = Epoch no. of file handle doesn't match epoch no. of current invocation
            # Status 10 = No context found for file-handle and no data has been read
            # Status 11 = Context could be found but start position out of range
if (status == 0b00):
self.file_append.seek(start_position)
self.file_append.write(data_to_write)
elif (status == 0b01):
                print("Error: Epoch no. of file handle doesn't match epoch no. of current invocation")
sys.exit()
elif (status == 0b10):
print("Error: No context found for file-handle and no data has been read")
sys.exit()
elif(status == 0b11):
print("Error: Context could be found but start position out of range")
sys.exit()
else:
print("Error: File handle does not match file handle stored in client. Wrong file received.")
sys.exit()
        # Then return control to the read_service_loop() method so that the next
        # iteration of send_read_request from the new start position is called.
return
def send_close_request(self):
"""Sends a close request packet to the server to close the file object.
Format of packet is:
4 bytes - bit signature - 0b1101
4 bytes - close request type - 0b1001
4 bytes - epoch number
4 bytes - handle number
"""
data = struct.pack("!4I", 0b1101, 0b1001, self.epoch_no, self.handle_no)
self.client_socket.sendto(data, self.address)
self.client_socket.close()
return
def open_service_loop(self):
"""Loop that governs the timing and retransmission of open request packets,
then checks packets received for the bit signature and response type fields to ensure that they are correct."""
print "Attempting to receive file", self.file_read, "from", self.ip, "at port", self.port, "."
recv_data = None
num_retransmits = 0
        # Start a timer and retransmit after each one-second timeout. If a response
        # arrives within the timeout, move on to the next step.
        # Limit the number of retransmits to 60 so as not to enter an infinite loop.
while(num_retransmits < 60):
num_retransmits += 1
self.send_open_request()
input_socket = [self.client_socket]
inputready,outputready,exceptready = select.select(input_socket,[],[], 1)
            # If the timer expires without input becoming ready, an empty list is
            # returned, so go to the next iteration of the loop (retransmit).
if (inputready == []):
continue
else:
try:
recv_data = self.client_socket.recv(self.buffer_)
except Exception as exception_:
print("Wrong port number or IP address provided, or server is not available at the moment.")
sys.exit()
print("Received a packet.")
#Generate a random number between 0 and 1 with uniform distribution to simulate packet loss.
if (random.uniform(0,1) < self.p):
recv_data = None
print("Packet dropped randomly to simulate packet losses")
continue
bit_signature = recv_data[0:4]
response_type = recv_data[4:8]
recv_payload = recv_data[8:]
#Check that bit signature is valid (packet is from our network)
if bit_signature != "\x00\x00\x00\r":
                    self.recv_invalid_response(recv_data, "bit_signature")
continue
else:
                    # We have only ever sent an open_request, so the only viable response at this point is an open_response.
                    # If this field contains anything else, it is an invalid packet. Retransmit the request.
if response_type != "\x00\x00\x00\x08":
self.recv_invalid_response(recv_data, "response_type")
continue
else:
#Bit signature and response type fields are both valid.
print("Received open response from server...")
self.recv_open_response(recv_payload)
break
if (num_retransmits >= 60):
print ("Exceeded number of retransmissions allowed. Exiting program.")
sys.exit()
return
def read_service_loop(self):
"""Loop that governs the timing and retransmission of read request packets,
then checks packets received for the bit signature and response type fields to ensure that they are correct."""
        # Increment start_position each time a packet is sent, sending a read request packet for each new position.
        # Expect to receive a read_response packet for each read request sent.
recv_data = None
print("Sending request to server to read and receive file...")
start_position = 0
while(self.eof == False):
print("Reading from byte " + str(start_position))
num_retransmits = 0
#Loop for retransmissions of the same start position
while(num_retransmits < 60):
num_retransmits = num_retransmits + 1
self.send_read_request(start_position)
input_socket = [self.client_socket]
inputready,outputready,exceptready = select.select(input_socket,[],[], 1)
if (inputready == []):
continue
else:
recv_data = self.client_socket.recv(self.buffer_)
if (random.uniform(0,1) < self.p):
recv_data = None
print("Packet dropped randomly to simulate packet losses")
continue
bit_signature = recv_data[0:4]
response_type = recv_data[4:8]
recv_payload = recv_data[8:]
if bit_signature != "\x00\x00\x00\r":
self.recv_invalid_response(recv_data, "bit_signature")
continue
else:
if response_type == "\x00\x00\x00\x02":
#Packet is valid, proceed to recv_read_response to append this bit of file received into local_filename
self.file_append = open(self.local_filename, 'r+b')
self.recv_read_response(recv_payload)
break
else:
self.recv_invalid_response(recv_data, "response_type")
continue
start_position = start_position + self.NUM_BYTES_TO_READ
if (num_retransmits >= 60):
print ("Exceeded number of retransmissions allowed. Exiting program.")
sys.exit()
return
client = Client()
client.open_service_loop()
client.read_service_loop()
client.send_close_request()
print ("File received successfully. Program will now exit.")
sys.exit()
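# Example invocation (taken from the usage strings above):
#   python client.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0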
|
[
"thecodeman66@hotmail.com"
] |
thecodeman66@hotmail.com
|
9aa84188689bfa3d627c30002874472a97dc229a
|
499ff5445e2017d042690c0429cf2e767a7f623f
|
/coral/io/_abi.py
|
b19a2ab0ec287ad6d000026ece9b71f749677f3a
|
[
"MIT"
] |
permissive
|
blthree/coral
|
b6ab934c10271d7b790130fe45e622b7c66921b4
|
30514735d9a51487583535a3a7e3fbfd0fe15ed8
|
refs/heads/master
| 2021-01-22T10:14:52.018579
| 2017-02-19T00:28:33
| 2017-02-19T00:28:33
| 81,997,699
| 0
| 0
| null | 2017-02-14T22:58:59
| 2017-02-14T22:58:59
| null |
UTF-8
|
Python
| false
| false
| 3,069
|
py
|
'''Read and write DNA sequences.'''
import coral as cr
import numpy as np
import os
from . import parsers
from .exceptions import UnsupportedFileError
def read_abi(path, trim=True, attach_trace=True):
'''Read a single ABI/AB1 Sanger sequencing file.
:param path: Full path to input file.
:type path: str
:param trim: Determines whether the sequence will be trimmed using Richard
Mott's algorithm (trims based on quality).
:type trim: bool
:param attach_trace: Determines whether to attach the trace result as a
.trace attribute of the returned sequence and the
trace peak locations as a .tracepeaks attribute. The
trace attribute is a 2D numpy array with 4 columns in
the order GATC.
:type attach_trace: bool
:returns: DNA sequence.
:rtype: coral.DNA
'''
filename, ext = os.path.splitext(os.path.split(path)[-1])
abi_exts = ['.abi', '.ab1']
if ext in abi_exts:
with open(path) as f:
abi = parsers.ABI(f)
else:
raise UnsupportedFileError('File format not recognized.')
seq = abi.seq_remove_ambig(abi.seq)
# Attach the trace results to the seq
if attach_trace:
order = abi.data['baseorder'].upper()
trace = [abi.data['raw' + str(order.index(b) + 1)] for b in 'GATC']
trace = np.array(trace)
tracepeaks = np.array(abi.data['tracepeaks'])
    if trim:
        try:
            sequence = cr.DNA(abi.trim(seq))
        except ValueError:
            # A ValueError is raised if the sequence is too short to trim;
            # fall back to the untrimmed sequence so `sequence` is defined.
            sequence = cr.DNA(seq)
        if attach_trace:
            # Adjust trace data based on trimming.
            trim_start = seq.index(str(sequence))
            idx = (trim_start, trim_start + len(sequence))
            peaks = tracepeaks[idx[0]:idx[1]]
            sequence.trace = trace[peaks[0]:peaks[-1], :]
            sequence.tracepeaks = peaks
    else:
        sequence = cr.DNA(seq)
sequence.name = abi.name
return sequence
def read_abis(directory, trim=True, attach_trace=True):
'''Read all ABI sequences files in a directory.
:param directory: Path to directory containing sequencing files.
:type directory: str
:param trim: Determines whether the sequence will be trimmed using Richard
Mott's algorithm (trims based on quality).
:type trim: bool
:param attach_trace: Determines whether to attach the trace result as a
.trace attribute of the returned sequence. The trace
attribute is a 2D numpy array with 4 columns in the
order GATC.
:type attach_trace: bool
:returns: A list of DNA sequences.
:rtype: coral.DNA list
'''
dirfiles = os.listdir(directory)
abis = []
for dirfile in dirfiles:
path = os.path.join(directory, dirfile)
try:
abis.append(read_abi(path, trim=trim, attach_trace=attach_trace))
except UnsupportedFileError:
pass
return abis
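# A minimal usage sketch (the directory path is hypothetical):
#   seqs = read_abis('./sanger_runs', trim=True)
#   for s in seqs:
#       print(s.name)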
|
[
"nbolten@gmail.com"
] |
nbolten@gmail.com
|
0d3b60023a60eed6ae0274a83fd1daecbd04b513
|
95749b75c446df3ce4aabb03d5aec90de793e207
|
/gemini/taskapp/celery.py
|
722f621c5679f886e12c4c93ba9692df4ba43474
|
[] |
no_license
|
Hawk94/gemini
|
8288a11499c4cc12c8c79641a51b5e99afe268c5
|
3a4d0b13488b8e9fbc40dc3cde338b61bc04b494
|
refs/heads/master
| 2020-06-24T11:37:22.204269
| 2017-07-12T20:33:21
| 2017-07-12T20:33:21
| 96,935,334
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,398
|
py
|
import os
from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local') # pragma: no cover
app = Celery('gemini')
class CeleryConfig(AppConfig):
name = 'gemini.taskapp'
verbose_name = 'Celery Config'
def ready(self):
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
installed_apps = [app_config.name for app_config in apps.get_app_configs()]
app.autodiscover_tasks(lambda: installed_apps, force=True)
if hasattr(settings, 'RAVEN_CONFIG'):
# Celery signal registration
from raven import Client as RavenClient
from raven.contrib.celery import register_signal as raven_register_signal
from raven.contrib.celery import register_logger_signal as raven_register_logger_signal
raven_client = RavenClient(dsn=settings.RAVEN_CONFIG['DSN'])
raven_register_logger_signal(raven_client)
raven_register_signal(raven_client)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request)) # pragma: no cover
|
[
"x99tom.miller@gmail.com"
] |
x99tom.miller@gmail.com
|
abedc4c120a71cfaac46c76124d5f686290bce4b
|
2255a4eb151b85df055d3b66455bd788b6928592
|
/lcs.py
|
24442756a9a6765b6654a20e307ef03c08b6fd1c
|
[] |
no_license
|
mloo3/LocalHooks
|
fcfe073d6be32b54421b860920a3de59a948282c
|
9ff07384e544150d2677906683a7f55c31ebd4dc
|
refs/heads/master
| 2021-01-01T19:22:27.343172
| 2017-07-28T19:35:33
| 2017-07-28T19:35:33
| 98,575,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 807
|
py
|
def lcs(x, y):
    m = len(x)
    n = len(y)
    # l[i][j] holds the LCS length of x[:i] and y[:j].
    l = [[None] * (n + 1) for i in range(m + 1)]
    for i in range(m + 1):
        for j in range(n + 1):
            if i == 0 or j == 0:
                l[i][j] = 0
            elif x[i - 1] == y[j - 1]:
                l[i][j] = l[i - 1][j - 1] + 1
            else:
                l[i][j] = max(l[i - 1][j], l[i][j - 1])
    index = l[m][n]
    # Build the subsequence back to front (no terminator character needed).
    lcs = [""] * index
    i = m
    j = n
    while i > 0 and j > 0:
        if x[i - 1] == y[j - 1]:
            index -= 1
            lcs[index] = x[i - 1]
            i -= 1
            j -= 1
        elif l[i - 1][j] > l[i][j - 1]:
            i -= 1
        else:
            j -= 1
    #print('\n'.join([''.join(['{:4}'.format(item) for item in row]) for row in l]))
    return "".join(lcs)
x = "aggtab"
y = "gxtxayb"
print(lcs(x,y))
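# Expected output for this classic example: gtab (an LCS of length 4).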
|
[
"slayer71432@gmail.com"
] |
slayer71432@gmail.com
|
ddad2ca9b7b59fdf640e2b0a0f29fdc4854b3efb
|
a1a789f14eb2d5c039fbf61283b03f2f1e0d2651
|
/jeopardy/migrations/0002_auto_20150622_0957.py
|
7c525a02cd0795373e6b581937c6f647021a3936
|
[
"MIT"
] |
permissive
|
codefisher/web_games
|
279bf5be5a348951e6ae3361c24b696ac841e01c
|
d09ffb8f86b24e04568b2a33c94aa49d80455715
|
refs/heads/master
| 2021-01-10T13:10:33.097712
| 2017-07-12T05:46:37
| 2017-07-12T05:46:37
| 36,868,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('jeopardy', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='points',
options={'verbose_name': 'Points', 'verbose_name_plural': 'Points'},
),
migrations.AddField(
model_name='question',
name='bonus',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AlterField(
model_name='question',
name='topic',
field=models.ForeignKey(related_name='topicopi', verbose_name='Topic', to='jeopardy.Topic', on_delete=models.CASCADE),
),
]
|
[
"mail@codefisher.org"
] |
mail@codefisher.org
|
ab0c049cca67cdb3f90aa2e8ce48ecceed5f6ce8
|
83acd2e879b8d1dfbd7d735193539b8537e86d08
|
/pyropod/ropod/ftsm/ftsm_base.py
|
b5a56108172f6d678c51713eb4724ab28dca21d7
|
[] |
no_license
|
HBRS-SDP/ropod_common
|
89b296e6bb56dc225319850036d3a63efd46ace9
|
5ce24b8ae79239f4fd5d2249fd33d1b1061eaceb
|
refs/heads/master
| 2020-05-09T23:39:11.209722
| 2019-03-12T12:59:48
| 2019-03-12T12:59:48
| 181,508,576
| 0
| 0
| null | 2019-04-15T14:52:18
| 2019-04-15T14:52:18
| null |
UTF-8
|
Python
| false
| false
| 1,339
|
py
|
from pyftsm.ftsm import FTSM, FTSMStates, FTSMTransitions
class FTSMBase(FTSM):
'''ROPOD-specific implementation of a fault-tolerant state machine
@author Alex Mitrevski
@maintainer Alex Mitrevski, Santosh Thoduka, Argentina Ortega Sainz
@contact aleksandar.mitrevski@h-brs.de, santosh.thoduka@h-brs.de, argentina.ortega@h-brs.de
'''
def __init__(self, name, dependencies, max_recovery_attempts=1):
super(FTSMBase, self).__init__(name, dependencies, max_recovery_attempts)
def init(self):
'''Method for component initialisation; returns FTSMTransitions.INITIALISED by default
'''
return FTSMTransitions.INITIALISED
def configuring(self):
'''Method for component configuration/reconfiguration;
returns FTSMTransitions.DONE_CONFIGURING by default
'''
return FTSMTransitions.DONE_CONFIGURING
def ready(self):
'''Method for the behaviour of a component when it is ready
for operation, but not active; returns FTSMTransitions.RUN by default
'''
return FTSMTransitions.RUN
def running(self):
'''Abstract method for the behaviour of a component during active operation
'''
pass
def recovering(self):
'''Abstract method for component recovery
'''
pass
|
[
"aleksandar.mitrevski@h-brs.de"
] |
aleksandar.mitrevski@h-brs.de
|
89a66584f244256442569d26ef92908874f586c1
|
7da8913218b6450e83c3833f21315630717c7d88
|
/thomasStudents/odu.py
|
c3de060757e7fb196b013215e6237d1f35168bf4
|
[] |
no_license
|
andrefisch/PythonScripts
|
b028bec4ebf0f4442face3602dd136235efc32fa
|
bd68981ac931ab9ea7b44761647f5e2fff04e4c8
|
refs/heads/master
| 2021-01-17T13:10:49.227307
| 2017-07-06T04:30:43
| 2017-07-06T04:30:43
| 57,985,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,842
|
py
|
from urllib.parse import urlencode
from urllib.request import Request, urlopen
import pandas
import openpyxl
import re
import math
import pygame, time
'''
1. import spreadsheet
2. for loop:
A. find value in cell C(row)
B. make a request to database
C. find student in response using last name C(row) and first name B(row)
D. if name exists in database:
a. replace empty cell D(row) with email address
3. save file
HTTPError: HTTP Error 500: Internal Server Error
'''
# 1.
# Open the file for editing
xfile = openpyxl.load_workbook('odu.xlsx')
# Open the worksheet we want to edit
sheet = xfile.get_sheet_by_name('students')
# Open the finished playing sound
pygame.init()
pygame.mixer.music.load('note.mp3')
# Some servers get annoyed if you make too many requests, so don't do them all at once
# Start here
start = 17978
# End here
end = sheet.max_row + 1
# end = 6000
for row in range (start, end):
if (row % 999 == 0):
print ("GIVING THE SERVER A FIVE MINUTE BREAK")
xfile.save('odu.xlsx')
pygame.mixer.music.play()
time.sleep(3)
pygame.mixer.music.stop()
time.sleep(300)
pygame.mixer.music.play()
time.sleep(3)
pygame.mixer.music.stop()
print ("BREAK IS OVER, BACK TO WORK!")
# A.
firstName = sheet['B' + str(row)].value
lastName = sheet['C' + str(row)].value
# B.
if ((' ' in firstName) or (' ' in lastName)):
continue
else:
url = 'https://www.odu.edu/directory/?F_NAME=' + firstName + "&L_NAME=" + lastName + "&SEARCH_IND=S"
# post_fields = {'L_NAME': lastName, "F_NAME": firstName, "SEARCH_IND": "S"}
request = Request(url)#, urlencode(post_fields).encode())
json = urlopen(request).read()
# Make sure there are any results for the search
if "<table" in str(json):
try:
html = pandas.read_html(json)
email = html[0][1][3]
for i in range(2, len(html[0][1])):
if lastName in html[0][0][i] and firstName in html[0][0][i]:
p = re.compile('\w*@odu\.edu')
# print (isinstance(html[0][2][i], str))
if (isinstance(html[0][1][i], str)):
m = p.search(html[0][1][i])
if (m):
sheet['D' + str(row)] = m.group()
# Keep track of how close we are to being done
print (str(format((row - start) / (end - start) * 100.00, '.2f')) + "%: " + m.group())
except Exception:
pass
xfile.save('odu.xlsx')
pygame.mixer.music.play()
time.sleep(3)
pygame.mixer.music.stop()
pygame.mixer.music.play()
time.sleep(3)
pygame.mixer.music.stop()
|
[
"anfischl@gmail.com"
] |
anfischl@gmail.com
|
d24b0c9ae9dcf47759d369bdaf972fc87c046577
|
8dfd0de8519bf29565cf44ac342587a2b93fb086
|
/sonar.py
|
6dbb5b8b2981536bd1f86487a0012dbc577fb58c
|
[] |
no_license
|
ThePfarrer/Invent-Your-Own-Games
|
d058fdbb5f7408ab5ac3b4a301298fda62b0d458
|
ae13a457277f0cad53185bb1d611203eb78c22b0
|
refs/heads/master
| 2023-02-09T17:57:11.661474
| 2021-01-06T18:46:41
| 2021-01-06T18:46:41
| 323,171,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,797
|
py
|
# Sonar Treasure Hunt
import random
import sys
import math
def get_new_board():
# Create a new 60x15 board data structure.
board = []
for x in range(60): # The main list is list of 60 lists.
board.append([])
# Each list in the main list has 15 single-character strings.
for y in range(15):
# Use different characters for the ocean to make it more readable.
if random.randint(0, 1) == 0:
board[x].append('~')
else:
board[x].append('`')
return board
def draw_board(board):
# Draw the board data structure.
# Initial space for the numbers down the left side of the board
tens_digits_line = ' '
for i in range(1, 6):
tens_digits_line += (' ' * 9) + str(i)
# Print the numbers across the top of the board.
print(tens_digits_line)
print(' ' + ('0123456789' * 6))
print()
# Print each of the 15 rows.
for row in range(15):
# Single-digit numbers need to be padded with an extra space.
if row < 10:
extra_space = ' '
else:
extra_space = ''
# Create the string for this row on the board.
board_row = ''
for column in range(60):
board_row += board[column][row]
print(f'{extra_space}{row} {board_row} {row}')
# Print the numbers across the bottom of the board.
print()
print(' ' + ('0123456789' * 6))
print(tens_digits_line)
def get_random_chests(num_chests):
# Create a list of chest data structures (two-item lists of x, y int coordinates).
chests = []
while len(chests) < num_chests:
new_chest = [random.randint(0, 59), random.randint(0, 14)]
if new_chest not in chests: # Make sure a chest is not already here.
chests.append(new_chest)
return chests
def is_on_board(x, y):
# Return True if the coordinates are on the board; otherwise, return False.
return x >= 0 and x <= 59 and y >= 0 and y <= 14
def make_move(board, chests, x, y):
# Change the board data structure with a sonar device character. Remove treasure chests from the chests list as they are found.
# Return False if this is an invalid move.
# Otherwise, return the string of the result of this move.
smallest_distance = 100 # Any chest will be closer than 100.
for cx, cy in chests:
distance = math.sqrt((cx - x)**2 + (cy - y)**2)
if distance < smallest_distance: # We want the closest treasure chest.
smallest_distance = distance
smallest_distance = round(smallest_distance)
if smallest_distance == 0:
# xy is directly on a treasure chest!
chests.remove([x, y])
return 'You have found a sunken treasure chest!'
else:
if smallest_distance < 10:
board[x][y] = str(smallest_distance)
return f'Treasure detected at a distance of {smallest_distance} from the sonar device.'
else:
board[x][y] = 'X'
return 'Sonar did not detect anything. All treasure chests out of range.'
def enter_player_move(previous_moves):
# Let the player enter their move. Return a two-item list of int xy coordinates.
print('Where do you want to drop the next sonar device? (0-59 0-14) (or type quit)')
while True:
move = input()
if move.lower() == 'quit':
print('Thanks for playing!')
sys.exit()
move = move.split()
if len(move) == 2 and move[0].isdigit() and move[1].isdigit() and is_on_board(int(move[0]), int(move[1])):
if [int(move[0]), int(move[1])] in previous_moves:
print('You already moved there.')
continue
return [int(move[0]), int(move[1])]
print('Enter a number from 0 to 59, a space, then a number from 0 to 14.')
def show_instructions():
print('''Instructions:
You are the captain of the Simon, a treasure-hunting ship. Your current mission
is to use sonar devices to find three sunken treasure chests at the bottom of
the ocean. But you only have cheap sonar that finds distance, not direction.
Enter the coordinates to drop a sonar device. The ocean map will be marked with
how far away the nearest chest is, or an X if it is beyond the sonar device's
range. For example, the C marks are where chests are. The sonar device shows a
3 because the closest chest is 3 spaces away.
1 2 3
012345678901234567890123456789012
0 ~~`~`~~~`~``~~```~``~~~````~~``~~ 0
1 ~`~~~~``~``~``~``~```~~`~``~```~~ 1
2 ``X~~3~~~`~C~````````~~~`~```~``` 2
3 ```~``~~`~~`~``~~~``~~~`~`~~~~~`~ 3
4 ~~~~~`````~`C```~`~`~~`~~```~```` 4
012345678901234567890123456789012
1 2 3
(In the real game, the chests are not visible in the ocean.)
Press enter to continue...''')
input()
print('''
When you drop a sonar device directly on a chest, you retrieve it and the other
sonar devices update to show how far away the next nearest chest is. The chests
are beyond the range of the sonar device on the left, so it shows an X.
1 2 3
012345678901234567890123456789012
0 ~~`~`~~~`~``~~```~``~~~````~~``~~ 0
1 ~`~~~~``~``~``~``~```~~`~``~```~~ 1
2 ``X~~7~~~`~C~````````~~~`~```~``` 2
3 ```~``~~`~~`~``~~~``~~~`~`~~~~~`~ 3
4 ~~~~~`````~`C```~`~`~~`~~```~```` 4
012345678901234567890123456789012
1 2 3
The treasure chests don't move around. Sonar devices can detect treasure chests
up to a distance of 9 spaces. Try to collect all 3 chests before running out of
sonar devices. Good luck!
Press enter to continue...''')
input()
print('S O N A R !')
print()
print('Would you like to view the instructions? (yes/no)')
if input().lower().startswith('y'):
show_instructions()
while True:
# Game setup
sonar_devices = 20
the_board = get_new_board()
the_chests = get_random_chests(3)
draw_board(the_board)
previous_moves = []
while sonar_devices > 0:
# Show sonar device and chest statuses.
print(
f'You have {sonar_devices} sonar device(s) left. {len(the_chests)} treasure chest(s) remaining.')
x, y = enter_player_move(previous_moves)
# We must track all moves so that sonar devices can be updated.
previous_moves.append([x, y])
move_result = make_move(the_board, the_chests, x, y)
if move_result == False:
continue
else:
if move_result == 'You have found a sunken treasure chest!':
# Update all the sonar devices currently on the map.
for x, y in previous_moves:
make_move(the_board, the_chests, x, y)
draw_board(the_board)
print(move_result)
if len(the_chests) == 0:
print(
'You have found all the sunken treasure chests! Congratulations and good game!')
break
sonar_devices -= 1
if sonar_devices == 0:
print(
            'We\'ve run out of sonar devices! Now we have to turn the ship around and head')
print('for home with treasure chests still out there! Game over.')
print(' The remaining chests were here:')
for x, y in the_chests:
print(f' {x}, {y}')
print('Do you want to play again? (yes or no)')
if not input().lower().startswith('y'):
sys.exit()
|
[
"orezpablo@gmail.com"
] |
orezpablo@gmail.com
|
92d9d24d3beb5ec8799d88be94123456d4805482
|
9da1a3470d60a667167ecba0a49915296de2fbc8
|
/server/app/utils/token_util.py
|
f57cff5e5ac332cd85b84fb05b608e2dbac6f71e
|
[
"MIT"
] |
permissive
|
csu-xiao-an/web_info_monitor
|
5d01d296b2fc9583a1029df30af1cd89feff4419
|
5f39254a4ae014e1a2017006290585b4648cc013
|
refs/heads/master
| 2020-07-27T08:46:16.882741
| 2019-09-09T14:08:44
| 2019-09-09T14:08:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,545
|
py
|
from flask import jsonify
from itsdangerous import TimedJSONWebSignatureSerializer, SignatureExpired, \
BadSignature
from app.models import User
import os
secret_key = os.environ.get("secret_key", "recar")
# Returns the token as a string
def generate_auth_token(uid, is_amdin=False, scope=None,
                        expiration=5000):
    """Generate a token."""
    # Use the serializer from itsdangerous, passing the expiry time and the
    # app's secret key.
    s = TimedJSONWebSignatureSerializer(secret_key,
                        expires_in=expiration)
    # The fields inside the token are set by the design and drive the business
    # logic: uid uniquely identifies the requesting client, is_amdin marks the
    # client type, and scope carries the permission scope; add or remove fields
    # per the business scenario.
    # An expiry time must be set; two hours is the usual choice.
    return s.dumps({
        'uid': uid,
        'is_amdin': is_amdin,
        'scope': scope
    }).decode('ascii')
# Token verification
def verify_token(token):
    s = TimedJSONWebSignatureSerializer(secret_key)
try:
data = s.loads(token)
# except SignatureExpired:
# raise MyHttpAuthFailed('token expired')
# # return {'message': 'token expired'}, return_code.Unauthorized#token_expired() # valid token, but expired
# except BadSignature:
# raise MyHttpAuthFailed('token invalid')
# # return {'message':'token invalid'}, return_code.Unauthorized #invalid_token() # invalid token
except:
return None
user = User.query.filter_by(id=data['uid']).first()
return user
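# Round-trip sketch (assumes a Flask app context so User.query works):
#   token = generate_auth_token(uid=1, scope=['read'])
#   user = verify_token(token)  # None if the token is expired or tampered with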
|
[
"yansiyu@360.net"
] |
yansiyu@360.net
|
b534f887f4eef332a9a1d5dc5f0a6b197b40df84
|
29ad238bedc14b3c268b22777391b25fb8701858
|
/config.py
|
5c010a83ee93ad59c31d5579d15039b6b2d83b60
|
[] |
no_license
|
chiris-ye/-
|
2c079efe602f390fc5fdfd2d3a74d73d840c3cfd
|
84b836a637c6647ab13d801caff03359956536c0
|
refs/heads/master
| 2021-12-10T02:36:58.345901
| 2021-11-03T06:40:14
| 2021-11-03T06:40:14
| 262,729,476
| 0
| 0
| null | 2020-05-10T06:52:20
| 2020-05-10T06:52:19
| null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
class config():
    embed_dim = 300
    hidden_dim = 300
    layers = 1
    dropout = 0.1
    seq_in_size = 7200
    fc_dim = 100
    out_dim = 2
    mind_dim = 600
    man_dim = 16
|
[
"noreply@github.com"
] |
noreply@github.com
|
89e6f9abf269be06d699b31d7a18f80d863cd0af
|
ea57b713f59d2e2a8d6f4b0b6938c20a8ae6d67d
|
/fetchQzone/iszhi.py
|
96f87ced87c63b0bdd70fb54d9775a8bf09cc8d9
|
[] |
no_license
|
guoyu07/fetchQzone
|
9919f9fad3d44a4643ebaba61d534f3d99c95f8f
|
db0d69b7d4369bd8aaafc2af8f14fdbe6316d294
|
refs/heads/master
| 2021-05-28T20:50:23.052035
| 2015-03-06T05:10:56
| 2015-03-06T05:10:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
def iszhi(x):
    # Primality test by counting divisors.
    cnt = 0
    if x <= 1:
        return False
    for m in range(1, x + 1):
        if x % m == 0:
            cnt += 1
    # A prime has exactly two divisors: 1 and x itself.
    return cnt == 2
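# Quick checks: iszhi(2) and iszhi(13) are True; iszhi(1) and iszhi(4) are False.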
|
[
"zhangxu1573@qq.com"
] |
zhangxu1573@qq.com
|
2d0fe84cfd8f2ee9d2079fa3b668038f362c4362
|
e48faca9b6e2016ae936a77e8acc2f9bce08d207
|
/series_in_func.py
|
d6418cb68aeeb2c92160d18f6dd99020f817e56e
|
[] |
no_license
|
ramachitikineddy/becomecoder
|
84d7315e7f99c1e18855350c9f14729ba8e57087
|
d4e9611bb8a82dd0fb85a33e9b00443daee1e781
|
refs/heads/main
| 2023-05-06T20:59:51.022901
| 2021-05-29T10:33:17
| 2021-05-29T10:33:17
| 367,065,138
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
def seq(n):
if n%2:
return 3*n+1
return n//2
n=int(input())
print(n,end=" ")
while (n:=seq(n)):
print(n,end=" ")
if n==1:
break
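# Example: entering 6 prints "6 3 10 5 16 8 4 2 1", the Collatz sequence from 6.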
|
[
"noreply@github.com"
] |
noreply@github.com
|
1bb19df97eb432adc4d8988bc491abf66979b71f
|
babf32f611200957e4e2a6bd3c156916b891c43f
|
/mysite/settings.py
|
b9d178757d2991419be0a8125ff4f05d4507fd1a
|
[] |
no_license
|
Tawfiq-Abu/new_blog
|
5faffc2f569d4cc4f7e56ea9207d5ac97c64e5cd
|
10743b8ac6ef665a928e909aba8f4c1d4557964f
|
refs/heads/main
| 2023-02-19T16:54:43.002556
| 2021-01-19T11:44:10
| 2021-01-19T11:44:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,076
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g7o50)e+v6(d)n&jxt@zfg$_^p!0)ub&v6n735=ysw*e+#okaf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myblog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"tawfiqabubakr7@gmail.com"
] |
tawfiqabubakr7@gmail.com
|
3947c1886e64b2e14da5a55a34c8661ff9cdde6c
|
ed8c7fba9c5592b14ab79eac399813d9d0537b7d
|
/website/migrations/0001_initial.py
|
fac9f699cda3d22a2ad1bac05bf8e333a9cb5fe6
|
[] |
no_license
|
OpenWebCurtin/Catching-out-corruption
|
4834f7d95393b71009347237aff08f7726049a7a
|
33617c4d01dd33f118aaac4c562948598f6206ba
|
refs/heads/main
| 2023-01-18T19:21:26.723344
| 2020-11-23T13:07:19
| 2020-11-23T13:07:19
| 315,027,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,429
|
py
|
# Generated by Django 2.2.6 on 2019-10-31 16:18
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='AsyncJob',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('priority', models.IntegerField()),
('status', models.IntegerField(choices=[(0, 'Unprocessed'), (1, 'Finished'), (2, 'Error'), (3, 'Unsupported')], default=0)),
],
),
migrations.CreateModel(
name='DocumentResult',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('document', models.CharField(max_length=128)),
('occurs_total', models.IntegerField(default=0)),
('occurs_agenda_items', models.IntegerField(default=0)),
('normalised_score', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('filename', models.CharField(max_length=128)),
],
options={
'permissions': [('upload', 'Can upload documents using the PDF upload service.'), ('delete', 'Can delete documents using the file deletion service.'), ('recover', 'Can recover deleted documents using the file recovery service.')],
},
),
migrations.CreateModel(
name='FileDeletionRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('admin', models.CharField(max_length=128)),
('delete_by', models.IntegerField(choices=[(0, 'Delete files by filename.'), (1, 'Delete files by uploader.')], default=0)),
('target_file', models.CharField(blank=True, max_length=128, null=True)),
('target_uploader', models.CharField(blank=True, max_length=128, null=True)),
],
),
migrations.CreateModel(
name='FileRecoveryRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('admin', models.CharField(max_length=128)),
('recover_by', models.IntegerField(choices=[(0, 'Recover files by filename.'), (1, 'Recover files by uploader.')], default=0)),
('target_file', models.CharField(blank=True, max_length=128, null=True)),
('target_uploader', models.CharField(blank=True, max_length=128, null=True)),
],
),
migrations.CreateModel(
name='KeyPhraseOptionSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key_phrase', models.CharField(blank=True, default='', max_length=128)),
('key_phrase_type', models.IntegerField(choices=[(0, 'Any keyword type'), (1, 'Councillor name'), (2, 'Person name'), (3, 'Business name'), (4, 'Property address')], default=0, null=True)),
('key_phrase_importance', models.DecimalField(decimal_places=2, max_digits=3, null=True)),
],
),
migrations.CreateModel(
name='PrivilegeModification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('admin', models.CharField(max_length=128)),
('target_user', models.CharField(max_length=128)),
('target_group', models.CharField(choices=[('regular user', 'Regular user'), ('privileged user', 'Privileged user'), ('administrator', 'Administrator')], default=0, max_length=32)),
],
),
migrations.CreateModel(
name='RelationResult',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('kp1', models.CharField(max_length=128)),
('kp2', models.CharField(max_length=128)),
('kp3', models.CharField(max_length=128)),
('kp4', models.CharField(max_length=128)),
('kp5', models.CharField(max_length=128)),
('document', models.CharField(blank=True, default='', max_length=128)),
('agenda_item_file', models.CharField(blank=True, default='', max_length=128)),
('agenda_item', models.CharField(blank=True, default='', max_length=128)),
('description', models.CharField(blank=True, default='', max_length=128)),
('search_type', models.IntegerField()),
],
),
migrations.CreateModel(
name='Search',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('search_by', models.IntegerField(choices=[(0, 'Search by relation'), (1, 'Search by document')], default=0)),
('search_t', models.IntegerField(choices=[(0, 'Search minutes'), (1, 'Search non-minutes')], default=0)),
('fbm', models.BooleanField(default=False)),
('fbm_filename', models.CharField(blank=True, default='', max_length=128)),
('fbm_uploader', models.CharField(blank=True, default='', max_length=128)),
('fbm_upload_date_start', models.DateField(null=True)),
('fbm_upload_date_end', models.DateField(null=True)),
('fbc', models.BooleanField(default=False)),
('fbc_council', models.CharField(blank=True, default='', max_length=128)),
('fbc_publish_date_start', models.DateField(null=True)),
('fbc_publish_date_end', models.DateField(null=True)),
('key_phrase1', models.CharField(blank=True, default='', max_length=128)),
('key_phrase2', models.CharField(blank=True, default='', max_length=128)),
('key_phrase3', models.CharField(blank=True, default='', max_length=128)),
('key_phrase4', models.CharField(blank=True, default='', max_length=128)),
('key_phrase5', models.CharField(blank=True, default='', max_length=128)),
('key_phrase_type1', models.IntegerField(choices=[(0, 'Any keyword type'), (1, 'Councillor name'), (2, 'Person name'), (3, 'Business name'), (4, 'Property address')], default=0, null=True)),
('key_phrase_type2', models.IntegerField(choices=[(0, 'Any keyword type'), (1, 'Councillor name'), (2, 'Person name'), (3, 'Business name'), (4, 'Property address')], default=0, null=True)),
('key_phrase_type3', models.IntegerField(choices=[(0, 'Any keyword type'), (1, 'Councillor name'), (2, 'Person name'), (3, 'Business name'), (4, 'Property address')], default=0, null=True)),
('key_phrase_type4', models.IntegerField(choices=[(0, 'Any keyword type'), (1, 'Councillor name'), (2, 'Person name'), (3, 'Business name'), (4, 'Property address')], default=0, null=True)),
('key_phrase_type5', models.IntegerField(choices=[(0, 'Any keyword type'), (1, 'Councillor name'), (2, 'Person name'), (3, 'Business name'), (4, 'Property address')], default=0, null=True)),
('key_phrase_importance1', models.DecimalField(decimal_places=2, max_digits=3, null=True)),
('key_phrase_importance2', models.DecimalField(decimal_places=2, max_digits=3, null=True)),
('key_phrase_importance3', models.DecimalField(decimal_places=2, max_digits=3, null=True)),
('key_phrase_importance4', models.DecimalField(decimal_places=2, max_digits=3, null=True)),
('key_phrase_importance5', models.DecimalField(decimal_places=2, max_digits=3, null=True)),
],
options={
'permissions': [('search', 'Can search using the document search feature.')],
},
),
migrations.CreateModel(
name='AsyncJobType',
fields=[
('job_base', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='website.AsyncJob')),
],
),
migrations.CreateModel(
name='UploadedFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(upload_to='uploads/')),
('filename', models.CharField(blank=True, default='', max_length=128)),
('type', models.IntegerField(choices=[(0, 'Public minutes document.'), (1, 'Public non-minutes document.'), (2, 'Private non-minutes document.')], default=0)),
('document_category', models.CharField(default='generic', max_length=128)),
('uploader', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='RecoveryRequestItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.FileRecoveryRequest')),
('target_file', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.UploadedFile')),
],
),
migrations.CreateModel(
name='DeletionRequestItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.FileDeletionRequest')),
('target_file', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.UploadedFile')),
],
),
migrations.CreateModel(
name='ProcessingJob',
fields=[
('asyncjobtype_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='website.AsyncJobType')),
('file_name', models.CharField(max_length=128)),
],
bases=('website.asyncjobtype',),
),
migrations.CreateModel(
name='FileRecoveryJob',
fields=[
('asyncjobtype_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='website.AsyncJobType')),
('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.FileRecoveryRequest')),
],
bases=('website.asyncjobtype',),
),
migrations.CreateModel(
name='FileDeletionJob',
fields=[
('asyncjobtype_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='website.AsyncJobType')),
('scheduled_time', models.FloatField()),
('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.FileDeletionRequest')),
],
bases=('website.asyncjobtype',),
),
]
|
[
"r.a.clydesdale+bb-ccp@gmail.com"
] |
r.a.clydesdale+bb-ccp@gmail.com
|
edc33e4a7d63438dd82b67c0afebd70a4f1e0c49
|
6fb6a62a33b13690f3c95c166f07a736836308b6
|
/functions/cartupdate/main.py
|
483369e75fa95ce5f8173cd6d6f83a32c2c1ff5e
|
[] |
no_license
|
Dualic/petshop
|
88172ed47d65ccef79342524262b4de26995a463
|
03443c0b8c2a3a12e9552a5924b99745fb4b6465
|
refs/heads/master
| 2023-07-16T08:53:22.961111
| 2021-09-03T12:16:17
| 2021-09-03T12:16:17
| 401,612,658
| 0
| 2
| null | 2021-09-03T09:40:14
| 2021-08-31T07:29:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
def getsecret(secretname):
import google.cloud.secretmanager as secretmanager
client = secretmanager.SecretManagerServiceClient()
name = f"projects/week10-1-324606/secrets/{secretname}/versions/latest"
response = client.access_secret_version(request={"name": name})
return response.payload.data.decode("UTF-8")
def cartupdate(request):
import psycopg2
dbname = getsecret("dbname")
user = "postgres"
password = getsecret("dbpassword")
host = getsecret("host")
conn = None
request_json = request.get_json(silent=True)
id = request_json.get("id")
customer_id = request_json.get("customer_id")
product_id = request_json.get("product_id")
amount = request_json.get("amount")
SQL = "UPDATE cart SET customer_id = %s, product_id = %s, amount = %s WHERE id = %s;"
result = "Update failed"
try:
conn = psycopg2.connect(host=host, dbname=dbname, user=user, password=password)
cursor = conn.cursor()
cursor.execute(SQL, (customer_id, product_id, amount, id))
conn.commit()
cursor.close()
result = "Update success"
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return result
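# Expected JSON body for this function (field names taken from the code above):
#   {"id": 1, "customer_id": 2, "product_id": 3, "amount": 4}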
|
[
"ilkka.o.pekkala@gmail.com"
] |
ilkka.o.pekkala@gmail.com
|
005779a57f96302b20a3bcde3152d53965d436f1
|
d496d504bf4ccdb59fbbeeee7b5d70ae7ab136b8
|
/ts_development/version1/ts__development/models/models.py
|
7448e94a50dfbd30c0d9546155d7beafbc13bcf3
|
[] |
no_license
|
taybahsoftegy-dev/ts-modules
|
6e92bb0748238fcde38df146ab73ae311f16df55
|
cf4ec549943a0ba29d203ef1611a337040389d64
|
refs/heads/master
| 2022-11-08T23:18:37.734241
| 2020-06-29T12:45:15
| 2020-06-29T12:45:15
| 275,809,983
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,976
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api,_
import time
class Development_Tracking(models.Model):
_name = 'development.tracking'
_inherit = ['portal.mixin', 'mail.thread.cc', 'mail.activity.mixin', 'rating.mixin']
_mail_post_access = 'read'
_check_company_auto = True
# _mail_post_access = 'read'
serial = fields.Integer(string='Serial',tracking=True)
Date = fields.Date(default=lambda *a: time.strftime('%Y-%m-%d'),tracking=True)
module = fields.Char(string='Module',tracking=True)
form = fields.Char(string = 'Form',tracking=True)
report = fields.Char(string='Report',tracking=True)
new = fields.Char(string='New',tracking=True)
description = fields.Text(string= 'Description',tracking=True)
status = fields.Selection([
('open', 'Open'),
('closed', 'Closed'),
('rejected', 'Rejected')], default='open', tracking=True)
time_consumed = fields.Char('Time Consumed',tracking=True)
user_id = fields.Many2one('res.users',
default=lambda self: self.env.uid,
index=True, tracking=True)
user_email = fields.Char(related='user_id.email', string='User Email', readonly=True, related_sudo=False)
partner_id = fields.Many2one('res.partner',
string='Customer',
default=lambda self: self.env.uid,)
# class Development_Tracking(models.Model):
# _name = 'Development.Tracking'
# _description = 'For Tracking Development for TaybahSoft'
# name = fields.Char()
# value = fields.Integer()
# value2 = fields.Float(compute="_value_pc", store=True)
# description = fields.Text()
#
# @api.depends('value')
# def _value_pc(self):
# for record in self:
# record.value2 = float(record.value) / 100
|
[
"dev.mohamedfci@gmail.com"
] |
dev.mohamedfci@gmail.com
|
cfd392a9079699ee6d0b693e945546b5a1178576
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_301/ch41_2019_04_04_16_40_15_344267.py
|
6c41a0d67bc67884cf85bc1629a7262fa142531b
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 102
|
py
|
a = input('que palavra? ')  # "which word?"
while a != 'desisto':  # 'desisto' = "I give up"
    a = input('que palavra? ')
print('voce acertou')  # "you got it right"
|
[
"you@example.com"
] |
you@example.com
|
e1e3124cd44931303505037d6d88f51555fb555a
|
403e7f22b8dd4119fc83d153d6dc6e3520ac1922
|
/python-scripts/S3/awsS3PutBigFiles.py
|
b1f76cdc33db30acf3ebdecee8ddf7bb2eea8edd
|
[] |
no_license
|
vincedgy/AWS-Scripts
|
1e56c13245b38f5c520a4207acf544f1d01ac5cb
|
f350167c200700daea23ad9dcbe609ab1d7b90d9
|
refs/heads/master
| 2020-03-29T00:39:13.453200
| 2017-11-01T20:34:25
| 2017-11-01T20:34:25
| 94,635,738
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,442
|
py
|
""" """
# Create a big file (100 Mb):
# dd if=/dev/zero of=/tmp/bigfile bs=1024 count=0 seek=$[1024*100]
import os
import sys
import threading
import boto3
from boto3.s3 import transfer
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write(
"\r%s %s / %s (%.2f%%)" % (
self._filename,
self._seen_so_far,
self._size,
percentage))
sys.stdout.flush()
# ---------------------------------------------------------------------
# Main
if __name__ == '__main__':
client = boto3.client('s3', 'eu-west-1')
config = transfer.TransferConfig(
multipart_threshold=8 * 1024 * 1024,
max_concurrency=10,
num_download_attempts=10,
)
uploading = transfer.S3Transfer(client, config)
uploading.upload_file(
'/tmp/bigfile',
'e-attestations-ova',
'bigfile',
callback=ProgressPercentage('/tmp/bigfile')
)
|
[
"vincent.dagoury@gmail.com"
] |
vincent.dagoury@gmail.com
|
732ef0438ed7f6a4a45a2ba312e54337afc3e84a
|
c7f8193a80d68b6144af8d9b2e2f012bf463af6a
|
/busstop.py
|
e02b0e8415b6ff5dc87ad36202eda1409cd94c78
|
[] |
no_license
|
marcteale/DAKboard-OneBusAway-integration
|
8c060360062f07d1be4e1f88e7d0759b3efd8a8d
|
9803aa8568828e8b0533e5c4f452b50f424805b1
|
refs/heads/master
| 2020-03-21T08:29:31.120908
| 2018-10-03T19:36:04
| 2018-10-03T19:36:04
| 138,347,228
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,173
|
py
|
#!/usr/bin/env python3.6
import configparser
import json
import os
import sys
from datetime import datetime
import requests
def get_departures_for_stop(departures, stop_id, routes, minutes_before, minutes_after, server, apikey):
"""Fetch the departures for the requested stop and return them as a dict."""
r = requests.get('{}/api/where/arrivals-and-departures-for-stop/{}.json'.format(server, stop_id),
params={'key': apikey, 'minutesBefore': minutes_before, 'minutesAfter': minutes_after})
rj = r.json()
stop_name = ''
if r.ok:
for stop in rj['data']['references']['stops']:
if stop['id'] == stop_id:
stop_name = stop['name']
break
current_time = datetime.fromtimestamp(rj['currentTime'] / 1000)
if rj['data']['entry']['arrivalsAndDepartures']:
for a in rj['data']['entry']['arrivalsAndDepartures']:
if a['departureEnabled'] and (routes is None or a['routeShortName'] in routes):
if a['predicted'] and a['predictedDepartureTime'] != 0:
departure_string = 'predictedDepartureTime'
else:
departure_string = 'scheduledDepartureTime'
departure_time = datetime.fromtimestamp(a[departure_string] / 1000)
delta = int((departure_time - current_time).seconds / 60)
value = "{} - {} minute{}".format(a['routeShortName'], delta, '' if abs(delta) == 1 else 's')
subtitle = '{}'.format(departure_string.replace('DepartureTime', ''))
departures.append({'value': value, 'title': stop_name, 'subtitle': subtitle})
else:
departures.append(
{'value': 'No scheduled departures', 'title': stop_name,
'subtitle': 'No departures schedule or predicted in the next {} minutes.'.format(minutes_after)}
)
else:
departures.append({'value': 'Failed to fetch data', 'title': '', 'subtitle': rj['text']})
return departures
def get_config():
"""Read the config file."""
config = configparser.ConfigParser(allow_no_value=True)
configfile = os.path.abspath(os.path.dirname(__file__)) + '/busstop.conf'
config.read(configfile)
    routes = [r.strip() for r in config.get('defaults', 'routes').split(',')] \
        if config.has_option('defaults', 'routes') else None
defaults = {'minutesbefore': config.get('defaults', 'minutesbefore'),
'minutesafter': config.get('defaults', 'minutesafter'),
'routes': routes,
'apikey': os.environ['APIKEY'],
'server': config.get('defaults', 'server')}
    config.remove_section('defaults')
return config, defaults
def app(environ, start_response):
status = "200 OK"
try:
config, defaults = get_config()
results = []
ok = True
except Exception as e:
status = "500 Internal Server Error"
        results = [{'title': 'Error', 'value': str(e), 'subtitle': ''}]
ok = False
if ok:
for section in config.sections():
minsBefore = config.get(section, 'minutesbefore') \
if config.has_option(section, 'minutesbefore') else defaults['minutesbefore']
minsAfter = config.get(section, 'minutesafter') \
if config.has_option(section, 'minutesafter') else defaults['minutesafter']
            routes = [r.strip() for r in config.get(section, 'routes').split(',')] \
                if config.has_option(section, 'routes') else defaults['routes']
stopId = section
results = get_departures_for_stop(results, stopId, routes, minsBefore, minsAfter, defaults['server'],
defaults['apikey'])
data = str.encode(json.dumps(results))
response_headers = [
("Content-Type", "application/json"),
("Content-Length", str(len(data)))
]
start_response(status, response_headers)
return iter([data])
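# The module exposes a plain WSGI callable, so any WSGI server can host it,
# e.g. (an assumption, not from this repo): gunicorn busstop:app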
|
[
"marc.teale@openmarket.com"
] |
marc.teale@openmarket.com
|
8ccd44a76e64b8cc0ad921f213460c409e895266
|
cc7b4e71b3c27240ec650a75cc6f6bbab5e11387
|
/crdb/templatetags/email_tags.py
|
b13eedd6c32b7950e6ee3313c89e155c42547e14
|
[
"MIT"
] |
permissive
|
jsayles/CoworkingDB
|
0cdada869d950a28cfef20d1b9c1eb3eb4d7b1c2
|
78776910eba0354a7fd96b2e2c53a78e934d8673
|
refs/heads/master
| 2023-02-22T23:11:19.040799
| 2021-12-28T19:13:39
| 2021-12-28T19:13:39
| 883,951
| 3
| 0
|
MIT
| 2023-02-15T17:59:10
| 2010-09-02T18:36:43
|
Python
|
UTF-8
|
Python
| false
| false
| 764
|
py
|
from django import template
from django.utils.html import format_html
from crdb.models import EmailAddress
register = template.Library()
@register.simple_tag
def email_verified(email):
if not email:
return None
if not isinstance(email, EmailAddress):
# Got a string so we should pull the object from the database
email = EmailAddress.objects.get(email=email)
if email.is_verified():
return ""
html = '<span style="color:red;">( <a target="_top" style="color:red;" href="{}">{}</a> )</span>'
link = email.get_send_verif_link()
label = "Not Verified"
return format_html(html, link, label)
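# Hedged usage sketch (commented out so the module stays import-safe): render
# the tag from a template; assumes this module is loadable as "email_tags".
# from django.template import Context, Template
# tpl = Template("{% load email_tags %}{% email_verified user.email %}")
# html = tpl.render(Context({"user": request.user}))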
|
[
"jsayles@gmail.com"
] |
jsayles@gmail.com
|
20ba1ba73360f4befafe0351c226f32696426e2f
|
a8163b09c4b4a58fc82cdb6ff8df29197fd15945
|
/_OldVersion/index.py
|
2756bb9c27b13e6d15761ae94704a0c1842d6512
|
[] |
no_license
|
zhudonlin/Fuck_HENUDC
|
bd78a78f0807e96fdfda36a727c2c017cab7ad9c
|
0aa398333c1d8e42c4820f6b80292509af46cfd0
|
refs/heads/main
| 2023-07-14T13:43:51.803980
| 2021-09-03T08:08:13
| 2021-09-03T08:08:13
| 402,689,111
| 1
| 0
| null | 2021-09-03T07:46:57
| 2021-09-03T07:46:56
| null |
UTF-8
|
Python
| false
| false
| 16,520
|
py
|
# -*- coding: utf-8 -*-
import sys
import json
import uuid
import oss2
import yaml
import base64
import requests
import time
import random
import uanalyse
from pyDes import des, CBC, PAD_PKCS5
from datetime import datetime, timedelta, timezone
from urllib.parse import urlparse
from urllib3.exceptions import InsecureRequestWarning
import notification
# Debug mode
debug = True
if debug:
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
rand_lon = str(random.randint(0, 9))
rand_lat = str(random.randint(0, 9))
# Read the YAML config
def getYmlConfig(yaml_file='config.yml'):
file = open(yaml_file, 'r', encoding="utf-8")
file_data = file.read()
file.close()
config = yaml.load(file_data, Loader=yaml.FullLoader)
return dict(config)
# Global config
config = getYmlConfig(yaml_file='config.yml')
# Get the current UTC time and format it as Beijing time
def getTimeStr():
utc_dt = datetime.utcnow().replace(tzinfo=timezone.utc)
bj_dt = utc_dt.astimezone(timezone(timedelta(hours=8)))
return bj_dt.strftime("%Y-%m-%d %H:%M:%S")
# Print a timestamped log line and flush the buffer immediately
def log(content):
print(getTimeStr() + ' ' + str(content))
sys.stdout.flush()
# Discover the Cpdaily (今日校园) API endpoints
def getCpdailyApis(user):
apis = {}
user = user['user']
    if 'cpdaily' in user['ua']:
        print('Your UA looks wrong, please read the manual!')
        exit(2)
    if 'Android' not in user['ua']:
        print('Your UA looks wrong, please read the manual!')
        exit(2)
schools = requests.get(url='https://mobile.campushoy.com/v6/config/guest/tenant/list', verify=not debug).json()[
'data']
flag = True
for one in schools:
if one['name'] == user['school']:
if one['joinType'] == 'NONE':
                log(user['school'] + ' has not joined Cpdaily')
exit(-1)
flag = False
params = {
'ids': one['id']
}
res = requests.get(url='https://mobile.campushoy.com/v6/config/guest/tenant/info', params=params,
verify=not debug)
data = res.json()['data'][0]
joinType = data['joinType']
idsUrl = data['idsUrl']
ampUrl = data['ampUrl']
if 'campusphere' in ampUrl or 'cpdaily' in ampUrl:
parse = urlparse(ampUrl)
host = parse.netloc
res = requests.get(parse.scheme + '://' + host)
parse = urlparse(res.url)
apis[
'login-url'] = idsUrl + '/login?service=' + parse.scheme + r"%3A%2F%2F" + host + r'%2Fportal%2Flogin'
apis['host'] = host
ampUrl2 = data['ampUrl2']
if 'campusphere' in ampUrl2 or 'cpdaily' in ampUrl2:
parse = urlparse(ampUrl2)
host = parse.netloc
res = requests.get(parse.scheme + '://' + host)
parse = urlparse(res.url)
apis[
'login-url'] = idsUrl + '/login?service=' + parse.scheme + r"%3A%2F%2F" + host + r'%2Fportal%2Flogin'
apis['host'] = host
break
if flag:
        log(user['school'] + ' school not found; check that the full school name is correct')
exit(-1)
log(apis)
return apis
# Log in and obtain a session
def getSession(user, apis):
user = user['user']
params = {
# 'login_url': 'http://authserverxg.swu.edu.cn/authserver/login?service=https://swu.cpdaily.com/wec-counselor-sign-apps/stu/sign/getStuSignInfosInOneDay',
'login_url': 'https://ids.henu.edu.cn/authserver/login?service=https%3A%2F%2Fids.henu.edu.cn%2Fauthserver%2Fmobile%2Fcallback%3FappId%3D277935239',
'needcaptcha_url': '',
'captcha_url': '',
'username': user['username'],
'password': user['password']
}
cookies = {}
    # Simulate login via the login API exposed by an earlier project
if 'enable' in user:
if user['enable'] == 0:
            print('You set enable=0; safe mode will not fetch a cookie. To proceed, set enable=1 in config.yml!')
            sendMessage('If you see this message, please re-create your config on GitHub.', user, 'Error alert - Cpdaily auto sign-in')
exit(888)
if user['usecookies'] == 0:
res = ''
try:
j = 0
for i in range(0, 5):
print("使用config中定义的api")
res = requests.post(config['login']['api'], data=params)
if 'success' not in res.json()['msg']:
time.sleep(5)
                    print(f'Attempt {j + 1}: no cookies yet')
j = j + 1
else:
break
            if 'success' not in res.json()['msg']:
                print(f'Still no cookies after {j} attempts; the school server may be down, handle it yourself!')
                sendMessage(f'If you see this message, {j} attempts still got no cookies; the school server may be down, handle it yourself!', user)
                exit(888)
print(res.json())
except Exception as e:
res = requests.post(url='http://www.zimo.wiki:8080/wisedu-unified-login-api-v1.0/api/login', data=params)
print("使用子墨的API")
            if 'success' not in res.json()['msg']:
                print("Zimo's API did not return cookies either")
                sendMessage("If you see this message, Zimo's API got no cookies either; the school server may be down, handle it yourself!", user, 'Error alert - Cpdaily auto sign-in')
        # cookieStr may also be a manually captured cookie; its lifetime is unknown, test it yourself
# cookieStr = str(res.json()['cookies'])
cookieStr = str(res.json()['cookies'])
        print('Got cookie from the API')
# exit(999)
else:
cookieStr = user['cookies']
        print('Using the cookie from the config file')
print(cookieStr)
    # log(cookieStr)  # print only while debugging
# if cookieStr == 'None':
# log(res.json())
# exit(-1)
# log(cookieStr)
    # Parse the cookie string
for line in cookieStr.split(';'):
name, value = line.strip().split('=', 1)
cookies[name] = value
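    # e.g. "MOD_AUTH_CAS=abc; JSESSIONID=xyz" parses to
    # {"MOD_AUTH_CAS": "abc", "JSESSIONID": "xyz"} (illustrative values)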
session = requests.session()
session.cookies = requests.utils.cookiejar_from_dict(cookies, cookiejar=None, overwrite=True)
return session
# Fetch the latest unsigned sign-in tasks and complete them all
def getUnSignedTasksAndSign(session, apis, user):
headers = {
'Accept': 'application/json, text/plain, */*',
'User-Agent': user['user']['ua'] + ' cpdaily/9.0.0 wisedu/9.0.0',
'content-type': 'application/json',
'Accept-Encoding': 'gzip,deflate',
'Accept-Language': 'zh-CN,en-US;q=0.8',
'Content-Type': 'application/json;charset=UTF-8'
}
print(headers)
    # First call to the daily sign-in task endpoint, mainly to obtain MOD_AUTH_CAS
res = session.post(
url='https://{host}/wec-counselor-sign-apps/stu/sign/getStuSignInfosInOneDay'.format(host=apis['host']),
headers=headers, data=json.dumps({}))
    # Second call fetches the actual sign-in tasks
res = session.post(
url='https://{host}/wec-counselor-sign-apps/stu/sign/getStuSignInfosInOneDay'.format(host=apis['host']),
headers=headers, data=json.dumps({}))
print(res.json())
    if len(res.json()['datas']['unSignedTasks']) < 1:
        log('No unsigned tasks at the moment')
        sendMessage('No unsigned tasks at the moment', user['user'])
        exit(0)
    elif time.localtime().tm_hour in [18, 19, 20, 21, 22, 23, 24, 0, 1, 2, 3, 4, 5, 6, 7]:
        print('Outside the sign-in window, come back later!')
        sendMessage('Custom restriction: outside the sign-in window, come back later!', user['user'])
        # exit(8)
    # TODO: remove
    # log(res.json())
for i in range(0, len(res.json()['datas']['unSignedTasks'])):
# if '出校' in res.json()['datas']['unSignedTasks'][i]['taskName'] == False:
# if '入校' in res.json()['datas']['unSignedTasks'][i]['taskName'] == False:
latestTask = res.json()['datas']['unSignedTasks'][i]
params = {
'signInstanceWid': latestTask['signInstanceWid'],
'signWid': latestTask['signWid']
}
task = getDetailTask(session, params, apis, user)
print(task)
        if time.localtime().tm_hour in [18, 19, 20, 21, 22, 23, 24, 0, 1, 2, 3, 4, 5, 6, 7]:
            print('Outside the sign-in window, come back later!')
form = fillForm(task, session, user, apis)
print(form)
        if time.localtime().tm_hour in [18, 19, 20, 21, 22, 23, 24, 0, 1, 2, 3, 4, 5, 6, 7]:
            print('Outside the sign-in window, come back later!')
submitForm(session, user, form, apis)
# Fetch the details of one sign-in task
def getDetailTask(session, params, apis, user):
headers = {
'Accept': 'application/json, text/plain, */*',
'User-Agent': user['user']['ua'] + ' cpdaily/9.0.0 wisedu/9.0.0',
'content-type': 'application/json',
'Accept-Encoding': 'gzip,deflate',
'Accept-Language': 'zh-CN,en-US;q=0.8',
'Content-Type': 'application/json;charset=UTF-8'
}
print(headers)
res = session.post(
url='https://{host}/wec-counselor-sign-apps/stu/sign/detailSignInstance'.format(host=apis['host']),
headers=headers, data=json.dumps(params))
data = res.json()['datas']
return data
# Fill in the sign-in form
def fillForm(task, session, user, apis):
user = user['user']
form = {}
if task['isPhoto'] == 1:
fileName = uploadPicture(session, user['photo'], apis)
form['signPhotoUrl'] = getPictureUrl(session, fileName, apis)
else:
form['signPhotoUrl'] = ''
if task['isNeedExtra'] == 1:
extraFields = task['extraField']
defaults = config['cpdaily']['defaults']
extraFieldItemValues = []
for i in range(0, len(extraFields)):
default = defaults[i]['default']
extraField = extraFields[i]
if config['cpdaily']['check'] and default['title'] != extraField['title']:
                log('Default config item %d is wrong, please check' % (i + 1))
                sendMessage('Submit error: default config item %d is wrong, please check' % (i + 1), user)
exit(-1)
extraFieldItems = extraField['extraFieldItems']
for extraFieldItem in extraFieldItems:
if extraFieldItem['content'] == default['value']:
extraFieldItemValue = {'extraFieldItemValue': default['value'],
'extraFieldItemWid': extraFieldItem['wid']}
                    # "Other" option: use the free-text value instead
if extraFieldItem['isOtherItems'] == 1:
extraFieldItemValue = {'extraFieldItemValue': default['other'],
'extraFieldItemWid': extraFieldItem['wid']}
extraFieldItemValues.append(extraFieldItemValue)
        # log(extraFieldItemValues)
        # Handle sign-ins that carry extra options
        form['extraFieldItems'] = extraFieldItemValues
# form['signInstanceWid'] = params['signInstanceWid']
form['signInstanceWid'] = task['signInstanceWid']
form['longitude'] = user['lon'] + rand_lon
form['latitude'] = user['lat'] + rand_lat
form['isMalposition'] = user['isMalposition']
form['uaIsCpadaily'] = True
    # NOTE: this parameter absolutely must not give the game away!!!
form['abnormalReason'] = user['abnormalReason']
form['position'] = user['address']
    # TODO: the exact name of this parameter is unverified; a packet capture is needed to confirm
return form
# Upload the photo to Aliyun OSS
def uploadPicture(session, image, apis):
url = 'https://{host}/wec-counselor-sign-apps/stu/sign/getStsAccess'.format(host=apis['host'])
res = session.post(url=url, headers={'content-type': 'application/json'}, data=json.dumps({}), verify=not debug)
datas = res.json().get('datas')
fileName = datas.get('fileName')
accessKeyId = datas.get('accessKeyId')
accessSecret = datas.get('accessKeySecret')
securityToken = datas.get('securityToken')
endPoint = datas.get('endPoint')
bucket = datas.get('bucket')
bucket = oss2.Bucket(oss2.Auth(access_key_id=accessKeyId, access_key_secret=accessSecret), endPoint, bucket)
with open(image, "rb") as f:
data = f.read()
bucket.put_object(key=fileName, headers={'x-oss-security-token': securityToken}, data=data)
res = bucket.sign_url('PUT', fileName, 60)
# log(res)
return fileName
# Get the preview URL of the uploaded photo
def getPictureUrl(session, fileName, apis):
url = 'https://{host}/wec-counselor-sign-apps/stu/sign/previewAttachment'.format(host=apis['host'])
data = {
'ossKey': fileName
}
res = session.post(url=url, headers={'content-type': 'application/json'}, data=json.dumps(data), verify=not debug)
photoUrl = res.json().get('datas')
return photoUrl
# DES encryption (CBC mode, PKCS5 padding)
def DESEncrypt(s, key='b3L26XNL'):
iv = b"\x01\x02\x03\x04\x05\x06\x07\x08"
k = des(key, CBC, iv, pad=None, padmode=PAD_PKCS5)
encrypt_str = k.encrypt(s)
# print(encrypt_str)
    print(f'Encrypted content: {base64.b64encode(encrypt_str).decode()}')
return base64.b64encode(encrypt_str).decode()
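# Hedged sketch (not in the original): the matching decrypt, handy for checking
# the extension header locally; same key/IV assumptions as DESEncrypt above.
def DESDecrypt(s, key='b3L26XNL'):
    iv = b"\x01\x02\x03\x04\x05\x06\x07\x08"
    k = des(key, CBC, iv, pad=None, padmode=PAD_PKCS5)
    return k.decrypt(base64.b64decode(s)).decode()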
# Submit the sign-in form
def submitForm(session, user, form, apis):
user = user['user']
# Cpdaily-Extension
extension = {
"lon": user['lon'] + rand_lon,
"model": uanalyse.ua2model(user['ua']),
"appVersion": "9.0.0",
"systemVersion": uanalyse.ua2androidver(user['ua']),
"userId": user['username'],
"systemName": "android",
"lat": user['lat'] + rand_lat,
"deviceId": str(uuid.uuid1())
}
headers = {
'tenantId': 'henu',
'User-Agent': user['ua'] + ' okhttp/3.12.4',
'CpdailyStandAlone': '0',
'extension': '1',
'Cpdaily-Extension': DESEncrypt(json.dumps(extension)),
'Content-Type': 'application/json; charset=utf-8',
'Accept-Encoding': 'gzip',
# 'Host': 'swu.cpdaily.com',
'Connection': 'Keep-Alive'
}
print(extension)
print(headers)
    # print('One step away from submitting; paused')
# exit(888)
    # TODO: place to add a submit lock
res = session.post(url='https://{host}/wec-counselor-sign-apps/stu/sign/submitSign'.format(host=apis['host']),
headers=headers, data=json.dumps(form))
message = res.json()['message']
    if message == 'SUCCESS':
        log('Auto sign-in succeeded')
        sendMessage('Auto sign-in succeeded', user, title='Cpdaily sign-in success notice')
    else:
        log('Auto sign-in failed, reason: ' + message)
        sendMessage('Auto sign-in failed: ' + message, user)
        # sendMessage('Auto sign-in failed, reason: ' + message, user['email'])
exit(0)
# Send a notification (WeChat via ServerChan)
def sendMessage(msg, user, title='[INFO] Cpdaily auto sign-in notice'):
    # Skip notifications about tasks that have not started yet ("未开始")
    if msg.count("未开始") > 0:
        return ''
print(user)
try:
if user['useserverchan'] != 0:
            log('Sending WeChat notification')
            log(getTimeStr())
            # sendMessageWeChat(msg + getTimeStr(), 'Cpdaily auto sign-in result notice')
notification.send_serverchan(user['serverchankey'], title, getTimeStr() + ' ' + msg)
except Exception as e:
log("send failed")
# Main function: iterate over the configured users and sign each one in
def main():
    for user in config['users']:
        try:
            apis = getCpdailyApis(user)
            session = getSession(user, apis)
            getUnSignedTasksAndSign(session, apis, user)
        except Exception:
            print('One user failed, moving on to the next')
            continue
# Entry point for Tencent Cloud Functions
def main_handler(event, context):
    try:
        main()
    except Exception as e:
        raise e
    else:
        return 'success'
if __name__ == '__main__':
# print(extension)
#print(main_handler({}, {}))
for user in config['users']:
print(user)
apis = getCpdailyApis(user)
session = getSession(user, apis)
getUnSignedTasksAndSign(session, apis, user)
|
[
"yulonger@outlook.com"
] |
yulonger@outlook.com
|
346dfc71b0db9a749e8ee1d65b7425c276ff9cb1
|
4577d8169613b1620d70e3c2f50b6f36e6c46993
|
/students/1797637/homework01/program03.py
|
1dea672b0e9890cc0e4a8907a314950ef5731495
|
[] |
no_license
|
Fondamenti18/fondamenti-di-programmazione
|
cbaf31810a17b5bd2afaa430c4bf85d05b597bf0
|
031ec9761acb1a425fcc4a18b07884b45154516b
|
refs/heads/master
| 2020-03-24T03:25:58.222060
| 2018-08-01T17:52:06
| 2018-08-01T17:52:06
| 142,419,241
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,579
|
py
|
def codifica(chiave, testo):
    '''Encode and return a text, given the text itself and an encoding key.'''
codifica=codifica_chiave(chiave)
for indice,carattere in enumerate(testo):
if carattere in codifica.keys(): testo = testo[:indice]+ testo[indice:].replace(testo[indice],codifica[carattere],1)
return testo
def decodifica(chiave, testo):
    '''Decode and return a text, given the text itself and the encoding key.'''
decodifica=decodifica_chiave(chiave)
for indice,carattere in enumerate(testo):
if carattere in decodifica.keys(): testo = testo[:indice]+ testo[indice:].replace(testo[indice],decodifica[carattere],1)
return testo
def codifica_chiave(chiave):
chiave=processa_chiave(chiave)
chiave_ord=''.join(sorted(chiave))
codifica={}
for indice,carattere in enumerate(chiave_ord): codifica[carattere]=chiave[indice]
return codifica
def decodifica_chiave(chiave):
chiave=processa_chiave(chiave)
chiave_ord=''.join(sorted(chiave))
decodifica={}
for indice,carattere in enumerate(chiave): decodifica[carattere]=chiave_ord[indice]
return decodifica
def processa_chiave(chiave):
for carattere in chiave:
if ord(carattere)<ord('a') or ord(carattere)>ord('z'): chiave= chiave.replace(carattere,'')
chiave=elimina_copie(chiave)
return chiave
def elimina_copie(chiave):
for carattere in chiave:
if carattere in chiave[chiave.find(carattere)+1:]: chiave= chiave.replace(carattere,'',1)
return chiave
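# Hedged round-trip check (appended, not in the original): the two key mappings
# are inverse permutations of the key's letters, so decoding an encoding is the
# identity on any text.
assert decodifica("chiavesegreta", codifica("chiavesegreta", "messaggio di prova")) == "messaggio di prova"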
|
[
"a.sterbini@gmail.com"
] |
a.sterbini@gmail.com
|
9c33d363aec75e149c68e57f14c11dbc0baa71bd
|
3825f56bef58063374d56d06a9de3418d04bedd6
|
/exercices/advanced-modules/stringio.py
|
168ebcf3ec63621fd6fabcba9afb42aea7e44b71
|
[
"MIT"
] |
permissive
|
cfascina/python-learning
|
a989869846fe8eca45f2f0717ea958bd603d12e5
|
1bc1d4032fb68456a092229de94b5207db7e9143
|
refs/heads/master
| 2020-05-20T08:48:06.121746
| 2019-07-11T18:07:01
| 2019-07-11T18:07:01
| 185,482,445
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
import io
# io.StringIO wraps the string in an in-memory file-like object
file = io.StringIO("This is just a normal string.")
# Read the file and print it
print(file.read())
# Reset the cursor and overwrite the buffer with a new string
file.seek(0)
file.write("Second line written to the file like object.")
# Reset the cursor, read the file again and print it
file.seek(0)
print(file.read())
# Close the file like object when contents are no longer needed
file.close()
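# A small appended sketch: getvalue() returns the whole buffer without moving
# the cursor (uses the same io module imported above).
buffer = io.StringIO("abc")
buffer.read()
print(buffer.getvalue())  # prints "abc" even though the cursor is at the end
buffer.close()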
|
[
"cfascina@gmail.com"
] |
cfascina@gmail.com
|
10dfdf1f98da77c3edb8bc6c1a987c773d2ff61f
|
bb4e603d41c040114a6161427593e30fad02828b
|
/classwork4.py
|
4db534ef9a08d9437b14bcf379813698bf674fbe
|
[] |
no_license
|
MS-Dok/pythonCore
|
40871c8dc53bee583fb12a6366db2275521d6e6e
|
d0d89997022f0e284626035d6fa61d94183d8f80
|
refs/heads/master
| 2021-07-02T00:52:31.621971
| 2020-10-20T12:29:40
| 2020-10-20T12:29:40
| 184,242,248
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,404
|
py
|
"""1. Створити список цілих чисел, які вводяться з терміналу та визначити серед них максимальне та мінімальне число.
"""
user_value=int(input("Enter the value: "))
print("Min value is",min([x for x in range(user_value)]))
print("Max value is",max([x for x in range(user_value)]))
"""
2. В інтервалі від 1 до 10 визначити числа
• парні, які діляться на 2,
• непарні, які діляться на 3,
• числа, які не діляться на 2 та 3.
"""
user_input_start,user_input_finish=int(input("Please enter the start value ")),int(input("Please enter the end value "))
print(list([x for x in range(user_input_start,user_input_finish) if x%2==0]))
print(list([x for x in range(user_input_start,user_input_finish) if x%3==0]))
print(list([x for x in range(user_input_start,user_input_finish) if x%3!=0 and x%2!=0]))
"""
3. Написати програму, яка обчислює факторіал числа, яке користувач вводить.(не використовувати рекурсивного виклику функції)
num_list = [int(input("Enter int {}: ".format(i+1))) for i in range(3)]
"""
while True:
user_input=int(input("Enter the value: "))
if user_input>=0:
break
if user_input == 0:
    print("Factorial of 0 is equal to 1")
else:
result=1
for i in range(1,user_input+1):
result*=i
print("Factorial of {} is equal to {}".format(user_input,result))
"""
4. Напишіть скрипт, який перевіряє логін, який вводить користувач.
Якщо логін вірний (First), то привітайте користувача.
Якщо ні, то виведіть повідомлення про помилку.
(використайте цикл while)
"""
while True:
user_input=input("Please enter the login:\n")
if user_input=="First":
break
else:
print("Incorrect login. Please try again\n")
"""
5. Перший випадок.
Написати програму, яка буде зчитувати числа поки не зустріне від’ємне число. При появі від’ємного числа програма зупиняється (якщо зустрічається 0 програма теж зупиняється).
"""
some_array=[]
while True:
user_input=int(input("Please enter the >0 value "))
if user_input >0:
some_array.append(user_input)
else:
break
print(some_array)
"""
6. Другий випадок.
На початку на вхід подається кількість елементів послідовності, а потім самі елементи. При появі від’ємного числа програма зупиняється (якщо зустрічається 0 програма теж зупиняється).
"""
some_array_2=[]
quantity=int(input("Please enter the quantity of numbers "))
i=0
while i<quantity:
value_to_add=int(input("Please enter the value to add "))
if value_to_add>0:
some_array_2.append(value_to_add)
i+=1
else:
print("<=0 value entered. Termination")
break
print(some_array_2)
"""
7. Знайти прості числа від 10 до 30, а всі решта чисел представити у вигляді добутку чисел
(наприклад 10 equals 2 * 5
11 is a prime number
12 equals 2 * 6
13 is a prime number
14 equals 2 * 7
………………….)
"""
list_ex = [x for x in range(10, 30)]
for i in list_ex:
    # try divisors in increasing order; the first hit gives a factorisation
    # (the original checked only 2 and 3, which misclassified 25 as prime)
    for d in range(2, i):
        if i % d == 0:
            print("{} equals {}*{}".format(i, d, i // d))
            break
    else:
        print(str(i) + " is a prime number")
"""
8. Відсортувати слова в реченні в порядку їх довжини (використати List Comprehensions)
"""
sentence="На початку на вхід подається кількість елементів послідовності а потім самі елементи."
print(sorted([x for x in set(sentence.lower().split())],key=len))
|
[
"noreply@github.com"
] |
noreply@github.com
|
8eac566ccd717ac44dc96ccf4939d880776e6da5
|
abeb7f8ce8fa3fe3035ad6d7139273266588248f
|
/bottles.py
|
6797be3262b2af6facecda607921990935effc46
|
[] |
no_license
|
mohanoatc/pythonSamples
|
dcddd6a9d989c5435d17bc888aa19ed6bc94c1c1
|
6ff5657e24d46b9d47561e9c9c5fe5735f65aea3
|
refs/heads/master
| 2020-03-22T14:16:46.219459
| 2018-07-15T12:03:17
| 2018-07-15T12:03:17
| 140,166,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
for bottles in range(10, 0, -1):
    if bottles > 1:
        print(bottles, " bottles of beer on the wall")
        print(bottles, " bottles of beer")
    else:
        print(bottles, " bottle of beer on the wall")
        print(bottles, " bottle of beer")
    print("Take one down.\nPass it around.")
    if bottles > 2:
        print(bottles - 1, "bottles of beer on the wall\n")
    elif bottles == 2:
        print(bottles - 1, "bottle of beer on the wall\n")
    else:
        print("No more bottles of beer on the wall\n")
|
[
"noreply@github.com"
] |
noreply@github.com
|
385836ada1f0c7aa8919ec7aeb97acca6aea94c0
|
644b13f90d43e9eb2fae0d2dc580c7484b4c931b
|
/network2.py
|
5dbc8833c5526d15e355e3169680c46c4a5bc280
|
[] |
no_license
|
yeonnseok/ps-algorithm
|
c79a41f132c8016655719f74e9e224c0870a8f75
|
fc9d52b42385916344bdd923a7eb3839a3233f18
|
refs/heads/master
| 2020-07-09T11:53:55.786001
| 2020-01-26T02:27:09
| 2020-01-26T02:27:09
| 203,962,358
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,318
|
py
|
def cal_ans():
temp = []
ans = 0
for i in range(len(src)):
if src[i] == 0:
if len(temp) == 5:
temp = temp[1:]
temp.append(i)
else:
ans += i * len(temp) - sum(temp)
for j in temp:
link[i + 1].append(j + 1)
link[j + 1].append(i + 1)
return ans
def cal_group():
cnt, group = 0, 0
zero_one = False
start, end = -1, 0
for i in range(len(src)):
start = i + 1
if src[i] == 1:
group += 1
else:
break
for i in range(len(src) - 1, -1, -1):
end = i + 1
if src[i] == 0:
group += 1
else:
break
for i in range(start, end):
if src[i] == 0:
cnt += 1
elif src[i] == 1:
if cnt >= 5:
group += (cnt - 4)
elif i >= 1 and src[i-1] == 0:
zero_one = True
cnt = 0
if zero_one and len(src) != 1:
return group + 1
return group
num_of_case = int(input())
for case in range(1, num_of_case + 1):
n = int(input())
src = list(map(int, input().split()))
link = [[] for _ in range(n + 1)]
print("#%d" % case, end=" ")
print(cal_ans(), end=" ")
print(cal_group())
|
[
"smr603@snu.ac.kr"
] |
smr603@snu.ac.kr
|
3923da15d3cfb9a730088a4d9708e6a18aa4ff3f
|
2ef742fe5e3208715208ff711eb2046acc1f5ef6
|
/NathHorrigan/wsgi.py
|
b4e20145e98ba5cc686716da381e9690d4db59ac
|
[] |
no_license
|
NathHorrigan/NathHorrigan.com
|
9ac53208061b16d3f8bc4a00e4575df83083dc7c
|
636165b718659cf5dcd70ed29251ae69b4b09748
|
refs/heads/master
| 2020-03-20T16:23:07.531145
| 2018-08-31T22:22:13
| 2018-08-31T22:22:13
| 137,537,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
"""
WSGI config for NathHorrigan project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "NathHorrigan.settings.dev")
application = get_wsgi_application()
|
[
"nathan_horrigan@icloud.com"
] |
nathan_horrigan@icloud.com
|
a86eb97efcd2033e7ba2688689a2d35a96976693
|
48295cd5f8e7a1b1cfda8b9642012611488156ce
|
/users/migrations/0004_auto_20191123_1158.py
|
69f25c440fe48acfb912e16d8e4f514085e401e8
|
[] |
no_license
|
mugglecoder/airbnb-clone
|
0c47445761e9f9fd82805299ddab46e382e9b5a4
|
6276cdeaa13b1a88697b62d322dcb871d9a5e25a
|
refs/heads/master
| 2022-12-10T14:00:47.409310
| 2020-01-05T14:14:39
| 2020-01-05T14:14:39
| 212,250,078
| 0
| 0
| null | 2022-12-10T11:01:26
| 2019-10-02T03:44:49
|
Python
|
UTF-8
|
Python
| false
| false
| 563
|
py
|
# Generated by Django 2.2.5 on 2019-11-23 02:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20191123_1051'),
]
operations = [
migrations.RenameField(
model_name='user',
old_name='email_confirmed',
new_name='email_verified',
),
migrations.AddField(
model_name='user',
name='email_secret',
field=models.CharField(blank=True, default='', max_length=20),
),
]
|
[
"winkknd@naver.com"
] |
winkknd@naver.com
|
aa9c14845c14707dc3ac40e78df6b0a435a73c19
|
051fff90eb3fcb1f928c5857992fef351fc1ba04
|
/output/figuresAndTables/makeFinalTables.py
|
92d6e9cd2931bb2d07a7fdcae6dad5e5ed9ca5cd
|
[
"MIT"
] |
permissive
|
AndresYague/Snuppat
|
1503c8a729513d857a04a7963b8256451c9f6cd1
|
8a7f73fbc260bab67b5d38ed1efc628980f5047c
|
refs/heads/master
| 2021-06-08T11:22:11.930896
| 2021-04-08T12:58:53
| 2021-04-08T12:58:53
| 67,886,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,455
|
py
|
def getKeyList(indx, lst):
    '''Return the adequate value from the list'''
if indx >= 0 and indx < len(lst):
return lst[indx]
else:
return "--"
def printTable(storeNamVal):
'''Print table in order'''
nCol = 3
    keys = sorted(storeNamVal.keys())
    nEls = len(keys)
    # Get number of lines for the tables (integer ceiling division; a float
    # here would break the list indexing in getKeyList under Python 3)
    nlines = nEls // nCol if nEls % nCol == 0 else nEls // nCol + 1
firstOfSecond = None
for ii in range(nEls):
zz1 = getKeyList(ii, keys)
nam1, val1 = storeNamVal.get(zz1, ("--", "--"))
zz2 = getKeyList(ii + nlines, keys)
nam2, val2 = storeNamVal.get(zz2, ("--", "--"))
zz3 = getKeyList(ii + nlines*2, keys)
nam3, val3 = storeNamVal.get(zz3, ("--", "--"))
if firstOfSecond is None:
firstOfSecond = zz2
elif zz1 == firstOfSecond:
break
print("{} & {} & {:5.2f} & ".format(nam1, zz1, float(val1)), end = " ")
print("{} & {} & {:5.2f} & ".format(nam2, zz2, float(val2)), end = " ")
if val3 != "--":
print("{} & {} & {:5.2f}\\\\".format(nam3, zz3, float(val3)))
else:
print("{} & {} & {}\\\\".format(nam3, zz3, val3))
def main():
'''Transform plottedValues.dat into .tex tables'''
arch = "plottedValues.dat"
data = "../../data/species.dat"
# Index zz and names
zToName = {}
with open(data, "r") as fread:
for line in fread:
lnlst = line.split()
zz = int(lnlst[0]) - int(lnlst[2])
name = lnlst[1]
name = name[0].upper() + name[1:]
zToName[zz] = name
# Create and print tables
storeNamVal = {}
with open(arch, "r") as fread:
for line in fread:
if "#" in line:
if len(storeNamVal) > 0:
printTable(storeNamVal)
print()
storeNamVal = {}
print(line)
continue
lnlst = line.split()
if len(lnlst) == 0:
continue
zz = int(lnlst[0])
name = zToName[zz]
val = lnlst[1]
storeNamVal[zz] = (name, val)
if len(storeNamVal) > 0:
printTable(storeNamVal)
if __name__ == "__main__":
main()
|
[
"and.yague@gmail.com"
] |
and.yague@gmail.com
|
03ab69e575d2a03c8d9095898808b1c4e3877e59
|
6db68bd7f4e792d3df009671c10cbe93f963c5e6
|
/NOC_Chp0/NOC_0_3/walker.py
|
db7d99af07feffb25e3763feabc401da99e501ab
|
[] |
no_license
|
mickardinal/The-Nature-of-Code-Python
|
0ce9125b92707a9de4dd57a77c4a92c04df66467
|
a883e365051826228002317741df7d198eae6dfe
|
refs/heads/master
| 2020-03-11T20:55:21.939021
| 2018-04-25T16:29:06
| 2018-04-25T16:29:06
| 130,250,157
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
class Walker(object):
def __init__(self):
self.x = width/2
self.y = height/2
def display(self):
stroke(0)
point(self.x, self.y)
def step(self):
r = random(0, 1)
if r< 0.4:
self.x += 1
elif r < 0.6:
self.x -= 1
elif r < 0.8:
self.y += 1
else:
self.y -= 1
|
[
"jsrdccsx@gmail.com"
] |
jsrdccsx@gmail.com
|
e8b2f8c81f953e4c0e4a8d266dceb71804203e01
|
7f25740b1ef47edc24db1a3618b399959b073fe1
|
/1029_17_smallproject.py
|
97673d239a34ef5759856f9eeba050bcf1977446
|
[] |
no_license
|
pjh9362/PyProject
|
b2d0aa5f8cfbf2abbd16232f2b55859be50446dc
|
076d31e0055999c1f60767a9d60e122fb1fc913e
|
refs/heads/main
| 2023-01-09T12:12:06.913295
| 2020-11-07T15:32:03
| 2020-11-07T15:32:03
| 306,814,117
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
cost = int(input())
cpn = input()
if cpn == "Cash3000":
print(cost-3000)
elif cpn == "Cash5000":
print(cost-5000)
else:
print("쿠폰이 적용되지 않았습니다.")
print(cost)
|
[
"pjh9362@gmail.com"
] |
pjh9362@gmail.com
|
43f6176cdac6fed43d610aadb95791ffb1bc8e31
|
5f6e95aa83ca132c732f644c51e786785e9bdd2f
|
/src/e_psu/e_psu/urls.py
|
c6ecbf46fdf3b32c7d3230b71dd508c60b649c90
|
[] |
no_license
|
kerupuksambel/django-e-pantau
|
9905a9902752fd5143e03326a0ab585f09ccb50d
|
bbadcd31984c9bd254ac2cc23a30f55a9fe5b997
|
refs/heads/master
| 2022-12-22T07:28:58.444314
| 2020-10-02T14:35:05
| 2020-10-02T14:35:05
| 300,641,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,268
|
py
|
"""e_psu URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from .views import home_view
urlpatterns = [
path("", home_view, name="home"),
path('admin/', admin.site.urls),
path('admin_kelola/serah_terima/', include("serah_terima.urls")),
path('laporan/', include("laporan.urls")),
path('warga/', include("warga.urls")),
path('admin_kelola/', include("admin_kelola.urls")),
path('admin_skpd/', include("admin_skpd.urls"))
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"kerupuksambel.2000@gmail.com"
] |
kerupuksambel.2000@gmail.com
|
a105b75168724e5d6040804652d0f8dd4fadeb5e
|
ca97700838056596c072a0b63934f179c6fbac17
|
/_21_ev_differentDER.py
|
6e2edf1852af3d9af8f4081db991a47e467510ae
|
[] |
no_license
|
mlamlamla/powernet_pyGridlabD_eval
|
c18bff98164eb6df4ae79a157b840a59c19ff6d9
|
54275cbd86517bb1728e72824ba16fcbec99e767
|
refs/heads/master
| 2022-04-17T11:50:17.273163
| 2020-04-03T18:23:18
| 2020-04-03T18:23:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,644
|
py
|
import os
import pandas as pd
import numpy as np
def get_monthly(run,ind,month,df_total_load_all=None):
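    # Load one month of a no-market run: compute wholesale procurement cost and
    # the net household energy (PV generation subtracted, EV charging added).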
folder = '/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind
directory = run + '_' + ind + '_vis'
#Procurement costs
df_system = pd.read_csv(run+'/' + directory +'/df_system.csv',index_col=[0],parse_dates=True).iloc[(24*60):]
#df_system = df_system.iloc[24*60:]
df_system['measured_real_energy'] = df_system['measured_real_power']/60.
df_system['p_max'] = p_max
df_system['WS_capped'] = df_system[["WS", "p_max"]].min(axis=1)
df_system['procurement_cost'] = df_system['measured_real_energy']*df_system['WS_capped'] # in MW and USD/MWh
proc_cost_Jan_nomarket = df_system['procurement_cost'].sum()
print('Procurement cost in '+month+' (no market): '+str(proc_cost_Jan_nomarket))
#Total house load no market
df_total_load = pd.read_csv(folder+'/total_load_all.csv',skiprows=range(8)).iloc[(24*60):]
df_total_load['# timestamp'] = df_total_load['# timestamp'].map(lambda x: str(x)[:-4])
df_total_load = df_total_load.iloc[:-1]
df_total_load['# timestamp'] = pd.to_datetime(df_total_load['# timestamp'])
df_total_load.set_index('# timestamp',inplace=True)
df_total_load = df_total_load/1000 #convert to MW
df_total_load = df_total_load/60. #convert to energy
df_total_load_gross = df_total_load.copy()
#Subtract PV generation and add EV consumption
df_PV_appl = pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind+'/df_PV_state.csv')
list_PV = list(pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind+'/df_PV_state.csv')['inverter_name'])
list_EV = list(pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind+'/df_EV_state.csv')['EV_name'])
list_EV_inv = []
for EV in list_EV:
EV_inv = 'EV_inverter'+EV[2:]
list_EV_inv += [EV_inv]
if len(list_PV) + len(list_EV) > 0:
df_inv_load = pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind+'/total_P_Out.csv',skiprows=range(8)).iloc[(24*60):]
df_inv_load['# timestamp'] = df_inv_load['# timestamp'].map(lambda x: str(x)[:-4])
df_inv_load = df_inv_load.iloc[:-1]
df_inv_load['# timestamp'] = pd.to_datetime(df_inv_load['# timestamp'])
df_inv_load.set_index('# timestamp',inplace=True)
df_inv_load = (df_inv_load/1000000)/60 # to MWh
#Include PV
if len(list_PV) > 0:
df_PV = df_inv_load[list_PV] #W -> MW (comes from GridlabD)
for house in df_total_load.columns:
if house in (df_PV_appl['house_name']).tolist():
PV_inv = df_PV_appl['inverter_name'].loc[df_PV_appl['house_name'] == house].iloc[0]
df_total_load[house] = df_total_load[house] - df_PV[PV_inv]
#Include EV consumption
if len(list_EV):
df_EV_appl = pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind+'/df_EV_state.csv')
df_EV = df_inv_load[list_EV_inv]
for house in df_total_load.columns:
if house in (df_EV_appl['house_name']).tolist():
EV_inv = 'EV_inverter'+df_EV_appl['EV_name'].loc[df_EV_appl['house_name'] == house].iloc[0][2:]
df_total_load[house] = df_total_load[house] - df_EV[EV_inv] #EV_inv is negatively defined
if df_total_load_all is None:
#print('df_total_load_all doesnot exist yet')
df_total_load_all = df_total_load.copy() #Becomes master load df
else:
df_total_load_all = df_total_load_all.append(df_total_load)
energy_nomarket_Jan = df_total_load.sum().sum() # Total net energy
print('Energy in '+month+' (no market): '+str(energy_nomarket_Jan))
# print(str(len(df_system)/(24*60))+' days')
# print(str(len(df_total_load)/(24*60))+' days')
# print(str(len(df_inv_load)/(24*60))+' days')
return df_total_load_all, proc_cost_Jan_nomarket, energy_nomarket_Jan
def get_monthly_wm(run,ind,month,df_total_base_market=None,df_total_flex_market=None,df_cleared_market=None):
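    # Market-run counterpart of get_monthly: split household load into base and
    # flexible parts and track the market clearing price for the flexible share.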
folder = '/Users/admin/Documents/powernet/powernet_markets_mysql/'+run + '/' + run + '_' + ind
directory = run + '/' + run + '_' + ind + '_vis'
#Procurement cost
df_system = pd.read_csv(directory+'/df_system.csv',index_col=[0],parse_dates=True).iloc[(24*60):]
df_system = df_system #.iloc[24*60:]
df_system['measured_real_energy'] = df_system['measured_real_power']/60. #MW
df_system['p_max'] = p_max
df_system['WS_capped'] = df_system[["WS", "p_max"]].min(axis=1)
df_system['procurement_cost'] = df_system['measured_real_energy']*df_system['WS_capped'] # in MW and USD/MWh
proc_cost_Jan_market = df_system['procurement_cost'].sum()
print('Procurement cost in '+month+' (market): '+str(proc_cost_Jan_market))
#print(str(len(df_system)/(24*60))+' days')
#Total house load with market
df_total_load = pd.read_csv(folder+'/total_load_all.csv',skiprows=range(8)).iloc[(24*60):]
df_total_load['# timestamp'] = df_total_load['# timestamp'].map(lambda x: str(x)[:-4])
df_total_load = df_total_load.iloc[:-1]
df_total_load['# timestamp'] = pd.to_datetime(df_total_load['# timestamp'])
df_total_load.set_index('# timestamp',inplace=True)
df_total_load = df_total_load/1000 #convert to MW
df_total_load = df_total_load/60. #convert to energy
df_hvac_load = pd.read_csv(folder+'/hvac_load_all.csv',skiprows=range(8)).iloc[(24*60):]
df_hvac_load['# timestamp'] = df_hvac_load['# timestamp'].map(lambda x: str(x)[:-4])
df_hvac_load = df_hvac_load.iloc[:-1]
df_hvac_load['# timestamp'] = pd.to_datetime(df_hvac_load['# timestamp'])
df_hvac_load.set_index('# timestamp',inplace=True)
df_hvac_load = df_hvac_load/1000 #convert to MW
df_hvac_load = df_hvac_load/60. #convert to energy
df_base_load = df_total_load.copy()
df_flex_load = df_total_load.copy()
df_total_load.data = 0.0
#Get list of flexible appliances
df_PV_appl = pd.read_csv(folder+'/df_PV_state.csv')
list_PV = list(df_PV_appl['inverter_name'])
df_EV_appl = pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind+'/df_EV_state.csv')
list_EV = list(df_EV_appl['EV_name'])
list_EV_inv = []
    for EV in list_EV:
        EV_inv = 'EV_inverter'+EV[2:]
        list_EV_inv += [EV_inv]
df_Bat_appl = pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind +'/df_battery_state.csv')
list_Bat = list(df_Bat_appl['battery_name'])
list_Bat_inv = []
for Bat in list_Bat:
Bat_inv = 'Bat_inverter'+Bat[7:]
list_Bat_inv += [Bat_inv]
if len(list_PV) + len(list_Bat) + len(list_EV_inv) > 0:
df_inv_load = pd.read_csv(folder+'/total_P_Out.csv',skiprows=range(8)).iloc[(24*60):]
df_inv_load['# timestamp'] = df_inv_load['# timestamp'].map(lambda x: str(x)[:-4])
df_inv_load = df_inv_load.iloc[:-1]
df_inv_load['# timestamp'] = pd.to_datetime(df_inv_load['# timestamp'])
df_inv_load.set_index('# timestamp',inplace=True)
df_inv_load = (df_inv_load/1000000)/60 # to MWh
df_PV = df_inv_load[list_PV]
df_EV = df_inv_load[list_EV_inv]
df_Bat = df_inv_load[list_Bat_inv]
df_base_load = df_total_load - df_hvac_load #for100% flex hvac!
df_flex_load = df_hvac_load.copy()
for house in df_hvac_load.columns:
if len(list_PV) > 0:
if house in (df_PV_appl['house_name']).tolist():
PV_inv = df_PV_appl['inverter_name'].loc[df_PV_appl['house_name'] == house].iloc[0]
df_flex_load[house] = df_flex_load[house] - df_PV[PV_inv]
if len(list_EV_inv):
if house in (df_EV_appl['house_name']).tolist():
EV_inv = 'EV_inverter'+df_EV_appl['EV_name'].loc[df_EV_appl['house_name'] == house].iloc[0][2:]
df_flex_load[house] = df_flex_load[house] - df_EV[EV_inv] #EV_inv is negatively defined
if len(list_Bat) > 0:
if house in (df_Bat_appl['house_name']).tolist():
Bat_inv = 'Bat_inverter'+df_Bat_appl['battery_name'].loc[df_Bat_appl['house_name'] == house].iloc[0][7:]
df_flex_load[house] = df_flex_load[house] - df_Bat[Bat_inv] #Bat_inv is negatively defined
#Clearing prices
df_cleared = pd.read_csv(folder+'/df_prices.csv',parse_dates=[0]).iloc[24*12:] #USD/MWh
df_cleared.rename(columns={'Unnamed: 0':'timedate'},inplace=True)
df_cleared.set_index('timedate',inplace=True)
df_cleared = df_cleared[['clearing_price']]
df_cleared_long = pd.DataFrame(index=df_total_load.index,columns=['clearing_price'],data=df_cleared['clearing_price'])
df_cleared_long.fillna(method='ffill',inplace=True)
# print(str(len(df_system)/(24*60))+' days')
# print(str(len(df_total_load)/(24*60))+' days')
# print(str(len(df_hvac_load)/(24*60))+' days')
# print(str(len(df_inv_load)/(24*60))+' days')
# print(str(len(df_cleared_long)/(24*60))+' days')
#Total load
if df_total_base_market is None:
        print('df_total_load_all_market does not exist yet')
df_total_base_market = df_base_load.copy() #Becomes master load df
df_total_flex_market = df_flex_load.copy() #Becomes master load df
df_cleared_market = df_cleared_long.copy()
else:
df_total_base_market = df_total_base_market.append(df_base_load)
df_total_flex_market = df_total_flex_market.append(df_flex_load)
df_cleared_market = df_cleared_market.append(df_cleared_long)
energy_nomarket_Jan = df_total_load.sum().sum()
if len(list_PV) > 0:
energy_nomarket_Jan -= df_PV.sum().sum()
if len(list_EV) > 0:
energy_nomarket_Jan -= df_EV.sum().sum()
if len(list_Bat) > 0:
energy_nomarket_Jan -= df_Bat.sum().sum()
#Use baseload only
df_system['measured_real_energy_base'] = df_base_load.sum(axis=1)
df_system['procurement_cost_base'] = df_system['measured_real_energy_base']*df_system['WS_capped'] # in MW and USD/MWh
proc_cost_Jan_market = df_system['procurement_cost_base'].sum()
energy_nomarket_Jan = df_system['measured_real_energy_base'].sum()
print('Energy in '+month+' (market): '+str(energy_nomarket_Jan))
return df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Jan_market, energy_nomarket_Jan
##############
#GENERAL SETTINGS
##############
run = 'FinalReport2' #'FinalReport_Jul1d'
settings_file = '/Users/admin/Documents/powernet/powernet_markets_mysql/settings_final2.csv'
df_settings = pd.read_csv(settings_file)
p_max = 100.
risk_prem = 1.025
##############
#SETTINGS: Only HVAC, no other DER
#
#NO market: 64,65,66
#With market: 70,71,72 // 103, 104, 105 (with reference price based on forward prices)
##############
print('Only HVAC, no other DER')
##############
#NO MARKET YET
##############
df_total_load_all, proc_cost_Jan_nomarket, energy_nomarket_Jan = get_monthly(run,'0064','JANUARY')
df_total_load_all, proc_cost_Jul_nomarket, energy_nomarket_Jul = get_monthly(run,'0065','JULY',df_total_load_all)
df_total_load_all, proc_cost_Oct_nomarket, energy_nomarket_Oct = get_monthly(run,'0066','OCTOBER',df_total_load_all)
#Calculate the retail tariff for procurement of energy
proc_cost_nomarket = proc_cost_Jan_nomarket + proc_cost_Jul_nomarket + proc_cost_Oct_nomarket
print('Procurement cost (no market, no DER): '+str(proc_cost_nomarket))
energy_nomarket= energy_nomarket_Jan + energy_nomarket_Jul + energy_nomarket_Oct
retail_nomarket = proc_cost_nomarket/energy_nomarket
print('Retail tariff (no market, no DER): '+str(retail_nomarket))
#Calculate cost for houses without a market under a constant retail tariff
df_cost_nomarket = df_total_load_all*retail_nomarket
df_cost = pd.DataFrame(index=df_cost_nomarket.columns,columns=['costs_nomarket'],data=df_cost_nomarket.sum(axis=0))
df_cost['costs_nomarket_riskprem5'] = df_cost['costs_nomarket']*risk_prem
print('Total customer bills (no market, no DER) over three weeks: '+str(df_cost['costs_nomarket_riskprem5'].sum()))
#print('Calculate for year')
##############
#MARKET
##############
df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Jan_market, energy_market_Jan = get_monthly_wm(run,'0106','JANUARY')
df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Jul_market, energy_market_Jul = get_monthly_wm(run,'0107','JULY',df_total_base_market, df_total_flex_market,df_cleared_market)
df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Oct_market, energy_market_Oct = get_monthly_wm(run,'0108','OCTOBER',df_total_base_market, df_total_flex_market,df_cleared_market)
proc_cost_market = proc_cost_Jan_market + proc_cost_Jul_market + proc_cost_Oct_market
print('Procurement cost (market, HVAC only): '+str(proc_cost_market))
retail_new = (proc_cost_market)/(energy_market_Jan + energy_market_Jul + energy_market_Oct)
print('New retail tariff (with market): '+str(retail_new))
#Calculate consumer costs
df_costs_market = df_total_base_market*retail_nomarket + df_total_flex_market.multiply(df_cleared_market['clearing_price'], axis="index")
df_cost['cost_market_oldRR'] = df_costs_market.sum(axis=0)
df_cost['cost_market_oldRR_riskprem5'] = df_costs_market.sum(axis=0)*risk_prem
print('Total customer bills (market, HVAC only) at old RR: '+str(df_cost['cost_market_oldRR_riskprem5'].sum()))
df_costs_market = df_total_base_market*retail_new + df_total_flex_market.multiply(df_cleared_market['clearing_price'], axis="index")
df_cost['cost_market_newRR'] = df_costs_market.sum(axis=0)
df_cost['cost_market_newRR_riskprem5'] = df_costs_market.sum(axis=0)*risk_prem
print('Total customer bills (market, HVAC only) at new RR: '+str(df_cost['cost_market_newRR_riskprem5'].sum()))
df_cost['abs_change_oldRR'] = (df_cost['cost_market_oldRR'] - df_cost['costs_nomarket'])
df_cost['change_oldRR'] = (df_cost['cost_market_oldRR'] - df_cost['costs_nomarket'])/df_cost['costs_nomarket']*100
print('\nMedian type 1600 old RR')
print(df_cost['change_oldRR'].median())
df_cost['abs_change_newRR'] = (df_cost['cost_market_newRR'] - df_cost['costs_nomarket'])
df_cost['change_newRR'] = (df_cost['cost_market_newRR'] - df_cost['costs_nomarket'])/df_cost['costs_nomarket']*100
print('\nMedian type 1600 new RR')
print(df_cost['change_newRR'].median())
#df_cost.to_csv(run+'/cost_changes_procneutral_1600_all.csv')
# ##############
# #SETTINGS: Only other DER, no HVAC
# #
# #NO market: 79,80,81
# #With market: 70,71,72
# ##############
# print('No HVAC, only other DER')
# ##############
# #NO MARKET YET
# ##############
# df_total_load_all, proc_cost_Jan_nomarket, energy_nomarket_Jan = get_monthly(run,'0079','JANUARY')
# df_total_load_all, proc_cost_Jul_nomarket, energy_nomarket_Jul = get_monthly(run,'0080','JULY',df_total_load_all)
# df_total_load_all, proc_cost_Oct_nomarket, energy_nomarket_Oct = get_monthly(run,'0081','OCTOBER',df_total_load_all)
# #Calculate the retail tariff for procurement of energy
# proc_cost_nomarket = proc_cost_Jan_nomarket + proc_cost_Jul_nomarket + proc_cost_Oct_nomarket
# print('Procurement cost (no market, with PV and EV): '+str(proc_cost_nomarket))
# energy_nomarket= energy_nomarket_Jan + energy_nomarket_Jul + energy_nomarket_Oct
# retail_nomarket = proc_cost_nomarket/energy_nomarket
# print('Retail tariff (no market, with PV and EV): '+str(retail_nomarket))
# #Calculate cost for houses without a market under a constant retail tariff
# df_cost_nomarket = df_total_load_all*retail_nomarket
# df_cost = pd.DataFrame(index=df_cost_nomarket.columns,columns=['costs_nomarket'],data=df_cost_nomarket.sum(axis=0))
# df_cost['costs_nomarket_riskprem5'] = df_cost['costs_nomarket']*risk_prem
# print('Total customer bills (no market, with PV and EV): '+str(df_cost['costs_nomarket_riskprem5'].sum()))
# print('Calculate for year')
# ##############
# #MARKET
# ##############
# df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Jan_market, energy_market_Jan = get_monthly_wm(run,'0076','JANUARY')
# df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Jul_market, energy_market_Jul = get_monthly_wm(run,'0077','JULY',df_total_base_market, df_total_flex_market,df_cleared_market)
# df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Oct_market, energy_market_Oct = get_monthly_wm(run,'0078','OCTOBER',df_total_base_market, df_total_flex_market,df_cleared_market)
# proc_cost_market = proc_cost_Jan_market + proc_cost_Jul_market + proc_cost_Oct_market
# print('Procurement cost (market, other DER): '+str(proc_cost_market))
# retail_new = (proc_cost_market)/(energy_market_Jan + energy_market_Jul + energy_market_Oct)
# print('New retail tariff (with market): '+str(retail_new))
# #Calculate consumer costs
# df_costs_market = df_total_base_market*retail_nomarket + df_total_flex_market.multiply(df_cleared_market['clearing_price'], axis="index")
# df_cost['cost_market_oldRR'] = df_costs_market.sum(axis=0)
# df_cost['cost_market_oldRR_riskprem5'] = df_costs_market.sum(axis=0)*risk_prem
# print('Total customer bills (market, other DER) at old RR: '+str(df_cost['cost_market_oldRR_riskprem5'].sum()))
# df_costs_market = df_total_base_market*retail_new + df_total_flex_market.multiply(df_cleared_market['clearing_price'], axis="index")
# df_cost['cost_market_newRR'] = df_costs_market.sum(axis=0)
# df_cost['cost_market_newRR_riskprem5'] = df_costs_market.sum(axis=0)*risk_prem
# print('Total customer bills (market, other DER) at new RR: '+str(df_cost['cost_market_newRR_riskprem5'].sum()))
# df_cost['abs_change_oldRR'] = (df_cost['cost_market_oldRR'] - df_cost['costs_nomarket'])
# df_cost['change_oldRR'] = (df_cost['cost_market_oldRR'] - df_cost['costs_nomarket'])/df_cost['costs_nomarket']*100
# print('\nMedian type 1600 old RR')
# print(df_cost['change_oldRR'].median())
# df_cost['abs_change_newRR'] = (df_cost['cost_market_newRR'] - df_cost['costs_nomarket'])
# df_cost['change_newRR'] = (df_cost['cost_market_newRR'] - df_cost['costs_nomarket'])/df_cost['costs_nomarket']*100
# print('\nMedian type 1600 new RR')
# print(df_cost['change_newRR'].median())
|
[
"admin@admins-air.attlocal.net"
] |
admin@admins-air.attlocal.net
|
181d7604566e31eea4b774b2ae9b3356926009e6
|
a40950330ea44c2721f35aeeab8f3a0a11846b68
|
/VTK/Actors/ThreeLine.py
|
e780418bfccbe2f4be8ca077eaf8f0c68c4225b5
|
[] |
no_license
|
huang443765159/kai
|
7726bcad4e204629edb453aeabcc97242af7132b
|
0d66ae4da5a6973e24e1e512fd0df32335e710c5
|
refs/heads/master
| 2023-03-06T23:13:59.600011
| 2023-03-04T06:14:12
| 2023-03-04T06:14:12
| 233,500,005
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,218
|
py
|
import vtk
# Visualize
colors = vtk.vtkNamedColors()
# Create points
p0 = [0.0, 0.0, 0.0]
p1 = [1.0, 0.0, 0.0]
p2 = [1.0, 1.0, 0.0]
p3 = [0.0, 1.0, 0.0]
p4 = [2.0, 0.0, 0.0]
p5 = [2.0, 1.0, 0.0]
# LineSource: draw a line with two points
def createLine1():
lineSource = vtk.vtkLineSource()
lineSource.SetPoint1(p1)
lineSource.SetPoint2(p2)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(lineSource.GetOutputPort())
return mapper
# LineSource Multi-point continuous straight line
def createLine2():
lineSource = vtk.vtkLineSource()
points = vtk.vtkPoints()
points.InsertNextPoint(p0)
points.InsertNextPoint(p1)
points.InsertNextPoint(p2)
points.InsertNextPoint(p3)
lineSource.SetPoints(points)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(lineSource.GetOutputPort())
return mapper
# LineSource multi-point set geometry + topology
def createLine3():  # add multiple lines: one polydata/actor holds several line segments
# Create a vtkPoints object and store the points in it
points = vtk.vtkPoints()
points.InsertNextPoint(p0)
points.InsertNextPoint(p1)
points.InsertNextPoint(p2)
points.InsertNextPoint(p3)
points.InsertNextPoint(p4)
points.InsertNextPoint(p5)
# Create a cell array to store the lines in and add the lines to it
lines = vtk.vtkCellArray()
# for i in range(0, 5, 2):
# line = vtk.vtkLine()
# line.GetPointIds().SetId(0, i)
# line.GetPointIds().SetId(1, i + 1)
# lines.InsertNextCell(line)
    line = vtk.vtkLine()  # a vtkLine has two endpoints by default
    # print(line.GetPointIds())
    # line.GetPointIds().SetNumberOfIds(4)  # can be widened to N endpoints
    line.GetPointIds().SetId(0, 0)  # SetId(endpoint index, point id)
line.GetPointIds().SetId(1, 1)
lines.InsertNextCell(line)
line.GetPointIds().SetId(0, 1)
line.GetPointIds().SetId(1, 4)
# line.GetPointIds().SetId(2, 4)
lines.InsertNextCell(line)
# Create a polydata to store everything in
linesPolyData = vtk.vtkPolyData()
# Add the points to the dataset geometry
linesPolyData.SetPoints(points)
# Add the lines to the dataset topology
linesPolyData.SetLines(lines)
# Setup actor and mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(linesPolyData)
return mapper
def main():
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetWindowName("Line")
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
# Visualize
colors = vtk.vtkNamedColors()
renderer.SetBackground(colors.GetColor3d("Silver"))
actor = vtk.vtkActor()
# The first way
# actor.SetMapper(createLine1())
# The second way
# actor.SetMapper(createLine2())
# The third way
actor.SetMapper(createLine3())
actor.GetProperty().SetLineWidth(4)
actor.GetProperty().SetColor(colors.GetColor3d("Peacock"))
renderer.AddActor(actor)
renderWindow.Render()
renderWindowInteractor.Start()
if __name__ == '__main__':
main()
|
[
"443765159@qq.com"
] |
443765159@qq.com
|
06a25a1b6196b3b4b67262bea39f8289fb2daa7e
|
c059ed04ed5f72d11dbe3b01e9395bacd28b6e8b
|
/문자열내p와y개수.py
|
fdb32a8a8483982f6580418362fe2487966dd8ad
|
[] |
no_license
|
kimhyewon0/kimhyewon0.github.io
|
532b5feb214d686865b8e6169251de8dca7a2caf
|
eaac275ff5b933e477099c9b4c3a1b69e05fa521
|
refs/heads/master
| 2021-01-23T04:13:25.509101
| 2019-09-22T16:40:30
| 2019-09-22T16:40:30
| 33,710,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
def solution(s):
    s = s.upper()
    return s.count('P') == s.count('Y')
print(solution("Py"))
|
[
"coope0357@gmail.com"
] |
coope0357@gmail.com
|
03ba849ab901a2dd4684b9660222925b7988aa2f
|
6b7aa3e8a15ab8502094d41f88c72e0fa0a6cc6d
|
/python/algoMonster/dp/knapsackWeightOnly.py
|
e98b2cd035c350fae50709d41ea7b21b5190ac2a
|
[] |
no_license
|
artem-tkachuk/algorithms
|
77f51c0db2467f718ef1ebe3822343282fc8bf39
|
a656bc363d3cf4bb81fa83d0c627bf6f12029943
|
refs/heads/master
| 2023-05-25T12:30:10.094499
| 2023-05-16T03:17:25
| 2023-05-16T03:17:25
| 205,235,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
from typing import List
def knapsackWeightOnly(weights: List[int]) -> List[int]:
# return list(knapsackWeightOnly_TopDown_Helper(weights, valuesSet=set()))
return knapsackWeightOnly_BottomUp_Tabulation_Helper(weights)
# Top down solution, no memoization, bad time complexity
def knapsackWeightOnly_TopDown_Helper(weights: List[int], valuesSet: set) -> List[int]:
# number of elements in the weights array
n = len(weights)
# Base case: 0 elements in weights gives sum of 0
if n == 0:
return set([0])
# Add the sum of weights itself to the set
valuesSet.add(sum(weights))
# go over each element and use it, recursively calling knapsack on remaining elements
for i in range(n):
weights_without_i = weights[:i] + weights[(i + 1):]
        # merge all possible sums of the weights array excluding the i'th elem into valuesSet
valuesSet |= knapsackWeightOnly_TopDown_Helper(weights_without_i, valuesSet)
# return all possible sums for the current weights array
return valuesSet
# bottom-up tabulation solution (body filled in; the original left it empty)
def knapsackWeightOnly_BottomUp_Tabulation_Helper(weights: List[int]) -> List[int]:
    total = sum(weights)
    # dp[s] is True iff some subset of weights sums to s
    dp = [False] * (total + 1)
    dp[0] = True
    for w in weights:
        # iterate sums downwards so each weight is used at most once
        for s in range(total, w - 1, -1):
            if dp[s - w]:
                dp[s] = True
    return [s for s in range(total + 1) if dp[s]]
# Testing
print(knapsackWeightOnly([1, 3, 3, 5]))
print(knapsackWeightOnly([1, 2, 3]))
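# Expected output (derived by hand, assuming the tabulation above):
# [1, 3, 3, 5] -> [0, 1, 3, 4, 5, 6, 7, 8, 9, 11, 12]
# [1, 2, 3]    -> [0, 1, 2, 3, 4, 5, 6]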
|
[
"artemtkachuk@yahoo.com"
] |
artemtkachuk@yahoo.com
|
83f0e5e137f2710df1e45e901c6a227e112040d5
|
3a698e77300380546267afacf72568ce8586e4f8
|
/test.py
|
ef88e5117c4ba019bead6794c698caf7f5eff76d
|
[] |
no_license
|
MichaelESwartz/TWITTERWEBBOT
|
10e6934f0bbaada148d9bee6f7c907e08cdccba8
|
e0598ddfd598c0105b0687ab2b63d5a8acb0fbaf
|
refs/heads/master
| 2021-01-20T00:47:23.285010
| 2017-05-02T22:02:31
| 2017-05-02T22:02:31
| 89,189,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 110
|
py
|
def math(y, z):
x = y + z
return x
if math(3, 2) == 5:
print "success"
else:
print "failed"
|
[
"Michael@Michaels-MacBook-Pro-4.local"
] |
Michael@Michaels-MacBook-Pro-4.local
|
1ae7978cbc58218d181868d7280ebd339c401050
|
099f7e9234cd8b3afa6f7cd8cb81a654ca5043ea
|
/models/payment.py
|
38bf0252591bf723514586c8ec8f04e40171c1d6
|
[] |
no_license
|
nazrinshahaf/Nextagram_python
|
1716893e7b4466fec5b9d48fd630e00d01f2b74f
|
8738929ca6f11da6943b9093f05bd445ff58e951
|
refs/heads/master
| 2022-12-11T21:45:58.316999
| 2020-02-04T10:35:55
| 2020-02-04T10:35:55
| 235,014,627
| 0
| 0
| null | 2021-06-02T00:56:25
| 2020-01-20T03:55:13
|
HTML
|
UTF-8
|
Python
| false
| false
| 492
|
py
|
from models.base_model import BaseModel
import peewee as pw
from models.user import User
from models.user_images import User_images
from config import S3_LOCATION
from playhouse.hybrid import hybrid_property
from flask_login import current_user
class Payment(BaseModel):
user = pw.ForeignKeyField(User, backref='donations')
image = pw.ForeignKeyField(User_images, backref='donations')
    amount = pw.IntegerField(null=False, default=5)
message = pw.TextField(null=True)
|
[
"nazrinfernandez@gmail.com"
] |
nazrinfernandez@gmail.com
|
7d24324bd1f5837946c3a16a2bf594cd700afd24
|
9d53d831b631c5431d625848ca0dbd1e4a02eb78
|
/pybo/models.py
|
7f387f16dda1be32c0a6e106a2f4bc1f0512818a
|
[] |
no_license
|
jghee/Django_pratice
|
2b918f730dc40cd6f0c9881ad1c176906e84de8f
|
859befa7b04df8dd119cd6c8985d0c13edd7521a
|
refs/heads/main
| 2023-06-20T19:56:37.231458
| 2021-07-17T02:43:25
| 2021-07-17T02:43:25
| 383,633,921
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,344
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Question(models.Model):
subject = models.CharField(max_length=200)
content = models.TextField()
create_date = models.DateTimeField()
author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='author_question')
modify_date = models.DateTimeField(null=True, blank=True)
voter = models.ManyToManyField(User, related_name='voter_question')
def __str__(self):
return self.subject
class Answer(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
content = models.TextField()
create_date = models.DateTimeField()
author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='author_answer')
modify_date = models.DateTimeField(null=True, blank=True)
voter = models.ManyToManyField(User, related_name='voter_answer')
class Comment(models.Model):
author = models.ForeignKey(User, on_delete=models.CASCADE)
content = models.TextField()
create_date = models.DateTimeField()
modify_date = models.DateTimeField(null=True, blank=True)
question = models.ForeignKey(Question, null=True, blank=True, on_delete=models.CASCADE)
answer = models.ForeignKey(Answer, null=True, blank=True, on_delete=models.CASCADE)
|
[
"ghj171937@gmail.com"
] |
ghj171937@gmail.com
|
59bc5e311c76d97d748a6bf5da5acff9c9eafe2f
|
92e6d757704f9916bbc9374d40d3d575122ab9f7
|
/5-Factory.py
|
df982127becd77e6fd54f11e032f776b36ebc019
|
[] |
no_license
|
TomCranitch/MATH3202-Tutorials
|
2479dced9ef89bff101a9b98a8a94caf30cd5962
|
0a4e81e82f7473b1d993b5a212d9ce2c98fe7aeb
|
refs/heads/master
| 2020-04-27T10:07:13.016853
| 2019-06-14T02:56:02
| 2019-06-14T02:56:02
| 174,240,870
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,191
|
py
|
from gurobipy import *

# Set up your data
profit = [10, 6, 8, 4, 11, 9, 3]
P = range(len(profit))

n = [4, 2, 3, 1, 1]
M = range(len(n))

# usage[P][M]
usage = [
    [0.5, 0.1, 0.2, 0.05, 0.00],
    [0.7, 0.2, 0.0, 0.03, 0.00],
    [0.0, 0.0, 0.8, 0.00, 0.01],
    [0.0, 0.3, 0.0, 0.07, 0.00],
    [0.3, 0.0, 0.0, 0.10, 0.05],
    [0.2, 0.6, 0.0, 0.00, 0.00],
    [0.5, 0.0, 0.6, 0.08, 0.05]
]

T = range(6)

# maintenance[T][M]
maint = [
    [1, 0, 0, 0, 0],
    [0, 0, 2, 0, 0],
    [0, 0, 0, 1, 0],
    [0, 1, 0, 0, 0],
    [1, 1, 0, 0, 0],
    [0, 0, 1, 0, 1]
]

# market[P][T]
market = [
    [ 500, 600, 300, 200,    0,  500],
    [1000, 500, 600, 300,  100,  500],
    [ 300, 200,   0, 400,  500,  100],
    [ 300,   0,   0, 500,  100,  300],
    [ 800, 400, 500, 200, 1000, 1100],
    [ 200, 300, 400,   0,  300,  500],
    [ 100, 150, 100, 100,    0,   60]
]

MAX_STORE = 100
STORE_COST = 0.5
FINAL_STORE = 50
MONTH_HOURS = 16 * 24

mod = Model("Factory Planning")

X = [[mod.addVar(vtype=GRB.INTEGER) for t in T] for p in P]                   # produced
Y = [[mod.addVar(vtype=GRB.INTEGER, ub=market[p][t]) for t in T] for p in P]  # sold
S = [[mod.addVar(vtype=GRB.INTEGER, ub=MAX_STORE) for t in T] for p in P]     # stored
Z = [[mod.addVar(vtype=GRB.INTEGER) for m in M] for t in T]                   # machines down

# Machine-hour capacity, net of machines under maintenance
mod.addConstrs((quicksum(usage[p][m] * X[p][t] for p in P) <= MONTH_HOURS * (n[m] - Z[t][m]) for m in M for t in T))
# Stock balance for every month after the first; note t >= 1, not t > 1,
# otherwise month 1 would be left unconstrained
mod.addConstrs(S[p][t] == S[p][t-1] + X[p][t] - Y[p][t] for p in P for t in T if t >= 1)
mod.addConstrs(S[p][t] <= MAX_STORE for p in P for t in T)
mod.addConstrs(S[p][T[-1]] >= FINAL_STORE for p in P)
mod.addConstrs(S[p][0] == X[p][0] - Y[p][0] for p in P)
# All scheduled maintenance must be carried out somewhere in the horizon
mod.addConstrs(quicksum(Z[t][m] for t in T) == sum(maint[t][m] for t in T) for m in M)

mod.setObjective(quicksum(profit[p] * Y[p][t] for p in P for t in T) - quicksum(STORE_COST * S[p][t] for p in P for t in T), GRB.MAXIMIZE)
mod.optimize()

print("\n\n Report Prepared for Factory Planning\n Optimal Profit", mod.objVal, "\n\n")
for p in P:
    print([X[p][t].x for t in T])
print("\n\n Sell \n")
for p in P:
    print([Y[p][t].x for t in T])
print("\n\n Storage \n")
for p in P:
    print([S[p][t].x for t in T])
print("\n\n Maintenance \n")
for m in M:
    print([Z[t][m].x for t in T])
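
# --- Added post-solve sanity check (a sketch, not in the original script) ---
# Re-derives the stock balance from the solved values; a violation would
# indicate a modelling slip like the original `t > 1` off-by-one:
if mod.status == GRB.OPTIMAL:
    for p in P:
        for t in T:
            prev = S[p][t-1].x if t >= 1 else 0.0
            assert abs(S[p][t].x - (prev + X[p][t].x - Y[p][t].x)) < 1e-6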
|
[
"tom@cranitch.com.au"
] |
tom@cranitch.com.au
|
6017f8bc5e80a39ea78cc67cbc7474a53ad39874
|
4d259f441632f5c45b94e8d816fc31a4f022af3c
|
/tornado/mongodb/client.py
|
df52fa27df3ea41b18e3d682e2bcf182a9f48e30
|
[] |
no_license
|
xiaoruiguo/lab
|
c37224fd4eb604aa2b39fe18ba64e93b7159a1eb
|
ec99f51b498244c414b025d7dae91fdad2f8ef46
|
refs/heads/master
| 2020-05-25T01:37:42.070770
| 2016-05-16T23:24:26
| 2016-05-16T23:24:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,204
|
py
|
import httplib2
from urllib.parse import urlencode  # Python 3 location of urlencode

h = httplib2.Http()
# httplib2 does not set a form content type by itself; tornado needs it
# to parse the POST body into arguments
form_headers = {'Content-Type': 'application/x-www-form-urlencoded'}

## Add articles
data = {'id': '1', 'author': 'B', 'genre': 'comedy'}
body = urlencode(data)
h.request("http://127.0.0.1:8888/articles", "POST", body=body, headers=form_headers)

data = {'id': '1', 'author': 'C', 'genre': 'comedys'}
body = urlencode(data)
h.request("http://127.0.0.1:8888/articles", "POST", body=body, headers=form_headers)

data = {'id': '2', 'author': 'A', 'genre': 'tragedy'}
body = urlencode(data)
h.request("http://127.0.0.1:8888/articles", "POST", body=body, headers=form_headers)

data = {'id': '3', 'author': 'X', 'genre': 'tragedy'}
body = urlencode(data)
h.request("http://127.0.0.1:8888/articles", "POST", body=body, headers=form_headers)

## View all articles
# httplib2 returns the tuple (response, content), not (content, response)
response, content = h.request("http://127.0.0.1:8888/articles", "GET")
print('------- all articles -------')
print(content)

## View one article
print('------- per article -------')
query = urlencode({"articleid": 1})
response, content = h.request("http://127.0.0.1:8888/articles" + "?" + query, "GET")
print(content)

## Delete articles
#response, content = h.request("http://127.0.0.1:8888/articles", "DELETE")
#response, content = h.request("http://127.0.0.1:8888/articles", "GET")
#print(content)
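
# --- Added stand-alone check (a sketch, not in the original script) ---
# The same "view one article" request using only the standard library,
# assuming the tornado server from this lab is running on 127.0.0.1:8888:
from urllib.request import urlopen

with urlopen("http://127.0.0.1:8888/articles?articleid=1") as resp:
    print(resp.read().decode("utf-8"))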
|
[
"junmein@junmeinde-macbook-pro-3.local"
] |
junmein@junmeinde-macbook-pro-3.local
|
792c81288e99d8d6ff55699c1e6d26a7002d0431
|
8bb062d48354fd7a9cca14c0637871e803a1a8ce
|
/agregator/business/__init__.py
|
69cb3bd528e7daa45010680730ba95ba237eb8f3
|
[] |
no_license
|
denislamard/aggregator
|
b58887fbee4bf5beb833a847e3518d82ff3e3414
|
c040d36dab1b083c1ce2d518af458fc3b19cca6c
|
refs/heads/master
| 2020-06-22T18:26:17.015586
| 2019-07-19T12:49:18
| 2019-07-19T12:49:18
| 197,771,709
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 956
|
py
|
import os
import json

FILENAME = "entries.json"


class Agregator:
    def __init__(self):
        self._routes = None
        self._loaddata(self.filedata)

    def _loaddata(self, path: str):
        with open(path) as json_file:
            self._routes = json.load(json_file)

    def _savedata(self, path: str):
        with open(path, 'w') as json_file:
            json.dump(self._routes, json_file, indent=4)

    def addentry(self, entry: dict):
        if self._routes is not None:
            self._routes.append(entry)
            self._savedata(self.filedata)

    def findentry(self, entry: str) -> dict:
        if self._routes is None:
            return None
        for route in self._routes:
            if route['name'] == entry:
                return route
        return None

    routes = property(lambda self: self._routes)
    filedata = property(lambda self: os.path.join(os.path.dirname(os.path.realpath(__file__)), FILENAME))
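
# --- Added usage sketch (not part of the original module) ---
# Assumes an entries.json file containing a JSON list (e.g. []) sits next
# to this file, since __init__ loads it eagerly:
if __name__ == '__main__':
    agg = Agregator()
    agg.addentry({'name': 'users', 'url': '/api/users'})
    print(agg.findentry('users'))    # -> {'name': 'users', 'url': '/api/users'}
    print(agg.findentry('missing'))  # -> None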
|
[
"noreply@github.com"
] |
noreply@github.com
|
3dc8a090b2c9403994f512b47d7fc301201b29e0
|
4d5c542f56dad6668dd30be7693ac93032adfe4c
|
/app.py
|
cbb0742b6876948e3d5b1447ee18a3993a7ff68a
|
[] |
no_license
|
maanbosa/restful-flask
|
0044cd6179f6877b14f46c29e11f094dbee68946
|
c587b17276929cf567d22ed4bcc51676308a8ad5
|
refs/heads/master
| 2023-02-01T23:08:48.615079
| 2020-12-11T19:01:55
| 2020-12-11T19:01:55
| 320,643,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 839
|
py
|
from flask import Flask
from flask_restful import Api
from flask_jwt import JWT

from security import authenticate, identity
from resources.user import UserRegister
from resources.item import Item, ItemList
from resources.store import Store, StoreList

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['PROPAGATE_EXCEPTIONS'] = True
app.secret_key = 'jose'
api = Api(app)

jwt = JWT(app, authenticate, identity)  # creates the /auth endpoint

api.add_resource(Store, '/store/<string:name>')
api.add_resource(StoreList, '/stores')
api.add_resource(Item, '/item/<string:name>')
api.add_resource(ItemList, '/items')
api.add_resource(UserRegister, '/register')

if __name__ == '__main__':
    from db import db
    db.init_app(app)
    app.run(port=5000, debug=True)
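
# --- Added client sketch (not part of the original app) ---
# Example requests against the running server; the credentials below are
# placeholders and must match whatever security.authenticate accepts:
#
#   curl -X POST http://127.0.0.1:5000/register \
#        -H "Content-Type: application/json" \
#        -d '{"username": "alice", "password": "secret"}'
#   curl -X POST http://127.0.0.1:5000/auth \
#        -H "Content-Type: application/json" \
#        -d '{"username": "alice", "password": "secret"}'
#   # flask_jwt expects the "JWT " prefix, not "Bearer ":
#   curl http://127.0.0.1:5000/items -H "Authorization: JWT <access_token>"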
|
[
"maanbosa@gmail.com"
] |
maanbosa@gmail.com
|
6da13e87abfd10017f1f682867f5f982147bbccc
|
f8ff25224bf827406c65560e247e7c3c064cdd38
|
/convert_savedmodel_keras_tflite.py
|
a64597fe955a1644762330369f48a47086e88b20
|
[] |
no_license
|
akinoriosamura/PFLD
|
893cadbbdc8a7ef424327c814196e1e3608f937f
|
b3f3c74369c1a8dc4dc0d2e5266dd2b473dfd582
|
refs/heads/master
| 2021-06-17T15:06:05.468485
| 2020-12-10T09:39:08
| 2020-12-10T09:39:08
| 211,257,866
| 0
| 0
| null | 2019-09-27T07:09:04
| 2019-09-27T07:09:03
| null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
import tensorflow as tf

# Load the saved keras model back.
k_model = tf.keras.models.load_model(
    "SavedModelPre",
    custom_objects=None,
    compile=True
)
# k_model = tf.keras.experimental.load_from_saved_model("SavedModelPre")
k_model.summary()
k_model.save('model.h5', include_optimizer=False)

# Note: from_keras_model_file is the TF 1.x converter API; it was removed in TF 2.x.
converter = tf.lite.TFLiteConverter.from_keras_model_file("model.h5")
tflite_model = converter.convert()
with open("converted_model.tflite", "wb") as f:
    f.write(tflite_model)
|
[
"osamura.akinori@gmail.com"
] |
osamura.akinori@gmail.com
|
18eaf4480da5398f037854fd148de9adc33abbe1
|
d8940b6d45c15a84c8ee1ab298c4df8a905f956c
|
/pysnooper/__init__.py
|
4b6ea5bc1ee65f9e361836555c20c181a5e8e0ff
|
[
"MIT"
] |
permissive
|
Karanxa/PySnooper
|
f179c3e23627979c3a58664b966c9ae4cfa522a2
|
22f63ae09bb6d63de86496d613815ee03d191b75
|
refs/heads/master
| 2023-05-27T14:23:00.604201
| 2021-06-11T15:06:55
| 2021-06-11T15:06:55
| 376,061,317
| 1
| 0
|
MIT
| 2021-06-11T15:06:55
| 2021-06-11T15:04:02
| null |
UTF-8
|
Python
| false
| false
| 812
|
py
|
# Copyright 2019 Ram Rachum and collaborators.
# This program is distributed under the MIT license.
'''
PySnooper - Never use print for debugging again

Usage:

    import pysnooper

    @pysnooper.snoop()
    def your_function(x):
        ...

A log will be written to stderr showing the lines executed and variables
changed in the decorated function.

For more information, see https://github.com/cool-RR/PySnooper
'''

from .tracer import Tracer as snoop
from .variables import Attrs, Exploding, Indices, Keys
import collections

__VersionInfo = collections.namedtuple('VersionInfo',
                                       ('major', 'minor', 'micro'))

__version__ = '0.5.0'
__version_info__ = __VersionInfo(*(map(int, __version__.split('.'))))

del collections, __VersionInfo  # Avoid polluting the namespace
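
# --- Added usage sketch (not part of the original __init__) ---
# Exercises the snoop decorator exported above; this is the example from
# the project README, and the trace of each executed line goes to stderr:
if __name__ == '__main__':
    @snoop()
    def number_to_bits(number):
        if number:
            bits = []
            while number:
                number, remainder = divmod(number, 2)
                bits.insert(0, remainder)
            return bits
        return [0]

    number_to_bits(6)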
|
[
"ram@rachum.com"
] |
ram@rachum.com
|