blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2095e3bfdb5024a6fd017a4c63e5e06b6416e81f | 19fb0eb26f5a6d2180a323cf242ce00f5e4e1c6d | /contrib/seeds/makeseeds.py | 39db93d0b44c8c2565919b9c1b35031aba660f40 | [
"MIT"
] | permissive | j00v/NestEGG | bd4c9555f6473cc655e203531c6ab4d0dc795b61 | 8c507974a5d49f5ffa7000fa8b864a528dcb9c3e | refs/heads/master | 2022-12-03T09:16:14.732378 | 2020-08-12T15:25:31 | 2020-08-12T15:25:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,517 | py | #!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
# Maximum number of seed entries to emit.
NSEEDS=512
# Cap on the number of seeds accepted from a single autonomous system.
MAX_SEEDS_PER_ASN=2
# Nodes reporting fewer blocks than this are considered out of sync and skipped.
MIN_BLOCKS = 615801
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
    ""
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/NESTEGGCore:2.2.(0|1|99)/)$")
def parseline(line):
    """Parse one line of the DNS seeder's stats dump.

    Expected whitespace-separated columns:
      0: address (ipv4:port, [ipv6]:port or <onion>:port)
      1: "good" flag, 2: last-success timestamp, 3-7: uptime percentages,
      8: block height, 9: service flags (hex), 10: protocol version,
      11+: quoted user agent (may contain a single space).

    Returns a dict describing the node, or None when the line is malformed,
    the node is flagged bad, or the address is unwanted (localhost, 0.0.0.0).
    """
    sline = line.split()
    if len(sline) < 11:
        return None
    m = PATTERN_IPV4.match(sline[0])
    sortkey = None
    ip = None
    if m is None:
        m = PATTERN_IPV6.match(sline[0])
        if m is None:
            m = PATTERN_ONION.match(sline[0])
            if m is None:
                return None
            else:
                net = 'onion'
                ipstr = sortkey = m.group(1)
                port = int(m.group(2))
        else:
            net = 'ipv6'
            if m.group(1) in ['::']: # Not interested in localhost
                return None
            ipstr = m.group(1)
            sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
            port = int(m.group(2))
    else:
        # Do IPv4 sanity check
        ip = 0
        for i in range(0,4):
            if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
                return None
            ip = ip + (int(m.group(i+2)) << (8*(3-i)))
        if ip == 0:
            return None
        net = 'ipv4'
        sortkey = ip
        ipstr = m.group(1)
        port = int(m.group(6))
    # Skip bad results. BUGFIX: the original compared the string column to
    # the integer 0, which is never true, so bad nodes were never skipped.
    if sline[1] == '0':
        return None
    # Extract uptime %.
    uptime30 = float(sline[7][:-1])
    # Extract Unix timestamp of last success.
    lastsuccess = int(sline[2])
    # Extract protocol version.
    version = int(sline[10])
    # Extract user agent. A quoted agent containing a space splits into two
    # tokens. BUGFIX: the original tested len(sline) > 11, which also matched
    # the normal 12-column case and raised IndexError on sline[12].
    if len(sline) > 12:
        agent = sline[11][1:] + sline[12][:-1]
    else:
        agent = sline[11][1:-1]
    # Extract service flags.
    service = int(sline[9], 16)
    # Extract blocks.
    blocks = int(sline[8])
    # Construct result.
    return {
        'net': net,
        'ip': ipstr,
        'port': port,
        'ipnum': ip,
        'uptime': uptime30,
        'lastsuccess': lastsuccess,
        'version': version,
        'agent': agent,
        'service': service,
        'blocks': blocks,
        'sortkey': sortkey,
    }
def filtermultiport(ips):
    """Drop hosts that show up with more than one node per IP.

    Entries are grouped by their 'sortkey'; only groups holding a single
    entry survive, and that sole entry is kept in the result.
    """
    by_addr = collections.defaultdict(list)
    for entry in ips:
        by_addr[entry['sortkey']].append(entry)
    survivors = []
    for group in by_addr.values():
        if len(group) == 1:
            survivors.append(group[0])
    return survivors
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
    """Limit IPv4 results per ASN and in total; pass IPv6/onion through.

    The ASN of each IPv4 address is looked up through the Team Cymru DNS
    interface; at most max_per_asn entries are kept per ASN and at most
    max_total IPv4 entries overall. IPv6 and onion entries are appended
    unfiltered (no ASN data available for them yet).
    """
    # Sift out ips by type
    ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
    ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
    ips_onion = [ip for ip in ips if ip['net'] == 'onion']
    # Filter IPv4 by ASN
    result = []
    asn_count = {}
    for ip in ips_ipv4:
        if len(result) == max_total:
            break
        try:
            # Reversed octets + '.origin.asn.cymru.com' form the Cymru query
            # name; the TXT answer starts with the AS number.
            asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
            if asn not in asn_count:
                asn_count[asn] = 0
            if asn_count[asn] == max_per_asn:
                continue
            asn_count[asn] += 1
            result.append(ip)
        # BUGFIX: the original bare "except:" also swallowed SystemExit and
        # KeyboardInterrupt; catch only ordinary errors from lookup/parsing.
        except Exception:
            sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
    # TODO: filter IPv6 by ASN
    # Add back non-IPv4
    result.extend(ips_ipv6)
    result.extend(ips_onion)
    return result
def main():
    """Read seeder dump lines from stdin, filter them and print seed entries."""
    lines = sys.stdin.readlines()
    ips = [parseline(line) for line in lines]
    # Drop lines that could not be parsed.
    ips = [ip for ip in ips if ip is not None]
    # Drop entries coming from suspicious hosts.
    ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
    # Require a minimum block height, service bit 1, more than 50% uptime
    # over 30 days, and a known, recent user agent.
    ips = [
        ip for ip in ips
        if ip['blocks'] >= MIN_BLOCKS
        and (ip['service'] & 1) == 1
        and ip['uptime'] > 50
        and PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))
    ]
    # Sort by availability, using last success as tie breaker.
    ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
    # Hosts serving multiple ports are likely abusive.
    ips = filtermultiport(ips)
    # Look up ASNs and limit results, both per ASN and globally.
    ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
    # Deterministic output: order by network type, then address.
    ips.sort(key=lambda x: (x['net'], x['sortkey']))
    for ip in ips:
        template = '[%s]:%i' if ip['net'] == 'ipv6' else '%s:%i'
        print(template % (ip['ip'], ip['port']))
if __name__ == '__main__':
    main()
| [
"shamim.ice.ewu@gmail.com"
] | shamim.ice.ewu@gmail.com |
4850a93389426ba1af6d9b80f1441d2e15b33c02 | a88a07f83024f6781bf0f297a7585686a97030e1 | /src/learning/theano/concatennate.py | 35bef22d31217cc2097e13b618070d02eb1adef4 | [] | no_license | muqiann/NamedEntityRecognition | 0b89c79a46dc6be6b61a5fe020d003724d04a971 | 3c3a979950f3f172a61fd7c9ff5d3563877810a9 | refs/heads/master | 2020-04-27T17:53:21.485758 | 2018-06-26T01:26:08 | 2018-06-26T01:26:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | # -*- coding: utf-8 -*-
import theano
import numpy as np
import theano.tensor as T
# Demo of theano.tensor.concatenate on shared matrices.
# `ones` is 3x3, `twos` is 2x3 (shapes chosen to trigger the error below).
ones = theano.shared(np.float32([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
twos = theano.shared(np.float32([[10, 11, 12], [13, 14, 15]]))
print(ones.get_value())
result = T.concatenate([ones, ones], axis=0) # stack along axis 0 (vertically: adds rows)
print(result.eval())
result = T.concatenate([ones, ones], axis=1) # stack along axis 1 (horizontally: adds columns)
print(result.eval())
# wrong : all the input array dimensions except for the concatenation axis must match exactly
# (3x3 vs 2x3 differ in dim 0, so concatenating them on axis=1 raises ValueError)
result = T.concatenate([ones, twos], axis=1)
print (result.eval()) | [
"572176750@qq.com"
] | 572176750@qq.com |
740e7c5bd00047252a123bf3aef1db3897986788 | e41e614249db33edfd62831ae30e08596e32dde6 | /filter_data.py | 839b19a473b33cbb8d42b50d12d96b238eef8d2a | [] | no_license | newtein/pollution_science | 237fd2385c870db2fdb3cc97bbc5a9d864b4e5f8 | 6779aa729f412ffe0901c069c8ef63b3a83c4ce4 | refs/heads/master | 2023-02-01T08:14:26.612619 | 2020-12-13T06:07:38 | 2020-12-13T06:07:38 | 275,144,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,741 | py | from read_data import ReadData
from combine_data import CombineData
from copy import copy
class FilterData:
    """Load pollutant data (one year, or a combined date range) and expose
    simple column/row filtering over the resulting DataFrame.

    Parameters:
        pollutant: pollutant code understood by ReadData/CombineData.
        year: if given, a single year is loaded via ReadData; otherwise
            the start_date..end_date range is combined via CombineData.
        observation_type: granularity of the observations (e.g. "daily").
        start_date, end_date: range bounds used when no year is given.
        index_col: optional list of columns to use as the DataFrame index.
        fixed_where_payload: optional column -> value equality filters
            applied once at load time.
    """
    def __init__(self, pollutant, year=None, observation_type="daily", start_date="01-12-2019",
                 end_date="15-06-2020", index_col = None, fixed_where_payload = {}):
        self.pollutant = pollutant
        self.start_date = start_date
        self.end_date = end_date
        self.observation_type = observation_type
        self.index_col = index_col
        if year:
            self.df = ReadData(self.pollutant, observation_type=observation_type, year=year).get_pandas_obj()
        else:
            self.df = CombineData(self.pollutant, start_date=self.start_date, end_date=self.end_date,
                                  observation_type=observation_type).get_pandas_obj()
        if self.index_col:
            self.df = self.df.set_index(self.index_col)
        if fixed_where_payload:
            for col_name, col_value in fixed_where_payload.items():
                # BUGFIX: guard against index_col being None before the
                # membership test ("x in None" raises TypeError).
                if self.index_col and col_name in self.index_col:
                    self.df = self.df[self.df.index == col_value]
                else:
                    self.df = self.df[self.df[col_name] == col_value]

    def filter_df(self, select_columns, where_payload):
        """Return a copy of the loaded frame restricted by equality filters.

        select_columns: list of columns to keep, or a falsy value to keep all.
        where_payload: column -> value equality conditions; a key that names
            an index column filters on the index instead.
        """
        df = copy(self.df)
        for col_name, col_value in where_payload.items():
            # BUGFIX: same None guard as in __init__.
            if self.index_col and col_name in self.index_col:
                df = df[df.index == col_value]
            else:
                df = df[df[col_name] == col_value]
        df = df[select_columns] if select_columns else df
        return df
if __name__ == "__main__":
    # Smoke test. NOTE(review): FilterData's first parameter is the pollutant
    # and the second the year -- these arguments look swapped ('2020' passed
    # as pollutant, 'PM2' as year); confirm the intended order.
    obj = FilterData('2020', 'PM2')
    print(obj.filter_df(['County Name', '1st Max Value'], {'State Name': 'California'}))
| [
"harshitgujral12@gmail.com"
] | harshitgujral12@gmail.com |
6934696f4d50fcab91b608637cf2ad56e25e07b2 | f3b233e5053e28fa95c549017bd75a30456eb50c | /CDK2_input/L31/31-28_wat_20Abox/set_5.py | 0b41608e37632fc6b7eb344e7e03bd50ed7ca3cd | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | import os
# TI (thermodynamic-integration) job preparation for CDK2 ligand pair 31->28.
# For every lambda window, copy the production-input and PBS templates into
# that window's directory and substitute the XXX placeholder with the lambda.
# NOTE(review): `dir` shadows the builtin of the same name.
dir = '/mnt/scratch/songlin3/run/CDK2/L31/wat_20Abox/ti_one-step/31_28/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_5.in'
temp_pbs = filesdir + 'temp_5.pbs'
# The 12 lambda values of the integration schedule.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
  # Each window lives in a directory named after its lambda (e.g. "0.00922").
  os.chdir("%6.5f" %(j))
  workdir = dir + "%6.5f" %(j) + '/'
  #prodin
  prodin = workdir + "%6.5f_prod_5.in" %(j)
  os.system("cp %s %s" %(temp_prodin, prodin))
  os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
  #PBS
  pbs = workdir + "%6.5f_5.pbs" %(j)
  os.system("cp %s %s" %(temp_pbs, pbs))
  os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
  #submit pbs
  #os.system("qsub %s" %(pbs))
  os.chdir(dir)
"songlin3@msu.edu"
] | songlin3@msu.edu |
31fd733e963aacdd5f306e6ce2af4e3ce832b28d | 5d1c43bb4881039f198eedcee2ceb101b406e0a0 | /Django/project03/app/migrations/0001_initial.py | e8289a90ad8bf2cb47749b6bf504ceb405feb277 | [] | no_license | MunSeoHee/Likelion_Gachon_2020 | 46155b1686a245a59c5664f7726ac754b7079e4b | e0e48845fdb0e4aa2365e7c47e29880a27f0f261 | refs/heads/master | 2021-04-10T09:51:06.618980 | 2020-12-07T10:06:43 | 2020-12-07T10:06:43 | 248,927,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | # Generated by Django 3.0.4 on 2020-07-29 06:54
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.4 (makemigrations); the app's first migration.
    initial = True
    dependencies = [
    ]
    operations = [
        # Create the Blog model/table: auto PK, a timestamp, a 100-char
        # title and free-text contents.
        migrations.CreateModel(
            name='Blog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField()),
                ('title', models.CharField(max_length=100)),
                ('contents', models.TextField()),
            ],
        ),
    ]
| [
"nansh9815@naver.com"
] | nansh9815@naver.com |
0c34ebdee8bacb1ea327c5bb8e6ed30ba5e6a457 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_tuberculosis.py | 254ad24d14a38ada684c33760bf2c6c129c5c8c6 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py |
#calss header
class _TUBERCULOSIS():
def __init__(self,):
self.name = "TUBERCULOSIS"
self.definitions = [u"a serious infectious disease that can attack many parts of a person's body, especially their lungs"]
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
f3f02c3c24a39f79b783b10e87490b85b01b559c | 75b9e31895336ee5f174ac94f679dcc7cda94cab | /core/__init__.py | fe72f6d4fd2808c9f8df490d4c90dc7ee37312ac | [] | no_license | eduarde/PentaGroup | e319633274d128025e538ff0afd0d5b026461491 | d67e67495daf96e274ccf5ac31f043ffd5f30f58 | refs/heads/master | 2021-09-06T16:08:02.748042 | 2018-02-08T10:56:47 | 2018-02-08T10:56:47 | 108,404,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | default_app_config = 'core.apps.CoreConfig'
# import logging
# from django.conf import settings
# fmt = getattr(settings, 'LOG_FORMAT', None)
# lvl = getattr(settings, 'LOG_LEVEL', logging.DEBUG)
# logging.basicConfig(format=fmt, level=lvl)
# logging.debug("Logging started on %s for %s" % (logging.root.name, logging.getLevelName(lvl)))
| [
"eduard.erja@gmail.com"
] | eduard.erja@gmail.com |
dea18cce19c36cadcf59982b6e850953b0d73ae9 | 741ee09b8b73187fab06ecc1f07f46a6ba77e85c | /AutonomousSourceCode/data/raw/squareroot/5e828e59-aad1-47ca-bae8-ed5d41514fe3__FairAndSquare.py | be747649a6d849ba6cd0795593bbc095c0784804 | [] | no_license | erickmiller/AutomatousSourceCode | fbe8c8fbf215430a87a8e80d0479eb9c8807accb | 44ee2fb9ac970acf7389e5da35b930d076f2c530 | refs/heads/master | 2021-05-24T01:12:53.154621 | 2020-11-20T23:50:11 | 2020-11-20T23:50:11 | 60,889,742 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | '''
Solving the third code jam problem
'''
def isPalindrome(number):
    """Return True if the non-negative integer reads the same reversed.

    BUGFIX: the original used "number /= 10", which under Python 3 performs
    float division and corrupts the digit-reversal loop; floor division
    keeps the arithmetic integral (and is identical under Python 2).
    """
    original = number
    reverse = 0
    while number != 0:
        reverse = reverse * 10 + number % 10
        number //= 10
    return reverse == original
# main program
# Google Code Jam "Fair and Square": for each test case [a, b], count the
# integers whose root and square are both palindromes, with a <= square <= b.
import os, math
full_path = os.path.realpath(__file__)
path, file = os.path.split(full_path)
# NOTE(review): '\\' is a Windows-only separator; os.path.join would be portable.
f = open(path + '\\' + 'C-small-attempt0.in')
numOfTests = int(f.readline())
results = []
for test in range(numOfTests):
    a, b = [int(x) for x in f.readline().split()] # read a line
    counter = 0
    # Smallest candidate root whose square could reach a.
    root = int(math.sqrt(a))
    square = int(root**2)
    #in case it's truncated
    if square < a:
        root += 1
        square = root**2
    # Walk consecutive squares upward without re-multiplying each time.
    while square <= b:
        if isPalindrome(root) and isPalindrome(square):
            counter += 1
        square += root * 2 + 1 # (x+1)^2 = x^2 + 2x + 1
        root += 1
    results.append(counter)
outFile = open(path + '\\' + 'output.txt', 'w')
for i in range(0, len(results)):
    caseNumber = i+1
    outFile.write("Case #%d: %d\n" % (caseNumber, results[i] ))
"erickmiller@gmail.com"
] | erickmiller@gmail.com |
8b0248ae612554e90e944519298e07b1bdeb10ae | 0a89cad9f98e5c014b4c6970c6a63d29c89bacbf | /ilisa/antennameta/delays.py | 7a368c97f1533e4be73314a83c1f4b556de7b0ec | [
"ISC"
] | permissive | mpozoga/iLiSA | 5fe516972d010f04695a5990f68c1b4b6f092889 | 164c198a7569413a12d52338738aaa24c763890d | refs/heads/master | 2020-08-27T07:40:41.099906 | 2019-10-12T12:37:16 | 2019-10-12T12:37:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,663 | py | #!/usr/bin/env python
"""Module for handling the LOFAR antenna delay files.
"""
import sys
import os
import numpy as np
#import pkg_resources
#my_data = pkg_resources.resource_filename(__name__, "share/StaticMetaData")
STATICMETADATA = os.path.join(os.path.dirname(__file__),'share/StaticMetaData/')
CABLEDELAYDIR = STATICMETADATA
TABCOLUMNS = ('RCU','LBL_len','LBL_delay','LBH_len','LBH_delay','HBA_len','HBA_delay')
TABFORMATS = ('i4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4')
def _load_stn_cabledelay_file(f):
lengthsNdelays = np.loadtxt(f, dtype={
'names': TABCOLUMNS,
'formats': TABFORMATS})
return lengthsNdelays
def _stnid2filename(stnid):
    """Return the full path to the station's '<stnid>-CableDelays.conf' file."""
    conf_name = '{}-CableDelays.conf'.format(stnid)
    return os.path.join(CABLEDELAYDIR, conf_name)
def _get_units(quantitystr):
arr, qtype = quantitystr.split('_')
if qtype == 'len':
unit = 'm'
elif qtype == 'delay':
unit = 'ns'
else:
raise ValueError, "Unknown quantity type"
return unit
def get_stn_cabledelays(stnid):
    """Load and return the cable length/delay table for station `stnid`."""
    return _load_stn_cabledelay_file(_stnid2filename(stnid))
if __name__ == '__main__':
    # CLI: delays.py <station id> <quantity column>; prints RCU vs value.
    stnid = sys.argv[1]
    quantity = sys.argv[2]
    if quantity not in TABCOLUMNS:
        # BUGFIX: "raise ValueError, msg" is Python-2-only syntax (a
        # SyntaxError under Python 3); the call form is valid in both.
        raise ValueError("Choose one of the following quantities: {}".format(TABCOLUMNS[1:]))
    unit = _get_units(quantity)
    lengthsNdelays = get_stn_cabledelays(stnid)
    print("RCU [#] {} [{}]".format(quantity, unit))
    for row, rcu in enumerate(lengthsNdelays['RCU']):
        print("{} {}".format(rcu, lengthsNdelays[quantity][row]))
| [
"tobia@chalmers.se"
] | tobia@chalmers.se |
18915148d4eae7ae3f755afbef18c840ec5dae52 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/e27b4c2ae40681b2c2ec196cd7d853614b265abd-<main>-bug.py | b7e22bbd6cbf18bf9fd295b34e4f4275576b906b | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,373 | py | def main():
    """Entry point of the Ansible ``pip`` module (dataset snapshot of a
    known-buggy revision).

    Parses the module arguments, optionally creates a virtualenv, builds a
    ``pip install``/``uninstall`` command line, runs it, and reports whether
    anything changed. Runs under a temporary umask when ``umask`` is given;
    the original umask is restored in the ``finally`` block.
    """
    # Map module state to the pip sub-command that realises it.
    state_map = dict(present='install', absent='uninstall -y', latest='install -U', forcereinstall='install -U --force-reinstall')
    # Declare the module's argument spec; check mode is supported.
    module = AnsibleModule(argument_spec=dict(state=dict(default='present', choices=state_map.keys()), name=dict(type='list'), version=dict(type='str'), requirements=dict(), virtualenv=dict(type='path'), virtualenv_site_packages=dict(default=False, type='bool'), virtualenv_command=dict(default='virtualenv', type='path'), virtualenv_python=dict(type='str'), use_mirrors=dict(default=True, type='bool'), extra_args=dict(), editable=dict(default=True, type='bool'), chdir=dict(type='path'), executable=dict(), umask=dict()), required_one_of=[['name', 'requirements']], mutually_exclusive=[['name', 'requirements'], ['executable', 'virtualenv']], supports_check_mode=True)
    state = module.params['state']
    name = module.params['name']
    version = module.params['version']
    requirements = module.params['requirements']
    extra_args = module.params['extra_args']
    virtualenv_python = module.params['virtualenv_python']
    chdir = module.params['chdir']
    umask = module.params['umask']
    # Accept umask as an octal string (e.g. "0022").
    if (umask and (not isinstance(umask, int))):
        try:
            umask = int(umask, 8)
        except Exception:
            module.fail_json(msg='umask must be an octal integer', details=to_native(sys.exc_info()[1]))
    old_umask = None
    if (umask is not None):
        old_umask = os.umask(umask)
    # From here on the requested umask is in effect; restored in `finally`.
    try:
        if ((state == 'latest') and (version is not None)):
            module.fail_json(msg='version is incompatible with state=latest')
        if (chdir is None):
            chdir = tempfile.gettempdir()
        err = ''
        out = ''
        env = module.params['virtualenv']
        # Create the virtualenv if it does not exist yet.
        if env:
            if (not os.path.exists(os.path.join(env, 'bin', 'activate'))):
                if module.check_mode:
                    module.exit_json(changed=True)
                cmd = module.params['virtualenv_command']
                if (os.path.basename(cmd) == cmd):
                    cmd = module.get_bin_path(cmd, True)
                if module.params['virtualenv_site_packages']:
                    cmd += ' --system-site-packages'
                else:
                    cmd_opts = _get_cmd_options(module, cmd)
                    if ('--no-site-packages' in cmd_opts):
                        cmd += ' --no-site-packages'
                if virtualenv_python:
                    cmd += (' -p%s' % virtualenv_python)
                elif PY3:
                    cmd += (' -p%s' % sys.executable)
                cmd = ('%s %s' % (cmd, env))
                (rc, out_venv, err_venv) = module.run_command(cmd, cwd=chdir)
                out += out_venv
                err += err_venv
                if (rc != 0):
                    _fail(module, cmd, out, err)
        pip = _get_pip(module, env, module.params['executable'])
        cmd = ('%s %s' % (pip, state_map[state]))
        path_prefix = None
        if env:
            path_prefix = '/'.join(pip.split('/')[:(- 1)])
        # Detect VCS requirements (svn+/git+/hg+/bzr+) among the names; they
        # force '-e' (editable) when the editable option is set.
        has_vcs = False
        if name:
            for pkg in name:
                if bool((pkg and re.match('(svn|git|hg|bzr)\\+', pkg))):
                    has_vcs = True
                    break
        if (has_vcs and module.params['editable']):
            args_list = []
            if extra_args:
                args_list = extra_args.split(' ')
            if ('-e' not in args_list):
                args_list.append('-e')
                extra_args = ' '.join(args_list)
        if extra_args:
            cmd += (' %s' % extra_args)
        if name:
            for pkg in name:
                cmd += (' %s' % _get_full_name(pkg, version))
        elif requirements:
            cmd += (' -r %s' % requirements)
        # Check mode: report whether a change would occur, without acting.
        if module.check_mode:
            if (extra_args or requirements or (state == 'latest') or (not name)):
                module.exit_json(changed=True)
            elif has_vcs:
                module.exit_json(changed=True)
            (pkg_cmd, out_pip, err_pip) = _get_packages(module, pip, chdir)
            out += out_pip
            err += err_pip
            changed = False
            if name:
                pkg_list = [p for p in out.split('\n') if ((not p.startswith('You are using')) and (not p.startswith('You should consider')) and p)]
                if (pkg_cmd.endswith(' freeze') and (('pip' in name) or ('setuptools' in name))):
                    # 'pip freeze' does not list pip/setuptools themselves.
                    for pkg in ('setuptools', 'pip'):
                        if (pkg in name):
                            formatted_dep = _get_package_info(module, pkg, env)
                            if (formatted_dep is not None):
                                pkg_list.append(formatted_dep)
                                out += ('%s\n' % formatted_dep)
                for pkg in name:
                    is_present = _is_present(pkg, version, pkg_list, pkg_cmd)
                    if (((state == 'present') and (not is_present)) or ((state == 'absent') and is_present)):
                        changed = True
                        break
            module.exit_json(changed=changed, cmd=pkg_cmd, stdout=out, stderr=err)
        # Snapshot the installed packages up front when pip's own output will
        # not be enough to infer whether something changed.
        if (requirements or has_vcs):
            (_, out_freeze_before, _) = _get_packages(module, pip, chdir)
        else:
            out_freeze_before = None
        (rc, out_pip, err_pip) = module.run_command(cmd, path_prefix=path_prefix, cwd=chdir)
        out += out_pip
        err += err_pip
        # Uninstalling something that is not installed is not an error.
        if ((rc == 1) and (state == 'absent') and (('not installed' in out_pip) or ('not installed' in err_pip))):
            pass
        elif (rc != 0):
            _fail(module, cmd, out, err)
        if (state == 'absent'):
            changed = ('Successfully uninstalled' in out_pip)
        elif (out_freeze_before is None):
            changed = ('Successfully installed' in out_pip)
        # NOTE(review): the elif below duplicates the branch above verbatim
        # and is unreachable dead code (likely this snapshot's injected bug).
        elif (out_freeze_before is None):
            changed = ('Successfully installed' in out_pip)
        else:
            (_, out_freeze_after, _) = _get_packages(module, pip, chdir)
            changed = (out_freeze_before != out_freeze_after)
        module.exit_json(changed=changed, cmd=cmd, name=name, version=version, state=state, requirements=requirements, virtualenv=env, stdout=out, stderr=err)
    finally:
        # Restore the caller's umask no matter how we exit.
        if (old_umask is not None):
            os.umask(old_umask)
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
e5c2442247427cec1a3951aa995099d60185b687 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/308/93222/submittedfiles/principal.py | 2e2b9bd7c56e2b637fb56a6e0c2b10a929045553 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | # -*- coding: utf-8 -*-
from minha_bib import *
from random import randint
# Draw five random grades in [0.0, 10.0], echo each one, then print the
# whole list and finally the grades in reverse order.
notas = []
for i in range(0, 5):
    x = randint(0, 100)/10.0
    notas.append(x)
    print('Valor sorteado: %.1f' % x)
print (notas)
# Walk the list backwards (indices len..1 map to notas[len-1]..notas[0]).
for i in range (len(notas), 0, -1):
    print(notas[i-1])
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
40a782cbd11ac6b9d420ca5d39aea916b32437c7 | 0b01cb61a4ae4ae236a354cbfa23064e9057e434 | /alipay/aop/api/domain/AlipayTradePageMergePayModel.py | 070e26792a14dd740369010833dd12f5d7db348d | [
"Apache-2.0"
] | permissive | hipacloud/alipay-sdk-python-all | e4aec2869bf1ea6f7c6fb97ac7cc724be44ecd13 | bdbffbc6d5c7a0a3dd9db69c99443f98aecf907d | refs/heads/master | 2022-11-14T11:12:24.441822 | 2020-07-14T03:12:15 | 2020-07-14T03:12:15 | 277,970,730 | 0 | 0 | Apache-2.0 | 2020-07-08T02:33:15 | 2020-07-08T02:33:14 | null | UTF-8 | Python | false | false | 2,684 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.OrderDetail import OrderDetail
class AlipayTradePageMergePayModel(object):
    """Request model for the merged page-pay trade API.

    Fields: order_details (list of OrderDetail), out_merge_no (the
    merchant's merge-order number) and timeout_express (order timeout).
    """

    def __init__(self):
        self._order_details = None
        self._out_merge_no = None
        self._timeout_express = None

    @property
    def order_details(self):
        return self._order_details

    @order_details.setter
    def order_details(self, value):
        # Accept a list of OrderDetail objects or plain dicts; dicts are
        # converted to OrderDetail instances. Non-list values are ignored.
        if isinstance(value, list):
            converted = list()
            for item in value:
                if isinstance(item, OrderDetail):
                    converted.append(item)
                else:
                    converted.append(OrderDetail.from_alipay_dict(item))
            self._order_details = converted

    @property
    def out_merge_no(self):
        return self._out_merge_no

    @out_merge_no.setter
    def out_merge_no(self, value):
        self._out_merge_no = value

    @property
    def timeout_express(self):
        return self._timeout_express

    @timeout_express.setter
    def timeout_express(self, value):
        self._timeout_express = value

    @staticmethod
    def _unwrap(value):
        # Serialize one value: model objects expose to_alipay_dict().
        if hasattr(value, 'to_alipay_dict'):
            return value.to_alipay_dict()
        return value

    def to_alipay_dict(self):
        """Serialize the populated fields into a plain dict."""
        params = dict()
        if self.order_details:
            if isinstance(self.order_details, list):
                # Convert list elements in place, as the generated SDK does.
                for idx in range(len(self.order_details)):
                    self.order_details[idx] = self._unwrap(self.order_details[idx])
            params['order_details'] = self._unwrap(self.order_details)
        if self.out_merge_no:
            params['out_merge_no'] = self._unwrap(self.out_merge_no)
        if self.timeout_express:
            params['timeout_express'] = self._unwrap(self.timeout_express)
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a plain dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayTradePageMergePayModel()
        for key in ('order_details', 'out_merge_no', 'timeout_express'):
            if key in d:
                setattr(o, key, d[key])
        return o
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
a458a4a886e0d030318c0fd387a072c062b86fc0 | bd010b944cb658531fe1e3a4fda9e8dd2d4e2a15 | /runners/beginner/features/signal_to_noise_limit.py | 4ac17e0544fedb41e5bca2399961cb2294abc81d | [] | no_license | harshitjindal/autolens_workspace | f32132a51eff888c3b25098df09f514be2dd6422 | 47f85e6b7c2f5871055b9b88520c30d39fd91e2a | refs/heads/cern-submission | 2021-03-20T19:53:12.153296 | 2020-04-24T15:25:25 | 2020-04-24T15:25:25 | 247,228,818 | 0 | 0 | null | 2020-04-24T15:26:45 | 2020-03-14T07:04:59 | Jupyter Notebook | UTF-8 | Python | false | false | 1,779 | py | import os
# This pipeline runner demonstrates how to use the signal-to-noise limits in pipelines. Checkout the pipeline
# 'autolens_workspace/pipelines/beginner/features/signal_to_noise_limit.py' for a description binning up.
# I'll assume that you are familiar with how the beginner runners work, so if any code doesn't make sense familiarize
# yourself with those first!
### AUTOFIT + CONFIG SETUP ###
import autofit as af
# Workspace root is three directories above this runner script.
workspace_path = "{}/../../../".format(os.path.dirname(os.path.realpath(__file__)))
# NOTE(review): config_path is assigned but never used below.
config_path = workspace_path + "config"
# Point PyAutoFit at the workspace's config and output folders.
af.conf.instance = af.conf.Config(
    config_path=workspace_path + "config", output_path=workspace_path + "output"
)
dataset_label = "imaging"
dataset_name = "lens_sie__source_sersic"
pixel_scales = 0.1
### AUTOLENS + DATA SETUP ###
import autolens as al
import autolens.plot as aplt
# Dataset folder: <workspace>/dataset/<label>/<name>/
dataset_path = af.path_util.make_and_return_path_from_path_and_folder_names(
    path=workspace_path, folder_names=["dataset", dataset_label, dataset_name]
)
# Load the image, PSF and noise map from FITS files.
imaging = al.imaging.from_fits(
    image_path=dataset_path + "image.fits",
    psf_path=dataset_path + "psf.fits",
    noise_map_path=dataset_path + "noise_map.fits",
    pixel_scales=pixel_scales,
)
# Circular mask of 3.0 arcsec radius centred on the image.
mask = al.mask.circular(
    shape_2d=imaging.shape_2d, pixel_scales=imaging.pixel_scales, radius=3.0
)
aplt.imaging.subplot_imaging(imaging=imaging, mask=mask)
# We simply import the signal-to-noise limit pipeline and pass the signal-to-noise limit we want as an input parameter
# (which for the pipeline below, is only used in phase 1).
from pipelines.beginner.features import signal_to_noise_limit
pipeline = signal_to_noise_limit.make_pipeline(
    phase_folders=[dataset_label, dataset_name], signal_to_noise_limit=20.0
)
pipeline.run(dataset=imaging, mask=mask)
| [
"james.w.nightingale@durham.ac.uk"
] | james.w.nightingale@durham.ac.uk |
88e2b2e2b6a4072ebb10fc9315ec7f85230da990 | e2c0b31cf4e1611631658ac2bc2dd22e8d3607b0 | /webapp/common/logger.py | 6718fab11a845f77104cde6b48b96fa087f817bf | [
"MIT"
] | permissive | binary-butterfly/open-booking-connect | 2aef9ed443ed8096e4876a923cfb02e535494d99 | ed153dd191c75810cbd2d9b74aee2962380a54d0 | refs/heads/master | 2023-08-19T22:12:47.150414 | 2021-10-10T18:53:35 | 2021-10-10T18:53:35 | 355,625,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,422 | py | # encoding: utf-8
"""
open booking connect
Copyright (c) 2021, binary butterfly GmbH
Use of this source code is governed by an MIT-style license that can be found in the LICENSE file.
"""
import os
import logging
from logging.handlers import WatchedFileHandler
from ..config import Config
class Logger:
    """Thin convenience wrapper around the stdlib logging module.

    Loggers are created lazily per log name, cached in a class-level
    registry, and write INFO+ records to <name>.log and ERROR+ records to
    <name>.err inside Config.LOG_DIR. With Config.DEBUG set, a console
    handler is attached as well.
    """

    registered_logs = {}

    @staticmethod
    def _build_file_handler(file_name, level):
        # WatchedFileHandler reopens the file if logrotate moves it away.
        handler = WatchedFileHandler(file_name)
        handler.setLevel(level)
        handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s ')
        )
        return handler

    def get_log(self, log_name):
        """Return the cached logger for log_name, creating it on first use."""
        cached = self.registered_logs.get(log_name)
        if cached is not None:
            return cached
        logger = logging.getLogger(log_name)
        logger.handlers.clear()
        logger.setLevel(logging.INFO)
        logger.addHandler(self._build_file_handler(
            os.path.join(Config.LOG_DIR, '%s.log' % log_name), logging.INFO))
        logger.addHandler(self._build_file_handler(
            os.path.join(Config.LOG_DIR, '%s.err' % log_name), logging.ERROR))
        if Config.DEBUG:
            console_handler = logging.StreamHandler()
            console_handler.setLevel(logging.DEBUG)
            console_handler.setFormatter(
                logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
            logger.addHandler(console_handler)
        self.registered_logs[log_name] = logger
        return logger

    @staticmethod
    def _with_details(message, details):
        # Append the optional details on a new line.
        return message + (("\n" + details) if details else "")

    def debug(self, log_name, message):
        self.get_log(log_name).debug(message)

    def info(self, log_name, message):
        self.get_log(log_name).info(message)

    def warn(self, log_name, message):
        self.get_log(log_name).warning(message)

    def error(self, log_name, message, details=None):
        self.get_log(log_name).error(self._with_details(message, details))

    def exception(self, log_name, message, details=None):
        self.get_log(log_name).exception(self._with_details(message, details))

    def critical(self, log_name, message, details=None):
        self.get_log(log_name).critical(self._with_details(message, details))
| [
"mail@ernestoruge.de"
] | mail@ernestoruge.de |
919f93e4e6bf935186a408a749c680b7cfb98e10 | d54b49fb7a899fa7c1b0bd5be02c9af43cb9cae0 | /accesspanel/extensions/lock_input.py | e551b8ba5094a45ef28355b950c2c7a16667ba3e | [
"BSD-3-Clause"
] | permissive | vincent-lg/accesspanel | 923a03f852aa96804abafe4c51833ded6e091427 | 42d27b7f12e9c3f9b5467a8ba4e973e2e9735796 | refs/heads/master | 2021-06-16T03:11:33.937714 | 2017-05-19T19:19:20 | 2017-05-19T19:19:20 | 71,724,440 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,986 | py | # Copyright (c) 2016, LE GOFF Vincent
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ytranslate nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module containing the LockInput extension."""
import wx
from accesspanel.extensions.base import BaseExtension
class LockInput(BaseExtension):
"""Implement a lock-in-input mode.
In this mode, the user cannot use tab or shift-tab to leave the
input field. The cases in which the lock is applied can be changed
through the extension's settings.
Behavior of the extension can be altered through attributes:
empty: the lock will be active unless the input is empty
>>> import wx
>>> from accesspanel import AccessPanel
>>> class MyAccessPanel(AccessPanel):
... def __init__(self, parent):
... AccessPanel.__init__(self, parent, lock_input=True)
... # Configure the lock
... lock = self.extensions["lock_input"]
... lock.empty = True
Default values:
input: False
If you with to modify these default values, see the example above.
"""
def __init__(self, panel):
BaseExtension.__init__(self, panel)
# Features that can be set in the AccessPanel
self.empty = False
def OnKeyDown(self, modifiers, key):
"""Prevent changing focus with tab/shift-tab."""
skip = True
if modifiers in (wx.MOD_NONE, wx.MOD_SHIFT) and key == wx.WXK_TAB:
if not self.empty:
skip = False
elif self.panel.input:
skip = False
return skip
| [
"vincent.legoff.srs@gmail.com"
] | vincent.legoff.srs@gmail.com |
bf34a703de23cc4f65a0f4660199b7563d9f3c42 | 9cd180fc7594eb018c41f0bf0b54548741fd33ba | /sdk/python/pulumi_azure_nextgen/peering/v20200101preview/registered_asn.py | 1b858c98471a749f3f06d915f00f04ec6173b3ae | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | MisinformedDNA/pulumi-azure-nextgen | c71971359450d03f13a53645171f621e200fe82d | f0022686b655c2b0744a9f47915aadaa183eed3b | refs/heads/master | 2022-12-17T22:27:37.916546 | 2020-09-28T16:03:59 | 2020-09-28T16:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,592 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['RegisteredAsn']
class RegisteredAsn(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
asn: Optional[pulumi.Input[int]] = None,
peering_name: Optional[pulumi.Input[str]] = None,
registered_asn_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
The customer's ASN that is registered by the peering service provider.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] asn: The customer's ASN from which traffic originates.
:param pulumi.Input[str] peering_name: The name of the peering.
:param pulumi.Input[str] registered_asn_name: The name of the ASN.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['asn'] = asn
if peering_name is None:
raise TypeError("Missing required property 'peering_name'")
__props__['peering_name'] = peering_name
if registered_asn_name is None:
raise TypeError("Missing required property 'registered_asn_name'")
__props__['registered_asn_name'] = registered_asn_name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['name'] = None
__props__['peering_service_prefix_key'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:peering/latest:RegisteredAsn"), pulumi.Alias(type_="azure-nextgen:peering/v20200401:RegisteredAsn")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(RegisteredAsn, __self__).__init__(
'azure-nextgen:peering/v20200101preview:RegisteredAsn',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'RegisteredAsn':
"""
Get an existing RegisteredAsn resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return RegisteredAsn(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def asn(self) -> pulumi.Output[Optional[int]]:
"""
The customer's ASN from which traffic originates.
"""
return pulumi.get(self, "asn")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peeringServicePrefixKey")
def peering_service_prefix_key(self) -> pulumi.Output[str]:
"""
The peering service prefix key that is to be shared with the customer.
"""
return pulumi.get(self, "peering_service_prefix_key")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
379c4b9eceb06d3d37321197658f376170da04d7 | dde951c8bcfb79cdead3449de42d9ed3e6f24fbe | /LearnPythontheHradWay/ex48/ex48/parser.py | 85e2fad7d534fc6e3c0c28fe74bf27d1a0b2d23c | [] | no_license | wolfeyuanwei/study-python | c764353cbf75b0ccd79dc562fe11eebee712510b | be1a9ec93cd29d9fe6b69ad4f9c059fb9dd308de | refs/heads/master | 2021-05-11T22:57:51.541684 | 2018-02-08T05:03:10 | 2018-02-08T05:03:10 | 117,504,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,955 | py | #!/user/bin/python
#filename:parser.py
#-*-coding:utf-8 -*-
class ParseError(Exception):
pass
class Sentence(object):
def __init__(self, subject, verb, obj):
self.subject = subject[1]
self.verb = verb[1]
self.object = obj[1]
def peek(word_list):
if word_list:
word = word_list[0]
return word[0]
else:
return None
def match(word_list, expecting):
if word_list:
word = word_list.pop(0)
if word[0] == expecting:
return word
else:
return None
else:
return None
def skip(word_list, word_type):
while peek(word_list) == word_type:
match(word_list, word_type)
def parse_verb(word_list):
skip(word_list, 'stop')
if peek(word_list) == 'verb':
return match(word_list, 'verb')
else:
raise ParserError("Expected a verb next.")
def parse_object(word_list):
skip(word_list, 'stop')
next_word = peek(word_list)
if next_word == 'noun':
return match(word_list, 'noun')
elif next_word == 'direction':
return match(word_list, 'direction')
else:
raise ParseError("Expected a noun or direction next.")
def parse_subject(word_list):
skip(word_list, 'stop')
next_word = peek(word_list)
if next_word == 'noun':
return match(word_list, 'noun')
elif next_word == 'verb':
return ('noun', 'player')
else:
raise ParseError("Expected a verb next.")
def parse_sentence(word_list):
subj = parse_subject(word_list)
verb = parse_verb(word_list)
obj = parse_object(word_list)
return Sentence(subj, verb, obj)
if __name__ == '__main__':
x = parse_sentence([('verb', 'run'), ('direction', 'north')])
print x.subject
print x.verb
print x.object
x = parse_sentence([('noun','bear'),('verb', 'eat'), ('stop', 'the'),('noun', 'honey')])
print x.subject
print x.verb
print x.object | [
"wolfe_yuan@163.com"
] | wolfe_yuan@163.com |
d4c65b9241fb8af0e9e3ce0f0f5a8c6653b57571 | fc365e7d2a558bf819b8062fb5a452e8c4ad3ca8 | /library/Codon.py | 8898e8ca7433963a80232402bd67fbb3deb8b0a8 | [] | no_license | ajrichards/phylogenetic-models | 9e47f27ff46ce95dc365d45fcd11949be3b506cb | 783f9a6b6cea816d255fa23f2e62423d98059ad9 | refs/heads/master | 2021-01-01T19:39:41.550390 | 2015-08-19T13:59:58 | 2015-08-19T13:59:58 | 25,293,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,871 | py | #!/usr/bin/env python
"""
TODO
change aa2pp to aa2ppc
and use http://www.geneinfinity.org/sp/sp_aaprops.html
"""
__author__ = "Adam Richards"
class Codon(object):
"""
A class to handle codon related functions
Amino acids are handled using the single character id, with the exception of STOP.
"""
def __init__(self):
"""
initialize dictionaries
"""
## dictionaries to convert between amino acids and codons
self.aa2codon = {'C': ['TGT','TGC'],\
'D': ['GAT','GAC'],\
'S': ['TCT', 'TCG', 'TCA', 'TCC', 'AGC', 'AGT'],\
'Q': ['CAA', 'CAG'],\
'M': ['ATG'],\
'N': ['AAC', 'AAT'],\
'P': ['CCT', 'CCG', 'CCA', 'CCC'],\
'K': ['AAG', 'AAA'],\
'STOP': ['TAG', 'TGA', 'TAA'],\
'T': ['ACC', 'ACA', 'ACG', 'ACT'],\
'F': ['TTT', 'TTC'],\
'A': ['GCA', 'GCC', 'GCG', 'GCT'],\
'G': ['GGT', 'GGG', 'GGA', 'GGC'],\
'I': ['ATC', 'ATA', 'ATT'],\
'L': ['TTA', 'TTG', 'CTC', 'CTT', 'CTG', 'CTA'],\
'H': ['CAT', 'CAC'],\
'R': ['CGA', 'CGC', 'CGG', 'CGT', 'AGG', 'AGA'],\
'W': ['TGG'],\
'V': ['GTA', 'GTC', 'GTG', 'GTT'],\
'E': ['GAG', 'GAA'],\
'Y': ['TAT', 'TAC']}
self.codon2aa = {}
for key,val in self.aa2codon.iteritems():
for c in val:
self.codon2aa[c] = key
## dictionaries to convert between amino acids physical property class
self.pp2aa = {"neg":["D","E"],\
"pos":["K","R","H"],\
"pnc":["S","T","C","M","N","Q"],\
"aro":["F","Y","W"],\
"npa":["G","A","V","L","I","P"]}
self.aa2pp = {}
for key,val in self.pp2aa.iteritems():
for c in val:
self.aa2pp[c] = key
## dictionaries to convert between short and long versions of amino acids
self.long2short = {"ALA":"A","ARG":"R","ASN":"N","ASP":"D",\
"CYS":"C","GLU":"E","GLN":"Q","GLY":"G",\
"HIS":"H","ILE":"I","LEU":"L","LYS":"K",\
"MET":"M","PHE":"F","PRO":"P","SER":"S",\
"THR":"T","TRP":"W","TYR":"Y","VAL":"V"}
self.short2long = dict([(value,key) for key,value in self.long2short.iteritems()])
if __name__ == "__main__":
print "Running..."
cd = Codon()
if cd.aa2pp["F"] != "aro":
raise Exception("Failed aa to pp test")
| [
"adamricha@gmail.com"
] | adamricha@gmail.com |
ca30c065fa064d98cd699f2fce97525a36130d24 | a1119965e2e3bdc40126fd92f4b4b8ee7016dfca | /trunk/repy/tests/ut_repytests_encodingcommentisignored.py | 6158b4dcedcbf4dd34764b31884788af8eca2271 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | SeattleTestbed/attic | 0e33211ddf39efdbcf5573d4fc7fa5201aa7310d | f618a962ce2fd3c4838564e8c62c10924f5df45f | refs/heads/master | 2021-06-10T23:10:47.792847 | 2017-05-15T12:05:43 | 2017-05-15T12:05:43 | 20,154,061 | 0 | 1 | null | 2014-10-16T17:21:06 | 2014-05-25T12:34:00 | Python | UTF-8 | Python | false | false | 179 | py | #!/usr/bin/env python
# -*- coding: rot13 -*-
#pragma repy
#pragma error NameError
# this will raise a NameError if this isn't rot13... (cevag == print)
cevag('hello world')
| [
"USER@DOMAIN"
] | USER@DOMAIN |
866ff7508fcdecdbbeb77624b1ce0bde394c5c83 | 1b78a071c2134beafc265b839ba8acba63142be2 | /intersight/models/os_windows_parameters.py | f22edebf6c628538017f34e5c2fad86260c944c0 | [
"Apache-2.0"
] | permissive | dyoshiha/intersight-python | 59c2ed3f751726a1d7c0e4254f1203e6546f1d47 | 01d1abcf8a9dcee0fe9150cdec70eb39d76ca290 | refs/heads/master | 2020-12-30T07:32:16.452334 | 2020-02-03T21:32:36 | 2020-02-03T21:32:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,942 | py | # coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1295
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class OsWindowsParameters(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'object_type': 'str',
'edition': 'str'
}
attribute_map = {
'object_type': 'ObjectType',
'edition': 'Edition'
}
def __init__(self, object_type=None, edition='Standard'):
"""
OsWindowsParameters - a model defined in Swagger
"""
self._object_type = None
self._edition = None
if object_type is not None:
self.object_type = object_type
if edition is not None:
self.edition = edition
@property
def object_type(self):
"""
Gets the object_type of this OsWindowsParameters.
The concrete type of this complex type. The ObjectType property must be set explicitly by API clients when the type is ambiguous. In all other cases, the ObjectType is optional. The type is ambiguous when a managed object contains an array of nested documents, and the documents in the array are heterogeneous, i.e. the array can contain nested documents of different types.
:return: The object_type of this OsWindowsParameters.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this OsWindowsParameters.
The concrete type of this complex type. The ObjectType property must be set explicitly by API clients when the type is ambiguous. In all other cases, the ObjectType is optional. The type is ambiguous when a managed object contains an array of nested documents, and the documents in the array are heterogeneous, i.e. the array can contain nested documents of different types.
:param object_type: The object_type of this OsWindowsParameters.
:type: str
"""
self._object_type = object_type
@property
def edition(self):
"""
Gets the edition of this OsWindowsParameters.
Lists all the editions supported for Windows Server installation.
:return: The edition of this OsWindowsParameters.
:rtype: str
"""
return self._edition
@edition.setter
def edition(self, edition):
"""
Sets the edition of this OsWindowsParameters.
Lists all the editions supported for Windows Server installation.
:param edition: The edition of this OsWindowsParameters.
:type: str
"""
allowed_values = ["Standard", "StandardCore", "Datacenter", "DatacenterCore"]
if edition not in allowed_values:
raise ValueError(
"Invalid value for `edition` ({0}), must be one of {1}"
.format(edition, allowed_values)
)
self._edition = edition
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, OsWindowsParameters):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"ucs-build@github.com"
] | ucs-build@github.com |
1412a681a3399d9ea069e81d3912099989b16321 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /YRwZvg5Pkgw4pEWC5_11.py | aecb9fbb1c52ca98444155f4ac40055298714c4d | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py |
def flick_switch(lst):
a=True
c=[]
for i in range(len(lst)):
if lst[i]!="flick":
c.append(a)
if lst[i]=="flick":
a=not a
c.append(a)
return c
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
262e4ad5a1421368dfd879c840aabfff3ff1235f | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/EightTeV/PYTHIA6/PYTHIA6_Tauola_SM_H_2tau_zh_mH140_lepdecay_8TeV_cff.py | 82691bffb6acd1a22174cf7f3d23d9a9bfb00d7a | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 4,537 | py | import FWCore.ParameterSet.Config as cms
source = cms.Source("EmptySource")
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
from GeneratorInterface.ExternalDecays.TauolaSettings_cff import *
generator = cms.EDFilter("Pythia6GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(1),
# put here the efficiency of your filter (1. if no filter)
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
# put here the cross section of your process (in pb)
crossSection = cms.untracked.double(1.0),
maxEventsToPrint = cms.untracked.int32(1),
comEnergy = cms.double(8000.0),
ExternalDecays = cms.PSet(
Tauola = cms.untracked.PSet(
TauolaPolar,
TauolaDefaultInputCards
),
parameterSets = cms.vstring('Tauola')
),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring('PMAS(25,1)=140.0 !mass of Higgs',
'MSEL=0 ! user selection for process',
'MSUB(102)=0 !ggH',
'MSUB(123)=0 !ZZ fusion to H',
'MSUB(124)=0 !WW fusion to H',
'MSUB(24)=1 !ZH production',
'MSUB(26)=0 !WH production',
'MSUB(121)=0 !gg to ttH',
'MSUB(122)=0 !qq to ttH',
'MDME(174,1)=0 !Z decay into d dbar',
'MDME(175,1)=0 !Z decay into u ubar',
'MDME(176,1)=0 !Z decay into s sbar',
'MDME(177,1)=0 !Z decay into c cbar',
'MDME(178,1)=0 !Z decay into b bbar',
'MDME(179,1)=0 !Z decay into t tbar',
'MDME(182,1)=1 !Z decay into e- e+',
'MDME(183,1)=0 !Z decay into nu_e nu_ebar',
'MDME(184,1)=1 !Z decay into mu- mu+',
'MDME(185,1)=0 !Z decay into nu_mu nu_mubar',
'MDME(186,1)=1 !Z decay into tau- tau+',
'MDME(187,1)=0 !Z decay into nu_tau nu_taubar',
'MDME(190,1)=0 ! W decay into dbar u',
'MDME(191,1)=0 ! W decay into dbar c',
'MDME(192,1)=0 ! W decay into dbar t',
'MDME(194,1)=0 ! W decay into sbar u',
'MDME(195,1)=0 ! W decay into sbar c',
'MDME(196,1)=0 ! W decay into sbar t',
'MDME(198,1)=0 ! W decay into bbar u',
'MDME(199,1)=0 ! W decay into bbar c',
'MDME(200,1)=0 ! W decay into bbar t',
'MDME(206,1)=0 ! W decay into e+ nu_e',
'MDME(207,1)=0 ! W decay into mu+ nu_mu',
'MDME(208,1)=0 ! W decay into tau+ nu_tau',
'MDME(210,1)=0 !Higgs decay into dd',
'MDME(211,1)=0 !Higgs decay into uu',
'MDME(212,1)=0 !Higgs decay into ss',
'MDME(213,1)=0 !Higgs decay into cc',
'MDME(214,1)=0 !Higgs decay into bb',
'MDME(215,1)=0 !Higgs decay into tt',
'MDME(216,1)=0 !Higgs decay into',
'MDME(217,1)=0 !Higgs decay into Higgs decay',
'MDME(218,1)=0 !Higgs decay into e nu e',
'MDME(219,1)=0 !Higgs decay into mu nu mu',
'MDME(220,1)=1 !Higgs decay into tau nu tau',
'MDME(221,1)=0 !Higgs decay into Higgs decay',
'MDME(222,1)=0 !Higgs decay into g g',
'MDME(223,1)=0 !Higgs decay into gam gam',
'MDME(224,1)=0 !Higgs decay into gam Z',
'MDME(225,1)=0 !Higgs decay into Z Z',
'MDME(226,1)=0 !Higgs decay into W W'
),
# This is a vector of ParameterSet names to be read, in this order
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
)
)
configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.1 $'),
name = cms.untracked.string('$Source: /local/reps/CMSSW/CMSSW/Configuration/GenProduction/python/EightTeV/PYTHIA6_Tauola_SM_H_2tau_wh_zh_tth_mH1140_lepdecay_8TeV_cff.py,v $'),
annotation = cms.untracked.string('PYTHIA6 WH/ZH/ttH, H->tautau mH=140GeV with TAUOLA at 8TeV')
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"sha1-c8b28d70dd1f4235246c4a027e80dcdcf397db6f@cern.ch"
] | sha1-c8b28d70dd1f4235246c4a027e80dcdcf397db6f@cern.ch |
c087d32f2facd8e4c3b24b1fd7edf2e63c4c486e | e28009b0a4584e8d128ed6fbd4ba84a1db11d1b9 | /1.Two Sum/Two Sum.py | 49443b25034a21dc94c2086ac0cca41d3b957794 | [] | no_license | jerrylance/LeetCode | 509d16e4285296167feb51a80d6c382b3833405e | 06ed3e9b27a3f1c0c517710d57fbbd794fd83e45 | refs/heads/master | 2020-12-02T23:10:27.382142 | 2020-08-02T02:03:54 | 2020-08-02T02:03:54 | 231,141,551 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,575 | py | # LeetCode Solution
# Zeyu Liu
# 2019.12.31
# 1.Two Sum
# method 1
# 暴力
from typing import List
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
renums = []
n = len(nums)
for i in range(n):
for j in range(i+1,n):
if nums[i]+nums[j] == target:
renums.append(i)
renums.append(j)
return(renums)
# transfer method
solve = Solution()
print(solve.twoSum([2,7,11,15],9))
# method 2
# 哈希(比较好)
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
dictory = {}
for i, num in enumerate(nums):
if num in dictory:
return[dictory[num], i]
else:
dictory[target - num] = i
# enumerate()函数可以把一个List按照索引从小到大的顺序组成一个字典
# 速度最快
# transfer method
solve = Solution()
print(solve.twoSum([2,7,11,15],9))
# method 3
# 切片
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
n = len(nums)
for i in range(n):
if target - nums[i] in nums[i+1:]:
return [i, nums.index(target - nums[i],i+1)]
# 这里return中的i+1,是index函数中的参数,意味着索引起始值从自己下一个数开始,如果不设置,那么如果有相等value时,如(3,3),6这种情况下会返回[0,0],而不是[0,1]
# 切片占用内存较小
# transfer method
solve = Solution()
print(solve.twoSum([2,7,11,15,-2],9)) | [
"noreply@github.com"
] | jerrylance.noreply@github.com |
979baf5b9d39a2a3c69640d35c34d92478815b1f | 6dfc23ef65e5943712340ef2b4b648cc25ea1fad | /2018/04/12/Creating a Weather App in Django Using Python Requests [Part 1]/weather_app_django/the_weather/weather/views.py | 7a14b9b7253bd4e4585a206e82820683967d15e3 | [
"Unlicense"
] | permissive | PrettyPrinted/youtube_video_code | 6d265c910de18d780cdb99f7ea11b8b963929dc2 | 5654e5feba854d3b41b8dd75218e0221408e7831 | refs/heads/master | 2023-09-04T21:28:57.386174 | 2023-08-11T07:07:45 | 2023-08-11T07:07:45 | 186,743,986 | 698 | 2,347 | Unlicense | 2022-10-06T04:06:56 | 2019-05-15T03:40:45 | HTML | UTF-8 | Python | false | false | 857 | py | import requests
from django.shortcuts import render
from .models import City
from .forms import CityForm
def index(request):
url = 'http://api.openweathermap.org/data/2.5/weather?q={}&units=imperial&appid=YOUR_API_KEY'
if request.method == 'POST':
form = CityForm(request.POST)
form.save()
form = CityForm()
cities = City.objects.all()
weather_data = []
for city in cities:
r = requests.get(url.format(city)).json()
city_weather = {
'city' : city.name,
'temperature' : r['main']['temp'],
'description' : r['weather'][0]['description'],
'icon' : r['weather'][0]['icon'],
}
weather_data.append(city_weather)
context = {'weather_data' : weather_data, 'form' : form}
return render(request, 'weather/weather.html', context)
| [
"anthony@prettyprinted.com"
] | anthony@prettyprinted.com |
cecca4dec78b16568a5dd8a9f07ecf906268784e | 49a93012ce18b72abdb85aae1af09504fa039b6c | /20년 2월/1479.py | bcaca841af25e0c185a53b6c16c59829d9464ae7 | [] | no_license | JinleeJeong/Algorithm | ca3e755a29537f8d82ef770f174fd055242dd708 | a81257d7e4a54a00ac2c9a1dd324cc7eeb765240 | refs/heads/master | 2020-12-08T13:26:27.917450 | 2020-05-12T07:16:08 | 2020-05-12T07:16:08 | 232,992,516 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | n, m = map(int, input().split())
matrix = [[0]*m for i in range(n)]
count = 0
#0부터 3까지 0~4
for i in range(0, n+m-1): # 0 1 2 3 n+m-1인 이유는 2행 3열을 나타내기 때문에, 나열했을 때, i값과 같아야 함!!
for j in range(0, m): # 0 1 2
for k in range(0, n): # 0 1
if j+k == i:
count += 1
matrix[k][j] = count
for i in range(0, n):
for j in range(m-1, -1, -1):
print(matrix[i][j], end=' ')
print() | [
"pjjr0118@gmail.com"
] | pjjr0118@gmail.com |
8e0c705abcc33c690cb87bdeaeccab37c03a3755 | 52b43ba9fdba64b9a82f8042ebb19190d811b6de | /ck/incubator/cbench/setup.py | 42dbabf39607cae66eddc9c4792a66cbb0107282 | [
"Apache-2.0",
"CC-BY-SA-3.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | cknowledge/ck | 4f18f6dd6c6278a8942d7288ae5c68007da190fa | ffcf31d18b14f55b8f20b7a9d078ebd61023ca7e | refs/heads/master | 2023-03-15T18:10:57.003077 | 2023-03-06T12:11:41 | 2023-03-06T12:11:41 | 256,570,425 | 0 | 0 | BSD-3-Clause | 2020-04-17T17:40:09 | 2020-04-17T17:40:08 | null | UTF-8 | Python | false | false | 3,205 | py | #
# Developer(s): Grigori Fursin
# Herve Guillou
#
import os
import sys
import imp
############################################################
from setuptools import find_packages, setup, convert_path
try:
from io import open
except ImportError:
pass
############################################################
# Version
version = imp.load_source(
'cbench.__init__', os.path.join('cbench', '__init__.py')).__version__
# Default portal
portal_url='https://cKnowledge.io'
############################################################
setup(
name='cbench',
author="Grigori Fursin",
author_email="Grigori.Fursin@cTuning.org",
version=version,
description="A cross-platform client to perform collaborative and reproducible benchmarking, optimization and co-design of software and hardware for emerging workloads (AI, ML, quantum, IoT) via the open cKnowledge.io portal",
license="Apache Software License (Apache 2.0)",
long_description=open(convert_path('./README.md'), encoding="utf-8").read(),
long_description_content_type="text/markdown",
url=portal_url,
python_requires=">=2.7",
packages=find_packages(exclude=["tests*", "docs*"]),
package_data={"cbench":['static/*']},
include_package_data=True,
install_requires=[
'requests',
'click>=7.0',
'ck',
'virtualenv'
],
entry_points={
"console_scripts":
[
"cr = cbench.main:cli",
"cb = cbench.main:cli",
"cbench = cbench.main:cli"
]
},
zip_safe=False,
keywords="reproducible benchmarking, customizable benchmarking, portable workflows, reusable computational components, reproducibility, collaborative experiments, automation, optimization, co-design, collective knowledge",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"Environment :: Console",
"Environment :: Plugins",
"Environment :: Web Environment",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering",
"Topic :: Software Development",
"Topic :: System",
"Topic :: System :: Benchmark",
"Topic :: Education",
"Topic :: Utilities"
],
)
###########################################################
# Get release notes
import cbench.comm_min
r=cbench.comm_min.send({'url':portal_url+'/api/v1/?',
'action':'event',
'dict':{'type':'get-cbench-release-notes','version':version}})
notes=r.get('notes','')
if notes!='':
print ('*********************************************************************')
print ('Release notes:')
print ('')
print (notes)
print ('*********************************************************************')
| [
"Grigori.Fursin@cTuning.org"
] | Grigori.Fursin@cTuning.org |
0d8b8bb1d3429f2aa1d4bab4fa0f23598807ad86 | 7985715183962847e4717da8be46ce9415bd4a3f | /tests/parse/parse_bbox_input.py | 93ea3eff60dfad44a9f8853288306d6449478cf6 | [] | no_license | mgax/pywps-4 | 68bcd2e3398f2a312715e43135797c6906c1f7c9 | 03fd63d490d22b86dc1c14cb51eb6fb437812ca6 | refs/heads/master | 2020-04-10T13:30:09.145281 | 2013-06-03T20:22:23 | 2013-06-03T20:22:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,374 | py | """Test parsing of BoundingBoxInput
"""
import os
import sys
from io import StringIO
from lxml import objectify
pywpsPath = os.path.abspath(os.path.join(os.path.split(os.path.abspath(__file__))[0],"..",".."))
sys.path.insert(0,pywpsPath)
sys.path.append(pywpsPath)
import unittest
from pywps.request.execute.bbox import BoundingBoxInput
class ParseBBoxInputTestCase(unittest.TestCase):
def setUp(self):
self.inpt = BoundingBoxInput("bbox")
def test_parse_bbox_input_GET(self):
# testing basic parsing
request="bbox=1,2,3,4"
self.inpt.parse_url(request)
self.assertEquals(1,self.inpt.get_value().left)
self.assertEquals(2,self.inpt.get_value().dimensions)
# parse crs
request="bbox=1,2,3,4,epsg:4326"
self.inpt.parse_url(request)
self.assertEquals("EPSG:4326",self.inpt.get_crs(1).getcode())
def test_parse_bbox_input_POST(self):
"""Parse bounding box input XML"""
req_str = StringIO("""<wps:Input xmlns:wps="http://www.opengis.net/wps/1.0.0" xmlns:ows="http://www.opengis.net/ows/1.1">
<ows:Identifier>bbox</ows:Identifier>
<ows:Title>Bounding box title</ows:Title>
<ows:BoundingBox xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.opengis.net/ows/1.1 owsCommon.xsd"
crs="urn:ogc:crs:EPSG:6.3:26986" dimensions="2">
<!-- Example. Primary editor: Arliss Whiteside. Last updated 2005- 01-25 -->
<ows:LowerCorner>189000 834000</ows:LowerCorner>
<ows:UpperCorner>285000 962000</ows:UpperCorner>
</ows:BoundingBox>
</wps:Input>""")
request = objectify.parse(req_str)
self.inpt.parse_xml(request.getroot())
self.assertEquals(189000,self.inpt.get_value(2).left)
self.assertEquals(962000,self.inpt.get_value(2).top)
self.assertEquals(26986,self.inpt.get_crs(2).code)
self.assertEquals(2,self.inpt.get_dimensions(2))
pass
def test_parse_bbox_wgs84_POST(self):
"""Parse bounding box input XML as WGS84"""
req_str = StringIO("""<wps:Input xmlns:wps="http://www.opengis.net/wps/1.0.0" xmlns:ows="http://www.opengis.net/ows/1.1">
<ows:Identifier>bbox</ows:Identifier>
<ows:Title>Bounding box WGS84 title</ows:Title>
<ows:WGS84BoundingBox xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.opengis.net/ows/1.1 owsCommon.xsd">
<!-- Example. Primary editor: Arliss Whiteside. Last updated 2004/10/13. -->
<ows:LowerCorner>-71.63 41.75</ows:LowerCorner>
<ows:UpperCorner>-70.78 42.90</ows:UpperCorner>
</ows:WGS84BoundingBox>
</wps:Input>""")
request = objectify.parse(req_str)
self.inpt.parse_xml(request.getroot())
self.assertEquals(-71.63,self.inpt.get_value(3).left)
self.assertEquals(42.90,self.inpt.get_value(3).top)
self.assertEquals("EPSG:4326",self.inpt.get_value(3).get_crs().getcode())
pass
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(ParseBBoxInputTestCase)
unittest.TextTestRunner(verbosity=4).run(suite)
| [
"jachym.cepicky@gmail.com"
] | jachym.cepicky@gmail.com |
fa3703b31c4348a9ad084e7286d2ee6b4c101c05 | 76adadc595cf0e27f03833036ecb9e7e9387c7d5 | /obstacle_avoidance_gazebo_and_tx2/Navigator_2D_gazebo/Pos2PosController.py | 551b7e6c1ece177a9ce71037ff31a5155b4b3d49 | [
"Apache-2.0"
] | permissive | hddxds/scripts_from_gi | 2fdef4dc747b6a269a1aa9df871afaca19bbe178 | afb8977c001b860335f9062464e600d9115ea56e | refs/heads/master | 2022-12-08T21:32:42.307594 | 2020-09-07T13:25:40 | 2020-09-07T13:25:40 | 293,529,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,772 | py | #encoding=utf-8
def bangbang(error): # on 1 axis:
if error>THRES:
move(-error)
else:
pass
if error<-THRES:
move(error)
else:
pass
def pid():
pass
def
class Pos2PosController:
def __init__(self):
pass
def main_thread(self):
self.init_mavros()
self.main_loop()
def init_mavros(self):
while self.mavros_state == "OFFBOARD":
def set_mavros_mode(self, mode):
self.mavros_mode
def get
def main_loop(self):
while True:
#PID controller or bang-bang controller
get_target() # continuous x,y,z,yaw
get_current_pos()
err = xxx
do_action()
sleep(0.1)
def mav_move(self, position_x, position_y, position_z, relative_yaw=0):
self.set_status(status.GOING_TO_TARGET)
new_command = Command()
new_command.header.stamp = rospy.Time.now()
# use body frame
new_command.sub_mode = 0
# use command = MOVE according to AMO lab
new_command.command = 6
new_command.pos_sp[0] = position_x
new_command.pos_sp[1] = position_y
new_command.pos_sp[2] = position_z
new_command.vel_sp[0] = 0.0
new_command.vel_sp[1] = 0.0
new_command.yaw_sp = relative_yaw # TODO:fix this with 2step: 1:move;2.rotate(in absolute mode)
new_command.comid = self.cur_command_id
# self.task_id = new_command.comid
self.prev_command_id = self.cur_command_id
self.cur_command_id = self.cur_command_id + 1
self.mavros_control_pub.publish(new_command)
if self.reachTargetPosition(new_command):
return True
else:
return False
| [
"dongshuo@giai.tech"
] | dongshuo@giai.tech |
ca3978a5f8b151caf4f1debd0bc43cc60768672a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03971/s931383808.py | 524b86c116645356f3c2bf545938cf9268af51e8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | n,a,b = map(int , input().split())
s = input()
Judgment = 0
rank = 0
overseas=0
for num in range(n):
if s[num] == 'a' and rank < a+b:
print('Yes')
rank = rank+1
elif s[num] == 'b' and rank < a+b and overseas < b:
print('Yes')
rank = rank +1
overseas = overseas + 1
else:
print('No') | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c697e60d74d74ff44aacff3b5a9830ed04f0219a | 1e9c9f2a9639db7cdb032aae69cb4d99aef1d3a5 | /youTube/codebasics/numpyTutorial/2_basicArrayOperations.py | 39a039c8f30e1b3b68b438c43594839bac54cbd0 | [
"MIT"
] | permissive | sagarnikam123/learnNPractice | f0da3f8acf653e56c591353ab342765a6831698c | 1b3b0cb2cff2f478006626a4c37a99102acbb628 | refs/heads/master | 2023-02-04T11:21:18.211654 | 2023-01-24T14:47:52 | 2023-01-24T14:47:52 | 61,184,927 | 2 | 1 | MIT | 2022-03-06T11:07:18 | 2016-06-15T06:57:19 | Python | UTF-8 | Python | false | false | 364 | py | import numpy as np
a = np.array([5, 6, 9])
print('a: ', a)
print('a dimension: ', a.ndim)
print('access elemeent', a[1])
print('itemsize: ', a.itemsize)
print('############### Multidimensional ###############')
t = np.array([ [1, 2], [3, 4], [5, 6]])
print('t: ', t)
print('t dimension: ', t.ndim)
print('access element: ', t[2])
print('itemsize: ', t.itemsize)
| [
"sagarnikam123@gmail.com"
] | sagarnikam123@gmail.com |
855b7266b343bfa7d96dc95976e72742f81f2cd1 | 3c1ad0919924ed8d96ae5f9d9a10b97cfdf1ee38 | /topic_categories.py | 66576dd60242eef788d762326154e23626f8eccc | [] | no_license | emonson/CopyrightScripts | 4439ba584840e74ebdc5ab6083887e530757de64 | 862e5d2eb0af848647bf1cb2d95519071a00adc0 | refs/heads/master | 2020-05-18T15:37:15.926524 | 2017-03-16T14:51:08 | 2017-03-16T14:51:08 | 1,569,450 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,256 | py | #!/usr/bin/env python
"""
Script to move articles (e.g. de, de la, d'...) from before
last name to after first name in Hilary's FileMaker Pro database
through the ODBC connection
Takes in an Excel workbook with three sheets:
one for the list of articles
one for the column and table names (Table, ID name, Last name, First name)
one for name exceptions that should not be changed
31 Jan 2012 -- E Monson
"""
from openpyxl import load_workbook
import numpy as N
import os
from pymongo import Connection
# Make a connection to MongoDB
try:
db_conn = Connection()
# db_conn = Connection("emo2.trinity.duke.edu", 27017)
except ConnectionFailure:
print "couldn't connect: be sure that Mongo is running on localhost:27017"
sys.exit(1)
db = db_conn['fashion_ip']
def n_str(s, a):
"""Deals with None in first_name"""
if s is None:
return unicode(a.strip())
else:
return unicode(s.decode('utf8').strip() + ' ' + a.strip())
in_file = '/Users/emonson/Data/ArtMarkets/Katherine/mallet/nonstate_topic_keys_KDD_Edits.xlsx'
doc_topics_file = '/Users/emonson/Data/ArtMarkets/Katherine/mallet/nonstate_copy_200_doc_topics.txt'
# Load in Excel sheet with topic keys
wb = load_workbook(in_file)
sheet = wb.get_sheet_by_name("nonstate_copy_200_topic_keys.tx")
row_tuples = [tuple(xx.value for xx in yy) for yy in sheet.rows]
ntopics = len(sheet.rows)
subject_names = []
subject_vectors = []
for tt in row_tuples:
subs = tt[0] # subject string
top = tt[1] # topic index
if subs is not None:
# compound subjects separated by commas
subs_list = [xx.strip() for xx in subs.split(',')]
for sub in subs_list:
if sub not in subject_names:
subject_names.append(sub)
subject_vectors.append(N.zeros(ntopics))
idx = subject_names.index(sub)
subject_vectors[idx][top] = 1
# Read in document topics and calculate subject mixtures
file_ids = []
file_subjects = []
for jj, line in enumerate(open(doc_topics_file)):
# Header line
if jj == 0:
continue
ll = line.rstrip().split(' ')
# Get rid of document index
del ll[0]
# Grab the file ID
file_ids.append(os.path.splitext(os.path.basename(ll[0]))[0])
del ll[0]
# Generate the ordered array of topic weight values
# (initially ordered by weight rather than by topic)
weights = N.zeros(ntopics)
for ii in range(0,len(ll),2):
weights[int(ll[ii])] = float(ll[ii+1])
# Do a dot product to find the subject overlap
subject_weights = []
for ss in subject_vectors:
subject_weights.append(N.dot(ss,weights))
file_subjects.append(subject_weights)
print "Done computing subject vectors"
# Probably should have output MongoDB docs with _id as name of file
# to make sure it's really unique, but I think the Google Scholar file name
# is also a unique identifier.
# Clear out all subjects first so we don't get leftovers from another analysis
print "Clearing out old subjects"
db.docs.update({},{'$unset':{'subjects':1}})
# Add in new subject weights as name:weight pairs
print "Updating new subjects"
for name, vector in zip(file_ids, file_subjects):
sub_dict = dict(zip(subject_names, vector))
db.docs.update({'filename':name+'.html'},{'$set':{'subjects':sub_dict}}, upsert=False, multi=False)
| [
"emonson@cs.duke.edu"
] | emonson@cs.duke.edu |
4a7a97e46437fe38cad331c51922a344587cc1e3 | 3432efd194137e1d0cb05656eb547c9992229f02 | /django/pytest/test5/test5/settings.py | 19b1d23ae1de415200eae15591fceee0dd6403bd | [] | no_license | zhanganxia/other_code | 31747d7689ae1e91fcf3f9f758df130246e7d495 | 8d09d9d0b6d6a1a9b8755487f926ac6fafd761fa | refs/heads/master | 2021-09-04T02:22:38.632685 | 2018-01-14T15:37:14 | 2018-01-14T15:37:14 | 107,007,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,946 | py | """
Django settings for test5 project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vp2*ll9@-#nv1q)1$lor5g+6xol4v2ql22&rq&lkgng&x1musf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'booktest',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
# 'booktest.middleware.my_mid',
)
ROOT_URLCONF = 'test5.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test5.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'test2',
'USER': 'test',
'PASSWORD': 'mysql',
'HOST': 'localhost',
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# STATIC_URL = '/abc/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'static'),
]
MEDIA_ROOT = os.path.join(BASE_DIR,"static/media")
| [
"kk@kk.rhel.cc"
] | kk@kk.rhel.cc |
942e8bf47622d8a6e758e7280fef2995844ceadc | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02775/s986607651.py | c3546065a4b749bfa2bed51b4f2688ba876de31b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | s=input()
dp0=[0]*(len(s)+1)
dp1=[0]*(len(s)+1)
dp1[0]=1
for i in range(1,len(s)+1):
n=int(s[i-1:i])
dp0[i]=min(dp0[i-1]+n,dp1[i-1]+10-n)
dp1[i]=min(dp0[i-1]+(1 if n+1==10 else n+1),dp1[i-1]+10-n-1)
print(dp0[-1]) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
3ec0809e0b6b7fba80e5361b491f4b5848f0fffb | 4012f290d83ae7f4c09d7440f26d2acd7e63efbe | /2705.py | cfa9d3e09805b3d8318fdf0725d78d909b7a3f91 | [] | no_license | jinaur/codeup | ffc2d0fdf73892c1f46d80021ad8f4c1293c9e2e | 5f75ace909e2b3151171932cc3ee9f3c49dd46d9 | refs/heads/master | 2023-04-15T07:42:06.244806 | 2021-04-25T13:59:42 | 2021-04-25T13:59:42 | 277,760,813 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | n = int(input())
l, t = list(map(int, input().split()))
a = list(map(int, input().split()))
r = 0
count = 0
i_count = 0
for i in range(0, n) :
if a[i] < i_count + count :
continue
else :
count = 0
for j in range(1, l+1) :
if i >= n-j :
break
if a[i] == a[i+j] :
i_count = a[i]
count += t
break
if count == 0 :
r += 10000
print(r) | [
"50763720+jinaur@users.noreply.github.com"
] | 50763720+jinaur@users.noreply.github.com |
2f8c934b78b3d3a3e7e7e52ba27a85a4e8cc7054 | 0760fb4901a75766921a205b55686d6d6f049b30 | /rllib/utils/tests/test_taskpool.py | de0fd4919e05832faff78e2270f197ead660328b | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | ray-project/ray | a4bb6940b08b59a61ef0b8e755a52d8563a2f867 | edba68c3e7cf255d1d6479329f305adb7fa4c3ed | refs/heads/master | 2023-08-31T03:36:48.164405 | 2023-08-31T03:20:38 | 2023-08-31T03:20:38 | 71,932,349 | 29,482 | 5,669 | Apache-2.0 | 2023-09-14T21:48:14 | 2016-10-25T19:38:30 | Python | UTF-8 | Python | false | false | 5,151 | py | import unittest
from unittest.mock import patch
import ray
from ray.rllib.utils.actors import TaskPool
def createMockWorkerAndObjectRef(obj_ref):
return ({obj_ref: 1}, obj_ref)
class TaskPoolTest(unittest.TestCase):
@patch("ray.wait")
def test_completed_prefetch_yieldsAllComplete(self, rayWaitMock):
task1 = createMockWorkerAndObjectRef(1)
task2 = createMockWorkerAndObjectRef(2)
# Return the second task as complete and the first as pending
rayWaitMock.return_value = ([2], [1])
pool = TaskPool()
pool.add(*task1)
pool.add(*task2)
fetched = list(pool.completed_prefetch())
self.assertListEqual(fetched, [task2])
@patch("ray.wait")
def test_completed_prefetch_yieldsAllCompleteUpToDefaultLimit(self, rayWaitMock):
# Load the pool with 1000 tasks, mock them all as complete and then
# check that the first call to completed_prefetch only yields 999
# items and the second call yields the final one
pool = TaskPool()
for i in range(1000):
task = createMockWorkerAndObjectRef(i)
pool.add(*task)
rayWaitMock.return_value = (list(range(1000)), [])
# For this test, we're only checking the object refs
fetched = [pair[1] for pair in pool.completed_prefetch()]
self.assertListEqual(fetched, list(range(999)))
# Finally, check the next iteration returns the final taks
fetched = [pair[1] for pair in pool.completed_prefetch()]
self.assertListEqual(fetched, [999])
@patch("ray.wait")
def test_completed_prefetch_yieldsAllCompleteUpToSpecifiedLimit(self, rayWaitMock):
# Load the pool with 1000 tasks, mock them all as complete and then
# check that the first call to completed_prefetch only yield 999 items
# and the second call yields the final one
pool = TaskPool()
for i in range(1000):
task = createMockWorkerAndObjectRef(i)
pool.add(*task)
rayWaitMock.return_value = (list(range(1000)), [])
# Verify that only the first 500 tasks are returned, this should leave
# some tasks in the _fetching deque for later
fetched = [pair[1] for pair in pool.completed_prefetch(max_yield=500)]
self.assertListEqual(fetched, list(range(500)))
# Finally, check the next iteration returns the remaining tasks
fetched = [pair[1] for pair in pool.completed_prefetch()]
self.assertListEqual(fetched, list(range(500, 1000)))
@patch("ray.wait")
def test_completed_prefetch_yieldsRemainingIfIterationStops(self, rayWaitMock):
# Test for issue #7106
# In versions of Ray up to 0.8.1, if the pre-fetch generator failed to
# run to completion, then the TaskPool would fail to clear up already
# fetched tasks resulting in stale object refs being returned
pool = TaskPool()
for i in range(10):
task = createMockWorkerAndObjectRef(i)
pool.add(*task)
rayWaitMock.return_value = (list(range(10)), [])
# This should fetch just the first item in the list
try:
for _ in pool.completed_prefetch():
# Simulate a worker failure returned by ray.get()
raise ray.exceptions.RayError
except ray.exceptions.RayError:
pass
# This fetch should return the remaining pre-fetched tasks
fetched = [pair[1] for pair in pool.completed_prefetch()]
self.assertListEqual(fetched, list(range(1, 10)))
@patch("ray.wait")
def test_reset_workers_pendingFetchesFromFailedWorkersRemoved(self, rayWaitMock):
pool = TaskPool()
# We need to hold onto the tasks for this test so that we can fail a
# specific worker
tasks = []
for i in range(10):
task = createMockWorkerAndObjectRef(i)
pool.add(*task)
tasks.append(task)
# Simulate only some of the work being complete and fetch a couple of
# tasks in order to fill the fetching queue
rayWaitMock.return_value = ([0, 1, 2, 3, 4, 5], [6, 7, 8, 9])
fetched = [pair[1] for pair in pool.completed_prefetch(max_yield=2)]
# As we still have some pending tasks, we need to update the
# completion states to remove the completed tasks
rayWaitMock.return_value = ([], [6, 7, 8, 9])
pool.reset_workers(
[
tasks[0][0],
tasks[1][0],
tasks[2][0],
tasks[3][0],
# OH NO! WORKER 4 HAS CRASHED!
tasks[5][0],
tasks[6][0],
tasks[7][0],
tasks[8][0],
tasks[9][0],
]
)
# Fetch the remaining tasks which should already be in the _fetching
# queue
fetched = [pair[1] for pair in pool.completed_prefetch()]
self.assertListEqual(fetched, [2, 3, 5])
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| [
"noreply@github.com"
] | ray-project.noreply@github.com |
3838bf7b8309dce816bfa0285dca3bdb5173f0a0 | 3a426d6cd831183fa00a22e426da44692c870f0c | /sidekick-seq/sidekick/seq/util.py | 670f4e19ab6178bea6335d246611acef2e759a05 | [
"MIT"
] | permissive | fabiommendes/sidekick | d399d57f13ae606a99623af22c63a32343d66592 | 993ae7b8496347ad9720d3ff11e10ab946c3a800 | refs/heads/master | 2021-07-09T15:48:04.113881 | 2021-06-28T16:44:21 | 2021-06-28T16:44:56 | 98,376,293 | 32 | 5 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | import itertools
from ..typing import Index
INDEX_DOC = """index:
Either a number that starts an infinite enumeration or a sequence
of indexes that is passed as the first argument to func."""
def to_index_seq(index: Index):
"""
Convert the index argument of many functions to a proper sequence.
"""
if index is False or index is None:
return None
elif index is True:
return itertools.count(0)
elif isinstance(index, int):
return itertools.count(index)
else:
return index
def vargs(args):
"""
Conform function args to a sequence of sequences.
"""
n = len(args)
if n == 1:
return args[0]
elif n == 0:
raise TypeError("no arguments given")
else:
return args
| [
"fabiomacedomendes@gmail.com"
] | fabiomacedomendes@gmail.com |
e5de69aeef1912a706f33d248d0f5177a7659fe7 | 62c6884e9597d96a25d274515d6124c46daffec8 | /examples/reports/__init__.py | 0984649742c7f11520b17547eaebd313405fb49e | [
"MIT"
] | permissive | doncat99/zvt | 0f9305442af287e63f15de11cb2e2f6b5f9b3d05 | 831183bdf7a6d0fc3acd3ea51984df590078eec6 | refs/heads/master | 2023-03-22T13:35:17.277276 | 2021-03-10T14:02:08 | 2021-03-10T14:02:08 | 284,984,720 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,753 | py | # -*- coding: utf-8 -*-
import datetime
import json
import os
from sqlalchemy import or_
from zvt.api.data_type import Region
from zvt.utils.pd_utils import pd_is_not_null
from zvt.utils.time_utils import to_pd_timestamp, now_time_str
from zvt.domain import FinanceFactor, BalanceSheet, IncomeStatement
def get_subscriber_emails():
emails_file = os.path.abspath(os.path.join(os.path.dirname(__file__), 'subscriber_emails.json'))
with open(emails_file) as f:
return json.load(f)
def risky_company(the_date=to_pd_timestamp(now_time_str()), income_yoy=-0.1, profit_yoy=-0.1, entity_ids=None):
codes = []
start_timestamp = to_pd_timestamp(the_date) - datetime.timedelta(130)
# 营收降,利润降,流动比率低,速动比率低
finance_filter = or_(FinanceFactor.op_income_growth_yoy < income_yoy,
FinanceFactor.net_profit_growth_yoy <= profit_yoy,
FinanceFactor.current_ratio < 0.7,
FinanceFactor.quick_ratio < 0.5)
df = FinanceFactor.query_data(region=Region.CHN, entity_ids=entity_ids, start_timestamp=start_timestamp, filters=[finance_filter],
columns=['code'])
if pd_is_not_null(df):
codes = codes + df.code.tolist()
# 高应收,高存货,高商誉
balance_filter = (BalanceSheet.accounts_receivable + BalanceSheet.inventories + BalanceSheet.goodwill) > BalanceSheet.total_equity
df = BalanceSheet.query_data(region=Region.CHN, entity_ids=entity_ids, start_timestamp=start_timestamp, filters=[balance_filter],
columns=['code'])
if pd_is_not_null(df):
codes = codes + df.code.tolist()
# 应收>利润*1/2
df1 = BalanceSheet.query_data(region=Region.CHN, entity_ids=entity_ids, start_timestamp=start_timestamp,
columns=[BalanceSheet.code, BalanceSheet.accounts_receivable])
if pd_is_not_null(df1):
df1.drop_duplicates(subset='code', keep='last', inplace=True)
df1 = df1.set_index('code', drop=True).sort_index()
df2 = IncomeStatement.query_data(region=Region.CHN, entity_ids=entity_ids, start_timestamp=start_timestamp,
columns=[IncomeStatement.code,
IncomeStatement.net_profit])
if pd_is_not_null(df2):
df2.drop_duplicates(subset='code', keep='last', inplace=True)
df2 = df2.set_index('code', drop=True).sort_index()
if pd_is_not_null(df1) and pd_is_not_null(df2):
codes = codes + df1[df1.accounts_receivable > df2.net_profit / 2].index.tolist()
return list(set(codes))
if __name__ == '__main__':
print(get_subscriber_emails())
| [
"doncat99@gmail.com"
] | doncat99@gmail.com |
9846bc35bfa3391fc47e58f9a2879889ab9fa42e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/17/usersdata/129/7024/submittedfiles/lecker.py | 41f8857dcf14a6238bfb49ef35648401bdecabca | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
a=input('Digite o valor: ')
b=input('Digite o valor: ')
c=input('Digite o valor: ')
d=input('Digite o valor: ')
if a>b and a<b>c:
print ('N')
elif a>b and b<c>d :
print ('N')
elif a>b and c<d:
print ('N')
elif a<b>c and b<c>d:
print ('N')
elif a<b>c and c<d:
print ('N')
elif b<c>d and c<d:
print ('N')
else:
print ('S') | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
5e28c0093f7c78ce2b10fae07e900c56f374c650 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/UpdateEdgeInstanceRequest.py | c43811f3b4e4b5c6c68fbb0615a7607275f92b02 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 2,204 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class UpdateEdgeInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Iot', '2018-01-20', 'UpdateEdgeInstance')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_BizEnable(self):
return self.get_query_params().get('BizEnable')
def set_BizEnable(self,BizEnable):
self.add_query_param('BizEnable',BizEnable)
def get_Spec(self):
return self.get_query_params().get('Spec')
def set_Spec(self,Spec):
self.add_query_param('Spec',Spec)
def get_IotInstanceId(self):
return self.get_query_params().get('IotInstanceId')
def set_IotInstanceId(self,IotInstanceId):
self.add_query_param('IotInstanceId',IotInstanceId)
def get_Tags(self):
return self.get_query_params().get('Tags')
def set_Tags(self,Tags):
self.add_query_param('Tags',Tags)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name) | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
eb25e9c4b639d7a0427e13c3c6e14a6bbfec3069 | dac12c9178b13d60f401c4febff5569af8aa2719 | /cvat/apps/iam/apps.py | 4f6979b7a7c54e461d2602f06d537cb0802cec40 | [
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later"
] | permissive | opencv/cvat | 39dc66ca20f972ba40b79c44d7ce43590dc0b0b5 | 899c9fd75146744def061efd7ab1b1c6c9f6942f | refs/heads/develop | 2023-08-19T04:27:56.974498 | 2023-08-18T09:58:25 | 2023-08-18T09:58:25 | 139,156,354 | 6,558 | 1,887 | MIT | 2023-09-14T12:44:39 | 2018-06-29T14:02:45 | TypeScript | UTF-8 | Python | false | false | 368 | py | from distutils.util import strtobool
import os
from django.apps import AppConfig
from .utils import create_opa_bundle
class IAMConfig(AppConfig):
name = 'cvat.apps.iam'
def ready(self):
from .signals import register_signals
register_signals(self)
if strtobool(os.environ.get("IAM_OPA_BUNDLE", '0')):
create_opa_bundle()
| [
"noreply@github.com"
] | opencv.noreply@github.com |
780ce0fba2444e5a07ea67d6ed14d86a19ea2e4d | ec181b840d3462eb43de5682adde38fa3c0ab570 | /towhee/compiler/backends/__init__.py | 03b640a4d7eb5f6b90d404c245eb933791a56562 | [
"Apache-2.0"
] | permissive | towhee-io/towhee-compiler | 37fc26ec87fc20710d2e1b653b2d83fad0dfc63f | e9a724169ae96d3ae73db732ae3d8b4e9e3f9b5c | refs/heads/main | 2023-05-23T07:59:11.217347 | 2022-09-13T11:32:23 | 2022-09-13T11:32:23 | 514,104,716 | 6 | 6 | Apache-2.0 | 2022-09-13T11:32:24 | 2022-07-15T02:10:13 | Python | UTF-8 | Python | false | false | 416 | py | from typing import Callable
from .backend_compiler import BackendCompiler
from .nebullvm_compiler import NebullvmCompiler
def resolve(name: str) -> Callable:
if name in BackendCompiler.backends:
return BackendCompiler.backends[name]()
from torchdynamo.optimizations.backends import BACKENDS
return BACKENDS[name]
__all__ = [
"BackendCompiler",
"NebullvmCompiler",
"resolve",
]
| [
"shiyu.chen@zilliz.com"
] | shiyu.chen@zilliz.com |
85d78ffc916b78fe38f01989bed8b03fcf69acd7 | e61e664d95af3b93150cda5b92695be6551d2a7c | /vega/core/search_space/__init__.py | 62c21622ed68be03022e38e7e52bef05102fab74 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | huawei-noah/vega | 44aaf8bb28b45f707ed6cd4e871ba70fc0c04846 | 12e37a1991eb6771a2999fe0a46ddda920c47948 | refs/heads/master | 2023-09-01T20:16:28.746745 | 2023-02-15T09:36:59 | 2023-02-15T09:36:59 | 273,667,533 | 850 | 184 | NOASSERTION | 2023-02-15T09:37:01 | 2020-06-20T08:20:06 | Python | UTF-8 | Python | false | false | 503 | py | from vega.core.search_space.ext_hyper_parameter import IntHyperParameter, FloatHyperParameter, \
FloatExpHyperParameter, IntExpHyperParameter, CatHyperParameter, BoolCatHyperParameter, \
AdjacencyListHyperParameter, BinaryCodeHyperParameter, HalfCodeHyperParameter
from .search_space import SearchSpace, SpaceSet
from .condition_types import ConditionTypes, CONDITION_TYPE_MAP
from .ext_conditions import EqualCondition, NotEqualCondition, InCondition
from .range_generator import AdjacencyList
| [
"zhangjiajin@huawei.com"
] | zhangjiajin@huawei.com |
20367adb74f550000ac708a28b1b02c267317161 | ac4ba3868bb87c995772a293360e7cc4e38a3ccc | /one_model_chooser_svm.py | 556e321e0d5d3401ea464633f255dce33b9035b7 | [] | no_license | greggoren/robustness | 8d503370f72c91882e205cd7af00d727997a7906 | d9328bb86d66e4767b6d998125c8ef5a5c540c5e | refs/heads/master | 2021-01-15T13:33:49.425635 | 2018-01-29T20:47:54 | 2018-01-29T20:47:54 | 99,676,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | import preprocess_clueweb as p
import single_model_handler as mh
import evaluator as e
import params
import sys
if __name__=="__main__":
preprocess = p.preprocess()
X,y,queries=preprocess.retrieve_data_from_file(params.data_set_file,params.normalized)
sys.stdout.flush()
number_of_queries = len(set(queries))
evaluator = e.eval()
evaluator.create_index_to_doc_name_dict()
evaluator.remove_score_file_from_last_run()
sys.stdout.flush()
train,validation = preprocess.create_test_train_split_cluweb(queries)
sys.stdout.flush()
X_i,y_i=preprocess.create_data_set(X[train], y[train], queries[train])
sys.stdout.flush()
C_array = [0.1,0.01,0.001]
single_model_handler = mh.single_model_handler(C_array)
single_model_handler.fit_model_on_train_set_and_choose_best_for_competition(X,y,X_i,y_i,validation,queries,evaluator,preprocess)
print("learning is finished")
| [
"grggoren@gmail.com"
] | grggoren@gmail.com |
fc39b2c4d90ba07db630ff735ea9f1d228fce7d5 | 5cea76d53779d466f19a5cf0b51e003586cc4a7b | /python开发技术详解/源文件/09/9.1.6/assert.py | edda342a1eec64a32b4fae7c6342fe51a3b140d9 | [] | no_license | evan886/python | 40152fdb4885876189580141abe27a983d04e04d | d33e996e93275f6b347ecc2d30f8efe05accd10c | refs/heads/master | 2021-06-28T12:35:10.793186 | 2021-05-26T14:33:40 | 2021-05-26T14:33:40 | 85,560,342 | 2 | 1 | null | 2017-10-11T05:31:06 | 2017-03-20T09:51:50 | JavaScript | GB18030 | Python | false | false | 284 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# assert判断逻辑表达式
t = ("hello",)
assert len(t) >= 1
#t = ("hello")
#assert len(t) == 1
# 带message的assert语句
month = 13
assert 1 <= month <= 12, "month errors"
#assert month >= 1 and month <= 12, "month errors" | [
"evan886@gmail.com"
] | evan886@gmail.com |
9ab5da089f7593a59cf5768ebba2bee45058e0c4 | 9b57629a451471c0d38fbc1d29373938e4856ed5 | /pyreto/renderer.py | 5d2e46bc1dd781d7dcc392699a78034a020e2287 | [
"Apache-2.0"
] | permissive | rwl/pylon | 0b9d635f51be6fdf20dbf77b736e3b2f87e76a69 | 916514255db1ae1661406f0283df756baf960d14 | refs/heads/master | 2021-01-01T18:34:08.453649 | 2015-07-07T17:38:09 | 2015-07-07T17:38:09 | 107,383 | 15 | 12 | null | 2015-07-07T17:38:09 | 2009-01-14T16:18:21 | Python | UTF-8 | Python | false | false | 7,339 | py | #------------------------------------------------------------------------------
# Copyright (C) 2007-2010 Richard Lincoln
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
""" Defines a renderer that is executed as a concurrent thread and displays
aspects of the environment.
"""
#------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
import time
import threading
import matplotlib
matplotlib.use('WXAgg')
import numpy
import pylab
from pybrain.rl.environments.renderer import Renderer
#------------------------------------------------------------------------------
# "ExperimentRenderer" class:
#------------------------------------------------------------------------------
class ExperimentRenderer(Renderer):
""" Defines a renderer that displays aspects of a market experiment.
"""
# def __init__(self):
# """ Constructs a new ExperimentRenderer.
# """
# super(ExperimentRenderer, self).__init__()
#--------------------------------------------------------------------------
# "Renderer" interface:
#--------------------------------------------------------------------------
def updateData(self, data):
""" Updates the data used by the renderer.
"""
# pylab.ion()
fig = pylab.figure(1)
n_agent = len(data)
idx = 1
for i, adata in enumerate(data):
saxis = fig.add_subplot(3, n_agent, i + 1)
saxis.plot(adata[0])
idx += 1
aaxis = fig.add_subplot(3, n_agent, i + 1 + n_agent)
aaxis.plot(adata[1])
idx += 1
raxis = fig.add_subplot(3, n_agent, i + 1 + (n_agent * 2))
raxis.plot(adata[2])
idx += 1
pylab.show()
# self._render()
def start(self):
""" Wrapper for Thread.start().
"""
self.draw_plot()
super(ExperimentRenderer, self).start()
def _render(self):
""" Calls the render methods.
"""
# self.reward_line.set_ydata(self.reward_data)
def stop(self):
""" Stops the current rendering thread.
"""
pass
#--------------------------------------------------------------------------
# "ExperimentRenderer" interface:
#--------------------------------------------------------------------------
def draw_plot(self):
""" Initialises plots of the environment.
"""
pylab.ion()
fig = pylab.figure(1)
reward_axis = fig.add_subplot(1, 1, 1)
reward_lines = reward_axis.plot([0.0, 1.0], [0.0, 1.0], "mx-")
# self.reward_line = reward_lines[0]
pylab.draw()
#------------------------------------------------------------------------------
# "ParticipantRenderer" class:
#------------------------------------------------------------------------------
class ParticipantRenderer(Renderer):
""" Defines a renderer that displays aspects of a market participant's
environment.
"""
def __init__(self, outdim, indim, intermax=1000):
""" Initialises a new ParticipantRenderer instance.
"""
super(ParticipantRenderer, self).__init__()
# self.dataLock = threading.Lock()
self.stopRequest = False
self.updates = 0
self.state_data = numpy.zeros((outdim, intermax), float)
self.action_data = numpy.zeros((indim, intermax), float)
self.reward_data = numpy.zeros((1, intermax), float)
self.state_lines = []
self.action_lines = []
self.reward_line = None
#--------------------------------------------------------------------------
# "Renderer" interface:
#--------------------------------------------------------------------------
def updateData(self, state_data, action_data, reward_data):
""" Updates the data used by the renderer.
"""
# self.dataLock.acquire()
self.state_data[:, self.updates] = state_data
self.action_data[:, self.updates] = action_data
self.reward_data[0, self.updates] = reward_data
self.updates += 1
self._render()
# self.dataLock.release()
def start(self):
""" Wrapper for Thread.start().
"""
self.draw_plot()
super(ParticipantRenderer, self).start()
# def stop(self):
# """ Stops the current thread.
# """
# pass
# self.dataLock.acquire()
# self.stopRequest = True
# self.dataLock.release()
#--------------------------------------------------------------------------
# "ParticipantRenderer" interface:
#--------------------------------------------------------------------------
def draw_plot(self):
""" Initialises plots of the environment.
"""
pylab.ion()
fig = pylab.figure(1)
# State plot.
# state_axis = fig.add_subplot(3, 1, 1) # numrows, numcols, fignum
# state_axis.title = 'State'
# state_axis.xlabel = 'Time (hours)'
# state_axis.grid = True
# for i in range(self.state_data.shape[0]):
# lines = state_axis.plot(self.state_data[i, 0], "g+-")
# self.state_lines.append(lines[0])
# Action plot.
# action_axis = fig.add_subplot(3, 1, 2)
# action_axis.title = 'Action'
# action_axis.xlabel = 'Time (hours)'
# action_axis.ylabel = 'Price ($/MWh)'
# action_axis.grid = True
# for i in range(self.action_data.shape[0]):
# lines = action_axis.plot(self.action_data[i, 0], "ro-")
# self.action_lines.append(lines[0])
# Reward plot.
reward_axis = fig.add_subplot(3, 1, 3)
# reward_axis.title = 'Reward'
# reward_axis.xlabel = 'Time (hours)'
# reward_axis.ylabel = 'Earnings ($)'
# reward_axis.grid(True)
reward_lines = reward_axis.plot(self.reward_data[0, 0], [0], "mx-")
self.reward_line = reward_lines[0]
pylab.draw()
def _render(self):
""" Calls the render methods.
"""
# while not self.stopRequest:
# self.dataLock.acquire()
# for i, line in enumerate(self.state_lines):
# ydata = self.state_data[i, :]
# line.set_ydata(ydata)
#
# for j, line in enumerate(self.action_lines):
# ydata = self.action_data[j, :]
# line.set_ydata(ydata)
self.reward_line.set_ydata(self.reward_data)
# self.dataLock.release()
# time.sleep(0.05)
# self.stopRequest = False
# EOF -------------------------------------------------------------------------
| [
"r.w.lincoln@gmail.com"
] | r.w.lincoln@gmail.com |
7bfc33fa5570e49a7df08895a50f1226f8bcf524 | e9be8f5c0b4468d29c798a67a56aa15be504a723 | /home/migrations/0003_customtext_name.py | a65a9fbb96ed9638e8ef5a752e341131475ddd4f | [] | no_license | crowdbotics-apps/test-23226 | a73aa026e079de4c3419d85d214dbce66b9137fc | c2abc5faac4eba99f2c60a3c2fd2b382700266f3 | refs/heads/master | 2023-01-21T00:51:18.149767 | 2020-12-07T02:29:48 | 2020-12-07T02:29:48 | 319,181,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # Generated by Django 2.2.17 on 2020-12-07 02:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("home", "0002_load_initial_data"),
]
operations = [
migrations.AddField(
model_name="customtext",
name="name",
field=models.TextField(blank=True, null=True),
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
04341bccd4121da621985612485cabb3d56736b4 | 4235c0baec2f75a9ab74ca2b88f9d05559db7969 | /posts/migrations/0010_auto_20190422_1123.py | 98861078292488d5c2a5d8ba7eb6a539183ee499 | [] | no_license | yooseungju/Fake-Instagram | 364fc31337449274e0373cce86ebf171cb7c3271 | d30767aa4f0705150d4fb430def92ae514565969 | refs/heads/master | 2020-05-07T21:44:51.621461 | 2019-04-22T08:36:59 | 2019-04-22T08:36:59 | 180,916,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | # Generated by Django 2.1.8 on 2019-04-22 11:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0009_auto_20190416_1413'),
]
operations = [
migrations.CreateModel(
name='Hashtag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(blank=True, unique=True)),
],
),
migrations.AddField(
model_name='post',
name='hashtags',
field=models.ManyToManyField(to='posts.Hashtag'),
),
]
| [
"seung338989@gmail.com"
] | seung338989@gmail.com |
165df46c501326550a09e354ab5dd3e63c1af15e | f10d45aecbfccb3f469ab0c4ae55fc0f256c9004 | /Functions/compile.py | 666455168649a3d7f56313f12ddcc095a7f07e45 | [] | no_license | Do-code-ing/Python_Built-ins | c34c1cea19a2cef80ab3a16d050e8825af0feb59 | 03b2f277acde4fce00bb521e3a0b8c0469b39879 | refs/heads/master | 2023-07-29T15:30:00.693005 | 2021-09-04T18:48:18 | 2021-09-04T18:48:18 | 354,467,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,649 | py | # compile(source, filename, mode, flags=0, dont_inherit=False, optimize=-1)
# source를 편집하거나 번역하여 code나 AST object로 만든다.
# eval, exec을 호출할 때, 실제 처리과정에서 과부하를 줄 수 있기에 compile을 사용
# source : 문자열이나, bytes, AST object여야 한다.
# filename : code를 읽은 file 이름을 제공하면 된다.
# code가 문자열이라면 <string>을 제공하면 된다.
# mode : compile하려는 code 종류를 지정하면 된다.
# 예를 들어, source가 문장의 sequence로 구성되어 있다면 'exec',
# 단일 표현식으로 구성되어 있다면 'eval',
# 단일 대화형으로 구성되어 있다면 'single'.
# 마지막 "single"의 경우, 표현식이 None을 제외한 값을 평가하여 print된다.
# flags, dont_inherit : source를 compile할 때 'future statements'가 어떠한 영향을 미치는지 제어한다.
# 기본값 = 0
# optimize : compiler의 최적화 수준을 지정한다.
# 기본값 = -1
# 조금 더 자세한 내용을 알고 싶다면 아래의 주소에 들어가 보도록 하자.
# https://docs.python.org/ko/3/library/functions.html#compile
# https://www.programiz.com/python-programming/methods/built-in/compile
import ast
statement_a = "int_a + 3"
statement_b = "result = int_a + 3"
int_a = 10
statement_c = open(".\\study_builtins\\compile_doc.py")
filename_a = "<string>"
# 기본값으로 compile
print(compile(statement_a, filename_a, "eval"))
# <code object <module> at 0x00000261660065B0, file "<string>", line 1>
print(compile(statement_b, "statement_b", "single"))
# <code object <module> at 0x00000261660065B0, file "statement_b", line 1>
print(compile(statement_c.read(), "formula_doc", "exec"))
# <code object <module> at 0x00000261660065B0, file "formula_doc", line 1>
# compiler options과 future features, optimize를 넣어 compile
print(compile(statement_a, filename_a, "exec", ast.PyCF_ALLOW_TOP_LEVEL_AWAIT, 0, 2))
# <code object <module> at 0x00000261660065B0, file "<string>", line 1>
# 참고
# 'single' 또는 'eval' mode로 여러 줄 코드를 가진 문자열을 컴파일할 때,
# 적어도 하나의 개행 문자로 입력을 끝내야 한다.
# 이것은 code 모듈에서 문장이 불완전한지 완전한지를 쉽게 탐지하게 하기 위함이다.
# 경고
# 파이썬의 AST compiler에서 스택 깊이 제한으로 인해,
# AST object로 compile할 때 충분히 크고 복잡한 문자열로 인해 python interpreter가 crash를 일으킬 수 있다. | [
"zxcvbn658@naver.com"
] | zxcvbn658@naver.com |
253b7329e6fc95b64e65cbc96b5cd33556a88bc3 | 9c315e3762961668a1fe58ad811ae87c5fbf7539 | /apertium-tools/getBookNames.py | f309abe2db7601f668b301f996622c39c83b04b0 | [] | no_license | frankier/apertium | f2b893115c413203b1194e5c0d4feb0adf2b1b3e | d3f5515bf2455f3046314a62ea564457bcf504b8 | refs/heads/gnulib | 2021-01-20T21:00:53.139135 | 2016-05-27T17:30:01 | 2016-05-27T17:30:01 | 59,847,975 | 0 | 1 | null | 2016-07-07T12:39:01 | 2016-05-27T16:21:14 | HTML | UTF-8 | Python | false | false | 1,248 | py | #!/usr/bin/env python3
import os
import sys
import pprint
import argparse
#!/usr/bin/env python
#langs = ["xal", "chv", "tat", "kaz", "kaz2", "alt", "bua", "kir", "tgk", "tyv", "kaa", "gag", "kum", "aze", "kjh"] #POSSIBLE languages, kaz2 is a second kaz translation of the Bible
def todict(langs):
langData = {} #this is a dictionary
for lang in langs:
langData[lang] = {}
with open("%s.dat" % lang) as databaseFile:
for line in databaseFile :
if line.strip():
(english, target) = line.split(',')
langData[lang][english] = target.strip()
return langData
def main():
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='This script generates a dictionary from a .dat file in trunk/apertium-tools')
parser.add_argument('datfile', metavar='i', help='Languages (3 letter iso code) separated by a comma, make sure the corresponding .dat files exist')
args = vars(parser.parse_args())
if "," in args['datfile']:
langs=args['datfile'].split(",")
else:
langs=[args['datfile']]
langDict=todict(langs)
pprint.pprint(langDict)
main()
| [
"unhammer@72bbbca6-d526-0410-a7d9-f06f51895060"
] | unhammer@72bbbca6-d526-0410-a7d9-f06f51895060 |
0137356885a920c1fd4914505128d497798afb4f | ac0894b411507bfd027696b6bf11b5e384ed68fc | /need-to-do/python3------download-problem--of--leetcode/796.rotate-string.py | 72980badbb5ee3be57bade30782a99641d9abe38 | [] | no_license | mkzpd/leetcode-solution | 1d19554628c34c74012fa52582c225e6dccb345c | 60c9b218683bcdee86477a910c58ec702185c726 | refs/heads/master | 2020-05-31T05:56:48.985529 | 2019-09-20T09:10:49 | 2019-09-20T09:10:49 | 190,128,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | #
# @lc app=leetcode id=796 lang=python3
#
# [796] Rotate String
#
# https://leetcode.com/problems/rotate-string/description/
#
# algorithms
# Easy (49.30%)
# Total Accepted: 47.2K
# Total Submissions: 95.8K
# Testcase Example: '"abcde"\n"cdeab"'
#
# We are given two strings, A and B.
#
# A shift on A consists of taking string A and moving the leftmost character to
# the rightmost position. For example, if A = 'abcde', then it will be 'bcdea'
# after one shift on A. Return True if and only if A can become B after some
# number of shifts on A.
#
#
# Example 1:
# Input: A = 'abcde', B = 'cdeab'
# Output: true
#
# Example 2:
# Input: A = 'abcde', B = 'abced'
# Output: false
#
#
# Note:
#
#
# A and B will have length at most 100.
#
#
#
class Solution:
def rotateString(self, A: str, B: str) -> bool:
| [
"sodgso262@gmail.com"
] | sodgso262@gmail.com |
eb32725b060733641d92539f1dad81793d9e1b55 | e7bba3dd662bf2778c36a406f72ee93b2ea05e11 | /CardinalityEstimationTestbed/Synthetic/deepdb/deepdb_job_ranges/ensemble_compilation/physical_db.py | 49aaa09f09166393579b33846891d7c049f6f700 | [
"MIT"
] | permissive | TsinghuaDatabaseGroup/AI4DBCode | 37e45b176bc94e77fe250ea45f0ad7b9054c7f11 | a8989bfadcf551ee1dee2aec57ef6b2709c9f85d | refs/heads/master | 2023-07-07T05:42:15.590000 | 2023-07-04T01:04:15 | 2023-07-04T01:04:15 | 217,175,047 | 53 | 35 | null | 2023-06-20T13:00:17 | 2019-10-24T00:03:14 | Scala | UTF-8 | Python | false | false | 4,011 | py | import pandas as pd
import psycopg2
from ensemble_compilation.utils import gen_full_join_query, print_conditions
class DBConnection:
def __init__(self, db='postgres', db_user='postgres', db_host="/var/run/postgresql", db_password="jintao",
db_port="5432"):
self.db_user = db_user
self.db_password = db_password
self.db_host = db_host
self.db_port = db_port
self.db = db
def vacuum(self):
connection = psycopg2.connect(user=self.db_user,
password=self.db_password,
host=self.db_host,
port=self.db_port,
database=self.db)
old_isolation_level = connection.isolation_level
connection.set_isolation_level(0)
query = "VACUUM"
cursor = connection.cursor()
cursor.execute(query)
connection.commit()
connection.set_isolation_level(old_isolation_level)
def get_dataframe(self, sql):
connection = psycopg2.connect(user=self.db_user,
password=self.db_password,
host=self.db_host,
port=self.db_port,
database=self.db)
return pd.read_sql(sql, connection)
def submit_query(self, sql):
"""Submits query and ignores result."""
connection = psycopg2.connect(user=self.db_user,
password=self.db_password,
host=self.db_host,
port=self.db_port,
database=self.db)
cursor = connection.cursor()
cursor.execute(sql)
connection.commit()
def get_result(self, sql):
"""Fetches exactly one row of result set."""
connection = psycopg2.connect(user=self.db_user,
password=self.db_password,
host=self.db_host,
port=self.db_port,
database=self.db)
cursor = connection.cursor()
cursor.execute(sql)
record = cursor.fetchone()
result = record[0]
if connection:
cursor.close()
connection.close()
return result
def get_result_set(self, sql, return_columns=False):
"""Fetches all rows of result set."""
connection = psycopg2.connect(user=self.db_user,
password=self.db_password,
host=self.db_host,
port=self.db_port,
database=self.db)
cursor = connection.cursor()
cursor.execute(sql)
rows = cursor.fetchall()
columns = [desc[0] for desc in cursor.description]
if connection:
cursor.close()
connection.close()
if return_columns:
return rows, columns
return rows
class TrueCardinalityEstimator:
"""Queries the database to return true cardinalities."""
def __init__(self, schema_graph, db_connection):
self.schema_graph = schema_graph
self.db_connection = db_connection
def true_cardinality(self, query):
full_join_query = gen_full_join_query(self.schema_graph, query.relationship_set, query.table_set, "JOIN")
where_cond = print_conditions(query.conditions, seperator='AND')
if where_cond != "":
where_cond = "WHERE " + where_cond
sql_query = full_join_query.format("COUNT(*)", where_cond)
cardinality = self.db_connection.get_result(sql_query)
return sql_query, cardinality
| [
"zhouxuan19@mails.tsinghua.edu.cn"
] | zhouxuan19@mails.tsinghua.edu.cn |
9edf384e79dea79baa8bb61cf6401ef072f974a0 | a03b30ee77b49e19a72b647e984b98f878c2847a | /Anaconda-files/Programs_13c.py | 86e9478978c292b6db68c6833bab67a5bbdf74f5 | [
"BSD-2-Clause"
] | permissive | SSalaPla/dynamical-systems-with-applications-using-python | d47f46dfbe7195d2446cdee7f874cc3e4a5ab90a | c80582ae3559230d12e2aee15f94c465e367fdda | refs/heads/master | 2021-05-03T16:00:31.561907 | 2018-02-05T15:16:13 | 2018-02-05T15:16:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | # Programs_13c: The Leslie matrix. See Example 4.
# Compute the population distribution after 50 years.
# Determine the eigenvalues and eigenvectors of a Leslie matrix.
import numpy as np
import numpy.linalg as LA
L=np.array([[0,3,1],[0.3,0,0],[0,0.5,0]])
X0=np.array([[1000],[2000],[3000]])
X_50=np.dot(LA.matrix_power(L,50),X0)
X_50=X_50.round()
print('X(50)=',X_50)
dL,VL=LA.eig(L)
print('Eigenvalues=',dL)
print('Eigenvectors=',VL) | [
"samuel.dibella@gmail.com"
] | samuel.dibella@gmail.com |
82958dfb2f0172c53857321c5004392e3ea3c047 | c5d68f58c9523257a8b41954553f5cff2cd5f487 | /Secao_06_Lista_Ex_62e/ex_56.py | bd2bccb3ca2646da55f1e255bafaab409a0bf57a | [] | no_license | SouzaCadu/guppe | 04bfcde82d4404eb9ec795006c6931ba07dc72b6 | 1f8a672230c5c27712f522e1e34516591c012453 | refs/heads/master | 2023-03-13T01:32:51.019871 | 2021-02-25T17:02:59 | 2021-02-25T17:02:59 | 320,908,119 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | """
faça um programa que some todos os números primos abaixo de 2MM
Observações
1) Existe um teorema na matemática que diz que se um número não possui divisores até sua raiz quadrada então ele é primo, por isso o num**0.5
2) Este código supõe que o número n inserido será maior que 0, por isso a soma já começa = 2, uma vez que 2 é primo. E só passa a executar a verificação se n>1, caso contrário é impresso apenas 2.
"""
contador = 1
num = 3
soma = 2
referencia = 2000000
while num < referencia:
primo = True
verificador = 3
while verificador <= num ** 0.5 and primo:
if num % verificador == 0:
primo = False
verificador += 2
if primo:
contador += 1
soma = soma + num
num += 2
# print(f"{num}", end=" ")
print(f'\n')
print(f"A soma dos {num} números primos é {soma}.")
| [
"cadu.souza81@gmail.com"
] | cadu.souza81@gmail.com |
016beda8449388bcc4a78f821ef89a6b1d737a78 | aee00a21081bb0d6a2ed96218d650663294dd0dc | /pyLibrary/queries/es_query_aggop.py | 186257e87b901ca55ba06d4d1801ea4b85e926b0 | [] | no_license | klahnakoski/Datazilla2ElasticSearch | 8c386b0ed3f52412981cae852e7ecb66f17b43cb | 9675cbdb5fb0428a247f38e7088a8f42f19f3e20 | refs/heads/master | 2021-01-22T09:33:40.958865 | 2014-10-20T20:19:35 | 2014-10-20T20:19:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,106 | py | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import unicode_literals
from __future__ import division
from ..collections.matrix import Matrix
from ..collections import AND
from ..structs.wraps import listwrap
from ..struct import unwrap
from ..queries import es_query_util
from ..queries.es_query_util import aggregates, fix_es_stats, buildESQuery
from ..queries.filters import simplify
from ..queries import MVEL
from ..queries.cube import Cube
def is_aggop(query):
if not query.edges:
return True
return False
def es_aggop(es, mvel, query):
select = listwrap(query.select)
esQuery = buildESQuery(query)
isSimple = AND(aggregates[s.aggregate] == "count" for s in select)
if isSimple:
return es_countop(es, query) # SIMPLE, USE TERMS FACET INSTEAD
value2facet = dict() # ONLY ONE FACET NEEDED PER
name2facet = dict() # MAP name TO FACET WITH STATS
for s in select:
if s.value not in value2facet:
if MVEL.isKeyword(s.value):
unwrap(esQuery.facets)[s.name] = {
"statistical": {
"field": s.value
},
"facet_filter": simplify(query.where)
}
else:
unwrap(esQuery.facets)[s.name] = {
"statistical": {
"script": mvel.compile_expression(s.value, query)
},
"facet_filter": simplify(query.where)
}
value2facet[s.value] = s.name
name2facet[s.name] = value2facet[s.value]
data = es_query_util.post(es, esQuery, query.limit)
matricies = {s.name: Matrix(value=fix_es_stats(unwrap(data.facets)[s.name])[aggregates[s.aggregate]]) for s in select}
cube = Cube(query.select, [], matricies)
cube.frum = query
return cube
def es_countop(es, mvel, query):
"""
RETURN SINGLE COUNT
"""
select = listwrap(query.select)
esQuery = buildESQuery(query)
for s in select:
if MVEL.isKeyword(s.value):
esQuery.facets[s.name] = {
"terms": {
"field": s.value,
"size": query.limit,
},
"facet_filter":{"exists":{"field":s.value}}
}
else:
# COMPLICATED value IS PROBABLY A SCRIPT, USE IT
esQuery.facets[s.name] = {
"terms": {
"script_field": mvel.compile_expression(s.value, query),
"size": 200000
}
}
data = es_query_util.post(es, esQuery, query.limit)
matricies = {}
for s in select:
matricies[s.name] = Matrix(value=data.hits.facets[s.name].total)
cube = Cube(query.select, query.edges, matricies)
cube.frum = query
return cube
| [
"klahnakoski@mozilla.com"
] | klahnakoski@mozilla.com |
a047aee51e0337ee44f1cded42f2c410b3866aad | 929fba6e9f74cc109d98efdc0f32fa4cadbd4def | /Mathematics/1161.py | ceab72e57e789f3cd59716a281a2cb5cfb4428e1 | [
"MIT"
] | permissive | LorranSutter/URI-Online-Judge | 2be9d95a27e52fad6bb1ae189d9bb39c72a43228 | 01822b6124a535aeecbdbdad616b61f2d55dd8d4 | refs/heads/master | 2023-01-28T00:15:34.569999 | 2023-01-24T03:08:38 | 2023-01-24T03:08:38 | 141,393,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | fats = {
'0':1,
'1':1,
'2':2,
'3':6,
'4':24,
'5':120,
'6':720,
'7':5040,
'8':40320,
'9':362880,
'10':3628800,
'11':39916800,
'12':479001600,
'13':6227020800,
'14':87178291200,
'15':1307674368000,
'16':20922789888000,
'17':355687428096000,
'18':6402373705728000,
'19':121645100408832000,
'20':2432902008176640000
}
while True:
try:
M, N = input().split()
print(fats[M] + fats[N])
except:
break
| [
"lorransutter1836@gmail.com"
] | lorransutter1836@gmail.com |
4b874967bf34bdc5d672c7193fca61f4f6696d35 | 854394f4148e7bee8cd3c6d2a01e97ffbf772103 | /0x02-python-import_modules/2-args.py | a3ea363e7986241d9426c33948fa9622f6fa3db5 | [] | no_license | garethbrickman/holbertonschool-higher_level_programming | cb3ccb864102d62af72b5e86d53638bd899bfabb | 05d65c6c89008cb70cbc1ada5bb9c8ed7a2733e9 | refs/heads/master | 2021-07-10T08:32:23.397388 | 2020-10-15T18:40:55 | 2020-10-15T18:40:55 | 207,379,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | #!/usr/bin/python3
if __name__ == '__main__':
from sys import argv
argc = len(argv)
if argc < 2:
print("0 arguments.")
for i in range(1, argc):
if argc < 3:
print("1 argument:")
print("1: {}".format(argv[1]))
else:
if i == 1:
print("{} arguments:".format(argc-1))
print("{}: {}".format(i, argv[i]))
| [
"977@holbertonschool.com"
] | 977@holbertonschool.com |
7065e46a10f60850b0d2b2f007bfe93dcacb0bec | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_206/1318.py | e3cf5015dc7050102c3190364333952ee4a6a424 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def main(inp):
D, N = inp.split()
D, N = int(D), int(N)
horses = []
for i in range(N):
K, S = input().split()
K, S = int(K), int(S)
horses.append((K, S))
slowest_time = 0
for horse in horses:
time = (D - horse[0]) / horse[1]
if time > slowest_time:
slowest_time = time
return "{0:.6f}".format(D / slowest_time)
if __name__ == '__main__':
testcases = int(input())
for case in range(testcases):
inp = input()
print("Case #{}: {}".format(case+1, main(inp)))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
e7ef237a242c1df8dd3125dd680dfc3a251e39e4 | 3fda3ff2e9334433554b6cf923506f428d9e9366 | /hipeac/migrations/0018_auto_20190131_1245.py | 05e389d20c7a24fa3c385a730a8607cce3faedd6 | [
"MIT"
] | permissive | CreativeOthman/hipeac | 12adb61099886a6719dfccfa5ce26fdec8951bf9 | 2ce98da17cac2c6a87ec88df1b7676db4c200607 | refs/heads/master | 2022-07-20T10:06:58.771811 | 2020-05-07T11:39:13 | 2020-05-07T11:44:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | # Generated by Django 2.1.5 on 2019-01-31 11:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("hipeac", "0017_membershiprequest"),
]
operations = [
migrations.RemoveField(model_name="membershiprequest", name="status",),
migrations.AddField(
model_name="membershiprequest", name="accepted", field=models.BooleanField(default=None, null=True),
),
]
| [
"eneko.illarramendi@ugent.be"
] | eneko.illarramendi@ugent.be |
3d42f04e1dbdfd001aec0c19bf420821cdefd8be | d89eea893b1491b545075bc16eb63b9e99aabf45 | /store/urls.py | 6f8661c1157fed563f0d8f73dbae06037e48e4c3 | [] | no_license | kkthecompguy/allsafeshop | ed6d19555e3bfffe54812a399c62380a5189c229 | 836919d6652fccc72ad95c097f627b82d6d2504e | refs/heads/master | 2023-02-06T06:34:16.504053 | 2021-01-02T16:45:11 | 2021-01-02T16:45:11 | 326,227,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | from django.urls import path
from .views import store, cart, checkout, add_to_cart, place_order
app_name = "store"
urlpatterns = [
path('', store, name='store'),
path('cart', cart, name='cart'),
path('checkout', checkout, name='checkout'),
path('add-to-cart', add_to_cart, name='add-to-cart'),
path('place-order', place_order, name='place-order'),
] | [
"hnkhosamomollo3@gmail.com"
] | hnkhosamomollo3@gmail.com |
3ae23f556592d59e06b9d9779437a55a17712b25 | 14f4d045750f7cf45252838d625b2a761d5dee38 | /argo/test/test_io_k8s_api_storage_v1beta1_csi_node_list.py | d6234ac236a906689b94489a832c768f4bfb9f87 | [] | no_license | nfillot/argo_client | cf8d7413d728edb4623de403e03d119fe3699ee9 | c8cf80842f9eebbf4569f3d67b9d8eff4ba405fa | refs/heads/master | 2020-07-11T13:06:35.518331 | 2019-08-26T20:54:07 | 2019-08-26T20:54:07 | 204,546,868 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.14.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import argo
from models.io_k8s_api_storage_v1beta1_csi_node_list import IoK8sApiStorageV1beta1CSINodeList # noqa: E501
from argo.rest import ApiException
class TestIoK8sApiStorageV1beta1CSINodeList(unittest.TestCase):
"""IoK8sApiStorageV1beta1CSINodeList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testIoK8sApiStorageV1beta1CSINodeList(self):
"""Test IoK8sApiStorageV1beta1CSINodeList"""
# FIXME: construct object with mandatory attributes with example values
# model = argo.models.io_k8s_api_storage_v1beta1_csi_node_list.IoK8sApiStorageV1beta1CSINodeList() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"nfillot@weborama.com"
] | nfillot@weborama.com |
c23ec8c67e4e0266f52cc21a90f42748f9f6b3d7 | 124cabad0cbf1e7249958d087d666231623444dc | /monkeys/post_image.py | d7005028aa1fd7ca8605a23c621b46a87f2eb57d | [] | no_license | shish/code-portfolio | e7bfe0f2f8c357f124e942a4e836dc06f33bede2 | a33d65011f26874f0626b4c9ae50affce36c407a | refs/heads/master | 2023-07-07T14:12:07.883334 | 2023-06-21T11:00:54 | 2023-06-21T11:00:54 | 4,450,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,299 | py | # tribes/civicboom/post_image.py
from tribes.civicboom import CivicboomMonkey
class PostImageMonkey(CivicboomMonkey):
def run(self):
self.log_in_as("unittest")
# create an article
response = self.post(
"/contents.json",
params={
'_authentication_token': self.auth_token,
'title': "Attachment test",
'type': "draft",
'content': "Media Incoming",
},
status=201
)
my_article_id = response.json["data"]["id"]
# upload an attachment
self.post(
"/contents/%d.json" % my_article_id,
params={
'_method': 'PUT',
'_authentication_token': self.auth_token,
'media_caption': "A random image",
'media_credit': "Test Monkey",
},
upload_files = [
("media_file", "landscape.png", self.generate_image((400, 300), 42))
],
)
# publish the article
self.post(
"/contents/%d.json" % my_article_id,
params={
'_authentication_token': self.auth_token,
'_method': 'PUT',
'type': "article",
}
)
| [
"shish@shishnet.org"
] | shish@shishnet.org |
ac519a5a420f5a5d46df514bc6e310ef24fdad7c | 747255e913980d401341f164366a67d2a5c302af | /video_slomo.py | 76cef827b46a4bc4d055fb691a9a5385d6cf90ce | [] | no_license | zhaoyuzhi/Auto-Crop-Videos-and-Blur-Modelling | 5365e5f4eea6521e2251ce41f57b6d30223b961d | 345a67316483b1c2c40e63b0a43b87d6de410d51 | refs/heads/master | 2022-12-03T05:34:24.333430 | 2020-08-29T03:50:36 | 2020-08-29T03:50:36 | 255,800,343 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,939 | py | import argparse
import os
import cv2
import numpy as np
import VideoFrameConversion as vfc
import SuperSloMo as vslomo
def get_files(path):
    """Walk a directory tree and return the full path of every file found."""
    full_paths = []
    for root, _dirs, filenames in os.walk(path):
        full_paths.extend(os.path.join(root, name) for name in filenames)
    return full_paths
def get_jpgs(path):
    """Walk a directory tree and return only the bare file names (no paths)."""
    names = []
    for _root, _dirs, filenames in os.walk(path):
        names.extend(filenames)
    return names
def text_save(content, filename, mode='a'):
    """Save a list to a text file, one str()-converted item per line.

    Args:
        content: sequence of items; each is converted with str().
        filename: path of the output file.
        mode: file-open mode, 'a' to append (default) or 'w' to overwrite.
    """
    # Context manager guarantees the handle is closed even if a write fails
    # (the original left the file open on exception).
    with open(filename, mode) as file:
        for item in content:
            file.write(str(item) + '\n')
def check_path(path):
    """Create the directory `path` (and parents) if it does not exist."""
    # exist_ok avoids the TOCTOU race between an exists() check and makedirs()
    # that the original exists()/makedirs() pair had.
    os.makedirs(path, exist_ok=True)
def get_statics(opt, time, fps):
    """Return the frame index at the center of each interval of the clip.

    Args:
        opt: options object; only opt.interval_second (seconds per interval) is read.
        time: total clip duration in seconds.
        fps: frames per second of the clip.

    Returns:
        List of int frame indices, one per whole interval that fits in `time`.
    """
    # Number of whole intervals that fit in the clip duration
    num_intervals = int(time / opt.interval_second)
    print('Current center interval frames equal to:', num_intervals)
    # Timestamp (seconds) of the center of each interval
    center_times = [opt.interval_second * (k + 0.5) for k in range(num_intervals)]
    print('Time list:', center_times)
    # Convert each center timestamp to a frame index
    center_frames = [int(t * fps) for t in center_times]
    print('Frame list:', center_frames)
    return center_frames
def get_interp_video(opt):
    """Interpolate a single video with Super SloMo and save it as .mp4.

    Reads the clip at opt.videopath, resizes frames to
    (opt.resize_w, opt.resize_h), inserts interpolated frames between each
    consecutive pair, and writes the result into opt.savepath at
    round(source_fps) * opt.exposure_type frames per second.
    """
    print(opt.videopath)
    fps, frames, time, width, height = vfc.get_video_info(opt.videopath)
    # Output fps is a multiple of the source fps so playback speed is preserved
    fps = round(fps) * opt.exposure_type
    width = opt.resize_w
    height = opt.resize_h
    print("corrected video fps =", fps)
    print("corrected video width =", width)
    print("corrected video height =", height)
    # create a video writer
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    print('Saving folder:', opt.savepath)
    check_path(opt.savepath)
    savepath = os.path.join(opt.savepath, opt.videopath.split('/')[-1] + '_interp.mp4')
    video = cv2.VideoWriter(savepath, fourcc, fps, (width, height))
    # create Super Slomo network
    interp, flow, back_warp = vslomo.create_slomonet(opt)
    # read and write
    vc = cv2.VideoCapture(opt.videopath)
    # whether it is truly opened
    if vc.isOpened():
        rval, frame = vc.read()
    else:
        rval = False
    print(rval)
    # save frames
    c = 1
    while rval:
        # interpolation
        last_frame = frame  # "last_frame" keeps the frame from the previous loop
        last_frame = cv2.resize(last_frame, (width, height))
        c = c + 1
        cv2.waitKey(1)
        rval, frame = vc.read()  # "frame" holds the frame of the current time
        if frame is None:
            # End of the stream: duplicate the final frame so the last pair
            # can still be interpolated.
            frame = last_frame
        frame = cv2.resize(frame, (width, height))
        interp_frames = vslomo.save_inter_frames(last_frame, frame, opt, interp, flow, back_warp)
        # write frames
        video.write(last_frame)
        # BUGFIX: the original formatted an undefined name "i" here, which
        # raised NameError at runtime; report the frame counter instead.
        print('Original frame %d is saved' % (c - 1))
        for k, interp_frame in enumerate(interp_frames):
            video.write(interp_frame)
            print('Frame %d: interpolated frames are saved %d times' % (c - 1, k + 1))
    # release the video
    vc.release()
    video.release()
    cv2.destroyAllWindows()
    print('Released!')
def get_interp_videos(opt):
    """Interpolate every video in ``opt.video_folder_path`` with Super SloMo.

    Bugfix: the progress prints used an undefined name ``i`` (NameError
    unless a global ``i`` happened to exist); the frame counter ``c`` is
    printed as the interval index instead.
    """
    # NOTE(review): only the first 11 videos are processed — confirm this
    # slice is intentional.
    videolist = get_files(opt.video_folder_path)[:11]
    print(videolist)
    for item, videopath in enumerate(videolist):
        # video statistics
        fps, frames, time, width, height = vfc.get_video_info(videopath)
        fps = round(fps) * opt.exposure_type
        width = opt.resize_w
        height = opt.resize_h
        print("corrected video fps =", fps)
        print("corrected video width =", width)
        print("corrected video height =", height)
        # create a video writer
        fourcc = cv2.VideoWriter_fourcc('m','p','4','v')
        print('Saving folder:', opt.savepath)
        check_path(opt.savepath)
        savepath = os.path.join(opt.savepath, videopath.split('/')[-1] + '_interp.mp4')
        video = cv2.VideoWriter(savepath, fourcc, fps, (width, height))
        # create Super Slomo network (rebuilt per video, as before)
        interp, flow, back_warp = vslomo.create_slomonet(opt)
        # read and write
        vc = cv2.VideoCapture(videopath)
        # whether it is truly opened
        if vc.isOpened():
            rval, frame = vc.read()
        else:
            rval = False
        print(rval)
        # save frames
        c = 1
        while rval:
            # interpolation
            last_frame = frame  # "last_frame" saves frame from last loop
            last_frame = cv2.resize(last_frame, (width, height))
            c = c + 1
            cv2.waitKey(1)
            rval, frame = vc.read()  # "frame" saves frame of current time
            if frame is None:
                frame = last_frame
            frame = cv2.resize(frame, (width, height))
            interp_frames = vslomo.save_inter_frames(last_frame, frame, opt, interp, flow, back_warp)
            # write frames
            video.write(last_frame)
            print('This is the %d-th video %d-th interval. Original frame %d is saved' % (item + 1, c - 1, c - 1))
            for k, interp_frame in enumerate(interp_frames):
                video.write(interp_frame)
                print('This is the %d-th video %d-th interval. Interpolated frames are saved %d times' % (item + 1, c - 1, k + 1))
        # release the video
        vc.release()
        video.release()
        cv2.destroyAllWindows()
        print('Released!')
if __name__ == "__main__":
    # Define parameters
    parser = argparse.ArgumentParser()
    parser.add_argument('--interval_second', type = int, default = 10, help = 'interval of second')
    parser.add_argument('--crop_range', type = int, default = 1, help = 'the time range (second) for true video clip')
    parser.add_argument('--target_range', type = int, default = 1, help = 'the time range (second) for output video clip')
    parser.add_argument('--exposure_type', type = int, default = 40, help = 'e.g. exposure_type=8 means exposure time 1/8 seconds')
    parser.add_argument('--resize_w', type = int, default = 2560, help = 'resize_w') # 3840, 1920
    parser.add_argument('--resize_h', type = int, default = 1440, help = 'resize_h') # 2160, 1080
    parser.add_argument('--checkpoint_path', type = str, \
        default = './SuperSloMo/SuperSloMo.ckpt', \
            help = 'model weight path')
    parser.add_argument('--videopath', type = str, \
        default = 'F:\\SenseTime\\Quad-Bayer to RGB Mapping\\data\\video_original\\Moscow Russia Aerial Drone 5K Timelab.pro _ Москва Россия Аэросъемка-S_dfq9rFWAE.webm', \
            help = 'video path')
    # F:\\SenseTime\\Quad-Bayer to RGB Mapping\\data\\video_original\\Dubai in 4K - City of Gold-SLaYPmhse30.webm
    parser.add_argument('--video_folder_path', type = str, \
        default = 'E:\\Deblur\\data collection\\video_original', \
            help = 'video folder path')
    parser.add_argument('--savepath', type = str, \
        default = 'E:\\Deblur\\data collection\\video_original_interp_by_superslomo', \
            help = 'save path')
    opt = parser.parse_args()
    print(opt)
    # General information of processing folder
    # NOTE(review): `get_jpgs` is not defined in this chunk — presumably a
    # module-level helper; confirm it exists (elsewhere `get_files` is used).
    videolist = get_jpgs(opt.video_folder_path)
    for i in range(len(videolist)):
        print(i, videolist[i])
    # NOTE(review): this reassignment is unused — get_interp_videos() lists
    # the folder again internally.
    videolist = get_files(opt.video_folder_path)
    # Process videos
    get_interp_videos(opt)
| [
"noreply@github.com"
] | zhaoyuzhi.noreply@github.com |
957f2c59a82039e7ca05cb449191376e312de5d4 | 56b47728ffe36878096fac0d8fb0deb94a8a9b7c | /SQLdb.py | 7140ce7027f76ebad847674b3e3bf46a455fe87a | [] | no_license | CaMeLCa5e/dailyspring2015 | 1a930fc74930bb7d286956f17fcf36ec48802b4e | 1b2039b9908407a31e951e44f66bafebf3d7422b | refs/heads/master | 2016-09-05T19:54:44.918992 | 2015-05-24T23:51:39 | 2015-05-24T23:51:39 | 33,795,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | #! usr/bin/python
import MySQLdb
# Connect to the local test database: host, user, password, schema.
db = MySQLdb.connect("localhost", "testuser", "test123", "TESTDB")
cursor = db.cursor()
# Drop a stale table so CREATE TABLE below cannot collide.
cursor.execute("DROP TABLE IF EXISTS EMPLOYEE")
# cursor.execute("SELECT VERSION()")
# data = cursor.fetchone()
# print "Database version : %s" %data
# db.close
# Bugfix: a comma was missing after `SEX CHAR(1)`, making the statement
# invalid SQL (MySQL column definitions are comma-separated).
# NOTE(review): `LAST_NALE` looks like a typo for `LAST_NAME`; kept as-is
# because renaming the column would change the resulting schema.
sql = """CREATE TABLE EMPLOYEE (
         FIRST_NAME CHAR(20) NOT NULL,
         LAST_NALE CHAR(20),
         AGE INT,
         SEX CHAR(1),
         INCOME FLOAT )"""
cursor.execute(sql)
db.close()
| [
"JM273606@gmail.com"
] | JM273606@gmail.com |
62da20225b5af908f0ff70e87fb3ad679eae1688 | e52c7431f1b14444de52fd943a39fcaabeca21e4 | /torch_geometric/sparse/__init__.py | faa61231fd6d50a6b07f253fe18fdf19e1b6117f | [] | no_license | jwyang/pytorch_geometric | 72d3a62f6991d90edb3b8da6445e18421f2174a8 | 31043b182248852768317a4185384390e95217d5 | refs/heads/master | 2021-08-30T16:16:03.613724 | 2017-12-18T15:52:08 | 2017-12-18T15:52:08 | 114,831,322 | 0 | 1 | null | 2017-12-20T02:02:54 | 2017-12-20T02:02:53 | null | UTF-8 | Python | false | false | 228 | py | from .sparse import SparseTensor
from .mm import mm
from .mm_diagonal import mm_diagonal
from .sum import sum
from .eye import eye
from .stack import stack
__all__ = ['SparseTensor', 'mm', 'mm_diagonal', 'sum', 'eye', 'stack']
| [
"matthias.fey@tu-dortmund.de"
] | matthias.fey@tu-dortmund.de |
627d1e6a2cdc3cf718162c2da7f7045a0cc2c408 | 7978cf6a612816b97beeb34e4ccc4a3f68c44767 | /1/1_2.py | 2b44561d8277813145276f3ac86f8525dc54c6aa | [] | no_license | nemesmarci/Advent-of-Code-2018 | 13e9acd01b019ef0e890f0472c0c316a17dd60be | 47dfac4afa69636428b722eb96fba2596bf8368c | refs/heads/master | 2022-01-01T09:47:46.652193 | 2019-12-10T23:28:36 | 2021-12-29T19:48:02 | 159,982,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | with open('input.txt') as data:
lines = data.readlines()
frequency = 0
frequencies = set()
found = False
while not found:
for line in lines:
frequencies.add(frequency)
frequency += int(line)
if frequency in frequencies:
found = True
break
print(frequency)
| [
"nemes@sch.bme.hu"
] | nemes@sch.bme.hu |
26fabfda61115811b13d95b272a0c78d93ef5adb | 2ca91d379b291a4e7f5e804a63bb43f8bf316adf | /transmutator/orchestration.py | 4ce766f5a01531d50647193e7696e42afa9455f4 | [
"BSD-3-Clause"
] | permissive | benoitbryon/transmutator | 918146ebfdd67ca67ac7f97715f8d59d745c32da | 865a275a601cd735a131a58576aa12c68510b644 | refs/heads/master | 2021-01-17T13:21:39.027197 | 2015-06-24T10:30:58 | 2015-06-24T10:30:58 | 12,803,979 | 0 | 1 | null | 2016-06-27T21:40:38 | 2013-09-13T07:30:30 | Python | UTF-8 | Python | false | false | 7,563 | py | import os
import shutil
from xal.session.local import LocalSession
class Orchestrator(object):
    """Run "mutations" (executable migration scripts) found in ``mutations/``
    through a todo -> doing -> done workflow stored under
    ``var/transmutator/``.

    All paths are relative to the current working directory at
    instantiation time.

    Fixes applied:
    * ``todo_recurrent`` returns ``[]`` when no recurrent mutation exists
      (previously ``os.listdir`` raised on the missing folder).
    * ``run_mutations`` catches ``Exception`` instead of a bare ``except:``
      so ``KeyboardInterrupt``/``SystemExit`` stay fatal.
    """
    def __init__(self):
        # Lay out the working tree; missing directories are created eagerly.
        root_dir = os.path.abspath(os.getcwd())
        self.mutations_dir = os.path.join(root_dir, 'mutations')
        self.working_dir = os.path.join(root_dir, 'var', 'transmutator')
        if not os.path.isdir(self.working_dir):
            os.makedirs(self.working_dir)
        self.todo_dir = os.path.join(self.working_dir, 'todo')
        if not os.path.isdir(self.todo_dir):
            os.makedirs(self.todo_dir)
        self.doing_dir = os.path.join(self.working_dir, 'doing')
        if not os.path.isdir(self.doing_dir):
            os.makedirs(self.doing_dir)
        self.done_dir = os.path.join(self.working_dir, 'done')
        if not os.path.isdir(self.done_dir):
            os.makedirs(self.done_dir)
    def mutation_sourcefile(self, mutation):
        """Return absolute filename to mutation."""
        return os.path.join(self.mutations_dir, mutation)
    def is_mutation(self, mutation):
        """Return ``True`` if ``mutation`` is path to an executable file."""
        return os.access(self.mutation_sourcefile(mutation), os.X_OK)
    def is_done(self, mutation):
        """Return ``True`` if ``mutation`` has already been performed."""
        return os.path.isfile(os.path.join(self.done_dir, mutation))
    def is_new(self, mutation):
        """Return ``True`` if ``mutation`` has not been performed yet."""
        return not os.path.exists(os.path.join(self.done_dir, mutation))
    def is_recurrent(self, mutation):
        """Return ``True`` if ``mutation`` has to be performed on every run.
        On forward, recurrent mutations are not skipped, they go forward.
        """
        return mutation.startswith('recurrent/')
    def is_in_development(self, mutation):
        """Return ``True`` if ``mutation`` is in development.
        On forward, in-development mutations go backward and forward.
        """
        return mutation.startswith('development')
    def collect_mutations(self):
        """Iterates over all available mutations, whatever their status.
        The return iterator is not sorted.
        """
        for (dirpath, dirnames, filenames) in os.walk(self.mutations_dir):
            for filename in filenames:
                # Store paths relative to the mutations root.
                relative_dirname = dirpath[len(self.mutations_dir):]
                relative_dirname = relative_dirname.lstrip(os.path.sep)
                relative_filename = os.path.join(relative_dirname, filename)
                yield relative_filename
    def register_mutation(self, mutation):
        """Copy ``mutation`` into the TODO tree when it has to run."""
        todo = self.is_new(mutation) or \
            self.is_in_development(mutation) or \
            self.is_recurrent(mutation)
        if todo:
            dest = os.path.join(self.todo_dir, mutation)
            if not os.path.isdir(os.path.dirname(dest)):
                os.makedirs(os.path.dirname(dest))
            shutil.copy2(os.path.join(self.mutations_dir, mutation), dest)
    def start_mutation(self, mutation):
        """Move (or, for recurrent ones, copy) mutation from TODO to DOING."""
        todo = os.path.join(self.todo_dir, mutation)
        todo_dir = os.path.dirname(todo)
        doing = os.path.join(self.doing_dir, mutation)
        if not os.path.isdir(os.path.dirname(doing)):
            os.makedirs(os.path.dirname(doing))
        if self.is_recurrent(mutation):
            # Recurrent mutations stay in TODO so every release sees them.
            shutil.copy2(todo, doing)
        else:
            shutil.move(todo, doing)
        # Garbage-collect an emptied release folder.
        if todo_dir != self.todo_dir and not os.listdir(todo_dir):
            shutil.rmtree(todo_dir)
    def todo_releases(self):
        """Return ordered list of releases to process.
        The unnamed release (files directly in TODO) comes first and
        'development' always comes last.
        """
        releases = []
        noname_release = False
        development_release = False
        for name in os.listdir(self.todo_dir):
            if os.path.isdir(os.path.join(self.todo_dir, name)):
                if name == 'development':
                    development_release = True
                elif name == 'recurrent':
                    pass
                else:
                    releases.append(name)
            else:
                noname_release = True
        releases.sort()
        if noname_release:
            releases.insert(0, '')
        if development_release:
            releases.append('development')
        return releases
    def todo_recurrent(self):
        """Return ordered list of recurrent mutations.
        Bugfix: return an empty list when no recurrent mutation was
        registered (``todo/recurrent`` does not exist) instead of letting
        ``os.listdir`` raise.
        """
        recurrent_dir = os.path.join(self.todo_dir, 'recurrent')
        if not os.path.isdir(recurrent_dir):
            return []
        files = os.listdir(recurrent_dir)
        files.sort()
        return [os.path.join('recurrent', name) for name in files]
    def todo_mutations(self, release):
        """Return ordered mutations for ``release``, recurrent ones merged in."""
        files = []
        recurrent_mutations = self.todo_recurrent()
        absolute_release = os.path.join(self.todo_dir, release)
        for filename in os.listdir(absolute_release):
            if os.path.isfile(os.path.join(absolute_release, filename)):
                relative_filename = os.path.join(release, filename)
                # Sort on the bare filename, keep the relative path as value.
                files.append((filename, relative_filename))
        for recurrent in recurrent_mutations:
            files.append((recurrent[len('recurrent/'):], recurrent))
        files.sort()
        files = [mutation for f, mutation in files]
        return files
    def forward_mutation(self, mutation):
        """Execute the mutation script (forward direction)."""
        print('## FORWARD mutation "{name}"'.format(name=mutation))
        session = LocalSession()
        sh = session.sh
        result = sh.run(os.path.join(self.doing_dir, mutation))
        print(result.stdout)
    def backward_mutation(self, mutation):
        """Execute the mutation script with ``--backward``."""
        print('## BACKWARD mutation "{name}"'.format(name=mutation))
        session = LocalSession()
        sh = session.sh
        result = sh.run([
            os.path.join(self.doing_dir, mutation),
            '--backward'])
        print(result.stdout)
    def run_mutation(self, mutation):
        """Run one mutation, going backward first for in-development re-runs."""
        do_backward = (self.is_done(mutation)
                       and self.is_in_development(mutation))
        do_forward = True
        if do_backward:
            self.backward_mutation(mutation)
        if do_forward:
            self.forward_mutation(mutation)
    def success_mutation(self, mutation):
        """Mark mutation as DONE (recurrent ones are never archived)."""
        doing = os.path.join(self.doing_dir, mutation)
        doing_dir = os.path.dirname(doing)
        done = os.path.join(self.done_dir, mutation)
        if not os.path.isdir(os.path.dirname(done)):
            os.makedirs(os.path.dirname(done))
        if not self.is_recurrent(mutation):
            shutil.move(doing, done)
        if doing_dir != self.doing_dir and not os.listdir(doing_dir):
            shutil.rmtree(doing_dir)
    def error_mutation(self, mutation):
        """Register error and warn user."""
        print('ERROR with mutation "{name}"'.format(name=mutation))
    def run_mutations(self):
        """Collect, order and execute every pending mutation."""
        for mutation in self.collect_mutations():
            self.register_mutation(mutation)
        for release in self.todo_releases():
            print('#### Processing release "{name}" ####'.format(name=release))
            for mutation in self.todo_mutations(release):
                self.start_mutation(mutation)
                try:
                    self.run_mutation(mutation)
                except Exception:
                    # Bugfix: was a bare ``except:`` which also swallowed
                    # KeyboardInterrupt/SystemExit.
                    self.error_mutation(mutation)
                else:
                    self.success_mutation(mutation)
        recurrent_dir = os.path.join(self.todo_dir, 'recurrent')
        if os.path.exists(recurrent_dir) and os.listdir(recurrent_dir):
            shutil.rmtree(recurrent_dir)
| [
"benoit@marmelune.net"
] | benoit@marmelune.net |
0b281d03ea9a0a92a7cdb82652e65812e7c55bce | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2145/60708/269546.py | 04f455752d5ae779e8aa18a822389bff6d25d85e | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | N=int(input())
for n in range(0,N):
temp=input().split(" ")
l=int(input())
list=[]
for item in temp:
list.append(int(item))
maxresult=0
for x in range(1,l+1):
for y in range(0,l-x+1):
h=min(list[y:y+x])
if(h*x>maxresult):
maxresult=h*x
print(maxresult) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
f42c0bc6db94794cbf3dbc31077f0801d2b140d3 | 804ce3c2897a8720a27e0d86ac3b868ebd41cd20 | /project-data/django/mango/mango/wsgi.py | 518142e6201c5cda775ff0c78d6761836370bc36 | [] | no_license | hoboland21/mango | 383359aa85b685bfe77c6336974600038454cf80 | be8bf3398612a0c3dbb4498eb5eb18407c574ce3 | refs/heads/master | 2023-07-13T06:25:39.508434 | 2021-08-25T03:25:37 | 2021-08-25T03:25:37 | 399,520,705 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 491 | py |
import sys,os
"""
WSGI config for main project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
# NOTE(review): the string above is not a real module docstring because it
# follows an import; it is evaluated and discarded. Harmless, but it would
# need to be the first statement to become __doc__.
#if "/usr/local/django/mango" not in sys.path :
#    sys.path.insert(0,"/usr/local/django/mango")
# Point Django at this project's settings before building the application.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mango.settings")
from django.core.wsgi import get_wsgi_application
# WSGI entry point consumed by the app server (gunicorn/uwsgi/mod_wsgi).
application = get_wsgi_application()
"jc@saipantech.com"
] | jc@saipantech.com |
aa6d701c19dc52cbb0c3abdfa5fa1970d39343be | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2482/60829/280705.py | 5973613bdc5d7fe19ce40088faaa5a99d02f2080 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | n=int(input())
# For each of the n test cases read two integers and print their quotient.
for p in range(n):
    a = int(input())
    b = int(input())
    k = a / b
    # Bugfix: `k = 1.(6)` / `k = 2.(6)` were SyntaxErrors. They look like an
    # attempt at repeating-decimal notation, so the values are stored as
    # strings. `k` is computed but never printed — kept only to preserve the
    # original (observable) behavior of printing a/b.
    if k == 1.6666666666666667:
        k = "1.(6)"
    if k == 2.6666666666666665:
        k = "2.(6)"
    print(a / b)
"1069583789@qq.com"
] | 1069583789@qq.com |
0ee978c945a22cfd723c0a2e287d0e327ea507df | 48fcd5b9203c5f34dcad9483259c0f3d46f5d48b | /codeacademy-python3/files/how_many_lines.py | 79a082fa607ebffa16e832bc8a67fed867241a6f | [] | no_license | ssaulrj/codes-python | 438dd691815d0a688d264928eb07187ba30c2138 | 04b75b001de60a5e202ad373f3379864753ce203 | refs/heads/master | 2022-11-17T11:40:18.883096 | 2020-07-06T00:57:58 | 2020-07-06T00:57:58 | 234,440,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | with open('how_many_lines.txt') as lines_doc:
#lines_docx = lines_doc.read()
for line in lines_doc.readlines():
print(line)
#print(lines_docx)
| [
"noreply@github.com"
] | ssaulrj.noreply@github.com |
ec62c1d46aabfd5b1918edd414451252d7613fff | f8eea4a4cc079ba830a27a2ce239aef451ed6597 | /test/ec/test_model.py | b9d4383639a0120de5a832c55c6614b5050ba089 | [
"MIT"
] | permissive | qalmaqihir/pyecsca | f37a32a00ea47fff1db0d5bb42b28df7cce6b587 | 28546dad01a25ce101d6b49924f521c2ef5ffa98 | refs/heads/master | 2023-02-18T19:59:11.612457 | 2021-01-22T16:02:25 | 2021-01-23T00:15:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | from unittest import TestCase
from pyecsca.ec.model import (ShortWeierstrassModel, MontgomeryModel, EdwardsModel,
TwistedEdwardsModel)
class CurveModelTests(TestCase):
    """Smoke-test that every curve model ships at least one coordinate system."""

    def test_load(self):
        # Same four assertions as before, expressed as a loop over the models
        # in the original order.
        models = (ShortWeierstrassModel, MontgomeryModel, EdwardsModel,
                  TwistedEdwardsModel)
        for model_cls in models:
            self.assertGreater(len(model_cls().coordinates), 0)
| [
"johny@neuromancer.sk"
] | johny@neuromancer.sk |
f2f95abfa48576405b22de0fe042f561eb265d28 | c8453f83242cd525a98606f665d9f5d9e84c6335 | /lib/surface/container/images/list_tags.py | 31d68c01369d00dbd98fec0fd6289bf87e7c0617 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | paulfoley/GCP-Cloud_SDK | 5188a04d8d80a2709fa3dba799802d57c7eb66a1 | bec7106686e99257cb91a50f2c1b1a374a4fc66f | refs/heads/master | 2021-06-02T09:49:48.309328 | 2017-07-02T18:26:47 | 2017-07-02T18:26:47 | 96,041,222 | 1 | 1 | NOASSERTION | 2020-07-26T22:40:49 | 2017-07-02T18:19:52 | Python | UTF-8 | Python | false | false | 3,412 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""List tags command."""
import argparse
from containerregistry.client.v2_2 import docker_http
from containerregistry.client.v2_2 import docker_image
from googlecloudsdk.api_lib.container.images import util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import http
# Add to this as we add columns.
# Occurrence kinds included in the default value of --occurrence-filter.
_DEFAULT_KINDS = [
    'BUILD_DETAILS',
    'IMAGE_BASIS',
    'PACKAGE_VULNERABILITY',
]
class ListTags(base.ListCommand):
  """List tags and digests for the specified image."""
  detailed_help = {
      'DESCRIPTION':
          """\
          The container images list-tags command of gcloud lists metadata about
          tags and digests for the specified container image. Images must be
          hosted by the Google Container Registry.
      """,
      'EXAMPLES':
          """\
          List the tags in a specified image:
            $ {{command}} gcr.io/myproject/myimage
      """,
  }
  def Collection(self):
    # Collection name used to pick the default output format/URI mapping.
    return 'container.tags'
  @staticmethod
  def Args(parser):
    """Register flags for this command.
    Args:
      parser: An argparse.ArgumentParser-like object. It is mocked out in order
          to capture some information, but behaves like an ArgumentParser.
    """
    # Hidden flag (argparse.SUPPRESS keeps it out of --help output).
    parser.add_argument(
        '--show-occurrences',
        action='store_true',
        default=False,
        help=argparse.SUPPRESS)
    # Hidden flag; default filter matches all kinds in _DEFAULT_KINDS.
    parser.add_argument(
        '--occurrence-filter',
        default=' OR '.join(
            ['kind = "{kind}"'.format(kind=x) for x in _DEFAULT_KINDS]),
        help=argparse.SUPPRESS)
    parser.add_argument(
        'image',
        help='The name of the image. Format: *.gcr.io/repository/image')
    # Does nothing for us, included in base.ListCommand
    base.URI_FLAG.RemoveFromParser(parser)
  def Run(self, args):
    """This is what gets called when the user runs this command.
    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.
    Raises:
      InvalidImageNameError: If the user specified an invalid image name.
    Returns:
      Some value that we want to have printed later.
    """
    repository = util.ValidateRepositoryPath(args.image)
    http_obj = http.Http()
    with docker_image.FromRegistry(
        basic_creds=util.CredentialProvider(),
        name=repository,
        transport=http_obj) as image:
      try:
        return util.TransformManifests(
            image.manifests(),
            repository,
            show_occurrences=args.show_occurrences,
            occurrence_filter=args.occurrence_filter)
      except docker_http.V2DiagnosticException as err:
        # Map registry HTTP errors to friendlier gcloud messages.
        raise util.GcloudifyRecoverableV2Errors(err, {
            403: 'Access denied: {0}'.format(repository),
            404: 'Not found: {0}'.format(repository)
        })
| [
"Nexu@Nexu.local"
] | Nexu@Nexu.local |
eb100eed015d6d6c69d5645791a5c9cc4b19b5cd | 6114a1313ca1193343fac049d0f3cf9e15438829 | /Chap0/project/guess.py | d1d399e95d5685e53d126aa80a8656a4ac77bad9 | [] | no_license | AIHackerTest/Hansoluo_Py101-004 | 0d49bb12158d2d6f8c430c407d739336de7d0ef3 | 1bb2d1810ec286e16cf12165e75472edd7c5d29a | refs/heads/master | 2021-06-28T01:54:57.478192 | 2017-09-12T08:23:02 | 2017-09-12T08:23:02 | 103,240,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | # -*- coding: utf-8 -*-
# 功能描述
# 程序随机生成一个20以内的数字,用户有10次机会猜测
# 程序根据用户输入,给予一定提示(大了,小了,正确)
# 猜对或用完10次机会,游戏结束
import random
# random.randint(a, b):Return a random integer N such that a <= N <= b
# Guessing game: the program picks a number in [1, 20]; the user has 10
# attempts and gets a "higher"/"lower"/"correct" hint after each guess.
a = random.randint(1, 20)
for i in range(1, 11):
    b = int(input("请猜测20以内的数字:"))
    if a > b:
        print("小了")
    elif a < b:
        print("大了")
    else:
        print("正确")
        break
    # Remaining attempts after this (wrong) guess.
    print("你还有 {0} 次机会".format(10 - i))
    # Bugfix: the original `i += 1` here was dead code — the for statement
    # rebinds `i` on every iteration, so the increment had no effect.
print('游戏结束')
| [
"xiaowan5219@gmail.com"
] | xiaowan5219@gmail.com |
8cd41ee8c833fb7d76ec5d6fcc4ef5a36db55050 | a5a7c59b04a1a64fe34653c7970c3cf173f9c1df | /io/swig/io/gnuplot_export.py | a8504306a3b7c530dfb745634aee77975fbd973d | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | siconos/siconos | a7afdba41a2bc1192ad8dcd93ac7266fa281f4cf | 82a8d1338bfc1be0d36b5e8a9f40c1ad5384a641 | refs/heads/master | 2023-08-21T22:22:55.625941 | 2023-07-17T13:07:32 | 2023-07-17T13:07:32 | 37,709,357 | 166 | 33 | Apache-2.0 | 2023-07-17T12:31:16 | 2015-06-19T07:55:53 | C | UTF-8 | Python | false | false | 3,345 | py | import os,sys
import h5py
import numpy
# Derive the HDF5 input name from the first CLI argument (basename, no ext).
filename = '{0}.hdf5'.format(os.path.splitext(os.path.basename(sys.argv[1]))[0])
withPlot = False
# Bugfix: `print filename` is Python 2 statement syntax and a SyntaxError
# under Python 3, which the rest of this script's prints target.
print(filename)
out = h5py.File(filename, 'r')
def group(h, name):
    """Return the group ``name`` inside ``h``, creating it when absent."""
    try:
        existing = h[name]
    except KeyError:
        existing = h.create_group(name)
    return existing
def data(h, name, nbcolumns):
    """Return dataset ``name`` inside ``h``; create an empty, unbounded-row
    dataset with ``nbcolumns`` columns when it does not exist yet."""
    try:
        existing = h[name]
    except KeyError:
        existing = h.create_dataset(name, (0, nbcolumns),
                                    maxshape=(None, nbcolumns))
    return existing
# Open/create the expected groups and datasets inside the HDF5 file.
_data = group(out, 'data')
ref = group(_data, 'ref')
joints = group(_data, 'joints')
static_data = data(_data, 'static', 9)
velocities_data = data(_data, 'velocities', 8)
dynamic_data = data(_data, 'dynamic', 9)
cf_data = data(_data, 'cf', 15)
solv_data = data(_data, 'solv', 4)
input = group(_data, 'input')
nslaws = group(_data, 'nslaws')
dpos_data = dynamic_data
max_time = max(dpos_data[:, 0])
times = list(set(dpos_data[:, 0]))
times.sort()
# Bugfix: numpy.where(cond) returns a 1-tuple of index arrays, so
# len(numpy.where(...)) was always 1. Count the matching rows instead to get
# the number of dynamic objects per time step.
ndyna = len(numpy.where(dpos_data[:, 0] == times[0])[0])
ntime=len(times)
print('time range :', times[0], times[-1])
print('ndyna :', ndyna)
print('ntime:', ntime)
instances = set(dpos_data[:, 1])
#output_dict = {}
#output_dict[1]= [1,2,3]
######## position output ########
nvalue = ndyna*7+1
position_output = numpy.empty((ntime,nvalue))
#print('position_output shape', numpy.shape(position_output))
position_output[:,0] = times[:]
for t in range(len(times)):
    for i in range(ndyna):
        # Bugfix: the row index must depend on i (was `t*ndyna+ndyna`, which
        # copied the same row for every object and skipped row 0).
        # Assumes rows are grouped by time then by object — TODO confirm.
        position_output[t,1+i*7:1+(1+i)*7] = dpos_data[t*ndyna+i, 2:9]
#print('position_output', position_output)
filename_output = '{0}_position.dat'.format(os.path.splitext(os.path.basename(sys.argv[1]))[0])
print('output file:', filename_output)
numpy.savetxt(filename_output, position_output)
######## velocity output ########
nvalue = ndyna*6+1
velocity_output = numpy.empty((ntime,nvalue))
#print('velocity_output shape', numpy.shape(velocity_output))
velocity_output[:,0] = times[:]
for t in range(len(times)):
    for i in range(ndyna):
        # Same row-index bugfix as for positions above.
        velocity_output[t,1+i*6:1+(1+i)*6] = velocities_data[t*ndyna+i, 2:8]
#print('velocity_output', velocity_output)
filename_output = '{0}_velocity.dat'.format(os.path.splitext(os.path.basename(sys.argv[1]))[0])
print('output file:', filename_output)
numpy.savetxt(filename_output, velocity_output)
if withPlot:
    import matplotlib
    havedisplay = "DISPLAY" in os.environ
    if not havedisplay:
        # Headless environment: render to file instead of a window.
        matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    plt.subplot(411)
    plt.title('position x')
    plt.plot(position_output[:, 0], position_output[:, 1])
    plt.subplot(412)
    plt.title('position y')
    plt.plot(position_output[:, 0], position_output[:, 2])
    plt.subplot(413)
    plt.title('position z ')
    plt.plot(position_output[:, 0], position_output[:, 3])
    plt.figure()
    plt.subplot(411)
    plt.title('orientation q0')
    plt.plot(position_output[:, 0], position_output[:, 4])
    plt.subplot(412)
    plt.title('orientation q1')
    plt.plot(position_output[:, 0], position_output[:, 5])
    plt.subplot(413)
    plt.title('orientation q2 ')
    plt.plot(position_output[:, 0], position_output[:, 6])
    plt.subplot(414)
    plt.title('orientation q3 ')
    plt.plot(position_output[:, 0], position_output[:, 7])
    if havedisplay:
        plt.show()
    else:
        plt.savefig("bbts.png")
| [
"vincent.acary@inria.fr"
] | vincent.acary@inria.fr |
0ed2f6c7c8ca1dc78db9c05e4e5ca005bb389f3d | 76a61fa52ab282501992ac889665bce01f2cdd62 | /examples/REINFORCE/linear.py | 7fb7f97d19853cd61fb1e43f6ee1644fbdf43297 | [
"Apache-2.0"
] | permissive | diogo149/treeano | 35ae0f9d0c0bbcb9ca1ff8856ba527e2d19b6194 | 9b3fd6bb5eb2f6738c9e5c357e70bef95dcae7b7 | refs/heads/master | 2020-04-06T07:05:19.946985 | 2016-08-11T15:47:58 | 2016-08-11T15:47:58 | 34,579,507 | 45 | 13 | null | 2016-02-03T07:32:45 | 2015-04-25T17:58:17 | Python | UTF-8 | Python | false | false | 2,456 | py | from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
from treeano.sandbox.nodes import REINFORCE
# Float dtype used for all host-side arrays (matches the theano config).
fX = theano.config.floatX
# Fixed affine map the policy must discover: reward peaks when the sampled
# action equals state . W + b.
TARGET_WEIGHT = np.random.randn(10, 2).astype(fX)
TARGET_BIAS = np.random.randn(2).astype(fX)
class RewardNode(treeano.NodeImpl):
    # Emits the (baseline-shifted) reward of sampled actions w.r.t. the
    # fixed affine target above.
    input_keys = ("state", "sampled")
    def compute_output(self, network, state_vw, sampled_vw):
        # Bake the target parameters into the graph as symbolic constants.
        W = T.constant(TARGET_WEIGHT)
        b = T.constant(TARGET_BIAS)
        target = T.dot(state_vw.variable, W) + b.dimshuffle("x", 0)
        # Negative squared distance to the target, summed over action dims.
        reward = -T.sqr(sampled_vw.variable - target).sum(axis=1)
        network.create_vw(
            "raw_reward",
            variable=T.mean(reward),
            shape=(),
        )
        # Constant shift of the per-sample reward; presumably a crude
        # baseline for REINFORCE — TODO confirm.
        baseline_reward = 100
        network.create_vw(
            "default",
            variable=reward + baseline_reward,
            shape=(state_vw.shape[0],),
            tags={"output"},
        )
BATCH_SIZE = 64
# Policy graph: state -> dense mean (mu), constant sigma, normal sampling,
# reward computation, and the REINFORCE surrogate cost.
graph = tn.GraphNode(
    "graph",
    [[tn.InputNode("state", shape=(BATCH_SIZE, 10)),
      tn.DenseNode("mu", num_units=2),
      tn.ConstantNode("sigma", value=1.),
      REINFORCE.NormalSampleNode("sampled"),
      RewardNode("reward"),
      REINFORCE.NormalREINFORCECostNode("REINFORCE")],
     [{"from": "state", "to": "mu"},
      {"from": "mu", "to": "sampled", "to_key": "mu"},
      {"from": "sigma", "to": "sampled", "to_key": "sigma"},
      {"from": "sampled", "to": "reward", "to_key": "sampled"},
      {"from": "state", "to": "reward", "to_key": "state"},
      {"from": "state", "to": "REINFORCE", "to_key": "state"},
      {"from": "mu", "to": "REINFORCE", "to_key": "mu"},
      {"from": "sigma", "to": "REINFORCE", "to_key": "sigma"},
      {"from": "reward", "to": "REINFORCE", "to_key": "reward"},
      {"from": "sampled", "to": "REINFORCE", "to_key": "sampled"},
      {"from": "REINFORCE"}]]
)
# Optimize the REINFORCE cost with Adam.
network = tn.AdamNode(
    "adam",
    {"subtree": graph,
     "cost": tn.ReferenceNode("cost", reference="REINFORCE")},
    learning_rate=0.1
).network()
# Compiled function: state batch in, mean raw reward out, updates applied.
fn = network.function(
    ["state"], [("reward", "raw_reward")], include_updates=True)
errors = []
for i in range(5000):
    # Note: "error" is actually the mean raw (un-baselined) reward.
    error, = fn(np.random.randn(BATCH_SIZE, 10).astype(fX))
    if i % 100 == 0:
        print("Iter:", i, "Error:", error)
    errors.append(error)
print("mean reward:", np.mean(errors))
| [
"diogo149@gmail.com"
] | diogo149@gmail.com |
7e3a3bf22bd64c53ffdb6d059ddd55e06a2f0295 | e81722d244e8647e64f2ffb44e028a1f4c5df410 | /prepare_data.py | 98e49fd258646947ae8b42f4672c8a4727556cfe | [] | no_license | bvillasen/volumeRender | 9c16419d19e361799ef6c1a371e6236c90139b79 | f36586fbf7775d4d39545064b5771cad86d3dfef | refs/heads/master | 2021-08-30T19:43:10.127411 | 2020-10-18T02:05:21 | 2020-10-18T02:05:21 | 198,691,927 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,081 | py | import os, sys
import numpy as np
import h5py as h5
currentDirectory = os.getcwd()
srcDirectory = currentDirectory + "/src/"
dataDirectory = currentDirectory + "/data_src/"
# Make the project's helper modules importable from the working directory.
sys.path.extend([ srcDirectory, dataDirectory ] )
from tools import create_directory
from load_data_cholla_distributed import load_snapshot_data_distributed
#Load Snapshot Data
nPoints = 1024
# dataDir = '/raid/bruno/data/'
dataDir = '/data/groups/comp-astro/bruno/'
inDir = dataDir + 'cosmo_sims/{0}_hydro_50Mpc/output_files_pchw18/'.format(nPoints)
stats_dir = inDir + 'statistics/'
outDir = dataDir + 'cosmo_sims/{0}_hydro_50Mpc/snapshots_prepared/'.format(nPoints)
create_directory( outDir )
data_type = 'hydro'
# data_type = 'particles'
# Load Statistics
statistics = h5.File( stats_dir + 'stats_{0}.h5'.format(data_type), 'r')
fields = ['density']
precision = np.float32
Lbox = 5000 #kpc/h
# Process-grid decomposition of the distributed snapshot files.
if nPoints == 1024: proc_grid = [ 4, 2, 2]
if nPoints == 2048: proc_grid = [ 8, 8, 8]
box_size = [ Lbox, Lbox, Lbox ]
grid_size = [ nPoints, nPoints, nPoints ] #Size of the simulation grid
subgrid = [ [0, nPoints], [0, nPoints], [0, nPoints] ] #Size of the volume to load
field = 'density'
# Global min/max of the field over the whole run, from the statistics file.
min_val = statistics[field].attrs['min_global']
max_val = statistics[field].attrs['max_global']
print( "Min: {0} Max: {1}".format(min_val, max_val ))
n_snapshot = 169
# for n_snapshot in range(170):
data = load_snapshot_data_distributed( n_snapshot, inDir, data_type, fields, subgrid, precision, proc_grid, box_size, grid_size, show_progess=True )
data_vals = data[data_type][field]
data_vals -= min_val
# Normalize Data
# Compress the dynamic range: clip at 1/1000 of the global span, then take a
# normalized log10 so the result lies in [0, 1].
max_val = (max_val - min_val) / 1000
data_vals = np.clip( data_vals, a_min=None, a_max=max_val )
data_vals = np.log10(data_vals + 1) / np.log10( max_val + 1)
# Change to 256 range
data_vals = (255*(data_vals)).astype(np.uint8)
#Write to file
out_file_name = outDir + '{0}_{1}_{2}.h5'.format( data_type, field, n_snapshot )
out_file = h5.File( out_file_name, 'w')
out_file.create_dataset( field, data=data_vals )
out_file.close()
print( "Saved File: " + out_file_name )
| [
"bvillasen@gmail.com"
] | bvillasen@gmail.com |
fc9da05b724f3cc401ad8e99bf801480a47d99ec | 187a6558f3c7cb6234164677a2bda2e73c26eaaf | /jdcloud_sdk/services/bgw/models/LocationSpec.py | 69b6f58d03ef8610c8cc7e5db8003c726aa8d3d1 | [
"Apache-2.0"
] | permissive | jdcloud-api/jdcloud-sdk-python | 4d2db584acc2620b7a866af82d21658cdd7cc227 | 3d1c50ed9117304d3b77a21babe899f939ae91cd | refs/heads/master | 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 | Apache-2.0 | 2023-09-07T06:54:49 | 2018-03-22T03:47:02 | Python | UTF-8 | Python | false | false | 1,533 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class LocationSpec(object):
    """Access-location specification for a Direct Connect line (auto-generated model)."""

    def __init__(self, locationCode=None, locationPortSpecCode=None, locationISPCode=None):
        """
        :param locationCode: (Optional) Region code of the line; only used when creating a self-service connection. Obtain it from the describeLocations API.
        :param locationPortSpecCode: (Optional) Access-port spec code; used when creating self-service connections and hosted lines. Obtain it from the describeLocations API.
        :param locationISPCode: (Optional) Access ISP code; only used when creating a self-service connection. Obtain it from the describeLocations API.
        """
        self.locationCode = locationCode
        self.locationPortSpecCode = locationPortSpecCode
        self.locationISPCode = locationISPCode
| [
"jdcloud-api@jd.com"
] | jdcloud-api@jd.com |
1a95c984ef4b479eb1dafc39164ee5b439a1e1ac | e0045eec29aab56212c00f9293a21eb3b4b9fe53 | /project/tests/test_project_ui.py | 08ef944eea3a788fc496ef58c731307bf53c4486 | [] | no_license | tamam001/ALWAFI_P1 | a3a9268081b9befc668a5f51c29ce5119434cc21 | 402ea8687c607fbcb5ba762c2020ebc4ee98e705 | refs/heads/master | 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | # Part of ALWAFI. See LICENSE file for full copyright and licensing details.
import odoo.tests
@odoo.tests.tagged('post_install', '-at_install')
class TestUi(odoo.tests.HttpCase):
    """UI smoke test: run the 'project_tour' web tour as the admin user."""

    def test_01_project_tour(self):
        """Launch the project onboarding tour and wait until it reports ready."""
        start_url = "/web"
        run_tour_js = "odoo.__DEBUG__.services['web_tour.tour'].run('project_tour')"
        tour_ready_js = "odoo.__DEBUG__.services['web_tour.tour'].tours.project_tour.ready"
        self.phantom_js(start_url, run_tour_js, tour_ready_js, login="admin")
| [
"50145400+gilbertp7@users.noreply.github.com"
] | 50145400+gilbertp7@users.noreply.github.com |
b734607bb98232aba5c08fbf7b0d204352c8348e | cac43e8d506ab79074ea4c5fb469f70ea7e6da81 | /simulation/simulation_results_parser.py | 85d557483acb2cb51db1cb71f98c18df0047687f | [] | no_license | fubuloubu/ad-hoc-networking | 63b14cb80c6013a84764f65b7fcef275dd7c673e | b63f266ab6b90c2b77182cecf2f04749a5e7fa25 | refs/heads/master | 2020-06-10T20:37:25.883649 | 2016-12-23T00:07:17 | 2016-12-23T00:07:17 | 75,881,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,694 | py | #!/usr/bin/python3
import print_data_model
# Typical Results looks like:
example = '''
Statistics:
Total Messages: 64
Succesfully Received Messages: 6
Success Rate: 9.38%
Number of Retransmissions: 188
Average Latency: 0.00 [steps]
Statistics:
Total Messages: 52
Succesfully Received Messages: 4
Success Rate: 7.69%
Number of Retransmissions: 208
Average Latency: 0.00 [steps]
Statistics:
Total Messages: 53
Succesfully Received Messages: 4
Success Rate: 7.55%
Number of Retransmissions: 188
Average Latency: 0.00 [steps]
'''
# NOTE: Multiple Simulations possible...
def sanitize(resultsStr):
    """Trim surrounding whitespace and collapse blank lines.

    Any run of consecutive newlines is squeezed down to a single
    newline; leading/trailing whitespace is stripped first.
    """
    cleaned = resultsStr.strip()
    # Keep replacing double newlines until none remain, so a run of any
    # length collapses to exactly one newline.
    while '\n\n' in cleaned:
        cleaned = cleaned.replace('\n\n', '\n')
    return cleaned
import re
def extractMetrics(metricString):
    """Parse one 'Title: value [units]' line into a metric dict.

    Returns a dict with:
      title -- text before the first ': '
      mname -- slugified title (lowercase, spaces replaced by dashes)
      value -- the numeric part, kept as a string
      units -- units with surrounding brackets stripped, or 'none'

    Raises ValueError when the value part does not start with a number.
    """
    metric = {}
    # BUGFIX: split only on the FIRST ': ' (maxsplit=1). The unbounded
    # split silently discarded everything after a second ': ' in the line.
    metricString = metricString.split(': ', 1)
    metric["title"] = metricString[0]
    metric["mname"] = metricString[0].lower().replace(' ','-')
    match = re.search(r'([0-9.]+) *(.*)', metricString[1])
    if match:
        (data, units) = match.group(1,2)
        metric["value"] = data
        # Empty units become the literal sentinel 'none'; otherwise drop
        # the surrounding '[' ']' decoration, e.g. '[steps]' -> 'steps'.
        metric["units"] = 'none' if units == '' else \
            units.lstrip().replace('[','').replace(']','')
    else:
        raise ValueError("'{}' does not parse with regex".format(metricString[1]))
    return metric
# Parse output of simulation run
class SimulationMetrics(print_data_model.MetricContainer):
    """Parses one or more 'Statistics:' report blocks into a MetricContainer.

    Each simulation contributes one copy of every metric (named
    '<mname>-<NN>'), and per-metric averages across all simulations are
    appended afterwards as 'avg-<mname>'.
    """

    def __init__(self, datastring):
        """Build metric/title/data lists from raw simulator output text."""
        # Clean data string and split by simulation run
        simStats = sanitize(datastring).split('Statistics:\n')
        # Remove empty entries and split by line
        simStats = filter(None, simStats)
        simStats = map(lambda s: s.rstrip().split('\n'), simStats)
        # Parse each raw metric line into a metric object
        # NOTE: Using list here because below we need to use it twice
        simStats = list(map(lambda s: list(map(lambda ms: extractMetrics(ms), s)), simStats))
        # Make sure metric names in each simulation line up
        # e.g. there are N duplicates of every metric in list
        metricNames = map(lambda s: [ m["mname"] for m in s], simStats)
        def checkEqual(iterator):
            # True when every element of the iterator equals the first
            # (vacuously True for an empty iterator).
            iterator = iter(iterator)
            try:
                first = next(iterator)
            except StopIteration:
                return True
            return all(first == rest for rest in iterator)
        # Raise error if fault is found
        if not checkEqual(metricNames):
            raise ValueError("Simulations do not have matching metrics")
        # Create lists by mapping each simulation metric
        # to unique metric name using position in list
        metricNames = [ m["mname"] for m in simStats[0] ]
        metricTitles = [ m["title"] for m in simStats[0] ]
        metricUnits = [ m["units"] for m in simStats[0] ]
        metric_list = []
        title_list = []
        for i in range(len(simStats)):
            for j in range(len(metricNames)):
                # e.g. 'success-rate-01' for simulation 1 (1-based, zero-padded).
                metric_list.append("{1}-{0:02d}".format(i+1, metricNames[j]))
                # NOTE(review): metricUnits is passed but the template only uses
                # {0}/{1}, so units never appear in titles -- confirm intended.
                title_list.append("Simulation {0} {1}".
                    format(i+1, metricTitles[j], metricUnits[j]))
        # Get data list by extracting value from metrics and flattening that list
        from ast import literal_eval
        # NOTE: Using list here because below we need to use it twice
        metricData = list(map(lambda s: [ literal_eval(m["value"]) for m in s], simStats))
        data_list = [item for sublist in metricData for item in sublist]
        # Create and append average metrics
        # First transpose list of lists
        avgMetricData = map(lambda *a: list(a), *metricData)
        # Then do average by summing and dividing by number of entries
        avgMetricData = map(lambda l: sum(l), avgMetricData)
        avgMetricData = map(lambda s: s/float(len(simStats)), avgMetricData)
        # NOTE: Using list here because below we need use subscripts
        avgMetricData = list(avgMetricData)
        # Finally append all average metrics to list
        for i in range(len(metricNames)):
            metric_list.append("avg-{0}".format(metricNames[i]))
            title_list.append("Simulation Average {0}".
                format(metricTitles[i], metricUnits[i]))
            data_list.append(avgMetricData[i])
        # Initialize container for all metrics we discovered
        print_data_model.MetricContainer.__init__(self, metric_list, title_list, data_list)
# Use argparsing from base module
# Running this file directly delegates CLI handling to print_data_model,
# feeding it this parser class and the bundled `example` text.
if __name__ == '__main__':
    print_data_model.main(SimulationMetrics, example)
| [
"fubuloubu@gmail.com"
] | fubuloubu@gmail.com |
970052a55f375ecee9553f24eb9852ddfc9a8962 | 116acf603f5db8d626247355bf786c339ba95ea9 | /libs/options.py | 2cc4a75cb027d45077396d591f2b05a0f1016b80 | [] | no_license | dahunuaa/ZhihuiSMB_python3 | 0857afeec2337b44571986a9c70c26e716142ccb | 8db2708efccd5eefa393738500e326bd7fb65c21 | refs/heads/master | 2021-01-25T14:32:32.201879 | 2018-03-11T05:59:10 | 2018-03-11T05:59:10 | 123,703,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,854 | py | # -*- coding:utf-8 -*-
"""
alter by:dahu
alter on:2016-11-17
"""
import os
import logging
from tornado.options import parse_command_line, options, define
from ZhihuiSMB.libs import configlib
def get_base_config():
    """Load configs/base.icfg relative to the project root.

    Temporarily chdirs into <root>/configs -- presumably Config resolves
    its file name against the current working directory (TODO confirm
    in configlib) -- then restores the cwd.
    NOTE(review): mutating the process-wide cwd is not thread-safe.
    """
    root_path = configlib.root_path
    os.chdir(root_path+'/configs')
    cfg=configlib.Config('base.icfg')
    # Expose configlib's names inside the loaded config's namespace.
    cfg.addNamespace(configlib)
    os.chdir(root_path)
    return cfg
def parse_config_file(path):
    """Rewrite tornado default parse_config_file.

    Parses and loads the Python config file at the given path.
    Unlike the stock tornado helper, names not already known are
    registered as new options via define().

    BUGFIX: exec() seeds the globals dict with '__builtins__' (and a
    config file may define other dunder names); the original loop passed
    those straight to define(), creating a bogus option whose value is
    the builtins module. Dunder names are now skipped.
    """
    config = {}
    with open(path, 'r', encoding='utf-8') as f:
        code = compile(f.read(), path, 'exec')
        exec(code, config, config)
    for name in config:
        # Skip __builtins__ and friends -- they are not options.
        if name.startswith('__'):
            continue
        if name in options:
            options[name].set(config[name])
        else:
            define(name, config[name])
def parse_options():
    """Load option defaults from settings.py, then apply command-line overrides."""
    settings_file = os.path.join('', "settings.py")
    try:
        parse_config_file(settings_file)
        logging.info("Using settings.py as default settings.")
    except Exception as e:
        import traceback
        print(traceback.format_exc())
        logging.error("No any default settings, are you sure? Exception: %s" % e)
    # Command-line flags take precedence over anything loaded above.
    parse_command_line()
config = get_base_config() | [
"dahu yao"
] | dahu yao |
842b0e029a9d3e87a5e0a33a7d76de2cb72a3ccd | c85a6d674679780ee510b5c8c3dbcbdecc859f64 | /swagger_client/__init__.py | 3d74d68b52cf9561f2ad3314cd8c0b2e8674ea96 | [] | no_license | cbrowet-axway/APIM_sdk | d4f4a124e86a7b2e65d0ef07b54c68e95de68337 | 4f82df67ebe3dd6eae645bab8f86e72c0347ee24 | refs/heads/master | 2020-05-25T13:22:35.802350 | 2020-04-16T09:25:21 | 2020-04-16T09:25:21 | 187,820,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,955 | py | # coding: utf-8
# flake8: noqa
"""
API Manager API v1.3
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.3.0
Contact: support@axway.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from swagger_client.api.api_discovery_api import APIDiscoveryApi
from swagger_client.api.api_manager_services_api import APIManagerServicesApi
from swagger_client.api.api_proxy_registration_api import APIProxyRegistrationApi
from swagger_client.api.api_repository_api import APIRepositoryApi
from swagger_client.api.applications_api import ApplicationsApi
from swagger_client.api.current_user_api import CurrentUserApi
from swagger_client.api.login_api import LoginApi
from swagger_client.api.metrics_api import MetricsApi
from swagger_client.api.migrate_api import MigrateApi
from swagger_client.api.o_auth_authorizations_api import OAuthAuthorizationsApi
from swagger_client.api.organizations_api import OrganizationsApi
from swagger_client.api.quotas_api import QuotasApi
from swagger_client.api.users_api import UsersApi
# import ApiClient
from swagger_client.api_client import ApiClient
from swagger_client.configuration import Configuration
# import models into sdk package
from swagger_client.models.api import API
from swagger_client.models.api_access import APIAccess
from swagger_client.models.api_definition import APIDefinition
from swagger_client.models.api_key import APIKey
from swagger_client.models.api_promotion import APIPromotion
from swagger_client.models.alert_config import AlertConfig
from swagger_client.models.application import Application
from swagger_client.models.application_request import ApplicationRequest
from swagger_client.models.authenticated_user_attributes import AuthenticatedUserAttributes
from swagger_client.models.authentication_profile import AuthenticationProfile
from swagger_client.models.authorization import Authorization
from swagger_client.models.authorization_code import AuthorizationCode
from swagger_client.models.backend_blob import BackendBlob
from swagger_client.models.backend_export import BackendExport
from swagger_client.models.backend_method_export import BackendMethodExport
from swagger_client.models.ca_cert import CACert
from swagger_client.models.cors_profile import CORSProfile
from swagger_client.models.config import Config
from swagger_client.models.custom_properties_config import CustomPropertiesConfig
from swagger_client.models.custom_property import CustomProperty
from swagger_client.models.custom_property_option import CustomPropertyOption
from swagger_client.models.custom_property_permission import CustomPropertyPermission
from swagger_client.models.discovery_api import DiscoveryAPI
from swagger_client.models.error_response import ErrorResponse
from swagger_client.models.export_options import ExportOptions
from swagger_client.models.external_client import ExternalClient
from swagger_client.models.frontend_export import FrontendExport
from swagger_client.models.grant_types import GrantTypes
from swagger_client.models.group import Group
from swagger_client.models.host import Host
from swagger_client.models.implicit import Implicit
from swagger_client.models.inbound_profiles import InboundProfiles
from swagger_client.models.lock import Lock
from swagger_client.models.login_endpoint import LoginEndpoint
from swagger_client.models.method import Method
from swagger_client.models.metric_field import MetricField
from swagger_client.models.metric_timeline import MetricTimeline
from swagger_client.models.number import Number
from swagger_client.models.o_auth_app_scope import OAuthAppScope
from swagger_client.models.o_auth_client import OAuthClient
from swagger_client.models.o_auth_protected_resource import OAuthProtectedResource
from swagger_client.models.o_auth_resource import OAuthResource
from swagger_client.models.operation import Operation
from swagger_client.models.organization import Organization
from swagger_client.models.outbound_profiles import OutboundProfiles
from swagger_client.models.param_value import ParamValue
from swagger_client.models.parameter import Parameter
from swagger_client.models.permission_dto import PermissionDTO
from swagger_client.models.portal_traffic_listener import PortalTrafficListener
from swagger_client.models.quota_api_constraint_dto import QuotaApiConstraintDTO
from swagger_client.models.quota_dto import QuotaDTO
from swagger_client.models.referenced_entity import ReferencedEntity
from swagger_client.models.registration_token import RegistrationToken
from swagger_client.models.remote_host import RemoteHost
from swagger_client.models.response_code import ResponseCode
from swagger_client.models.schema_object import SchemaObject
from swagger_client.models.scope import Scope
from swagger_client.models.security_device import SecurityDevice
from swagger_client.models.security_profile import SecurityProfile
from swagger_client.models.series import Series
from swagger_client.models.service import Service
from swagger_client.models.service_profiles import ServiceProfiles
from swagger_client.models.swagger import Swagger
from swagger_client.models.swagger_security_device import SwaggerSecurityDevice
from swagger_client.models.swagger_security_profile import SwaggerSecurityProfile
from swagger_client.models.system_config import SystemConfig
from swagger_client.models.token_endpoint import TokenEndpoint
from swagger_client.models.token_request_endpoint import TokenRequestEndpoint
from swagger_client.models.topology import Topology
from swagger_client.models.user import User
from swagger_client.models.virtualized_api import VirtualizedAPI
from swagger_client.models.virtualized_api_method import VirtualizedAPIMethod
from swagger_client.models.virtualized_method_export import VirtualizedMethodExport
| [
"cbro@semperpax.com"
] | cbro@semperpax.com |
2c7e1af7be7fc0e028d39a61eaecff78f3e51fbf | 76e62ddbfdfba19c80b37e855a4df67672ef0808 | /PINp/2015/GOLOVIN_A_I/task_6_7.py | f44cf765622b99a9db68592a554beeabdd89cf01 | [
"Apache-2.0"
] | permissive | stasvorosh/pythonintask | 9d30f3cd492e89783b7221402375c1ebe4690baa | 8169ed26510022fe0d589f4013f11749131957df | refs/heads/master | 2021-01-17T16:49:32.778063 | 2016-10-10T14:08:04 | 2016-10-10T14:08:04 | 52,255,539 | 6 | 0 | null | 2016-02-22T07:33:16 | 2016-02-22T07:33:15 | null | UTF-8 | Python | false | false | 812 | py | # Задача 6. Вариант 7.
# Создайте игру, в которой компьютер загадывает имя одного из двух сооснователей компании #Google, а игрок должен его угадать.
# Golovin A.I.
# 02.06.2016
import random
# The two Google co-founders, written in Russian ("Larry Page", "Sergey Brin").
avtori = ("Ларри Пейдж", "Сергей Михайлович Брин")
# The program's secret pick.
zagadka = random.choice(avtori)
# Prompt (Russian): "The program picked one of the Google founders / Your guess: "
predpologenie = input("Программа загадала одного из основателей гугл\nВаше предположение: ")
# Case-insensitive comparison of the guess against the secret answer.
if predpologenie.lower() == zagadka.lower():
    # "Well done!"
    print("ХААААААААРООООООООШ")
else:
    # "Wrong. The right answer is <name>"
    print ("Неправильно\nПравильный ответ - " + zagadka)
# "Press ENTER to exit" -- keeps the console window open.
input("\n\nВведите ENTER для выхода")
| [
"stasyan.v@gmail.com"
] | stasyan.v@gmail.com |
293c0cc89b6d785d75a702398378df58540c5f14 | 51b6d2fc53d5c632fcf01319842baebf13901e84 | /atcoder.jp/abc198/abc198_d/Main.py | 84b6d70e1c6e06e31d599ed119a69b9ab833910f | [] | no_license | mono-0812/procon | 35db3b2c21eff74fbd7b52db07f249380f6834ef | 68a4b53880a228a0164052b23d1326363efcbc20 | refs/heads/master | 2023-05-30T17:02:58.935074 | 2021-06-27T12:15:10 | 2021-06-27T12:15:10 | 345,896,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | import bisect,collections,copy,heapq,itertools,math,string,sys
def I(): return input()  # read one line as a string
def IS(): return input().split()  # read one line, split on whitespace
def II(): return int(input())  # read one integer
def IIS(): return map(int,input().split())  # read integers from one line (lazy map)
def LIIS(): return list(map(int,input().split()))  # read integers from one line as a list
INF=float("inf")  # sentinel for "unreachable"/"no answer yet"
MOD=10**9+7  # common contest modulus
##############################################################################
# Verbal-arithmetic solver: assign distinct decimal digits to the letters of
# s1, s2, s3 so that the addition s1 + s2 == s3 holds (AtCoder ABC198 D).
s1=I();s2=I();s3=I()
li=list(set(s1+s2+s3))  # distinct letters across all three strings
# More than 10 distinct letters can never map to distinct digits.
if len(li)>=11:
    print("UNSOLVABLE")
    exit()
t=0  # NOTE(review): unused -- looks like leftover from an earlier draft
# Try every injective assignment of digits to the distinct letters.
for l in itertools.permutations(range(10),len(li)):
    dic={}
    S1=0
    S2=0
    S3=0
    i=0
    # Bind each letter to its digit for this permutation.
    for key in li:
        dic[key]=l[i]
        i+=1
    # Leading zeros are not allowed in any of the three numbers.
    if dic[s1[0]]==0 or dic[s2[0]]==0 or dic[s3[0]]==0:continue
    # Evaluate each string as a decimal number under this assignment.
    for i in range(len(s1)):
        S1=S1*10+dic[s1[i]]
    for i in range(len(s2)):
        S2=S2*10+dic[s2[i]]
    for i in range(len(s3)):
        S3=S3*10+dic[s3[i]]
    # First satisfying assignment wins; print the three numbers and stop.
    if S1+S2==S3:
        print(S1)
        print(S2)
        print(S3)
        exit()
print("UNSOLVABLE")
| [
"frisk02.jar@gmail.com"
] | frisk02.jar@gmail.com |
6b8ed594d7010e2c8bdc88e05eaafdead4a82e25 | e97e727972149063b3a1e56b38961d0f2f30ed95 | /test/test_dispositions_api.py | 72054a85865322636021709bc99a39b437ac9845 | [] | no_license | knetikmedia/knetikcloud-python-client | f3a485f21c6f3e733a864194c9acf048943dece7 | 834a24415385c906732437970db105e1bc71bde4 | refs/heads/master | 2021-01-12T10:23:35.307479 | 2018-03-14T16:04:24 | 2018-03-14T16:04:24 | 76,418,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,570 | py | # coding: utf-8
"""
Knetik Platform API Documentation latest
This is the spec for the Knetik API. Use this in conjunction with the documentation found at https://knetikcloud.com.
OpenAPI spec version: latest
Contact: support@knetik.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import knetik_cloud
from knetik_cloud.rest import ApiException
from knetik_cloud.apis.dispositions_api import DispositionsApi
class TestDispositionsApi(unittest.TestCase):
""" DispositionsApi unit test stubs """
def setUp(self):
self.api = knetik_cloud.apis.dispositions_api.DispositionsApi()
def tearDown(self):
pass
def test_add_disposition(self):
"""
Test case for add_disposition
Add a new disposition
"""
pass
def test_delete_disposition(self):
"""
Test case for delete_disposition
Delete a disposition
"""
pass
def test_get_disposition(self):
"""
Test case for get_disposition
Returns a disposition
"""
pass
def test_get_disposition_counts(self):
"""
Test case for get_disposition_counts
Returns a list of disposition counts
"""
pass
def test_get_dispositions(self):
"""
Test case for get_dispositions
Returns a page of dispositions
"""
pass
# Allow running this stub module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| [
"shawn.stout@knetik.com"
] | shawn.stout@knetik.com |
de65e17d5e6e9ac507ce87a8fcceec8ca660929e | 2e5314c4a1816301508e1d9d8094a5f99c808ff0 | /phase_model_svm.py | b916556b431694c261a483e85a9ff3e1405b3f64 | [
"MIT"
] | permissive | cahya-wirawan/phase-classification | 0ad387547c2bbdce3a1fe4cea785c8f95b04619d | ca65442c4f2a30004a17cf79cbe54cf9c2f6925d | refs/heads/master | 2022-12-14T04:58:45.215718 | 2019-01-11T12:07:29 | 2019-01-11T12:07:29 | 119,407,522 | 2 | 2 | MIT | 2022-12-08T00:54:07 | 2018-01-29T16:18:14 | Jupyter Notebook | UTF-8 | Python | false | false | 478 | py | from sklearn import svm
from sklearn.model_selection import GridSearchCV
# define baseline model
def model_svm(layers, dropout=0.1, layer_number=None):
    """Build a 5-fold grid-searched SVM classifier.

    layers/dropout/layer_number are unused here -- presumably kept so this
    factory shares a signature with sibling model builders (confirm).
    The grid explores a linear kernel and an RBF kernel over C (and gamma
    for RBF); 10 parallel jobs, accuracy scoring.
    """
    linear_space = {'C': [1, 10, 100, 1000], 'kernel': ['linear']}
    rbf_space = {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']}
    return GridSearchCV(svm.SVC(), [linear_space, rbf_space], cv=5,
                        scoring='accuracy', n_jobs=10)
| [
"cahya.wirawan@gmail.com"
] | cahya.wirawan@gmail.com |
cf533b5c6e6480bfc4190c6806832be62525289a | b8fd7e01a7069a0666eb2fe21991753fd5ff7860 | /Dynamic Programming/746. Min Cost Climbing Stairs rec.py | 82f373d18007ee68c7f89698e1626d4bd217d94d | [] | no_license | Jafoor/Leet-Code-Solved-Problems | 0b6be0f3c82b1bc13c0c484782db65601cefa7b8 | 935e5679e04bf6f9c9d8a0bdf8b204923a2bc7a5 | refs/heads/master | 2023-07-02T13:38:59.690783 | 2021-07-19T16:20:48 | 2021-07-19T16:20:48 | 256,105,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | class Solution(object):
def minCostClimbingStairs(self, cost):
memo = [0]*(len(cost)+10)
m1 = solve(0,cost,memo)
m2 = solve(1,cost,memo)
return min(m1,m2)
def solve(i,cost,memo):
    """Min cost to finish the climb starting by paying for step i.

    memo is caller-supplied and uses 0 to mean 'not computed yet'
    (kept as-is for compatibility with existing callers).
    """
    if i >= len(cost):
        return 0
    if not memo[i]:
        via_one_step = cost[i] + solve(i + 1, cost, memo)
        via_two_steps = cost[i] + solve(i + 2, cost, memo)
        memo[i] = via_one_step if via_one_step < via_two_steps else via_two_steps
    return memo[i]
| [
"abujafor.cse11@gmai.com"
] | abujafor.cse11@gmai.com |
18e9295e97ab81fcc39d33ebd4605505a63da9db | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/7/usersdata/74/4449/submittedfiles/esferas.py | 19dc11065165f216271b0a42907b5e5cdbc2e5b2 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | # -*- coding: utf-8 -*-
from __future__ import division
e1 = input('Volume da primeira esfera')
e2 = input('Volume da segunda esfera')
e3 = input('Volume da terceira esfera')
e4 = input('Volume da quarta esfera')
a = e2+e3+e4
d = e2+e3
if a = e1 and d = e4 and e2 = e3:
print('S')
else:
print('N') | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
99760e10fa9e33679326968433083b8d2d910f35 | 894ed667dae7e299f472a0b531ea1783ed58fd27 | /src/Basic.py | 162deaa3803d9e523e6464c785115c853ffb4632 | [] | no_license | satpreetsingh/OpenAgent | dd2a8ade47159ee6b3345b9328e068e1dc419052 | 09985fc45c0efa7fffa8a15127a0e7f48d5de30d | refs/heads/master | 2021-05-03T07:45:26.230037 | 2017-04-10T12:42:46 | 2017-04-10T12:42:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | class Sub:
def f (self, inputs):
return inputs[0] - inputs[1]
class Add:
    """Binary addition node."""

    def f(self, inputs):
        """Sum of the first two inputs."""
        augend, addend = inputs[0], inputs[1]
        return augend + addend
class Mult:
    """Binary multiplication node."""

    def f(self, inputs):
        """Product of the first two inputs."""
        factor_a, factor_b = inputs[0], inputs[1]
        return factor_a * factor_b
class Less:
    """Strict less-than comparison node."""

    def f(self, inputs):
        """True when the first input is strictly below the second."""
        left, right = inputs[0], inputs[1]
        return left < right
class Equal:
    """Equality comparison node."""

    def f(self, inputs):
        """True when the first two inputs compare equal."""
        left, right = inputs[0], inputs[1]
        return left == right
class More:
    """Strict greater-than comparison node."""

    def f(self, inputs):
        """True when the first input is strictly above the second."""
        left, right = inputs[0], inputs[1]
        return left > right
class Not:
    """Logical NOT node over a 0/1-encoded input."""

    def f(self, inputs):
        """True exactly when the first input equals 0."""
        value = inputs[0]
        return value == 0
class Or:
    """Logical OR node over 0/1-encoded inputs."""

    def f(self, inputs):
        """True when either of the first two inputs equals 1."""
        first, second = inputs[0], inputs[1]
        return first == 1 or second == 1
class And:
    """Logical AND node over 0/1-encoded inputs."""

    def f(self, inputs):
        """True only when both of the first two inputs equal 1."""
        first, second = inputs[0], inputs[1]
        return first == 1 and second == 1
class Abs:
def f (self, inputs):
return abs(inputs[0]) | [
"carsonjscott14@gmail.com"
] | carsonjscott14@gmail.com |
63ddf4b52d4ca7de34fe3edee0bee60935ab4325 | a73b1f7876cadf0d9bc0c2c3c68400b2007bff4d | /bookmarks/settings.py | ab07f451146cb84b1a36762b0453006937f90105 | [] | no_license | mrk24251/social-networking | 31c717ace60413086056f396cc786bcb5cef8747 | 0f8e0c9ea390dbd84df2b1daa1b95f05e58deb1b | refs/heads/master | 2022-12-11T05:56:00.652044 | 2021-06-18T08:36:12 | 2021-06-18T08:36:12 | 249,624,656 | 0 | 0 | null | 2022-12-08T04:26:35 | 2020-03-24T05:53:17 | CSS | UTF-8 | Python | false | false | 5,105 | py | """
Django settings for bookmarks project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from django.urls import reverse_lazy
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = ')@)w-21#*$lr!@pl-2a2*^ha&3rgn7-#-)0msg$_k05t$3@a3l'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# '*' accepts any Host header; tighten this for production.
ALLOWED_HOSTS = ['*']

# Application definition
INSTALLED_APPS = [
    'account.apps.AccountConfig',
    'django.contrib.admin',
    'annoying',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    "sslserver",
    'django.contrib.messages',
    'cloudinary_storage',
    'django.contrib.staticfiles',
    'social_django',
    'images.apps.ImagesConfig',
    'actions.apps.ActionsConfig',
    'sorl.thumbnail',
    'django.contrib.postgres',
    'cloudinary',
]

MIDDLEWARE = [
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'bookmarks.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

SETTINGS_PATH = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_DIRS = (
    os.path.join(SETTINGS_PATH, 'templates'),
)

WSGI_APPLICATION = 'bookmarks.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# NOTE(review): live PostgreSQL credentials are hard-coded here -- move
# them to environment variables and revoke the exposed password.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'd4qe3u5blhrbam',
        'USER': 'exausuvjcqmvse',
        'PASSWORD': '712ff4460c544145b4cabc9b6cc78822eacba4b0670e2b660a173b0be8839e2e',
        'HOST': 'ec2-52-200-82-50.compute-1.amazonaws.com',
        'PORT': '5432'
    }
}

# Recipients of site error notifications.
ADMINS = (
    ('Mohammadreza Karami', 'mohammadreza.karami22@yahoo.com'),
)

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Canonical URL for User objects, built from the username.
ABSOLUTE_URL_OVERRIDES = {
    'auth.user': lambda u: reverse_lazy('user_detail',
                                       args=[u.username])
}

THUMBNAIL_DEBUG = True

AUTHENTICATION_BACKENDS = [
    'django.contrib.auth.backends.ModelBackend',
    'account.authentication.EmailAuthBackend',
    'social_core.backends.google.GoogleOAuth2',
]

# NOTE(review): OAuth and Redis secrets below are committed too -- rotate.
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '915019433080-sn5o3ue35inhvpgfoq572r7ufgaigka0.apps.googleusercontent.com' # Google Consumer Key
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'ySLD3I7esB-SjOJaQzqtat_Q' # Google Consumer Secret

REDIS_HOST = 'ec2-54-197-124-167.compute-1.amazonaws.com'
REDIS_PORT = 25580
REDIS_PASSWORD = 'pa1f0a5e4291cc48d7081c8a5195ab2ece84789299ebc80e35fe49c3df8cb99b2'
REDIS_USER = 'h'

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Tehran'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

LOGIN_REDIRECT_URL = 'dashboard'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'

# Media uploads are stored on Cloudinary rather than local disk.
CLOUDINARY_STORAGE = {
    'CLOUD_NAME': 'dt0x3ff8y',
    'API_KEY': '842463339847471',
    'API_SECRET': 'd4CUuUKhO4JSVfy9DA41a4KhGGw',
}
DEFAULT_FILE_STORAGE = 'cloudinary_storage.storage.MediaCloudinaryStorage'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' | [
"mohammadreza.karami22@yahoo.com"
] | mohammadreza.karami22@yahoo.com |
32409fe7d932b0f82a86871030a3e70d8e6e1acc | 8f6cc0e8bd15067f1d9161a4b178383e62377bc7 | /evolutionary_algorithms/playground/fusion_V1001/procedure.py | fde6017dbb421b32ef689007b42182c302416ab6 | [] | no_license | humorbeing/python_github | 9c4dfc61a3cefbb266fefff335f6b28d05797e5e | e4b4b49bee7e7e3843c6874717779ce8d619bd02 | refs/heads/master | 2023-01-22T21:51:20.193131 | 2020-01-26T21:47:23 | 2020-01-26T21:47:23 | 163,707,778 | 0 | 0 | null | 2022-12-27T15:37:48 | 2019-01-01T01:58:18 | Python | UTF-8 | Python | false | false | 1,332 | py | import numpy as np
from crossover import *
from mutation import *
from tools import *
def objective_function(point_in):
    """Griewank-like 2-D benchmark shifted so the bowl is centred at (-50, -15)."""
    x_in, y_in = point_in[0], point_in[1]
    bowl = ((x_in + 50) ** 2 + (y_in + 15) ** 2) / 4000
    ripple = np.cos(x_in / 4) * np.cos(y_in / 4)
    return bowl - ripple + 1
def initialize(mu_in, boundary_in):
    """Draw mu_in random 2-D points uniformly inside the given box.

    boundary_in is [x_min, x_max, y_min, y_max]; the result has shape
    (mu_in, 2) with one (x, y) point per row.
    """
    x_lo, x_hi = boundary_in[0], boundary_in[1]
    y_lo, y_hi = boundary_in[2], boundary_in[3]
    # Scale unit-uniform draws into each coordinate's range (x first, then
    # y, preserving the original RNG consumption order).
    xs = np.random.uniform(size=mu_in) * (x_hi - x_lo) + x_lo
    ys = np.random.uniform(size=mu_in) * (y_hi - y_lo) + y_lo
    return np.array([xs, ys]).T
def operate(gen_in, mu_in, lamb_da_in, boundary_in):
    """Produce lamb_da_in offspring: UNDX crossover, then normal mutation,
    then reflect any out-of-bounds coordinates back into the box."""
    offspring = mutation_normal(crossover_UNDX(gen_in, mu_in, lamb_da_in))
    return reflect_fix(offspring, boundary_in)
def nominate(gen_in, lambda_gen_in):
    """Pool the parent generation and its offspring into one candidate array."""
    return np.concatenate((gen_in, lambda_gen_in))
def evaluate(cand_in):
    """Score every candidate point with the objective function."""
    return np.array([objective_function(point) for point in cand_in])
def select(cand_in, fit_in, mu_in):
    """Keep the mu_in candidates with the highest fitness values."""
    # argpartition places the indices of the top mu_in fitnesses in the
    # last mu_in slots (in arbitrary order, matching the original).
    top_indices = np.argpartition(fit_in, -1 * mu_in)[-1 * mu_in:]
    return np.array([cand_in[idx] for idx in top_indices])
| [
"geemguang@gmail.com"
] | geemguang@gmail.com |
07f5b6e825f41b3d2981885837b11dd11464e4c4 | 5cf9fb9362559a69a3feb2e572c1089fbfd9dc24 | /setup.py | 11bf68236d6f20392396504839453e5c5e3c99f7 | [
"MIT"
] | permissive | akb89/nonce2vec | 5f42943271a0054caa645d91c75e0f9cf6eacefe | 23d3852904eb337d7ca24ea519463ee9ffa50fa5 | refs/heads/master | 2021-06-21T23:17:42.035144 | 2019-07-29T11:53:25 | 2019-07-29T11:53:25 | 129,858,554 | 4 | 1 | MIT | 2019-07-29T11:53:26 | 2018-04-17T06:42:39 | Python | UTF-8 | Python | false | false | 2,006 | py | #!/usr/bin/env python3
"""nonce2vec setup.py.
This file details modalities for packaging the nonce2vec application.
"""
from setuptools import setup
# Read the long description for PyPI. BUGFIX: decode explicitly as UTF-8 --
# the project metadata contains non-ASCII text (e.g. "Aurélie"), and the
# platform-default encoding (cp1252 on Windows) can raise UnicodeDecodeError.
with open('README.md', 'r', encoding='utf-8') as fh:
    long_description = fh.read()
# Package metadata and build configuration for nonce2vec.
setup(
    name='nonce2vec',
    description='A python module to generate word embeddings from tiny data',
    author=' Alexandre Kabbach and Aurélie Herbelot',  # NOTE(review): leading space -- intentional?
    author_email='akb@3azouz.net',
    long_description=long_description,
    long_description_content_type='text/markdown',
    version='2.0.0',
    url='https://github.com/minimalparts/nonce2vec',
    download_url='https://github.com/minimalparts/nonce2vec/#files',
    license='MIT',
    keywords=['word2vec', 'word-embeddings', 'incremental-learning'],
    platforms=['any'],
    packages=['nonce2vec', 'nonce2vec.utils', 'nonce2vec.models',
              'nonce2vec.exceptions', 'nonce2vec.logging',
              'nonce2vec.resources'],
    # Ship the logging config and bundled resources with the package.
    package_data={'nonce2vec': ['logging/*.yml', 'resources/*']},
    include_package_data=True,
    # Installs the `n2v` console command pointing at nonce2vec.main:main.
    entry_points={
        'console_scripts': [
            'n2v = nonce2vec.main:main'
        ],
    },
    install_requires=['pyyaml>=4.2b1', 'gensim==3.4.0', 'numpy==1.15.4',
                      'scipy==1.2.0'],
    classifiers=['Development Status :: 2 - Pre-Alpha',
                 'Environment :: Web Environment',
                 'Intended Audience :: Developers',
                 'Intended Audience :: Education',
                 'Intended Audience :: Science/Research',
                 'License :: OSI Approved :: MIT License',
                 'Natural Language :: English',
                 'Operating System :: OS Independent',
                 'Programming Language :: Python :: 3.5',
                 'Programming Language :: Python :: 3.6',
                 'Programming Language :: Python :: 3.7',
                 'Topic :: Scientific/Engineering :: Artificial Intelligence',
                 'Topic :: Text Processing :: Linguistic'],
    zip_safe=False,
)
| [
"akb@3azouz.net"
] | akb@3azouz.net |
da7cf50f5c8f9940b3e8f242219bd6e283dc4926 | 9433617418eab26490bb75e73cdd47317cbb935a | /python/script.py | b26f778319248cc21da82ba34ba0c56b0bc366bd | [] | no_license | cdufour/tsrs21-scripting | 3033d8413a8ca313986e612b73ef3e7b931c61b8 | 50b821176d7346001f49854791b6d47c090833c8 | refs/heads/master | 2022-04-18T10:16:47.048124 | 2020-04-20T07:50:53 | 2020-04-20T07:50:53 | 255,838,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | # script python
# firewall is a variable of type str
firewall = "192.168.0.17"
nom = "Chris" # type str
age = 120 # type int
tva = 5.5 # type float
contaminé = False # type bool
# Display
print(nom) # prints the variable's contents
print("Formation Scripting") # prints the string literal
# Reading user input
# saisie = input() # example: blabla
# Careful: input() always returns a str;
# the value must be converted to int if you want to
# do arithmetic with it
saisie = int(input()) # convert the typed string to an int
print("Valeur saisie: ", saisie) # print the value that was entered
print(type(saisie))
| [
"opusidea@gmail.com"
] | opusidea@gmail.com |
c6c6ddfa1a690619e1526367e462d7a01825013f | 165478aa697ba81d7adc1dc3b081fc97ffafb723 | /scripts/bdt_looper/xgboost/python/others/plot_prune.py | b9640c0234a648de143dd8419517f6f39fdcb3e6 | [] | no_license | zhangzc11/WVZLooper | d55c24127a65a36bd4a0ac25a8c53c007b5a71a1 | 4b2eb46392c228672d5a2db30539b49aeb58cd1c | refs/heads/readBDTNtuple | 2020-05-20T16:28:18.838367 | 2019-09-27T19:19:57 | 2019-09-27T19:19:57 | 185,660,975 | 0 | 0 | null | 2019-05-08T18:39:19 | 2019-05-08T18:39:19 | null | UTF-8 | Python | false | false | 681 | py | import numpy as np
import matplotlib.pyplot as plt
import os
# Plot the BDT AUC as a function of cumulatively removed (pruned) input
# features, reading (feature_name, AUC) pairs from a whitespace-separated
# results file, then publish the plot to a world-readable web directory.
test_name = 'xgb_wwz_vs_ttz_nbAll_full'
plotDir = "/home/users/zhicaiz/public_html/WWZ/BDT/"
name = []
AUC = []
# Each line of the results file is "<feature_name> <AUC_after_removal>".
with open("result_prune_ttZ.txt") as f:
    lines = f.readlines()
    for line in lines:
        line_items = line.strip('\n').split()
        name.append(line_items[0])
        AUC.append(float(line_items[1]))
plt.figure()
plt.plot(name, AUC, lw=2)
# Feature names are long; rotate the x labels so they stay readable.
plt.xticks(rotation=90)
plt.xlabel('cumulative removed features (left to right)')
plt.ylabel('AUC after removal')
plt.savefig(plotDir+'training/AUC_vs_removed_features_'+test_name+'.png', bbox_inches='tight')
# Make the published plots world-readable on the public_html web area.
os.system("chmod 755 "+plotDir+"training/*")
"zzhang2@caltech.edu"
] | zzhang2@caltech.edu |
1fb6a3c298864d61c8bcb5c823457898adc4494b | 3e917645a0e1375189c8ee8c1e93ed15348111ef | /projects/synthesis/intensification/archive/intensification_w_napplication/intensification_n_application.py | 7c3748d0774424a4b983cf49852706984812818c | [] | no_license | mbougie/gibbs | d4544e688ce2b63530535e1f5102328aece30e0d | 39d5dc0866fc0dd149d0cf1f22bfd20911a9d29e | refs/heads/master | 2021-01-12T06:59:27.214123 | 2020-01-07T15:48:12 | 2020-01-07T15:48:12 | 83,906,717 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | import arcpy, os
from arcpy import env
from arcpy.sa import *
import glob
# Build per-scenario mean nitrogen-application rasters: for each scenario
# (Scen1..Scen5) sum that year's input rasters cell-by-cell, then average
# the yearly sums over the configured year range and save the result.
arcpy.CheckOutExtension("Spatial")
arcpy.env.overwriteOutput = True
env.workspace = 'D:\\projects\\usxp\\deliverables\\maps\\synthesis\\intensification\\eric\\n_application\\n_application.gdb'
# Scenarios 1..5 and years 2007..2010 (range upper bounds are exclusive).
scene_list = list(range(1, 6))
years_list = list(range(2007, 2011))
# years_list = [2007]
print years_list
for scene in scene_list:
    print 'scene', scene
    processed_list = []
    print 'processed_list', processed_list
    for year in years_list:
        print 'year', year
        # All TIFF rasters for this scenario/year combination.
        raster_list = glob.glob('D:\\projects\\usxp\\deliverables\\maps\\synthesis\\intensification\\eric\\n_application\\Scen{}\\*_{}.tif'.format(str(scene), str(year)))
        print 'raster_list', raster_list
        # Execute CellStatistics: per-cell SUM across this year's rasters.
        processed_list.append(CellStatistics(raster_list, "SUM", "DATA"))
        raster_list = None
    # Per-cell MEAN of the yearly sums for this scenario.
    raster_mean = CellStatistics(processed_list, "MEAN", "DATA")
    del processed_list[:]
    # Save the output
    # NOTE(review): output name says 2007_2016 but years_list only covers
    # 2007-2010 — confirm which range is intended.
    raster_mean.save("Napplication2007_2016mean_Scen{}".format(str(scene)))
    raster_mean = None
"mbougie@wisc.edu"
] | mbougie@wisc.edu |
f240c30cb5172ea541bcbfd8c0f9809f51eb3e65 | e35fd52fe4367320024a26f2ee357755b5d5f4bd | /leetcode/problems/993.cousins-in-binary-tree.py | 02241b00093f60a537071aa39a894753f03a8bcc | [] | no_license | liseyko/CtCI | a451967b0a0ce108c491d30b81e88d20ad84d2cd | c27f19fac14b4acef8c631ad5569e1a5c29e9e1f | refs/heads/master | 2020-03-21T14:28:47.621481 | 2019-11-12T22:59:07 | 2019-11-12T22:59:07 | 138,658,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,397 | py | #
# @lc app=leetcode id=993 lang=python3
#
# [993] Cousins in Binary Tree
#
# https://leetcode.com/problems/cousins-in-binary-tree/description/
#
# algorithms
# Easy (51.80%)
# Total Accepted: 31.2K
# Total Submissions: 60.2K
# Testcase Example: '[1,2,3,4]\n4\n3'
#
# In a binary tree, the root node is at depth 0, and children of each depth k
# node are at depth k+1.
#
# Two nodes of a binary tree are cousins if they have the same depth, but have
# different parents.
#
# We are given the root of a binary tree with unique values, and the values x
# and y of two different nodes in the tree.
#
# Return true if and only if the nodes corresponding to the values x and y are
# cousins.
#
#
#
# Example 1:
#
#
#
# Input: root = [1,2,3,4], x = 4, y = 3
# Output: false
#
#
#
# Example 2:
#
#
#
# Input: root = [1,2,3,null,4,null,5], x = 5, y = 4
# Output: true
#
#
#
# Example 3:
#
#
#
#
# Input: root = [1,2,3,null,4], x = 2, y = 3
# Output: false
#
#
#
#
#
# Note:
#
#
# The number of nodes in the tree will be between 2 and 100.
# Each node has a unique integer value from 1 to 100.
#
#
#
#
#
#
#
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def isCousins(self, root: "TreeNode", x: int, y: int) -> bool:
        """Return True iff the nodes holding values x and y are cousins.

        Cousins have the same depth but different parents. Node values are
        unique (per the problem statement above), so a value -> parent map
        per level is unambiguous.

        Fixes: the original method had no body, and the bare ``TreeNode``
        annotation raised NameError at class-definition time because the
        TreeNode class is only present as a comment; the annotation is now
        a string.
        """
        # Breadth-first search one level at a time, recording the parent of
        # any target value found on the current level.
        level = [(root, None)]
        while level:
            parents = {}  # value -> parent node, for x/y found on this level
            next_level = []
            for node, parent in level:
                if node is None:
                    continue
                if node.val in (x, y):
                    parents[node.val] = parent
                next_level.append((node.left, node))
                next_level.append((node.right, node))
            if x in parents and y in parents:
                # Same depth: cousins iff their parents are distinct nodes.
                return parents[x] is not parents[y]
            if x in parents or y in parents:
                return False  # found at different depths
            level = next_level
        return False
| [
"liseyko@gmail.com"
] | liseyko@gmail.com |
d36c884945479b6457878265527fd5a0c4ee9cf7 | c833c6e9c1e1a4c387763cac2cc75079341e3122 | /ua/univer/lesson04/matrix7.py | 595c9937287e1d3726564633d5bb16e4345a2be3 | [] | no_license | mmveres/python05_12_2020 | 1513aa97d577f4921655ce0e58f28811df2bd14e | d21e6d3ecd90bdc3bd2a9b780bb58b65deb671f1 | refs/heads/master | 2023-03-02T16:00:26.036232 | 2021-02-13T17:58:13 | 2021-02-13T17:58:13 | 318,794,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,104 | py | # 7. Характеристикой строки целочисленной матрицы
# назовем сумму ее положительных элементов.
# Переставляя строки заданной матрицы,
# расположить их в соответствии с ростом характеристик
def sort_matrix(matrix):
    """Reorder the rows of *matrix* in place by ascending characteristic.

    Per the task statement above, the characteristic of a row is the sum
    of its POSITIVE elements only. The original code summed every element,
    which produced the wrong order for rows containing negatives; it is
    also rewritten with the built-in stable sort instead of a hand-rolled
    bubble sort (rows with equal characteristics keep their relative
    order, as before). Debug prints of the intermediate sums are removed.
    """
    def characteristic(row):
        # Sum only the positive entries of the row.
        return sum(cell for cell in row if cell > 0)
    matrix.sort(key=characteristic)
if __name__ == '__main__':
    # Demo: sort the sample matrix by row characteristic and print it.
    # (Removed the stray no-op expression statement `matrix[1]`.)
    matrix = [[1, 2, 3, 4],
              [2, 2, 7, 7],
              [1, 1, 1, 1]]
    sort_matrix(matrix)
    print(matrix)
"mmveres@gmail.com"
] | mmveres@gmail.com |
20f55a20d271b66ded1ca1334804084325a6f804 | 7451cdc3c0a82fbca55268e2456541ca527899ff | /bioutils.py | 693c817d3af32b0376bd038094ad58b83cc8b4aa | [] | no_license | sefakilic/genome_parsing | e59bd5893f9f81f358385e18bd81dcd072d49207 | abc2a55e8f443dc629b2da9316d0bed58911068b | refs/heads/master | 2021-01-01T19:01:16.575138 | 2013-01-08T21:12:55 | 2013-01-08T21:12:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,533 | py | from Bio import Entrez
from Bio import SeqIO
Entrez.email = "sefa1@umbc.edu"
def read_genbank(genome_accession_no, genbank_file=None):
"""Read genbank file. If the file is not given, based on the
genome_accession_no, grab it from NCBI and parse it. Return Sequence Record
object."""
if genbank_file:
print "reading genbank file %s" % genbank_file
seq_record = SeqIO.read(genbank_file, "genbank")
else:
print "downloading and parsing genbank file for %s" % genome_accession_no
handle = Entrez.efetch(db="nucleotide", rettype="gb",
retmode="text", id=genome_accession_no)
seq_record = SeqIO.read(handle, "gb")
handle.close()
return seq_record
def extract_genes(seq_record):
    """Return every feature of *seq_record* whose type is "gene".

    Each element of the returned list is a SeqFeature object.
    """
    genes = []
    for feature in seq_record.features:
        if feature.type == "gene":
            genes.append(feature)
    return genes
def extract_cds(seq_record):
    """Return every coding-sequence ("CDS") feature of *seq_record*.

    Each element of the returned list is a SeqFeature object.
    """
    return list(filter(lambda feature: feature.type == "CDS",
                       seq_record.features))
def reverse_complement(seq):
    """Return the reverse complement of a DNA sequence string.

    Fixes two defects in the original one-liner: ``Seq`` was referenced
    without being imported (NameError at call time), and ``tostring()``
    has long been removed from Biopython's Seq API. This pure-Python
    version complements A/C/G/T, preserving case, and leaves any other
    character (e.g. N or IUPAC ambiguity codes) unchanged — note Biopython
    itself would complement ambiguity codes; confirm if those occur here.
    """
    complement = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G',
                  'a': 't', 't': 'a', 'g': 'c', 'c': 'g'}
    return ''.join(complement.get(base, base) for base in reversed(seq))
def split_len(seq, length):
    """Split *seq* into consecutive pieces of size *length*.

    The final piece may be shorter. For example,
    split_len('abcdefgh', 3) == ['abc', 'def', 'gh'].
    """
    pieces = []
    for start in range(0, len(seq), length):
        pieces.append(seq[start:start + length])
    return pieces
| [
"sefakilic@gmail.com"
] | sefakilic@gmail.com |
a4b9d47b216c8c2e086a0b545b18c0341dd97400 | 7970601ede43b4a35eb38aa4f04f55b20148af63 | /migrations/schemas/app_schema.py | 9026f0d5a5bec7659b38f032fae9a7946dc73cd8 | [
"Apache-2.0"
] | permissive | pythononwheels/copow | 2d4423b12a8be4a13a06c4a7c55b9f2bb6bda542 | 49260b064223838962eb8cff4364580b3beb0067 | refs/heads/master | 2020-04-01T22:49:05.366052 | 2014-08-12T16:55:58 | 2014-08-12T16:55:58 | 20,783,739 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | #
#
# schema for the app model
# as an example there are already some attributes filled in.
# Generated: 2013/07/06 22:29:03
#
# Attribute schema for the "app" model: maps each attribute name to its
# storage type and optional default. "#APPNAME" is a placeholder that is
# presumably substituted by the code generator — confirm against the
# generator templates.
app = {
    "name" : { "type" : "string", "default" : "#APPNAME" },
    "path" : { "type" : "string" },
    "lastversion" : { "type" : "integer" },
    "currentversion" : { "type" : "integer" },
    "maxversion" : { "type" : "integer" },
    "last_updated" : { "type" : "date" },
    "_id" : { "type" : "objectid" }
}
# Relations of the app model; none are active (example kept as a template).
app_relations = {
    #"comments" : "has_many"
}
"khz@tzi.org"
] | khz@tzi.org |
7bd99954c1fecb313a53baa279142700dd5f728f | eba3e4a3935d6422d1ed85aaf69337f5ba15fc74 | /pylons/tests/test_units/test_decorator_https.py | 2275f5de94535a1ec3ecf161f07288940831d9e5 | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause"
] | permissive | arianepaola/tg2jython | 2ae74250ca43b021323ef0951a9763712c2eb3d6 | 971b9c3eb8ca941d1797bb4b458f275bdca5a2cb | refs/heads/master | 2021-01-21T12:07:48.815690 | 2009-03-27T02:38:11 | 2009-03-27T02:38:11 | 160,242 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,689 | py | from paste.fixture import TestApp
from paste.registry import RegistryManager
from routes.middleware import RoutesMiddleware
from pylons.decorators.secure import https
from pylons.controllers import WSGIController
from pylons.testutil import ControllerWrap, SetupCacheGlobal
from __init__ import TestWSGIController
class HttpsController(WSGIController):
    """Fixture controller exercising the ``https`` decorator variants.

    Each action is wrapped manually (``name = https(...)(name)``) so the
    decorator can be applied with its different argument forms: an explicit
    path, url_for-style keyword arguments, and no arguments at all.
    """
    def index(self):
        return 'index page'
    # Insecure requests are redirected to the explicit path '/pylons'
    # (see test_https_explicit_path below).
    index = https('/pylons')(index)
    def login(self):
        return 'login page'
    # Redirect target built from route keyword arguments (controller/action).
    login = https(controller='auth', action='login')(login)
    def get(self):
        return 'get page'
    # No arguments: redirect to the https version of the requested URL itself.
    get = https()(get)
class TestHttpsDecorator(TestWSGIController):
    """Functional tests for ``pylons.decorators.secure.https``."""
    def setUp(self):
        TestWSGIController.setUp(self)
        from routes import Mapper
        # Build a minimal WSGI stack around the fixture controller:
        # Routes matching -> cache/global setup -> controller wrapper,
        # all inside a registry manager, driven by paste's TestApp.
        map = Mapper()
        map.connect('/:action')
        map.connect('/:action/:id')
        map.connect('/:controller/:action/:id')
        map.connect('/:controller/:action')
        app = ControllerWrap(HttpsController)
        app = SetupCacheGlobal(app, self.environ, setup_cache=False)
        app = RoutesMiddleware(app, map)
        app = RegistryManager(app)
        self.app = TestApp(app)
    def test_https_explicit_path(self):
        # Over plain HTTP the action redirects (302) to the explicit https
        # path given to the decorator; over HTTPS it is served directly.
        self.environ['pylons.routes_dict']['action'] = 'index'
        response = self.app.get('/index', status=302)
        assert response.header_dict.get('location') == \
            'https://localhost/pylons'
        self.environ['wsgi.url_scheme'] = 'https'
        response = self.app.get('/index', status=200)
        assert 'location' not in response.header_dict
        assert 'index page' in response
    def test_https_disallows_post(self):
        # POST over plain HTTP is rejected with 405, not redirected.
        self.environ['pylons.routes_dict']['action'] = 'index'
        response = self.app.post('/index', status=405)
    def test_https_url_for_kwargs(self):
        # Decorator called with url_for-style kwargs redirects to the URL
        # generated from those route arguments.
        self.environ['pylons.routes_dict']['action'] = 'login'
        response = self.app.get('/login', status=302)
        assert response.header_dict.get('location') == \
            'https://localhost/auth/login'
        self.environ['wsgi.url_scheme'] = 'https'
        response = self.app.get('/login', status=200)
        assert 'location' not in response.header_dict
        assert 'login page' in response
    def test_https_redirect_to_self(self):
        # Decorator with no arguments redirects to the https version of the
        # requested URL itself.
        self.environ['pylons.routes_dict']['action'] = 'get'
        response = self.app.get('/get', status=302)
        assert response.header_dict.get('location') == \
            'https://localhost/get'
        self.environ['wsgi.url_scheme'] = 'https'
        response = self.app.get('/get', status=200)
        assert 'location' not in response.header_dict
        assert 'get page' in response
assert 'get page' in response
| [
"ariane@venus.(none)"
] | ariane@venus.(none) |
b52ec39cb80d05dec46a86f5a6d0b39823d6ce17 | 5eb9c3473902c20eac6401ec234d0ec496b19b90 | /tests/test_utils.py | c04b49a12b01d7c30beaf52edfbc41b51a4d380f | [
"ISC"
] | permissive | associatedpress/datakit-project | 236650f61621a6403581d86664d403a096acd0d6 | 6906a3da3a957cfdc3c798a47dcd9e9cf5166f8f | refs/heads/main | 2023-01-15T13:03:42.766693 | 2022-12-06T14:57:02 | 2022-12-06T14:57:02 | 80,869,394 | 25 | 4 | ISC | 2022-12-26T19:45:41 | 2017-02-03T21:08:31 | Python | UTF-8 | Python | false | false | 1,103 | py | import os
import cookiecutter.config as cc_config
import pytest
from datakit_project.utils import resolve_repo_dir
# TODO: Update resolve_repo_dir to use cookiecutter DEFAULT_CONFIG
# then monkeypatch the variable here
def test_repo_dir_for_local_repo():
    """A local filesystem path should be returned unchanged."""
    path = '/Local/path/to/fake-repo'
    assert resolve_repo_dir(path) == path
def test_repo_dir_for_alias():
    """A gh: alias should resolve to a path inside cookiecutter's dir."""
    expected = os.path.join(
        cc_config.DEFAULT_CONFIG['cookiecutters_dir'], 'fake-repo'
    )
    assert resolve_repo_dir('gh:associatedpress/fake-repo') == expected
def test_repo_dir_for_url():
    """A git URL should resolve to a path inside cookiecutter's dir."""
    expected = os.path.join(
        cc_config.DEFAULT_CONFIG['cookiecutters_dir'], 'fake-repo'
    )
    url = 'https://github.com/associatedpress/fake-repo.git'
    assert resolve_repo_dir(url) == expected
| [
"zstumgoren@gmail.com"
] | zstumgoren@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.