blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
898d3d53a9099c0e07097c9a03539f243ab5c965 | d1377b1246fe0b8bab1f71a4f6ff247299c72b91 | /ginortS.py | 8448b515518224f0b1c1d55cc704d0642f3ee1fe | [] | no_license | m-bansal/build_up | 7a7c16e4fe695280b1d8d0271ef9f5cbe87242f6 | 5dc1bbc48fd3adbbeb1ae51429a9e725a6a08ecf | refs/heads/master | 2022-12-24T19:58:21.792903 | 2020-10-08T09:43:48 | 2020-10-08T09:43:48 | 276,450,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | l,u,o,e=[],[],[],[]
for i in sorted(input()):
if i.isalpha():
x = u if i.isupper() else l
else:
x = o if int(i)%2 else e
x.append(i)
print("".join(l+u+o+e))
| [
"noreply@github.com"
] | m-bansal.noreply@github.com |
4f72ee67e49904ac8a1776ec39f2e3ca2e040f48 | f9b2b55e919a2ddf2a3d2c3bc72619a36f60f645 | /leetcode/insertion_sort.py | 01c5c1a594d02e1c00410be184f8a04911c0e495 | [] | no_license | gwuah/ds-algorithms | ff47adf377096316bfd50bd8e0625d7e88b29e27 | 657b20a648525a7328297faea6774110f2eb6ad5 | refs/heads/master | 2022-04-25T05:13:55.511328 | 2020-04-25T14:54:20 | 2020-04-25T14:54:20 | 228,055,544 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py | # def insertion_sort(nums):
# length = len(nums)
# for i in range(length):
# for j in range(i):
# element_at_i = nums[i]
# element_at_j = nums[j]
# if element_at_i < element_at_j:
# nums[i] = element_at_j
# nums[j] = element_at_i
# return nums
def insertion_sort(array):
    """Sort *array* in place in ascending order and return it.

    Classic insertion sort: grow a sorted prefix one element at a time by
    shifting larger elements one slot right and dropping the current
    element into the gap.  O(n^2) comparisons, stable, sorts in place.

    The previous implementation swapped elements against a stale copy of
    the current element and produced wrong results (e.g. [10, 9, 8]
    came back as [8, 8, 9], duplicating and losing values); the debug
    prints are removed along with the broken swap logic.
    """
    for i in range(1, len(array)):
        current_element = array[i]
        j = i - 1
        # Shift every sorted-prefix element larger than current_element
        # one position to the right, then insert.
        while j >= 0 and array[j] > current_element:
            array[j + 1] = array[j]
            j -= 1
        array[j + 1] = current_element
    return array
print(insertion_sort([10, 9, 8]))
| [
"griffithawuah15@gmail.com"
] | griffithawuah15@gmail.com |
7ef4a109667ebf6592fdaee6d46b9b761ad1caf6 | 166b7f1047838a0fc50667adafc4cce239d2bac7 | /4.py | 2a2a005458bb7627a84b7152d914f35b2359b678 | [] | no_license | RuwanRanaweera/python_2 | c0482744b0ddcc9eeae9fe1974fbbef019009fbd | c452b25347f776cfb166747fdcb995e233f5f169 | refs/heads/master | 2023-02-28T11:38:15.023565 | 2021-02-08T10:01:00 | 2021-02-08T10:01:00 | 337,030,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | var = 10
while var> 0:
print('Current Number : ',var)
var = var-1
if var==5:
break
| [
"rwnranaweera@gmail.com"
] | rwnranaweera@gmail.com |
adecfa8e509cf00629988a9d6177bebdf22800ec | e46a62faa757340a05feb1eaa1dd31b2c010d0d5 | /Scripts/scram/plotObsStatus.py | 46dccf2409a001836293287022baab4b07e64b30 | [] | no_license | miao0305/alfaburst-survey | 59ca42ade467c9fc153c99faca551fbe237a121c | e1c571197f2bc13baaf419f6afa463ce67de90e0 | refs/heads/master | 2022-01-09T23:25:52.011105 | 2018-05-17T07:50:26 | 2018-05-17T07:50:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,695 | py | #!/usr/bin/env python
"""
Return Arecibo status based on a MJD or Unix timestamp, and HDF5 directory output of buildHDF5.py
"""
import sys,os
import numpy as np
import pandas as pd
import glob
#from astropy.time import Time
from astropy import units
from astropy.coordinates import Angle
from astropy.coordinates import SkyCoord
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import pytz
LOCALTZ = 'America/Puerto_Rico' # Arecibo time zone
H5_DIR = '/home/griffin/data/serendip6/h5/'
def printPointing(pointingSeries, raType='str', decType='str'):
    """Print the sky pointing of all 7 ALFA beams from a SCRAM series.

    pointingSeries: mapping/Series with 'RA<i>' (decimal hours) and
        'DEC<i>' (decimal degrees) entries for beam ids 0..6.
    raType: one of 'str' (HH:MM:SS), 'hours', 'deg', 'rad'.
    decType: one of 'str' (DD:MM:SS), 'deg', 'rad'.

    Each line also appends the Galactic (l, b) coordinates of the beam.
    """
    print '(RA: %s, DEC: %s)'%(raType, decType)
    #for key, val in pointingSeries.iteritems():
    # ALFA is a 7-beam receiver, hence the fixed beam ids 0..6.
    for beamId in range(7):
        ra = Angle(pointingSeries['RA%i'%beamId], unit=units.hour)
        dec = Angle(pointingSeries['DEC%i'%beamId], unit=units.deg)
        pnt = SkyCoord(ra=ra, dec=dec)
        pntStr = 'Beam %i: '%beamId
        # Format RA according to the requested representation.
        if raType=='str':
            pntStr += 'RA: %s '%ra.to_string(sep=':')
        elif raType=='hours':
            pntStr += 'RA: %f '%ra.hour
        elif raType=='deg':
            pntStr += 'RA: %f '%ra.deg
        elif raType=='rad':
            pntStr += 'RA: %f '%ra.radian
        # Format DEC according to the requested representation.
        if decType=='str':
            pntStr += 'DEC: %s '%dec.to_string(sep=':')
        elif decType=='deg':
            pntStr += 'DEC: %f '%dec.deg
        elif decType=='rad':
            pntStr += 'DEC: %f '%dec.radian
        pntStr += '(l=%f, b=%f)'%(pnt.galactic.l.deg, pnt.galactic.b.deg)
        print pntStr
if __name__ == '__main__':
from optparse import OptionParser
o = OptionParser()
o.set_usage('%prog [options] MJD/Unix time')
o.set_description(__doc__)
o.add_option('-d', '--h5dir', dest='h5dir', default=H5_DIR,
help='Directory which contains HDF5-based pandas dataframes, default: %s'%H5_DIR)
o.add_option('-u', '--unix', dest='unix', action='store_true',
help='Interpret time as a Unix timestamp')
o.add_option('--ra', dest='ra', default='str',
help='RA print mode: \'str\': HH:MM:SS.SSSS, \'hours\': decimal hours, \'deg\': decimal degrees, \'rad\': decimal radians, default: str')
o.add_option('--dec', dest='dec', default='str',
help='RA print mode: \'str\': DD:MM:SS.SSSS, \'deg\': decimal degrees, \'rad\': decimal radians, default: str')
o.add_option('-w', '--window', dest='window', default=60, type='int',
help='Window in seconds to plot around event')
o.add_option('-S', '--savefig', dest='savefig', default=None,
help='Save figure to filename')
opts, args = o.parse_args(sys.argv[1:])
if opts.unix:
unixTime = int(args[0])
jd = (float(unixTime) / 86400.) + 2440587.5
mjd = jd - 2400000.5
else:
# convert to unix time
#mjd = Time(float(args[0]), format='mjd')
#print 'UNIX TIME:', mjd.unix
#unixTime = int(mjd.unix)
# simple conversion, should be equivalent to astropy.time conversion as leap seconds are not important when converting between MJD and unix time
mjd = float(args[0])
jd = mjd + 2400000.5
unixTime = int((jd - 2440587.5) * 86400)
print 'UNIX TIME:', unixTime
print 'MJD:', mjd
utc = pytz.utc.localize(datetime.datetime.fromtimestamp(int(unixTime)))
print 'UTC:', utc.strftime('%Y-%m-%d %H:%M:%S')
local = utc.astimezone(pytz.timezone(LOCALTZ))
print 'ARECIBO:', local.strftime('%Y-%m-%d %H:%M:%S')
# find the correct file ID based on unixtime
derH5files = glob.glob(opts.h5dir + '*derived.h5')
fnPrefix = None
for h5fn in derH5files:
baseH5fn = os.path.basename(h5fn)
fnUnixTime = int(baseH5fn.split('.')[1])
timeDiff = fnUnixTime - unixTime
if timeDiff > 0 and timeDiff < 86400: # each file contains at most 24*60*60 seconds
print 'Timestamp in', h5fn
fnPrefix = baseH5fn.split('derived')[0]
break
if fnPrefix is None:
print 'No file found which contains this Unix timestamp'
exit(1)
print '\nUnix Time:', unixTime
sns.set(style="ticks", context="talk")
plt.style.use("dark_background")
fig = plt.figure(figsize=(16,12)) # (width, height)
validH5fn = opts.h5dir + fnPrefix + 'derived.h5'
if os.path.exists(validH5fn):
df = pd.read_hdf(validH5fn)
derivedDf = df.loc[unixTime-opts.window:unixTime+opts.window]
# RAx stored as decimal Hours
# Decx stored as decimal degrees
# Plot: RA vs Dec
plt.subplot(3,3,1)
for bid in range(7): plt.plot(derivedDf['RA%i'%bid], derivedDf['DEC%i'%bid], label='Beam%i'%bid)
plt.axvline(df.loc[unixTime]['RA0'], color='w', ls='--')
plt.xlabel('RA')
plt.ylabel('DEC')
plt.legend(fontsize='x-small')
# Plot: RA vs time
plt.subplot(3,3,2)
for bid in range(7): plt.plot(derivedDf.index, derivedDf['RA%i'%bid], label='Beam%i'%bid)
plt.axvline(unixTime, color='w', ls='--')
plt.ylabel('RA')
plt.xlabel('Unix Time')
plt.legend(fontsize='x-small')
plt.xlim(derivedDf.index[0], derivedDf.index[-1])
# Plot: DEC vs time
plt.subplot(3,3,3)
for bid in range(7): plt.plot(derivedDf.index, derivedDf['DEC%i'%bid], label='Beam%i'%bid)
plt.axvline(unixTime, color='w', ls='--')
plt.ylabel('DEC')
plt.xlabel('Unix Time')
plt.legend(fontsize='x-small')
plt.xlim(derivedDf.index[0], derivedDf.index[-1])
validH5fn = opts.h5dir + fnPrefix + 'if1.h5'
if os.path.exists(validH5fn):
df = pd.read_hdf(validH5fn)
if1Df = df.loc[unixTime-opts.window:unixTime+opts.window]
#print if1Df
else:
print 'WARN: no IF1 HDF5 file %s found, skipping'%validH5fn
validH5fn = opts.h5dir + fnPrefix + 'if2.h5'
if os.path.exists(validH5fn):
df = pd.read_hdf(validH5fn)
if2Df = df.loc[unixTime-opts.window:unixTime+opts.window]
#print if2Df
# Plot: Synth vs time
plt.subplot(3,3,4)
plt.plot(if1Df.index, if1Df['IF1SYNHZ']/1e6, label='IF1SYNHZ')
plt.plot(if1Df.index, if1Df['IF1RFFRQ']/1e6, label='IF1RFFRQ')
plt.plot(if2Df.index, if2Df['IF2SYNHZ']/1e6, label='IF2SYNHZ')
plt.axvline(unixTime, color='w', ls='--')
plt.ylabel('Freq. (MHz)')
plt.xlabel('Unix Time')
plt.legend(fontsize='x-small')
plt.xlim(if1Df.index[0], if1Df.index[-1])
# Plot: ALFA IF status
plt.subplot(3,3,5)
plt.plot(if1Df.index, if1Df['IF1ALFFB'], label='IF1ALFFB')
plt.plot(if2Df.index, if2Df['IF2ALFON'], label='IF2ALFON')
plt.axvline(unixTime, color='w', ls='--')
plt.ylabel('Status')
plt.xlabel('Unix Time')
plt.legend(fontsize='x-small')
plt.xlim(if1Df.index[0], if1Df.index[-1])
plt.ylim(-0.1, 1.1)
else:
print 'WARN: no IF2 HDF5 file %s found, skipping'%validH5fn
validH5fn = opts.h5dir + fnPrefix + 'pnt.h5'
if os.path.exists(validH5fn):
df = pd.read_hdf(validH5fn)
pntDf = df.loc[unixTime-opts.window:unixTime+opts.window]
#print pntDf
else:
print 'WARN: no PNT HDF5 file %s found, skipping'%validH5fn
validH5fn = opts.h5dir + fnPrefix + 'tt.h5'
if os.path.exists(validH5fn):
df = pd.read_hdf(validH5fn)
ttDf = df.loc[unixTime-opts.window:unixTime+opts.window]
#print ttDf
# Plot: Encoding vs time
plt.subplot(3,3,6)
plt.plot(ttDf.index, ttDf['TTTURENC'], label='TTTURENC')
plt.axvline(unixTime, color='w', ls='--')
plt.xlabel('Unix Time')
plt.ylabel('Encoding Index')
plt.legend(fontsize='x-small')
plt.xlim(ttDf.index[0], ttDf.index[-1])
# Plot: Turret angle vs time
plt.subplot(3,3,7)
plt.plot(ttDf.index, ttDf['TTTURDEG'], label='TTTURDEG')
plt.axvline(unixTime, color='w', ls='--')
plt.xlabel('Unix Time')
plt.ylabel('Turret Angle')
plt.legend(fontsize='x-small')
plt.xlim(ttDf.index[0], ttDf.index[-1])
else:
print 'WARN: no TT HDF5 file %s found, skipping'%validH5fn
validH5fn = opts.h5dir + fnPrefix + 'agc.h5'
if os.path.exists(validH5fn):
df = pd.read_hdf(validH5fn)
agcDf = df.loc[unixTime-opts.window:unixTime+opts.window]
#print agcDf
else:
print 'WARN: no AGC HDF5 file %s found, skipping'%validH5fn
validH5fn = opts.h5dir + fnPrefix + 'alfashm.h5'
if os.path.exists(validH5fn):
df = pd.read_hdf(validH5fn)
alfashmDf = df.loc[unixTime-opts.window:unixTime+opts.window]
#print alfashmDf
# Plot: Encoding vs time
plt.subplot(3,3,8)
plt.plot(alfashmDf.index, alfashmDf['ALFBIAS1'], label='ALFBIAS1')
plt.plot(alfashmDf.index, alfashmDf['ALFBIAS2'], label='ALFBIAS2')
plt.axvline(unixTime, color='w', ls='--')
plt.xlabel('Unix Time')
plt.ylabel('Bias')
plt.xlim(alfashmDf.index[0], alfashmDf.index[-1])
plt.legend(fontsize='x-small')
# Plot: Turret angle vs time
plt.subplot(3,3,9)
plt.plot(alfashmDf.index, alfashmDf['ALFMOPOS'], label='ALFMOPOS')
plt.axvline(unixTime, color='w', ls='--')
plt.xlabel('Unix Time')
plt.ylabel('ALFMOPOS')
plt.xlim(alfashmDf.index[0], alfashmDf.index[-1])
plt.legend(fontsize='x-small')
else:
print 'WARN: no ALFASHM HDF5 file %s found, skipping'%validH5fn
plt.tight_layout()
if opts.savefig: plt.savefig(opts.savefig)
plt.show()
| [
"griffin.foster@gmail.com"
] | griffin.foster@gmail.com |
1477a124a0ad09140985594aa29e424ca3c7a864 | a9195a0272b215f1495f596cd856514a9948d487 | /src/clustering_analysis.py | 09d60eb9801739bfae693acdc0c47f2d522f89ec | [] | no_license | tonca/PWguessing | f8b143e564e521b243bfc757bb79cedbadfdda9e | e605f8436bc38daf84c0a002bbf6abbfaad44fc1 | refs/heads/master | 2021-01-01T19:48:18.914976 | 2017-07-28T21:43:46 | 2017-07-28T21:43:46 | 98,690,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,610 | py | import numpy as np
import matplotlib.pyplot as plt
import csv
import pandas as pd
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.cluster import KMeans
from sklearn.cluster import dbscan
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import AffinityPropagation
import distance_functions as dist
import precompute_distances
print("helo")
def read_list(fname="used"):
    """Read *fname* and return its lines with surrounding whitespace stripped.

    The filename defaults to "used" so existing zero-argument callers keep
    working; passing a path generalizes the helper to any list file.
    """
    with open(fname) as f:
        return [line.strip() for line in f]
def compute_mat(X,measurer):
    # Full pairwise distance matrix via sklearn, scored with the custom
    # "mixed" string metric from distance_functions.distance_measurer.
    # NOTE(review): in __main__ below, X holds row *indices* into the
    # password list, which mixed_metric presumably resolves itself --
    # confirm against distance_functions before reusing elsewhere.
    return pairwise_distances(X, metric=measurer.mixed_metric)
if __name__ == "__main__":
list_used = read_list()
df = pd.read_csv(list_used[0], delimiter=':', names=['user','password'])
print(df.describe())
data = df['password'][:]
set_size = 500
X = np.random.choice(len(data), set_size).reshape(-1, 1)
n_clusters = 10
measurer = dist.distance_measurer(data)
similarity_mat = compute_mat(X,measurer)
model = AgglomerativeClustering(n_clusters=n_clusters, linkage='complete', affinity='precomputed')
model.fit(similarity_mat)
print model.labels_
for cluster_id in range(0, n_clusters) :
print "--------------------------------------"
cluster_elems = np.where(model.labels_ == cluster_id)
cluster = X[cluster_elems].reshape(-1)
for el in data[cluster] :
print "%s : %s : %s" % (el, dist.mask_string(el), dist.flatten_string(el)) | [
"istsamtonca@gmail.com"
] | istsamtonca@gmail.com |
7790803d582e5e4aa27c50140cc07eda2cc21f14 | d49a4e86138f1d4089dc75507efe19d718c60dc7 | /multiple_inheritance.py | b9daf90f290ca237669f097638e27e7a2d00ff11 | [] | no_license | udaykumarbhanu/algo | 5580075bc522df86ab94e93180e7762d596238a7 | c1ff336749ac66234756549ab9b05dfc6e0f9470 | refs/heads/master | 2021-01-10T02:04:06.075645 | 2017-05-25T05:55:37 | 2017-05-25T05:55:37 | 43,909,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | class A(object):
def __init__(self):
pass
def print_hello(self):
print "Hello from class A"
class B(object):
def __init__(self):
pass
def print_hello(self):
print "Hello from class B"
class B2(B):
def __init__(self):
pass
def print_hello(self):
print "Hello from class B2"
class C(B2, A):
def __init__(self):
pass
# def print_hello(self):
# print "Hello from class C"
c = C()
c.print_hello()
print C.__mro__
| [
"udaykumarbhanu@gmail.com"
] | udaykumarbhanu@gmail.com |
08ee64a3abfaa80af0362f984d4b169035ad8946 | ca88fc26aa17d26a405d3daf928809ab2c0d196a | /简单/122.py | 16d5699bc45bca3938e6b43213c0a35eaf8a4942 | [] | no_license | Liujiuzhen/leetcod | d65f745a81bdd501d4424002c2e640a261a3a2f4 | 6e2e610f8f4471ccd48377d99a26c120f9cc4d1f | refs/heads/master | 2023-06-16T18:09:21.983731 | 2021-07-11T12:34:13 | 2021-07-11T12:34:13 | 384,936,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | class Solution:
def maxProfit(self, prices):
res=0
for i in range(len(prices)-1):
if prices[i+1]-prices[i]>0:
res=res+prices[i+1]-prices[i]
return res
if __name__ == '__main__':
so=Solution()
lists=[7,1,5,3,6,4]
print(so.maxProfit(lists)) | [
"1574285142@qq.com"
] | 1574285142@qq.com |
c4397dba28cc82e40e2139258eef8020658eee5f | b462e3d4e9145d29f410b32227663150e1c28bc0 | /python/app/wechat_subscriptions/wechat_msg_util.py | 6e020684feb50c078fb6f677d0eb74c128be7a55 | [
"Apache-2.0"
] | permissive | lsieun/web_scraping | 4d29770f90c39798b5dcdbce56849df519aafae2 | ab69c2add7d8cec92d76529039dca7a067d4af9a | refs/heads/master | 2020-03-22T10:38:33.417932 | 2018-07-24T05:49:10 | 2018-07-24T05:49:10 | 139,916,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,214 | py | import time
from urllib import parse
def current_milliseconds():
    """Return the current Unix time as a whole number of milliseconds."""
    return int(round(time.time() * 1000))
def parse_querystring_dict(url):
    """Return *url*'s query string parsed into a dict of value lists."""
    return parse.parse_qs(parse.urlparse(url).query)
def parse_qs(url, key, default=None):
    """Return the first value of query parameter *key* in *url*.

    Falls back to *default* when the parameter is absent.  The previous
    implementation indexed ``[0]`` into the default itself, which raised
    ``TypeError`` for the documented ``default=None`` case and silently
    truncated any default longer than one character; it also guarded
    against a ``None`` dict that could never occur.
    """
    values = parse.parse_qs(parse.urlparse(url).query).get(key)
    return values[0] if values else default
def replace(target_str):
    """Decode the handful of HTML entities WeChat embeds in titles and
    URLs back to plain characters.

    NOTE(review): the checked-in literal list was mangled (the entity
    strings were decoded in place, leaving a bare triple-quote that breaks
    the syntax); reconstructed here as explicit entity -> character pairs.
    "&amp;" is decoded last so it cannot re-trigger the other patterns.
    """
    substitutions = (
        ("&#39;", "'"),
        ("&quot;", '"'),
        ("&nbsp;", " "),
        ("&gt;", ">"),
        ("&lt;", "<"),
        ("&amp;", "&"),
        ("&yen;", "\u00a5"),
    )
    for entity, char in substitutions:
        target_str = target_str.replace(entity, char)
    return target_str
def get_simple_msg(raw_msg):
    """Reduce a raw WeChat article dict to ``(mid_idx, title, content_url)``.

    The identifier is ``"<mid>_<idx>"`` taken from the article URL's query
    string, with "#" and "*" standing in for a missing mid/idx respectively.
    Title and URL are run through :func:`replace` to decode HTML entities.
    """
    title = replace(raw_msg.get("title"))
    content_url = replace(raw_msg.get("content_url"))
    mid_idx = "{}_{}".format(
        parse_qs(content_url, "mid", "#"),
        parse_qs(content_url, "idx", "*"),
    )
    return mid_idx, title, content_url
| [
"331505785@qq.com"
] | 331505785@qq.com |
11bca878aa5063c41f11550c4a9cc2585e8ac6f9 | 45320f0f22c21ea55ed843e4aecc0ae37ea1f7d5 | /bootcamp/authentication/migrations/0004_auto_20161105_0236.py | a32c2c1bf037c9c1f510777417b12d6dea2833f0 | [
"MIT"
] | permissive | krishnajaju/ApartmentSocialNetwork | 9cd042516c7e609e565fb08040e52cc1499797c2 | d6d18682d6793e7208d748dd01cb4c3ea5aa4552 | refs/heads/master | 2022-12-11T07:28:10.271629 | 2022-11-22T18:10:59 | 2022-11-22T18:10:59 | 72,892,512 | 0 | 0 | MIT | 2022-11-22T18:11:01 | 2016-11-04T23:20:55 | JavaScript | UTF-8 | Python | false | false | 440 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-11-04 21:06
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: renames the Profile model's
    # job_type field to job_title (schema-only change, no data loss).
    dependencies = [
        ('authentication', '0003_auto_20161105_0219'),
    ]
    operations = [
        migrations.RenameField(
            model_name='profile',
            old_name='job_type',
            new_name='job_title',
        ),
    ]
| [
"krishna.jaju24@gmail.com"
] | krishna.jaju24@gmail.com |
31a75b0e1666fc8c547df4149c074212f79b1a22 | 2892083b0ca34d4c0f8ebb959805cc51c3730345 | /TODO_app/migrations/0004_auto_20181118_1615.py | a0fb6404667bf94125980ebdb4e73393459744f8 | [] | no_license | atthipatikumar/TO-DO-TASK | b9eb3b4f3962b9124125e22f4568746dcd02c3aa | 79dcd202581013090e025bede1c90bef6fa76bc3 | refs/heads/master | 2020-04-07T06:01:41.143590 | 2018-11-18T19:44:24 | 2018-11-18T19:44:24 | 158,119,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration for the TODO_app Register model.
    dependencies = [
        ('TODO_app', '0003_auto_20181118_1542'),
    ]
    operations = [
        # register_logo becomes a unique primary-key FK to RegisterLogo.
        migrations.AlterField(
            model_name='register',
            name='register_logo',
            field=models.ForeignKey(primary_key=True, unique=True, related_name='logo_text', to='TODO_app.RegisterLogo'),
        ),
        # NOTE(review): default=False on a CharField stores the string
        # form of a boolean -- looks unintended; confirm against the model.
        migrations.AlterField(
            model_name='register',
            name='status',
            field=models.CharField(verbose_name='status', max_length=255, default=False),
        ),
        migrations.AlterUniqueTogether(
            name='register',
            unique_together=set([('task_id', 'register_logo')]),
        ),
    ]
| [
"atthipatikumar@outlook.com"
] | atthipatikumar@outlook.com |
35cea6f477b99f6f8a7cc26fbc10717219904709 | 063a236ba52e596d2871abaf43992d57337215b9 | /orkut_communities/wsgi.py | e2d1f0814f19101834235f47c685ebe2d3ef5a72 | [] | no_license | matheusdemicheli/orkut_communities | 2c277412dc929ebb48477ea523667607e405bdd5 | 7e2414e1b2c0791c34f93f95f3c6dfd2148d2eac | refs/heads/master | 2016-09-06T01:03:01.454509 | 2015-12-23T14:24:52 | 2015-12-23T14:24:52 | 29,220,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
WSGI config for orkut_communities project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "orkut_communities.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [
"matheusdemicheli@gmail.com"
] | matheusdemicheli@gmail.com |
708491272c2a93f6a37acf259817f7eaeb82e257 | 333cb73b0fcbba984f0c23f7056e9670ffd3d599 | /reservations/views.py | 261108a923dc4c3e5e186141859d72952392f473 | [] | no_license | wy0353/airbnb-clone | 4736cfb808b2f6a47991c888380bf26f94b90e6a | a53718e8d3437ee4ac0765c7361b599e9dcda55b | refs/heads/master | 2021-07-21T01:09:44.267252 | 2021-01-31T08:56:24 | 2021-01-31T08:56:24 | 239,310,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,512 | py | import datetime
from django.http import Http404
from django.shortcuts import render, redirect, reverse
from django.views.generic import View
from django.contrib import messages
from . import models as reservation_models
from rooms import models as room_models
from reviews import forms as review_forms
class CreateError(Exception):
    """Sentinel raised when the requested date is already booked (see create())."""
    pass
def create(request, room, year, month, day):
    """Book *room* for one night starting on (year, month, day).

    Control flow is exception-driven:
      * Room.DoesNotExist (bad room pk) or CreateError (a BookedDay already
        covers the date) -> error message and redirect home.
      * BookedDay.DoesNotExist -> the date is free, so a one-night
        Reservation is created for the requesting user and the client is
        redirected to its detail page.
    """
    try:
        date = datetime.datetime(year, month, day)
        room = room_models.Room.objects.get(pk=room)
        # If this get() SUCCEEDS the date is taken -> bail out via CreateError.
        reservation_models.BookedDay.objects.get(date=date, reservation__room=room)
        raise CreateError()
    except (room_models.Room.DoesNotExist, CreateError):
        messages.error(request, "Can't reserve the room.")
        return redirect(reverse("core:home"))
    except reservation_models.BookedDay.DoesNotExist:
        # Date is free: check_out is fixed to the following day.
        reservation = reservation_models.Reservation.objects.create(
            check_in=date,
            check_out=date + datetime.timedelta(days=1),
            guest=request.user,
            room=room,
        )
        return redirect(reverse("reservations:detail", kwargs={"pk": reservation.pk}))
class ReservationDetailView(View):
    """ Reservation Detail View Definition """
    def get(self, *args, **kwargs):
        # Look up the reservation; get_or_none is a custom manager helper
        # that returns None instead of raising DoesNotExist.
        pk = kwargs.get("pk")
        reservation = reservation_models.Reservation.objects.get_or_none(pk=pk)
        # Only the guest who booked or the host of the room may view it;
        # everyone else (and a missing pk) gets a 404.
        if reservation is None or (
            reservation.guest != self.request.user
            and reservation.room.host != self.request.user
        ):
            raise Http404()
        # Empty review form rendered alongside the reservation details.
        form = review_forms.ReviewCreateForm()
        context = {
            "reservation": reservation,
            "form": form,
        }
        return render(self.request, "reservations/detail.html", context=context)
def reservation_update_view(request, pk, verb):
    """Confirm or cancel a reservation; *verb* is "confirm" or "cancel".

    Access is restricted to the reservation's guest or the room's host.
    On cancel, the associated BookedDay rows are deleted so the dates
    become available again.  Any other *verb* saves the reservation
    unchanged and still reports success.
    """
    reservation = reservation_models.Reservation.objects.get_or_none(pk=pk)
    if reservation is None or (
        reservation.guest != request.user
        and reservation.room.host != request.user
    ):
        raise Http404()
    if verb == "confirm":
        reservation.status = reservation_models.Reservation.STATUS_CONFIRMED
    elif verb == "cancel":
        reservation.status = reservation_models.Reservation.STATUS_CANCELED
        # Deleting the BookedDay rows is what actually frees the calendar.
        reservation_models.BookedDay.objects.filter(reservation=reservation).delete()
    reservation.save()
    messages.success(request, "Reservation Updated.")
    return redirect(reverse("reservations:detail", kwargs={"pk": reservation.pk}))
| [
"wy0353@gmail.com"
] | wy0353@gmail.com |
d8af869514e85bb7f90c8dcc16396d6126bc46f2 | 32f0b0de3bb2b96d968696ca94754b4a0c3f0be7 | /sikuli/examples/test_helper.sikuli/display_log.py | be5b75a62948b17bde7a30cee505e72f07ccac2a | [
"MIT"
] | permissive | rrmhearts/Integration-Testing-Framework | 5db9be018f6f69022b90eaba6aae224e264890f6 | f477b3cd857d51326f59da3ea41c0761894f419c | refs/heads/master | 2022-10-20T01:30:26.890594 | 2022-10-14T20:16:52 | 2022-10-14T20:16:52 | 37,489,963 | 0 | 1 | MIT | 2022-10-14T20:16:53 | 2015-06-15T20:37:26 | HTML | UTF-8 | Python | false | false | 3,295 | py | from __future__ import with_statement
import glob
import os
import platform
import sys
import webbrowser
import subprocess
try:
if "lin" in platform.platform().lower():
sys.path.insert(0, '/home/vagrant/Integration-Testing-Framework/sikuli/examples/test_and_log')
from yattag import Doc
except ImportError:
subprocess.call("pip install yattag")
from yattag import Doc
#
# Display a log that has been generated by running tests and logging with TestHelper.
#
# Default log folder: /vagrant/log on VM, ./log on HM
pf = platform.platform().lower()
if "win" in pf:
# In sikuli, use getBundlePath(), otherwise use os.path stuff
if "java" in pf:
log_folder = os.path.dirname(getBundlePath()) + "/log"
else:
log_folder = os.path.dirname(os.path.realpath(__file__)) + "/log"
elif "lin" in pf:
log_folder = "/vagrant/log"
else:
print("lulz, wut os r u using?\n\n*Ahem*\n\nMy most sincere apologies, " +
"Sir/Ma'am, but I only support Windows and Linux so far.")
exit(1)
# Make sure the folder exists
if not os.path.exists(log_folder):
print("Folder not found: " + log_folder + "\n")
exit(1)
# Make sure there is one and only one .log file in the folder
glob_result_log = glob.glob(log_folder + "/*.log")
if len(glob_result_log) < 1:
print("No .log file found in folder: " + log_folder + "\n")
exit(1)
elif len(glob_result_log) > 1:
print("Multiple .log files found in folder: " + log_folder + "\n")
exit(1)
else:
log_file = glob_result_log[0]
# Make sure there is one and only one .css file in the folder
glob_result_css = glob.glob(log_folder + "/*.css")
if len(glob_result_css) < 1:
print("No css file found in folder: " + log_folder + "\n")
exit(1)
elif len(glob_result_css) > 1:
print("Multiple css files found in folder: " + log_folder + "\n")
exit(1)
else:
css_file = os.path.basename(glob_result_css[0])
# Build the html log
with open(log_folder + "/log.html", "w") as html_file:
doc, tag, text = Doc().tagtext()
doc.asis('<!DOCTYPE html>')
with tag("html", lang="en_us"):
with tag("head"):
with tag("title"):
text("Test Results")
doc.stag("link", href=css_file, rel="stylesheet", type="text/css")
with tag("body"):
with tag("table"):
with tag("thead"):
with tag("tr"):
with tag("th"):
text("Time and Date")
with tag("th"):
text("Test name")
with tag("th"):
text("Action")
with tag("th"):
text("Expected")
with tag("th"):
text("Screenshot")
with tag("tbody"):
# Add in the .log file, which should contain table rows
with open(log_file, "r") as f:
doc.asis(f.read())
# Write the html document to the file
html_file.write(doc.getvalue())
# Open a browser tab with the file displayed in it
new = 2 # open in a new tab if possible
url = "file://" + log_folder + "/log.html"
webbrowser.open(url, new=new)
| [
"jess.n.dawson@gmail.com"
] | jess.n.dawson@gmail.com |
107893fb2b15ddc0c6eb0ec97b753cd841c8c0ad | f1227f4fd0ba762e71be08d24b0c2ee9c05e724d | /syncdiff/adapter/adapter_ae.py | 0df4cd9ce335d4936ab981da5011dc2c92ae15c8 | [] | no_license | Lydia5477/sdtcm | 2981a63316545db2aa2abe7f7588f54c04e7feeb | 6ccf93c19f30aec47afe8b6dcbcbac447dd560f5 | refs/heads/master | 2022-07-12T19:03:26.993616 | 2020-02-27T05:48:54 | 2020-02-27T05:48:54 | 243,439,216 | 0 | 0 | null | 2022-06-22T01:17:29 | 2020-02-27T05:32:01 | Python | UTF-8 | Python | false | false | 858 | py | from torch.utils.data import Dataset
import os
import json
import torch
class AESet(Dataset):
    """Autoencoder-style dataset backed by ``<data_folder>/data.json``.

    The JSON layout is ``{"inputs": [[int, ...], ...]}``.  Each sample is
    the pair (inputs as float tensor, inputs + 1 as long tensor): the
    target sequence is the input sequence with every element shifted up
    by one.
    """

    def __init__(self, data_folder: str):
        super(AESet, self).__init__()
        self._data_folder = data_folder
        self._input_size = 0
        self._data = []
        self._load_data()

    def _load_data(self):
        path = os.path.join(self._data_folder, "data.json")
        with open(path, "r", encoding="utf-8") as f:
            inputs = json.load(f)["inputs"]
        # Pair each sequence with its element-wise +1 shifted target.
        pairs = [(seq, [value + 1 for value in seq]) for seq in inputs]
        if pairs:
            self._input_size = len(pairs[0][0])
        self._data.extend(pairs)

    def __len__(self):
        return len(self._data)

    def __getitem__(self, idx):
        inputs, targets = self._data[idx]
        return torch.tensor(inputs).float(), torch.tensor(targets)
| [
"Lydiady5477@gmail.com"
] | Lydiady5477@gmail.com |
16c26985278c7fa1cb832ab6e6ecc01fb8ce15cd | f4f1aefc32072b15c573651bca7928490ef78d6b | /codingenv/Scripts/pilprint.py | e390db2a608bccce6db52439171713e7226dc3c1 | [] | no_license | MarcStocker/Billing-Site | f32cabaa5d14b829f94818b81d3706fa695380d6 | dcd23391f153092cd775885515ede1d856614dba | refs/heads/master | 2021-01-11T12:18:39.660902 | 2017-01-24T17:50:19 | 2017-01-24T17:50:19 | 76,473,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,709 | py | #!c:\dropbox\school\2016-2017_school_year\cins_465_web_programming\assignments\first-website-project\codingenv\scripts\python.exe
#
# The Python Imaging Library.
# $Id$
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
from __future__ import print_function
import getopt
import os
import sys
import subprocess
VERSION = "pilprint 0.3/2003-05-05"
from PIL import Image
from PIL import PSDraw
letter = (1.0*72, 1.0*72, 7.5*72, 10.0*72)
def description(filepath, image):
    """Build a one-line caption: ``<basename> (<format> <W>x<H> <mode>)``.

    The image format is omitted when *image.format* is falsy (e.g. images
    created in memory rather than loaded from a file).
    """
    title = os.path.splitext(os.path.basename(filepath))[0]
    if image.format:
        fmt = " (" + image.format + " %dx%d "
    else:
        fmt = " (%dx%d "
    return title + fmt % image.size + image.mode + ")"
if len(sys.argv) == 1:
print("PIL Print 0.3/2003-05-05 -- print image files")
print("Usage: pilprint files...")
print("Options:")
print(" -c colour printer (default is monochrome)")
print(" -d debug (show available drivers)")
print(" -p print via lpr (default is stdout)")
print(" -P <printer> same as -p but use given printer")
sys.exit(1)
try:
opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as v:
print(v)
sys.exit(1)
printerArgs = [] # print to stdout
monochrome = 1 # reduce file size for most common case
for o, a in opt:
if o == "-d":
# debug: show available drivers
Image.init()
print(Image.ID)
sys.exit(1)
elif o == "-c":
# colour printer
monochrome = 0
elif o == "-p":
# default printer channel
printerArgs = ["lpr"]
elif o == "-P":
# printer channel
printerArgs = ["lpr", "-P%s" % a]
for filepath in argv:
try:
im = Image.open(filepath)
title = description(filepath, im)
if monochrome and im.mode not in ["1", "L"]:
im.draft("L", im.size)
im = im.convert("L")
if printerArgs:
p = subprocess.Popen(printerArgs, stdin=subprocess.PIPE)
fp = p.stdin
else:
fp = sys.stdout
ps = PSDraw.PSDraw(fp)
ps.begin_document()
ps.setfont("Helvetica-Narrow-Bold", 18)
ps.text((letter[0], letter[3]+24), title)
ps.setfont("Helvetica-Narrow-Bold", 8)
ps.text((letter[0], letter[1]-30), VERSION)
ps.image(letter, im)
ps.end_document()
if printerArgs:
fp.close()
except:
print("cannot print image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
| [
"mstocker.gbit@gmail.com"
] | mstocker.gbit@gmail.com |
72ede8035d37cd5a94867f3819ebc02093493b9d | 956a665a3e4b2d2112e001b6576ed14c8ab9a3d6 | /blog/urls.py | e4ae10194f6c258ace4ec127aede61a0ce112e8e | [] | no_license | UBegimai/python11_blog | bab03066ad3da10ce124e66b13dd8647b157184f | 5ef5e8c8da73433ec25c4b5554d35cb5915d685a | refs/heads/master | 2023-05-02T16:47:48.084709 | 2021-06-10T11:29:20 | 2021-06-10T11:29:20 | 375,338,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | """blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from main import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('main.urls')),
path('accounts/', include('account.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL,
document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| [
"umeibegimai@gmail.com"
] | umeibegimai@gmail.com |
cf781f9f57b15754c829f24e953344d81ac9fd5c | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/object_attr_set_none-70.py | e1073d079a8e28f1b7e3a1afa8aedb78621abe09 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | class A(object):
a:int = 42
class B(A):
b:bool = True
def __init__(self:"B"):
print("B")
a:A = None
b:B = None
$Target = B()
print(a.a)
b.a = 1
b.b = False
print(b.a)
print(b.b)
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
8ec2a713ac10633fd0ef75c47bb6352c308d1574 | 6ce7bdfec6c2f27c9be190d53b53a7772ebb65f0 | /venv/Scripts/pip3.6-script.py | 70459eef8f18445bc59dc969216d90213ff0e650 | [] | no_license | nesdown/CowRendering | 50918de022ea225014a04596f8c4b63739b576cf | c8c47c3a607ff4bd0e3c6c353a068daf2510b402 | refs/heads/master | 2020-03-19T16:08:47.571747 | 2018-06-09T12:37:51 | 2018-06-09T12:37:51 | 136,702,584 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | #!D:\Study\CowRender\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3.6'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3.6')()
)
| [
"newsdownloads12@gmail.com"
] | newsdownloads12@gmail.com |
e33a73a2c566351605bb2880bb00de100fb20805 | 485cb6c597463b16f745644ff331cc61c670a545 | /quick_sort.py | f10d8cc25c1c16bcac01741ca64b91b7c1675f84 | [] | no_license | huangyue312/algorithm | aca3e630c92e503899e89090674c241fb2ba77bc | 9333e786a9050cb7d840431a88401665c172f1e9 | refs/heads/master | 2022-07-29T00:57:42.180894 | 2020-05-14T06:13:37 | 2020-05-14T06:13:37 | 263,830,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py |
def quick_sort1(l,start,end):
if start >= end:
return
flag=l[start]
left=start
right=end
while left<right:
while left<right and l[right]>=flag:
right = right - 1
l[left]=l[right]
while left<right and l[left]<=flag:
left = left + 1
l[right] = l[left]
#l[start] = l[left]
l[left]=flag
quick_sort1(l,start,left-1)
quick_sort1(l,left+1,end)
def swap(l,i,j):
t=l[i]
l[i]=l[j]
l[j]=t
return l
def quick_sort2(l,start,end):
if start >= end:
return
flag=l[start]
left=start+1
right=end
done=False
while not done:
while left<=right and l[left]<=flag:
left += 1
while left<=right and l[right]>=flag:
right -= 1
if left>right:
done=True
else:
swap(l,right,left)
swap(l,start,right)
print(l)
quick_sort2(l,start,right-1)
quick_sort2(l,right+1,end)
l=[8,4,6,9,7,2,3,5,64,12,23,54,86,1,2,12]
quick_sort2(l,0,len(l)-1)
print(l) | [
"huangyue312@163.com"
] | huangyue312@163.com |
d66383b7ddda80247fe89a5eff7d18e9c69bcc3b | 33804b6a6c4a476410a819684b734890124841a0 | /tester.py | 2c4bd2ac6423e114f66a0c07e8105c162dbb98c4 | [] | no_license | WycliffeAssociates/8woc2018_reversi | 310a31ee59fbee49ef2fb6ba5a41b5c04ef2c207 | e03f0072085c518ef4ed822c2d9c1aa593f96302 | refs/heads/master | 2022-04-08T10:58:17.640734 | 2020-03-05T14:30:19 | 2020-03-05T14:30:19 | 109,281,289 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,400 | py | #!/usr/bin/python3
""" Tests an executable for correct reversi states """
# Libs
import argparse
import json
import subprocess
import sys
def main():
""" Tests the given executable for correct reversi states """
args = parse_arguments()
tests = json.load(args.test_file)
results = execute_tests(tests, args.executable)
json.dump(results, args.outfile)
for result in results:
if not result["pass"]:
sys.exit(1)
def parse_arguments():
""" Configures and parses command-line arguments """
argparser = argparse.ArgumentParser(
description=("Test an executable for Reversi plays. Prints "
"result file to outfile. Exits with return code 1 "
"if any tests fail."))
argparser.add_argument("--test-file",
nargs="?",
type=argparse.FileType("r"),
default="tests/tests.json",
help=("Filename of JSON file containing "
"reversi tests, default tests.json"))
argparser.add_argument("--outfile",
nargs="?",
type=argparse.FileType("w"),
default=sys.stdout,
help=("Filename of JSON test results, "
"default stdout"))
argparser.add_argument("executable",
help="Name of command line to invoke")
return argparser.parse_args()
def execute_tests(tests, executable):
""" Execute all tests """
results = []
for test in tests:
result = execute_test(test, executable)
results.append(result)
return results
def execute_test(test, executable):
""" Execute single test """
result = {}
stdin = json.dumps(test["input"])
if sys.platform == 'win32':
executable = ["python.exe", executable]
stdout = subprocess.check_output(executable,
input=stdin.encode("utf-8"))
output = json.loads(stdout.decode("utf-8"))
if output == test["expected"]:
result["pass"] = True
else:
result["pass"] = False
result["input"] = test["input"]
result["expected"] = test["expected"]
result["actual"] = output
return result
if __name__ == "__main__":
main()
| [
"craig.robert.oliver@gmail.com"
] | craig.robert.oliver@gmail.com |
8273d2ebcfc280a59543693813c5eadf1926658c | fd0411915468c3b3fe16d29bca2747ddd785cf8a | /sketch.py | eb979e6d229e3e6d455e7942d0845058ced2a140 | [
"MIT"
] | permissive | timothydmorton/fpp-old | 31f5f9a0084dd465870298203d253312e01f7379 | 6a2175d4bd9648b61c244c7463148632f36de631 | refs/heads/master | 2021-01-19T02:23:18.360514 | 2016-07-10T21:31:28 | 2016-07-10T21:31:28 | 63,019,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,860 | py | """
A module for visualizing Kepler data.
The idea is to keep it rough.
"""
import atpy
from numpy import *
import glob
import matplotlib.pylab as plt
from matplotlib.pylab import *
from matplotlib import rcParams
from matplotlib.gridspec import GridSpec,GridSpecFromSubplotSpec
from keptoy import *
import keptoy
import qalg
def cdpp():
t = atpy.Table('sqlite','all.db',table='ch1cdpp')
cdppmin = min(t.cdpp12hr)
cdppmax = max(t.cdpp12hr)
nplots = 8
cdpp = logspace(log10(20),log10(200),nplots)
for i in range(nplots):
print cdpp[i]
closest = argsort( abs( t.cdpp12hr - cdpp[i] ))[0]
starcdpp = t.cdpp12hr[closest]
keplerid = t.KEPLERID[closest]
file = glob.glob('archive/data3/privkep/EX/Q3/kplr%09i-*_llc.fits' %
keplerid)
star = atpy.Table(file[0],type='fits')
ax = plt.subplot(nplots,1,i+1)
med = median(star.SAP_FLUX )
ax.plot(star.TIME,(star.SAP_FLUX/med-1),'.',ms=2,
label='KIC-%i, CDPP-12hr %.2f' % (keplerid,starcdpp) )
ax.legend()
def markT(ax,tT,**kwargs):
for t in tT:
ax.axvline(t,**kwargs)
return ax
def inspectT(t0,f0,P,ph,darr=None):
"""
Take a quick look at a transit
"""
size = 150
pad = 0.1 # amount to pad in units of size
cW = 2
linscale = 3
fig = plt.gcf()
f = f0.copy()
t = t0.copy()
f -= f.mean()
t -= t[0]
tbase = t.ptp()
nt = int(ntrans( tbase, P, ph ))
otT = P * (arange(nt) + ph )
print otT,t0[0]
# Plot the time series
nstack = int(ceil( t.ptp() / size))
gs = GridSpec(2,1)
gsStack = GridSpec(nstack, 1)
gsStack.update(hspace=0.001,bottom=.3,left=0.03,right=0.98)
gsT = GridSpec(1, nt)
gsT.update(top=0.28,wspace=0.001,left=0.03,right=0.98)
axStackl = []
for i in range(nstack):
axStackl.append( plt.subplot( gsStack[i]) )
ax = axStackl[i]
offset = size*i
ax.plot(t,f,marker='.',ms=2,lw=0,alpha=.6)
ax.set_xlim(offset-pad*size,offset+(1+pad)*size)
ax.axvline(offset,ls='--',lw=1,label='Padded')
ax.axvline(offset+size,ls='--',lw=1)
ax.annotate('Offset = %i' % offset,xy=(.01,.1),
xycoords='axes fraction')
xa = ax.get_xaxis()
ya = ax.get_yaxis()
rms = std(f)
linthreshy = linscale*rms
ax.set_yscale('symlog',linthreshy=linthreshy)
ax.axhline(linthreshy,color='k',ls=':')
ax.axhline(-linthreshy,color='k',ls=':')
ax = markT(ax,otT,color='red',lw=3,alpha=0.4)
if darr != None:
inT = int(ntrans( tbase, darr['P'], darr['phase'] ))
itT = darr['P']*arange(inT) + darr['phase'] * darr['P'] - t0[0]
ax = markT(ax,itT,color='green',lw=3,alpha=0.4)
if i == 0:
xa.set_ticks_position('top')
ax.legend(loc='upper left')
else:
xa.set_visible(False)
ya.set_visible(False)
tdur = a2tdur(P2a(P))
axTl = []
for i in range(nt):
axTl.append( plt.subplot( gsT[i] ) )
axT = axTl[i]
axT.plot(t,f,'.')
tfit,yfit = lightcurve(tbase=tbase,phase=ph,P=P,df=f)
axT.plot(tfit,yfit-1,color='red')
axT.set_xlim( otT[i] - cW*tdur , otT[i] + cW*tdur )
lims = axT.axis()
tm = ma.masked_outside( t,lims[0],lims[1] )
fm = ma.masked_array(f,mask=tm.mask)
axT.axis( ymax=fm.max() , ymin=fm.min() )
xticklabels = axT.get_xticklabels()
[xtl.set_rotation(30) for xtl in xticklabels]
ya = axT.get_yaxis()
if i != 0:
plt.setp(ya,visible=False)
limarr = array([ ax.axis() for ax in axTl ])
yMi = min(limarr[:,2])
yMa = max(limarr[:,3])
for ax in axTl:
ax.axis(ymax=yMa , ymin=yMi)
def stack(axL,xmin,size,pad=0.1,lfsize='small'):
"""
Given a list of axis, we'll adjust the x limits so we can fit a very long
data string on the computer screen.
"""
nAx = len(axL)
for i in range(nAx):
ax = axL[i]
offset = xmin + i*size
ax.set_xlim(offset-pad*size,offset+(1+pad)*size)
ax.axvline(offset,ls='--',label='Padded')
ax.axvline(offset+size,ls='--')
ax.annotate(r'$\Delta$ T = %i' % (i*size) ,xy=(.01,.1),
xycoords='axes fraction',fontsize=lfsize)
xa = ax.get_xaxis()
ya = ax.get_yaxis()
if i == 0:
xa.set_ticks_position('top')
elif i ==nAx-1:
ax.legend(loc='lower right')
xa.set_visible(False)
ya.set_visible(False)
else:
xa.set_visible(False)
ya.set_visible(False)
def stackold(x,y,size,pad=0.1,axl=None,**kw):
"""
"""
# How many regions
npanel = int(ceil( x.ptp() / size))
gs = GridSpec(npanel,1)
gs.update(hspace=0.001)
for i in range(npanel):
if axl != None:
ax = axl[i]
offset = size*i
ax = plt.subplot( gs[i])
ax.plot(x,y,**kw)
ax.set_xlim(offset-pad*size,offset+(1+pad)*size)
ax.axvline(offset,ls='--',lw=1,label='Padded')
ax.axvline(offset+size,ls='--',lw=1)
ax.annotate('Offset = %i' % offset,xy=(.01,.1),
xycoords='axes fraction')
xa = ax.get_xaxis()
ya = ax.get_yaxis()
if i == 0:
xa.set_ticks_position('top')
ax.legend(loc='upper left')
else:
xa.set_visible(False)
ya.set_visible(False)
if axl != None:
axl[i] = ax
if axl != None:
return axl
from keptoy import lc
import tfind
def DM(dM,P):
plt.clf()
Pcad = int(round(P/lc))
dMW = tfind.XWrap(dM,Pcad,fill_value=nan)
dMW = ma.masked_invalid(dMW)
dMW.fill_value=0
nT = dMW.shape[0]
ncad = dMW.shape[1]
t = arange(ncad)*lc
[plt.plot(t,dMW[i,:]+i*2e-4,aa=False) for i in range(nT)]
dMW = ma.masked_invalid(dMW)
plt.plot(t,dMW.mean(axis=0)*sqrt(nT) - 5e-4,lw=3)
def XWrap(XW,step=1):
"""
Plot XWrap arrays, folded on the right period.
"""
nT = XW.shape[0]
ncad = XW.shape[1]
[plt.plot(XW[i,:]+i*step,aa=False) for i in range(nT)]
def FOM(t0,dM,P,step=None,**kwargs):
"""
Plot the figure of merit
"""
if step is None:
step = np.nanmax(dM.data)
Pcad = int(round(P/lc))
dMW = tfind.XWrap(dM,Pcad,fill_value=np.nan)
dMW = ma.masked_invalid(dMW)
dMW.fill_value=np.nan
res = tfind.ep(t0,dM,Pcad)
fom = res['fom']
color = ['black','red']
ncolor = len(['black','red'])
for i in range(dMW.shape[0]):
x = ma.masked_array(res['epoch'],mask=dMW[i,:].mask).compressed()
y = dMW[i,:].compressed()
plt.plot(x,y+i*step,color=color[mod(i,ncolor)] ,)
plot(res['epoch'],res['fom'] -step )
return dMW
def FOMblock(t0,dM,P,**kwargs):
nt = int(dM.size *keptoy.lc / P)
tpb = 20
nblock = nt / tpb
print nblock
Pcad = int(P / keptoy.lc)
if nblock >2 :
for i in range(nblock):
dMp = dM[Pcad*i*tpb:Pcad*(i+1)*tpb]
print dMp.size
FOM(t0 + i *( P+0.5),dMp,P,**kwargs)
else:
FOM(t0,dM,P,**kwargs)
def window(tRES,tLC):
PcadG = (tRES.PG[0]/keptoy.lc).astype(int)
filled = tfind.isfilled(tLC.t,tLC.f,20)
win = tval.window(filled,PcadG)
plot(PcadG*keptoy.lc,win)
xlabel('Period (days)')
ylabel('Window')
import tval
import copy
def LDT(t,fm,p):
"""
Visualize how the local detrender works
"""
ax = gca()
p1L,idL = tval.LDT(t,fm,p,wd=2)
twd = 2./lc
step = 3
for i in range(len(p1L)):
id = idL[i]
p1 = p1L[i]
trend = keptoy.trend(p1[3:],t[id])
ffit = keptoy.P051T(p1,t[id])
ho = np.mean(t[id])
ho -= step*np.floor(ho/p['P'])
vo = np.mean(fm[id])
plot(t[id]-ho,fm[id]-vo,',')
plot(t[id]-ho,ffit-vo,'r',lw=2)
plot(t[id]-ho,trend-vo,'c',lw=2)
color = rcParams['axes.color_cycle'][mod(i,4)]
def tfit(tsim,tfit):
plot(tset.RES.PG[0],tset.RES.ddd[1]/tset.RES.sss[1],'o')
def eta(tres,KIC):
"""
Plot detection efficency as a function of depth for a given star.
"""
PL = unique(tres.Pblock)
for P in PL:
dfL = unique(tres.df)
fgL = []
efgL = []
for df in dfL:
cut = (tres.KIC == KIC ) & (tres.Pblock == P) & (tres.df == df)
tc = tres.where( cut )
tg = tres.where( cut & tres.bg )
tb = tres.where( cut & ~tres.bg )
nc,ng,nb = tc.data.size,tg.data.size,tb.data.size
print "%s %03d %7.5f %02d %02d %02d" % (KIC,P,df,ng,nb,nc)
fgL.append(1.*ng/nc )
efgL.append(1./sqrt(nc) )
errorbar(dfL,fgL,efgL,label='P-%03d' % P)
xlabel(r'$\Delta F / F$')
ylabel('Detection Efficiency')
title('%d' % KIC)
legend(loc='best')
draw()
def markT(f,p,wd=2):
P = p['P']
epoch = p['epoch']
tdur = p['tdur']
twd = round(tdur/lc)
Pcad = int(round(P/lc))
epochcad = int(round(epoch/lc))
wdcad = int(round(wd/lc))
f0W = tfind.XWrap(f,Pcad,fill_value=np.nan)
### Determine the indecies of the points to fit. ###
ms = np.arange( f0W.shape[0] ) * Pcad + epochcad
# Exclude regions where the convolution returned a nan.
sLDT = [slice(m - wdcad/2 , m+wdcad/2) for m in ms]
return sLDT
def ROC(tres):
"""
"""
assert len(unique(tres.Pblock))==1,'Periods must be the same'
assert len(unique(tres.KIC))==1,'Must compare the same star'
KIC = unique(tres.KIC)[0]
dfL = unique(tres.df)
for df in dfL:
t = tres.where( tres.df == df)
fapL,etaL = qalg.ROC(t)
plot(fapL,etaL,lw=2,label='df = %03d ' % (df) )
x = linspace(0,1,100)
plot(x,x)
legend(loc='best')
title( 'ROC for %i' % KIC )
xlabel('FAP' )
ylabel('Detection Efficiency' )
def hist(tres):
"""
"""
assert len(unique(tres.Pblock))==1,'Periods must be the same'
assert len(unique(tres.KIC))==1,'Must compare the same star'
KIC = unique(tres.KIC)[0]
dfL = unique(tres.df)
fig,axL = subplots(nrows=len(dfL),sharex=True,figsize=( 5, 12))
for df,ax in zip(dfL,axL):
tg = tres.where( (tres.df == df) & tres.bg)
tb = tres.where( (tres.df == df) & ~tres.bg)
ax.hist(tg.os2n,color='green',bins=arange(100),
label='Good %d' % len(tg.data))
ax.hist(tb.os2n,color='red',bins=arange(100),
label='Fail %d' % len(tb.data))
ax.legend()
label = r"""
$\Delta F / F$ = %(df)i ppm
""" % {'df':df}
ax.annotate(label,xy=(.8,.1),xycoords='axes fraction',
bbox=dict(boxstyle="round", fc="w", ec="k"))
xlabel('s2n')
title('%d, %i days' % (KIC,tres.Pblock[0]) )
def simplots(tres):
PL = unique(tres.Pblock)
fcount = 0
for P in PL:
t = tres.where(tres.Pblock == P)
hist(t)
fig = gcf()
fig.savefig('%02d.png' % fcount )
fcount +=1
fig.clf()
for P in PL:
t = tres.where(tres.Pblock == P)
ROC(t)
fig = gcf()
fig.savefig('%02d.png' % fcount )
fcount +=1
fig.clf()
def inspSim():
tLC = atpy.Table('tLC.fits')
tRED = atpy.Table('tRED.fits')
P = np.unique(tRED.Pblock)
dfL = np.unique(tRED.df)
tfail = tRED.where((tRED.df == dfL[1]) & (tRED.Pblock == P[2]) & ~tRED.bg)
LDTfail = []
seeds = []
nl = []
Aseeds = tfail.seed
for seed in Aseeds:
try:
tPAR = tRED.where(tRED.seed == seed)
tRES = atpy.Table('tRES%04d.fits' % seed)
ikwn = argmin(abs( tRES.PG - tPAR.P ))
nT = tRES.nT[0][ikwn]
nl.append(nT)
sketch.inspFail(tPAR,tLC,tRES)
fig = gcf()
fig.savefig('insp%04d.png' % tPAR.seed)
close('all')
except ValueError:
LDTfail.append( tPAR.seed[0] )
def inspVAL(tLC,tRES,*pL):
f = tLC.fdt - tLC.fcbv
t = tLC.t
nrows = 4 + 2*len(pL)
fig = gcf()
fig.clf()
ax0 = fig.add_subplot(nrows,1,1)
ax1 = fig.add_subplot(nrows,1,2,sharex = ax0)
ax0.plot(t,f)
fm = ma.masked_invalid(f)
fm.fill_value=0
dM = tfind.mtd(t,fm.filled(),14)
dM.fill_value = np.nan
dM.mask = fm.mask | ~tfind.isfilled(t,f,14)
ax1.plot(t,dM)
ax2 = fig.add_subplot(nrows,1,3)
ax3 = fig.add_subplot(nrows,1,4,sharex = ax2)
sca(ax2)
periodogram(tRES)
sca(ax3)
pep(tRES)
axL = [ax0,ax1,ax2,ax3]
for i in range(5,nrows+1):
axL.append( fig.add_subplot(nrows,1,i) )
for i in range(len(pL)):
p = pL[i]
ifom = 4+2*i
ildt = 5+2*i
sca( axL[ifom] )
FOM(tLC.t[0],dM,p['P'])
epoch = p['epoch']+np.ceil((tLC.t[0]-p['epoch'])/p['P'])*p['P']
axvline(epoch)
sca( axL[ildt] )
LDT(t,f,p)
plt.subplots_adjust(hspace=0.16)
draw()
def pep(tRES):
"""
Show best epoch as a function of period
"""
ax = gca()
x = tRES.PG
y = tRES.epoch
c = tRES.s2n
sid = argsort(c)
x = x[sid]
y = y[sid]
c = c[sid]
ax.scatter(x,y,c=c,cmap=cm.gray_r,edgecolors='none',vmin=7)
def periodogram(tRES):
ax = gca()
x = tRES.PG
y = tRES.s2n
ax.plot(x,y)
def dMLDT(t,f,p,axL):
"""
Plot folded mean depth and local detrending
"""
assert axL.size==2
sca(axL[0])
FOM(dM,pknown['P'])
axvline(pknown['epoch']/lc)
sca(axL[1])
LDT(tLC.t,f,p)
def pp(tLCbase,tLC):
"""
"""
fig,axL = subplots(nrows=5,sharex=True)
fig.subplots_adjust(hspace=0.0001,bottom=0.03,top=0.97,left=0.06,right=0.97)
ll = [axL[0].plot(t.TIME,t.f,',r',mew=0) for t in tLCbase]
ll[0][0].set_label('Original Time Series')
axL[0].plot(tLC.TIME,tLC.f,',k',mew=0,label='Pre-processing')
axL[1].plot(tLC.TIME,tLC.fdtm,',k',mew=0,label='Filtered Data')
axL[1].plot(tLC.TIME,tLC.fcbv,'r',mew=0,label='CBV detrend')
dM,x,x,x,x = tfind.MF(tLC.f,20)
dMcbv,x,x,x,x = tfind.MF(tLC.f-tLC.fcbv,20)
axL[2].plot(tLC.TIME,dM,'k')
axL[2].plot(tLC.TIME,dMcbv,'r')
sca(axL[3])
waterfall(tLC.TIME,tLC.f,cmap=cm.hot )
ylabel('Specgram LC')
sca(axL[4])
waterfall(tLC.TIME,tLC.fcbv,cmap=cm.hot)
ylabel('Specgram DT')
def waterfall(t,f,**kwargs):
fm = ma.masked_invalid(f)
fmdt = fm.copy()
fmdt.fill_value=0
sL = ma.notmasked_contiguous(fm)
dt3 = lambda x,y: y - polyval(polyfit(x,y,3),x)
for s in sL:
fmdt[s] = dt3(t[s],fm[s])
fdt0 = fmdt.filled()
n = 10
NFFT = 2**n
Fs = 48
Pxx, freqs, bins, im = \
specgram(fdt0, NFFT=2**n, Fs=Fs,xextent=(t[0],t[-1]) ,
interpolation='nearest',scale_by_freq=False,pad_to=2**14,
noverlap=2**n-2**6)
cla()
fMaId = argsort(abs(freqs-1))[0]
fMa = freqs[fMaId]
Pxx = Pxx[:fMaId,::]
per = percentile(Pxx,50)
bins += t[0] - NFFT/ 2 /Fs
imshow(Pxx,aspect='auto',extent=[bins[0],bins[-1],0,freqs[fMaId]],
origin='left',vmin=per,**kwargs)
def phfold(t,fm,p,**kwargs):
"""
"""
p1L,idL = tval.LDT(t,fm,p)
for p1,id in zip(p1L,idL):
trend = keptoy.trend(p1[3:],t[id])
ffit = keptoy.P051T(p1,t[id])
tmod = mod(t[id]-t[0],p['P'])+t[0]
scatter(tmod,fm[id]-trend,**kwargs)
plot(tmod,ffit-trend,'--',lw=2)
| [
"tim.morton@gmail.com"
] | tim.morton@gmail.com |
b2f4cea09f35c57c0a0b1a27e31cde3747cfc379 | 0cc2272755f051e1758facaaa7e7ade948a2d20f | /orcid_api/models/source.py | 4dc7c4dfc619b56c8d69aa6d977380d2c38fcdff | [
"MIT"
] | permissive | Royal-Society-of-New-Zealand/NZ-ORCID-Hub | f10b34bfb9823fffdb5bbde434b5aa71ea365089 | 98c3320c3663efa5e46aa0d262ece9d9b5029499 | refs/heads/master | 2023-07-08T03:36:36.867612 | 2023-03-18T08:00:07 | 2023-03-18T08:00:07 | 80,788,800 | 16 | 7 | MIT | 2023-03-23T03:32:41 | 2017-02-03T02:07:02 | Python | UTF-8 | Python | false | false | 4,354 | py | # coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Source(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, source_orcid=None, source_client_id=None, source_name=None):
"""
Source - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'source_orcid': 'SourceOrcid',
'source_client_id': 'SourceClientId',
'source_name': 'SourceName'
}
self.attribute_map = {
'source_orcid': 'source-orcid',
'source_client_id': 'source-client-id',
'source_name': 'source-name'
}
self._source_orcid = source_orcid
self._source_client_id = source_client_id
self._source_name = source_name
@property
def source_orcid(self):
"""
Gets the source_orcid of this Source.
:return: The source_orcid of this Source.
:rtype: SourceOrcid
"""
return self._source_orcid
@source_orcid.setter
def source_orcid(self, source_orcid):
"""
Sets the source_orcid of this Source.
:param source_orcid: The source_orcid of this Source.
:type: SourceOrcid
"""
self._source_orcid = source_orcid
@property
def source_client_id(self):
"""
Gets the source_client_id of this Source.
:return: The source_client_id of this Source.
:rtype: SourceClientId
"""
return self._source_client_id
@source_client_id.setter
def source_client_id(self, source_client_id):
"""
Sets the source_client_id of this Source.
:param source_client_id: The source_client_id of this Source.
:type: SourceClientId
"""
self._source_client_id = source_client_id
@property
def source_name(self):
"""
Gets the source_name of this Source.
:return: The source_name of this Source.
:rtype: SourceName
"""
return self._source_name
@source_name.setter
def source_name(self, source_name):
"""
Sets the source_name of this Source.
:param source_name: The source_name of this Source.
:type: SourceName
"""
self._source_name = source_name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Source):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"nad2000@gmail.com"
] | nad2000@gmail.com |
f1fcb2dc427b1be364386487f375a7ca3c2eea49 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/Conqueror of Empires/project/game/calculations.py | 18cac829dd2315ea7c9cda18bc6a98acccd1743d | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:bbc46d12b635a6a06be7a7479e276101e201315e628ef6a050fe99e5561a6532
size 466
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
bcf8e579e306bd3b6fd3400f4b0ae78666be7bff | 9b6895363cb512105b8d8f5c15616e09bc7bb0e6 | /models.py | ca53242e9ac94a644b1f28744ecd99dc8ed1d671 | [] | no_license | sudheersam/Edyoda-lets-meet-project | 15329703391091d5ef1dea528ee2daa78d165a1a | d089e80b081e7d873082ea5f87d0cf0679597a1f | refs/heads/main | 2023-03-17T14:04:24.026960 | 2021-03-15T13:55:09 | 2021-03-15T13:55:09 | 347,942,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 922 | py | from django.db import models
from django.contrib.auth import models as django_models
from django.contrib.auth.models import User
# Create your models here.
class UserProfile(models.Model):
user = models.OneToOneField(User, on_delete = models.CASCADE)
is_manager = models.BooleanField(default= False)
def __str__(self):
return str(self.user) +"-->" + str(self.is_manager)
class Event(models.Model):
Name = models.ForeignKey(User,on_delete=models.CASCADE,null = True,blank = True)
Eventname = models.CharField(max_length = 250)
thumbnail = models.ImageField(blank = True, null = True)
category = models.CharField(max_length=250)
body = models.TextField()
venue = models.CharField(max_length=250)
DateAndTime = models.DateTimeField()
TotalAvailableSeats = models.IntegerField()
def __str__(self):
return self.Eventname
| [
"noreply@github.com"
] | sudheersam.noreply@github.com |
379ccd4a0da9061c9567adae0e5cc506a2c925fb | 8b3776414128eb17607b3c0f1b6745371af5ec12 | /discovery/tweets/migrations/0004_tweet_user.py | 9a7dd5dd78bbd7a8ccbb1b327fbb2bb9b1563358 | [] | no_license | ShiroshT/tweetX | 7fda7ba46e11342a66a0bb0a4bb23f6a63b580b4 | b6836b8555f8c39d168c629c4e390fec23de4b28 | refs/heads/master | 2021-07-20T02:16:13.669533 | 2017-10-29T08:58:28 | 2017-10-29T08:58:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-22 11:12
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('tweets', '0003_auto_20171022_1035'),
]
operations = [
migrations.AddField(
model_name='tweet',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"devwork.rocketbox@gmail.com"
] | devwork.rocketbox@gmail.com |
8f99be10dd9404253f9f15b06055a89fc3f771ee | 9668e961d5aabd2cfa103a9b8f25d20e1700d400 | /constants/game_loop_states.py | 10b2ca4b1fe1afe8d14d65689dcb5b0ef3575fe4 | [] | no_license | Sam-Macpherson/1920 | d79df3f8cdcda68fb0d0b6bb888ba80feb1d4824 | d6f5da4d2b3a3f0bb1e78f0f14af7e113189f084 | refs/heads/master | 2022-07-28T13:21:34.654400 | 2020-05-25T12:25:06 | 2020-05-25T12:25:06 | 256,360,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | # The different phases in the game loop
import constants.scenes as scenes
LISTENING_TO_RADIO = scenes.ALARM_CLOCK_SCENE
GOING_TO_WORK = scenes.GOING_TO_WORK_SCENE
AT_WORK = scenes.RESTAURANT_SCENE
MANAGE_BOOKS = scenes.MANAGE_BOOKS_SCENE
LEAVING_WORK = scenes.LEAVING_WORK_SCENE
# The initial game loop is ordered here, this wil be built upon by the game
# state manager as the game progresses, and the order of this list will always
# be the order of the states in the game loop.
INITIAL_GAME_LOOP_STATES = [
LISTENING_TO_RADIO,
GOING_TO_WORK,
AT_WORK,
MANAGE_BOOKS,
LEAVING_WORK
]
| [
"sam.macpherson15@gmail.com"
] | sam.macpherson15@gmail.com |
31820bbd46117310b0b76e8a4adb2a19182660a5 | 81b7126cb2622edfcc2f8c11e5124b95b07683ee | /bin/lean | badf37bd83f8bc3112bea1c3982f0b61c9372a74 | [
"MIT"
] | permissive | Julian/dotfiles | f902c96c67ad83f56aeb4e8e25242d88365fb0cf | e8567440eef36dcc98687cbd06bb27573c845080 | refs/heads/main | 2023-08-28T14:09:24.244528 | 2023-08-10T12:50:39 | 2023-08-10T12:50:39 | 2,197,724 | 37 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,650 | #!/usr/bin/env python3
from pathlib import Path
import os
import re
import subprocess
import sys
LEAN3_MARKER = re.compile(r'"leanprover(?:|-community)/lean:(.*)"')
LEANPKG = Path("leanpkg.toml")
LEAN3_DIR = Path(
os.environ.get("LEAN3_DIR", os.path.expanduser("~/.local/share/lean/")),
)
LEAN4_DIR = Path(
os.environ.get("LEAN4_DIR", Path("/opt/homebrew/Cellar/lean@4/")),
)
LEAN_DEFAULT_VERSION = os.environ.get("LEAN_DEFAULT_VERSION")
MAYBE_DEBUG = "-debug" if "LEAN_DEBUG" in os.environ else ""
parent = subprocess.run(
["ps", "-o", "command", "-p", str(os.getppid())],
capture_output=True,
).stdout.strip()
needs_lean3 = any(
each in parent
for each in {b"lean-language-server", b"leanproject", b"lean3ls"}
)
def lean3(version):
return LEAN3_DIR / f"lean-{version}-darwin{MAYBE_DEBUG}"
def lean4(version):
return next(LEAN4_DIR.glob("HEAD-*"))
if LEANPKG.exists():
match = LEAN3_MARKER.search(LEANPKG.read_text())
version = match.group(1) if match is not None else LEAN_DEFAULT_VERSION
else:
version = LEAN_DEFAULT_VERSION
if not version:
if needs_lean3:
available = {
tuple(map(int, path.name.split("-")[1].split("."))): path
for path in LEAN3_DIR.glob(f"lean-*-darwin{MAYBE_DEBUG}")
}
version = ".".join(map(str, max(available)))
else:
version = "4.0.0"
fn = lean3 if version.startswith("3") else lean4
executable = Path(sys.argv[0]).name
lean = fn(version) / "bin" / executable
try:
os.execvp(lean, [lean] + sys.argv[1:])
except FileNotFoundError:
sys.exit(f"lean \033[95mv{version}\033[0m is not installed.")
| [
"Julian@GrayVines.com"
] | Julian@GrayVines.com | |
7c255d5fc398aa48637961bbfa8a5626ea5fc7bd | f3b233e5053e28fa95c549017bd75a30456eb50c | /ptp1b_input/L79/79-bp_wat_20Abox/set_4.py | 243ef874827bee179687512fa8cdb959f8f21467 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | import os
dir = '/mnt/scratch/songlin3/run/ptp1b/L79/wat_20Abox/ti_one-step/79_bp/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_4.in'
temp_pbs = filesdir + 'temp_4.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_4.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_4.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
34c8ce587fd06c0637ab8c39b7b5c855aa9614e9 | 526c5e67fa653b0046586a069a2ed9b59d189a71 | /kivy/kivymd/MDIcons.py | 9770aa724d81662ad21cf78381d46c772c2dcfb0 | [] | no_license | vps4618/buildwithpython | 6a55064f2c9e333a6fd4742fd985aa6bce0df685 | a16abb9ed3425a3fc5925fa45e83f13b12955d76 | refs/heads/main | 2023-08-07T13:26:47.762322 | 2021-09-27T17:48:13 | 2021-09-27T17:48:13 | 409,471,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | from kivymd.app import MDApp
from kivymd.uix.label import MDIcon
from kivymd.uix.boxlayout import MDBoxLayout
class IconApp(MDApp):
def build(self):
layout = MDBoxLayout()
icon1 = MDIcon(icon='account-cash', halign='center')
icon2 = MDIcon(icon='account-child', halign='center')
icon3 = MDIcon(icon="airballoon", halign='center')
icon4 = MDIcon(icon="airplane", halign='center')
layout.add_widget(icon1)
layout.add_widget(icon2)
layout.add_widget(icon3)
layout.add_widget(icon4)
return layout
IconApp().run()
| [
"noreply@github.com"
] | vps4618.noreply@github.com |
15d9796422807d6d1ef08b7b70e4682a52ec0b77 | 8fb5ca43ac38f6241a1b8693419148509db8613d | /broker/broker/wsgi.py | 75226026f1faad0e9e1c631e4ea2dd1cd3ceb3f5 | [] | no_license | lorenzotan/brokerV2 | 0e07760ad2b57f2b204e0d43d088c0f79efb172b | 23120f4c2a29e56452191680c1c47424f9f66d5e | refs/heads/master | 2020-04-02T06:58:43.375618 | 2019-02-25T01:29:46 | 2019-02-25T01:29:46 | 154,176,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for broker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "broker.settings")
application = get_wsgi_application()
| [
"lorenzo.tan@gmail.com"
] | lorenzo.tan@gmail.com |
83df72e7016f36def68c8278a636e0ca9aa3a0f4 | 874d2d734dfe9e13bede7ec60c146e9ac9a90be9 | /energypy/envs/flex/flex.py | ff6ee957506f983d13088f79bee70ca88f0bce92 | [
"MIT"
] | permissive | daleyuda/energy-py | 9e19c5c750a6f2a0d3a05f880294f4126fc15899 | 52553870a4c454fb36151a55b59f7715fcacff73 | refs/heads/master | 2020-03-28T15:17:30.416988 | 2018-09-13T00:04:38 | 2018-09-13T00:04:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,973 | py | """ v3 of a price responsive flexible electricity asset
TODO
Add units onto all of the info dict lists
"""
from collections import deque
import logging
import numpy as np
from energypy.envs import BaseEnv
from energypy.common import ContinuousSpace, DiscreteSpace, GlobalSpace
logger = logging.getLogger(__name__)
class Flex(BaseEnv):
"""
Price responsive flexible demand model
Asset can operate in four dimensions
1. store demand (reducing site electricity consumption)
2. release demand (increasing site electricity consumption)
3. storing supply (increases site electricity consumption)
4. releasing supply (decreases site electricity consumption)
Storing and releasing demand is the classic use of demand side response
where the control is based on reducing the asset electricity consumption
Storing and releasing supply is the the inverse - controlling an asset by
increasing electricity consumption - avoiding consuming electricity later
## structure of the env
Demand is stored and released from a deque. A deque structure is used so that
even if the agent keeps the setpoint raised, the demand will be released
The time difference between store and release is the length of the deque
Supply is stored using a float. Because supply is a cost, the agent not using
it by releasing is behaviour I want the agent to learn to avoid
The structure of the agent is inspired by anecdotal observation of
commerical chiller plants reacting to three different setpoints
- increased (demand stored)
- no_op
- decreased (supply stored - i.e. precooling)
"""
def __init__(
self,
capacity=4.0, # MWh
supply_capacity=0.5, # MWh
release_time=12, # num 5 mins
supply_power=0.05, # MW
**kwargs
):
self.capacity = float(capacity)
self.supply_capacity = float(supply_capacity)
self.release_time = int(release_time)
super().__init__(**kwargs)
"""
action space has a single discrete dimension
0 = no op
1 = increase setpoint
2 = decrease setpoint
"""
self.action_space = GlobalSpace('action').from_spaces(
DiscreteSpace(3), 'setpoint'
)
self.action_space.no_op = np.array([0]).reshape(1, 1)
self.state_space.extend(
[ContinuousSpace(0, self.episode_length),
ContinuousSpace(0, self.capacity),
ContinuousSpace(0, self.supply_capacity)],
['Step', 'C_stored_demand [MWh]', 'C_stored_supply[MWh]'],
)
# let our agent see the stored demand
# let our agent see the stored supply
# see precool power?
# obs space is created during env init
self.observation_space.extend(
[ContinuousSpace(0, self.capacity),
ContinuousSpace(0, self.supply_capacity)],
['C_stored_demand [MWh]', 'C_stored_supply[MWh]'],
)
# supply = precooling
# i.e how much power we consume during precooling
self.supply_power = float(max(
float(supply_power),
self.state_space.data.loc[:, 'C_demand [MW]'].max()
))
def __repr__(self):
return '<energypy flex environment>'
def _reset(self):
"""
Resets the environment
returns
observation (np.array) the initial observation
"""
self.steps = 0
self._charge = 0 #MWh
# use a deque for stored demand (all MWh per 5min)
self.storage_history = deque(maxlen=self.release_time)
[self.storage_history.appendleft(0) for _ in range(self.release_time)]
# float for stored supply
self.stored_supply = 0 # MWh
self.state = self.state_space(
self.steps, np.array(
[self.steps, self.stored_demand, self.stored_supply]
)
)
self.observation = self.observation_space(
self.steps, np.array(
[self.stored_demand, self.stored_supply]
)
)
return self.observation
@property
def stored_demand(self):
return self._stored_demand
@stored_demand.getter
def stored_demand(self):
return sum(self.storage_history)
def release_supply(self, demand):
""" net off our demand with some stored supply """
""" args MW return MW """
released = min(demand / 12, self.stored_supply)
self.stored_supply -= released
return released * 12
def store_demand(self, demand):
""" always store - check for the capacity done elsewhere """
""" args MW return MW """
self.storage_history.appendleft(demand / 12)
logger.debug('storing {}'.format(self.storage_history))
return demand
def dump_demand(self):
""" returns MW """
dumped = sum(self.storage_history)
[self.storage_history.appendleft(0)
for _ in range(self.storage_history.maxlen)]
assert self.stored_demand == 0
return dumped * 12
def store_supply(self, demand):
""" args MW return MW """
old_precool = self.stored_supply
power_capacity_for_supply = self.supply_power - demand
stored_supply = np.min(
[self.supply_capacity - old_precool,
power_capacity_for_supply / 12]
)
self.stored_supply += stored_supply
precooling = stored_supply * 12
return demand + precooling
def _step(self, action):
"""
One step through the environment
args
action (np.array) shape=(1, 1)
returns
observation (np.array) shape=(1, self.observation_space.shape*)
reward (float)
done (bool)
info (dict)
"""
action = action[0][0] # could do this in BaseAgent
site_demand = self.get_state_variable('C_demand [MW]') / 12
site_consumption = site_demand
# these can be simplified - unless info wanted for debug
# ie move from var = self.fun(). site_cons += var
# to site_cons += self.fun() etc
released_demand = self.storage_history.pop()
# no-op
if action == 0:
released_supply = self.release_supply(site_consumption)
self.storage_history.appendleft(0)
# print('released supply {}'.format(released_supply))
site_consumption -= released_supply
# raising setpoint (reducing demand)
if action == 1:
stored_demand = self.store_demand(site_consumption)
site_consumption -= stored_demand
# print('{} stored demand {} site_consumption'.format(
# stored_demand, site_consumption))
site_consumption += released_demand * 12
# reducing setpoint (increasing demand)
if action == 2:
stored_demand_dump = self.dump_demand()
site_consumption += stored_demand_dump
site_consumption = self.store_supply(site_consumption)
# dump out the entire stored demand if we reach capacity
# this is the chiller ramping up to full when return temp gets
# too high
if self.stored_demand >= self.capacity:
site_consumption += self.dump_demand()
# do the same if the episode is over - dump everything out
if self.steps == self.state_space.episode.shape[0] - 1:
site_consumption += self.dump_demand()
logging.debug('released demand {}'.format(released_demand))
# if action == 1:
# print('test before save')
# print('{} stored demand {} site_consumption'.format(
# stored_demand, site_consumption))
setpoint = 0
if action == 1:
setpoint = 1
elif action == 2:
setpoint = -1
electricity_price = self.get_state_variable(
'C_electricity_price [$/MWh]')
baseline_cost = site_demand * electricity_price / 12
optimized_cost = site_consumption * electricity_price / 12
# negative means we are increasing cost
# positive means we are reducing cost
reward = baseline_cost - optimized_cost
done = False
if self.steps == self.state_space.episode.shape[0] - 1:
done = True
next_state = np.zeros((1, *self.state_space.shape))
next_observation = np.zeros((1, *self.observation_space.shape))
else:
next_state = self.state_space(
self.steps + 1,
np.array([self.steps + 1, self.stored_demand,
self.stored_supply])
)
next_observation = self.observation_space(
self.steps + 1,
np.array([self.stored_demand, self.stored_supply])
)
info = {
'step': self.steps,
'state': self.state,
'observation': self.observation,
'action': action,
'reward': reward,
'next_state': next_state,
'next_observation': next_observation,
'done': done,
'electricity_price': electricity_price,
'stored_demand': self.stored_demand,
'stored_supply': self.stored_supply,
'site_demand': site_demand,
'site_consumption': site_consumption,
'net_discharged': site_consumption - site_demand,
'setpoint': setpoint,
}
self.info = self.update_info(**info)
[logger.debug('{} {}'.format(k, v)) for k, v in info.items()]
self.steps += 1
self.state = next_state
self.observation = next_observation
return self.observation, reward, done, self.info
| [
"adam.green@adgefficiency.com"
] | adam.green@adgefficiency.com |
6af9c2ef36096d3ebeeefd45291472877aabdc5d | 47ae678aa432deb0eb4f99b6a9787853315ab899 | /qikan/spiders/SAGE150.py | 3ec3cf0fa807faeb520fadb2327f9da5e2fe66c8 | [] | no_license | RoggerLuo/python-scrapy-journal | 1f3fb2ac41d90d25a0b635932600ff2327bf22d1 | 38d8e714f346e5951bcb55487fc0056a834f30d8 | refs/heads/master | 2020-04-05T20:32:42.201876 | 2018-11-13T06:35:59 | 2018-11-13T06:35:59 | 157,185,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,251 | py | # -*- coding: utf-8 -*-
# 于丽美
# http://journals.sagepub.com/toc/bnaa/2
import scrapy
from qikan.items import QikanItem
import re
import time
from .config import Config,postItemWithPdf,postItem,proxyRequest
class Sage150Spider(scrapy.Spider):
name = 'SAGE150'
# url = input('请输入网址:')
start_urls = ['http://journals.sagepub.com/toc/jmh/current']
base_url = 'http://journals.sagepub.com'
def parse(self, response):
# 文章url
hrefs = response.xpath("//div[@class='art_title linkable']/a[@class='ref nowrap']/@href").extract()
volume = response.xpath(
"//div[@class='pager issueBookNavPager']/span[@class='journalNavCenterTd']/div[@class='journalNavTitle']/text()").extract()[
0]
for i in range(len(hrefs)):
yield proxyRequest(url=self.base_url + hrefs[i], meta={'annualVolume': volume}, callback=self.parse2)
def parse2(self, response):
item = QikanItem()
# 文章题目
item['title'] = ''
titles = response.xpath("//div[@class='hlFld-Title']//div[@class='publicationContentTitle']//h1").extract()
pat = re.compile('<[^>]+>', re.S)
for title in titles:
item['title'] = item['title'] + pat.sub('', title).strip()
# item['title'] = response.xpath("//div[@class='hlFld-Title']//div[@class='publicationContentTitle']//h1/text()").extract()[0].strip()
# # titles = response.xpath("//h2[@class='citation__title']/text()").extract()
# pat = re.compile('<[^>]+>', re.S)
# 作者
item['author'] = ''
# 通讯作者
# 通讯作者单位
aus = []
if response.xpath("//div[@class='header']/a[@class='entryAuthor']").extract():
authors = response.xpath("//div[@class='header']/a[@class='entryAuthor']").extract()
for author in authors:
item['author'] = item['author'] + pat.sub('', author).strip() + ","
else:
item['author'] = 'NULL'
if response.xpath(
"//div[@class='hlFld-ContribAuthor']/span[@class='NLM_contrib-group']/div[@class='artice-info-affiliation']/text()").extract():
item['authorAffiliation'] = response.xpath(
"//div[@class='hlFld-ContribAuthor']/span[@class='NLM_contrib-group']/div[@class='artice-info-affiliation']/text()").extract()[
0]
elif response.xpath(
"//div[@class='hlFld-ContribAuthor']/div[@class='artice-info-affiliation'][1]/text()").extract():
item['authorAffiliation'] = response.xpath(
"//div[@class='hlFld-ContribAuthor']/div[@class='artice-info-affiliation'][1]/text()").extract()[0]
elif response.xpath("//div[@class='artice-notes']//corresp//text()").extract():
item['authorAffiliation'] = response.xpath("//div[@class='artice-notes']//corresp//text()").extract()[
0].replace('Email:', '')
else:
item['authorAffiliation'] = 'NULL'
item['authorAffiliation'] = item['authorAffiliation'].replace('\n', '').replace('\r', '').replace('\t',
'').replace(
' ', ' ')
# print(item['authorAffiliation'])
item['correspongdingauthorEmail'] = ''
if response.xpath("//a[@class='email']/span[@class='nobrWithWbr']").extract():
correspongdingauthorEmails = response.xpath("//a[@class='email']/span[@class='nobrWithWbr']").extract()
for correspongdingauthorEmail in correspongdingauthorEmails:
item['correspongdingauthorEmail'] = item['correspongdingauthorEmail'] + pat.sub('',
correspongdingauthorEmail).strip() + '||'
else:
item['correspongdingauthorEmail'] = 'NULL'
# item['correspongdingauthorEmail'] = response.xpath("//a[@class='email']/span[@class='nobrWithWbr']").extract()
if response.xpath(
"//div[@class='hlFld-ContribAuthor']/span[@class='contribDegrees'][1]/div[@class='authorLayer']/div[@class='header']/a[@class='entryAuthor']/text()").extract():
item['correspongdingauthor'] = response.xpath(
"//div[@class='hlFld-ContribAuthor']/span[@class='contribDegrees'][1]/div[@class='authorLayer']/div[@class='header']/a[@class='entryAuthor']/text()").extract()[
0] + '||'
else:
item['correspongdingauthor'] = 'NULL'
# # DOI号
if item['correspongdingauthor'] == 'NULL':
item['correspongdingauthor'] = 'NULL'
elif item['correspongdingauthor'] != '':
correspongdingau = item['correspongdingauthor'].split("||")
correspongdingEm = item['correspongdingauthorEmail'].split("||")
item['correspongdingauthor'] = ''
for i in range(len(correspongdingau)):
if correspongdingau[i] != '':
item['correspongdingauthor'] += '(' + correspongdingau[i] + ',' + correspongdingEm[i] + '),'
else:
item['correspongdingauthor'] = 'NULL'
# print(item['correspongdingauthor'])
item['DOI'] = response.xpath(
"//div[@class='widget-body body body-none body-compact-all']/div[@class='doiWidgetContainer']/a[@class='doiWidgetLink']/text()").extract()[
0]
# # print(item['DOI'])
# # 没有关键词
item['keyword'] = ''
if response.xpath("//div[@class='hlFld-KeywordText']/kwd-group/a[@class='attributes']/text()").extract():
keywords = response.xpath(
"//div[@class='hlFld-KeywordText']/kwd-group/a[@class='attributes']/text()").extract()
for keyword in keywords:
item['keyword'] = item['keyword'] + keyword + ','
else:
item['keyword'] = 'NULL'
# # 摘要
item['abstract'] = ''
pat = re.compile('<[^>]+>', re.S)
if response.xpath("//div[@class='hlFld-Abstract']//div[@class='abstractSection abstractInFull']//p"):
coninfos = response.xpath(
"//div[@class='hlFld-Abstract']//div[@class='abstractSection abstractInFull']//p").extract()
for coninfo in coninfos:
item['abstract'] = item['abstract'] + pat.sub('', coninfo).strip() + '<br>'
else:
item['abstract'] = 'NULL'
item['abstract'] = item['abstract'].replace('\n', '')
# print(item['abstract'])
header = {
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3013.3 Safari/537.36'
}
if response.xpath(
"//div[@class='rightMobileMenuButton articleToolsButton PDFTool pdf-access redButton smallButton']/a/@href").extract():
pdf = response.xpath(
"//div[@class='rightMobileMenuButton articleToolsButton PDFTool pdf-access redButton smallButton']/a/@href").extract()[
0]
item['pdf'] = self.base_url + pdf
yield proxyRequest(url=self.base_url + pdf, meta={'filename': pdf.split('/')[-1] + '.pdf'}, headers=header,
callback=postItemWithPdf(item)
)
else:
item['pdf'] = 'NULL'
postItem(item)
# print(item['pdf'])
# 卷,期,年
item['annualVolume'] = response.meta['annualVolume'].strip()
# item['annualVolume'] = response.xpath("//div[@class='Article information']/div[1]/text()").extract()[0].strip()
# item['annualVolume'] = pat.sub('', annualVolume).strip()
# print(item['annualVolume'])
# 页码
item['pageNumber'] = 'NULL'
# print(pageNumber)
# ru2 = re.compile(r'pp (.*)')
# # 页码
# item['pageNumber'] = ru2.search(pageNumber).group(1)
# print(item['pageNumber'])
# 期刊名
item['journalTitle'] = pat.sub('', response.xpath(
"//div[@id='e3c018c7-8573-4acd-93ae-0ff4b1f3baf3']/div[@class='wrapped ']").extract()[0]).strip()
# print(item['journalTitle'])
# 有些期刊目录有一张图片
item['imageUrlList'] = 'NULL'
# 12 July 2018
item['publishTime'] = response.xpath("//span[@class='publicationContentEpubDate dates']/text()").extract()[
1].strip()
# 改成2018-07-12
temp = time.strptime(item['publishTime'], "%B %d, %Y")
item['publishTime'] = time.strftime("%Y-%m-%d", temp)
# print(item['publishTime'])
yield item
# # 下载pdf
def downloadpdf(self, response):
file_path = Config().pdf_url + response.meta['filename']
with open(file_path, 'wb') as f:
f.write(response.body)
# #下载图片
def downloadimg(self, response):
file_path = Config().img_url + response.meta['filename']
with open(file_path, 'wb') as f:
f.write(response.body)
| [
"luojie.5408@163.com"
] | luojie.5408@163.com |
d0b5c147e5772ec9cb099d566c35eb532ab75c4e | 4518699229752baea849a53264033010997bb3ed | /graph_peak_caller/__init__.py | 368911282687569e68ea0ea6b5ecabb3444f1fb8 | [
"BSD-3-Clause"
] | permissive | cgroza/graph_peak_caller | 09926bce71ef0d3c35a7b315499814be9f06db35 | 52c48ed30bbfac859594c6f5b2cfb191b7191481 | refs/heads/master | 2022-02-11T21:04:00.547375 | 2022-01-28T14:19:52 | 2022-01-28T14:19:52 | 203,458,900 | 0 | 0 | null | 2019-08-20T21:43:53 | 2019-08-20T21:43:53 | null | UTF-8 | Python | false | false | 69 | py | from .callpeaks import CallPeaks, Configuration, CallPeaksFromQvalues | [
"ivar.grytten@gmail.com"
] | ivar.grytten@gmail.com |
3df1ccfcd66c67fa11c7694478c2e22377c2fadb | e607cf6c01b25a2c454f9421081a230caba1cfcc | /order_photo_locations.py | c28a2ca9d38ac84ee4b10f1fe07ceb1046e0c736 | [] | no_license | walterdevalk/order_photo_locations | 2cef7e540eaa111d061aa1f5e407be9457268632 | 6944d04133693bc71041dc4eff4927ae478d4b0d | refs/heads/master | 2021-01-01T19:42:40.736081 | 2015-02-18T22:47:33 | 2015-02-18T22:47:33 | 21,285,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,250 | py | #! /usr/bin/env python
"""
This script will reorganize the pictures in the given folder recursive and resort them
based on the date, found in the ExifTag DateTimeOriginal.
Example:
vacation/spain/1234422.JPG with date 2012:03:03 11:11:00
will be moved to 2012/03/03/1234422.JPG
"""
__author__ = "Walter de Valk"
__version__ = 0.1
import os
import shutil
import sys
try:
import pyexiv2
except ImportError:
print "This script depends on pyexiv2 to get the date from the files."
print "Please install python-pyexiv2 and try again."
def Usage():
message = """
{0} version {1}
Organize your photo's by date.
Usage:
python {0} <dir>
<dir> is the directory where your photo's are stored'
{2}
""".format(sys.argv[0], __version__, __doc__)
return message
def select_files(folder):
"""Select files recursive starting from folder"""
for path, dirs, files in os.walk(folder):
for f in files:
yield os.path.join(path, f)
def get_date(f):
"""get the date tag with pyexiv2
returns a String or None
"""
try:
metadata = pyexiv2.ImageMetadata(f)
metadata.read()
except IOError:
# print f, 'is not readable'
return None
try:
dt = metadata['Exif.Photo.DateTimeOriginal'].value
return dt
except KeyError:
# print f, 'No valid datetime tag found'
return None
def parse_date(dt):
"""extract the date from the tag Exif.Photo.DateTimeOriginal"""
exifdate, exiftime = str(dt).split(' ')
# get the day, month and year
# if the day month and year are separated with an '-'
try:
ed = exifdate.split('-')
exifday = ed[2]
exifmonth = ed[1]
exifyear = ed[0]
except IndexError:
# doesn't work
# try again with seperator ':'
try:
ed = exifdate.split(':')
exifday = ed[2]
exifmonth = ed[1]
exifyear = ed[0]
except IndexError:
# try again with seperator '/'
ed = exifdate.split('/')
exifday = ed[2]
exifmonth = ed[1]
exifyear = ed[0]
return (exifyear, exifmonth, exifday)
def compare_date_with_folder(f, dt):
# try to extract date from folder
d, f = os.path.split(f)
d = d.split(os.sep)
try:
day = d[-1]
month = d[-2]
year = d[-3]
except IndexError:
return False
# extract date from exiftag
try:
exifyear, exifmonth, exifday = parse_date(dt)
except IndexError:
# can't extract date
# return True, saying the file is already on the right place
# so this file will be skipped
print "Skipping file %s can't extract date" % f
return True
# compare date with folders
if year == exifyear and month == exifmonth and day == exifday:
return True # folder is same as date
else:
return False # folder is not same as date
def copy_to_destination(filename, destination_folder):
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
try:
shutil.move(filename, destination_folder)
# print to screen what's happening
print f, '-->', destination_folder
except:
print "ERROR can't move %s to %s" % (filename, destination_folder)
if '__main__' in __name__:
try:
startfolder = sys.argv[1]
except IndexError:
print Usage()
sys.exit(1)
folder_for_undetermined_date = os.path.join(startfolder, 'zonder_datum') # destination to put dateless photo's
process_list = []
total_amount = 0
notOK = 0
moved = 0
for f in select_files(startfolder):
# perform action per file recursive
# get the date from the file with pyexiv2
dt = get_date(f)
destdir = None
if dt: # there is a date tag in file
if not compare_date_with_folder(f, dt): # if file is not in folder with date, move file to root/year/month/day
# print f, dt
fi = os.path.split(f)[1] # filename
# create destination directory root/year/month/day
dr = os.path.join(*parse_date(dt))
destdir = os.path.join(startfolder, dr)
else: # no date tag present
# print f, dt
pt, fi = os.path.split(f)
if pt == folder_for_undetermined_date:
# if the photo is already in the
# folder without_date nothing to do
continue
else:
destdir = folder_for_undetermined_date
if destdir:
process_list.append( (f, destdir))
# to process
print( "There are found "+ str(len( process_list))+' photos to move' )
print "file", "destination"
print "----", "-----------"
for i in process_list:
print i[0], i[1]
if raw_input("Would you like to continue? ").upper() in ["YES", "Y", ""]:
for f,d in process_list:
try:
copy_to_destination(f, d)
except:
print f, d
sys.exit( "error moving")
else:
sys.exit( "You cancelled the operation") | [
"walterdevalk@gmail.com"
] | walterdevalk@gmail.com |
7c7d3d1d6c523924117004403ed9dceb23fe920b | e298eb03912283655a034472aefb349d975a6def | /src/dcos_migrate/system/backup.py | aef0175ef9019cd89bf44484deb52daa1dd61f14 | [
"Apache-2.0"
] | permissive | fatz/dcos_migrate | b0303bd770b574aa4567ed97ec0b4eb775b0ce7b | 0310c93b76dd0cd7149dbac335de870a30956a68 | refs/heads/master | 2023-02-24T10:01:12.851596 | 2021-01-12T14:30:46 | 2021-01-12T14:30:46 | 322,659,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,175 | py | import json
class Backup(object):
"""docstring for Backup."""
def __init__(self, pluginName: str, backupName: str, data={}, extension='json'):
super(Backup, self).__init__()
self._plugin_name = pluginName
if "/" in backupName:
raise AttributeError(
"backupName {} contains not allowed '/'".format(backupName))
self._name = backupName
self._data = data
self._extension = extension
self._serializer = json.dumps
self._deserializer = json.loads
@staticmethod
def renderBackupName(name: str) -> str:
# replace path with dashes
return "-".join(list(filter(None, name.split("/"))))
@property
def plugin_name(self) -> str:
return self._plugin_name
@property
def name(self) -> str:
return self._name
@property
def extension(self) -> str:
return self._extension
@property
def data(self) -> object:
return self._data
def serialize(self) -> str:
return self._serializer(self._data)
def deserialize(self, data: str) -> object:
self._data = self._deserializer(data)
| [
"julferts@d2iq.com"
] | julferts@d2iq.com |
183f355e89e5b4ac180269cf4f73b56a2122333e | df2cbe914f463ad050d7ed26194424afbe3a0a52 | /addons/hr_fleet/__manifest__.py | 7d0575750ec9ca1481ce367a05e4944a8f8f018a | [
"Apache-2.0"
] | permissive | SHIVJITH/Odoo_Machine_Test | 019ed339e995be980606a2d87a63312ddc18e706 | 310497a9872db7844b521e6dab5f7a9f61d365a4 | refs/heads/main | 2023-07-16T16:23:14.300656 | 2021-08-29T11:48:36 | 2021-08-29T11:48:36 | 401,010,175 | 0 | 0 | Apache-2.0 | 2021-08-29T10:13:58 | 2021-08-29T10:13:58 | null | UTF-8 | Python | false | false | 500 | py | # -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Fleet History',
'version': '1.0',
'category': 'Human Resources',
'summary': 'Get history of driven cars by employees',
'description': "",
'depends': ['hr', 'fleet'],
'data': [
'views/employee_views.xml',
'views/fleet_vehicle_views.xml',
'wizard/hr_departure_wizard_views.xml'
],
'auto_install': True,
'license': 'LGPL-3',
}
| [
"36736117+SHIVJITH@users.noreply.github.com"
] | 36736117+SHIVJITH@users.noreply.github.com |
bf0f592e4454651e39c035714b612d4bef7edce6 | 25aa34dc6464833005838a31333cd1ea3d02f1de | /TAMUCTF/2020/crypto/eternal_game/solver.py | ba48557a634eb8216031ef3156ce3d97ee0152a2 | [] | no_license | ChaO-0/WriteUps | 4af54d347f0c2452c0dce4c728e6c6b8f30360dc | 3ae188c1b8b2040c65377557d0495a704108d800 | refs/heads/master | 2022-12-23T17:09:44.254508 | 2020-10-14T00:39:04 | 2020-10-14T00:39:04 | 173,588,941 | 10 | 1 | null | 2022-12-13T16:52:10 | 2019-03-03T14:56:03 | Python | UTF-8 | Python | false | false | 624 | py | from pwn import *
from hashpumpy import hashpump
def solve():
p = remote("challenges.tamuctf.com", 8812)
# p = process(["python", "chall.py"])
p.sendline("1")
p.sendline("3")
p.recvuntil("quit\n")
known_hash = p.recvline()[:-1]
log.info("Known Hash: {}".format(known_hash))
for key_length in range(1, 0xb):
p.sendline("2")
new_hash, msg = hashpump(known_hash, '1', '1306172139783549808932216282612057073445238267608', key_length)
p.sendline(msg)
p.sendline(new_hash)
print key_length
p.interactive()
if __name__ == "__main__":
solve() | [
"christophergin88@gmail.com"
] | christophergin88@gmail.com |
163c9397b93227ba72dfe3d6f69453fbc88814af | 7956e2e7db011bcaf90fb3a55c6fb729f7a4ea5b | /mmdet/models/detectors/cascade_rcnn.py | f76bdc53293370348449f0c204d8a0b535a23791 | [
"Apache-2.0"
] | permissive | ducminhkhoi/FAPIS | 152381e5c88f60e62a48a182a5752bef5637aedf | c89703006a2a5250f4d1c71e0aad958d72526885 | refs/heads/main | 2023-08-24T03:22:01.855636 | 2021-10-16T02:29:52 | 2021-10-16T02:29:52 | 417,691,302 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,882 | py | from __future__ import division
import torch
import torch.nn as nn
from .base import BaseDetector
from .test_mixins import RPNTestMixin
from .. import builder
from ..registry import DETECTORS
from mmdet.core import (build_assigner, bbox2roi, bbox2result, build_sampler,
merge_aug_masks)
@DETECTORS.register_module
class CascadeRCNN(BaseDetector, RPNTestMixin):
def __init__(self,
num_stages,
backbone,
neck=None,
shared_head=None,
rpn_head=None,
bbox_roi_extractor=None,
bbox_head=None,
mask_roi_extractor=None,
mask_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
assert bbox_roi_extractor is not None
assert bbox_head is not None
super(CascadeRCNN, self).__init__()
self.num_stages = num_stages
self.backbone = builder.build_backbone(backbone)
if neck is not None:
self.neck = builder.build_neck(neck)
if rpn_head is not None:
self.rpn_head = builder.build_head(rpn_head)
if shared_head is not None:
self.shared_head = builder.build_shared_head(shared_head)
if bbox_head is not None:
self.bbox_roi_extractor = nn.ModuleList()
self.bbox_head = nn.ModuleList()
if not isinstance(bbox_roi_extractor, list):
bbox_roi_extractor = [
bbox_roi_extractor for _ in range(num_stages)
]
if not isinstance(bbox_head, list):
bbox_head = [bbox_head for _ in range(num_stages)]
assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages
for roi_extractor, head in zip(bbox_roi_extractor, bbox_head):
self.bbox_roi_extractor.append(
builder.build_roi_extractor(roi_extractor))
self.bbox_head.append(builder.build_head(head))
if mask_head is not None:
self.mask_head = nn.ModuleList()
if not isinstance(mask_head, list):
mask_head = [mask_head for _ in range(num_stages)]
assert len(mask_head) == self.num_stages
for head in mask_head:
self.mask_head.append(builder.build_head(head))
if mask_roi_extractor is not None:
self.share_roi_extractor = False
self.mask_roi_extractor = nn.ModuleList()
if not isinstance(mask_roi_extractor, list):
mask_roi_extractor = [
mask_roi_extractor for _ in range(num_stages)
]
assert len(mask_roi_extractor) == self.num_stages
for roi_extractor in mask_roi_extractor:
self.mask_roi_extractor.append(
builder.build_roi_extractor(roi_extractor))
else:
self.share_roi_extractor = True
self.mask_roi_extractor = self.bbox_roi_extractor
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
@property
def with_rpn(self):
return hasattr(self, 'rpn_head') and self.rpn_head is not None
def init_weights(self, pretrained=None):
super(CascadeRCNN, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
if self.with_rpn:
self.rpn_head.init_weights()
if self.with_shared_head:
self.shared_head.init_weights(pretrained=pretrained)
for i in range(self.num_stages):
if self.with_bbox:
self.bbox_roi_extractor[i].init_weights()
self.bbox_head[i].init_weights()
if self.with_mask:
if not self.share_roi_extractor:
self.mask_roi_extractor[i].init_weights()
self.mask_head[i].init_weights()
def extract_feat(self, img):
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_train(self,
img,
img_meta,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None):
x = self.extract_feat(img)
losses = dict()
if self.with_rpn:
rpn_outs = self.rpn_head(x)
rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta,
self.train_cfg.rpn)
rpn_losses = self.rpn_head.loss(
*rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
losses.update(rpn_losses)
proposal_cfg = self.train_cfg.get('rpn_proposal',
self.test_cfg.rpn)
proposal_inputs = rpn_outs + (img_meta, proposal_cfg)
proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
else:
proposal_list = proposals
for i in range(self.num_stages):
self.current_stage = i
rcnn_train_cfg = self.train_cfg.rcnn[i]
lw = self.train_cfg.stage_loss_weights[i]
# assign gts and sample proposals
sampling_results = []
if self.with_bbox or self.with_mask:
bbox_assigner = build_assigner(rcnn_train_cfg.assigner)
bbox_sampler = build_sampler(
rcnn_train_cfg.sampler, context=self)
num_imgs = img.size(0)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
for j in range(num_imgs):
assign_result = bbox_assigner.assign(
proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j],
gt_labels[j])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[j],
gt_bboxes[j],
gt_labels[j],
feats=[lvl_feat[j][None] for lvl_feat in x])
sampling_results.append(sampling_result)
# bbox head forward and loss
bbox_roi_extractor = self.bbox_roi_extractor[i]
bbox_head = self.bbox_head[i]
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
cls_score, bbox_pred = bbox_head(bbox_feats)
bbox_targets = bbox_head.get_target(sampling_results, gt_bboxes,
gt_labels, rcnn_train_cfg)
loss_bbox = bbox_head.loss(cls_score, bbox_pred, *bbox_targets)
for name, value in loss_bbox.items():
losses['s{}.{}'.format(i, name)] = (
value * lw if 'loss' in name else value)
# mask head forward and loss
if self.with_mask:
if not self.share_roi_extractor:
mask_roi_extractor = self.mask_roi_extractor[i]
pos_rois = bbox2roi(
[res.pos_bboxes for res in sampling_results])
mask_feats = mask_roi_extractor(
x[:mask_roi_extractor.num_inputs], pos_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
else:
# reuse positive bbox feats
pos_inds = []
device = bbox_feats.device
for res in sampling_results:
pos_inds.append(
torch.ones(
res.pos_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds.append(
torch.zeros(
res.neg_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds = torch.cat(pos_inds)
mask_feats = bbox_feats[pos_inds]
mask_head = self.mask_head[i]
mask_pred = mask_head(mask_feats)
mask_targets = mask_head.get_target(sampling_results, gt_masks,
rcnn_train_cfg)
pos_labels = torch.cat(
[res.pos_gt_labels for res in sampling_results])
loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels)
for name, value in loss_mask.items():
losses['s{}.{}'.format(i, name)] = (
value * lw if 'loss' in name else value)
# refine bboxes
if i < self.num_stages - 1:
pos_is_gts = [res.pos_is_gt for res in sampling_results]
roi_labels = bbox_targets[0] # bbox_targets is a tuple
with torch.no_grad():
proposal_list = bbox_head.refine_bboxes(
rois, roi_labels, bbox_pred, pos_is_gts, img_meta)
return losses
    def simple_test(self, img, img_meta, proposals=None, rescale=False):
        """Cascade inference on a single image (no test-time augmentation).

        Each stage scores/regresses the current RoIs and the refined boxes
        are fed to the next stage; the final detections are decoded from the
        classification scores averaged over all stages ("ensemble").

        Returns the bbox result, a ``(bbox, segm)`` tuple when masks are
        enabled, or — when ``test_cfg.keep_all_stages`` is set — a dict with
        one entry per stage plus the ensemble.
        """
        x = self.extract_feat(img)
        # Use externally supplied proposals when given, otherwise run the RPN.
        proposal_list = self.simple_test_rpn(
            x, img_meta, self.test_cfg.rpn) if proposals is None else proposals
        img_shape = img_meta[0]['img_shape']
        ori_shape = img_meta[0]['ori_shape']
        scale_factor = img_meta[0]['scale_factor']
        # "ms" in variable names means multi-stage
        ms_bbox_result = {}
        ms_segm_result = {}
        ms_scores = []
        rcnn_test_cfg = self.test_cfg.rcnn
        rois = bbox2roi(proposal_list)
        for i in range(self.num_stages):
            bbox_roi_extractor = self.bbox_roi_extractor[i]
            bbox_head = self.bbox_head[i]
            bbox_feats = bbox_roi_extractor(
                x[:len(bbox_roi_extractor.featmap_strides)], rois)
            if self.with_shared_head:
                bbox_feats = self.shared_head(bbox_feats)
            cls_score, bbox_pred = bbox_head(bbox_feats)
            # Collected for the cross-stage score average below.
            ms_scores.append(cls_score)
            if self.test_cfg.keep_all_stages:
                # Decode and keep this stage's own detections as well.
                det_bboxes, det_labels = bbox_head.get_det_bboxes(
                    rois,
                    cls_score,
                    bbox_pred,
                    img_shape,
                    scale_factor,
                    rescale=rescale,
                    cfg=rcnn_test_cfg)
                bbox_result = bbox2result(det_bboxes, det_labels,
                                          bbox_head.num_classes)
                ms_bbox_result['stage{}'.format(i)] = bbox_result
                if self.with_mask:
                    mask_roi_extractor = self.mask_roi_extractor[i]
                    mask_head = self.mask_head[i]
                    if det_bboxes.shape[0] == 0:
                        # No detections: one empty list per foreground class.
                        mask_classes = mask_head.num_classes - 1
                        segm_result = [[] for _ in range(mask_classes)]
                    else:
                        # Map boxes back to the network input scale if the
                        # detections were rescaled to the original image.
                        _bboxes = (
                            det_bboxes[:, :4] *
                            scale_factor if rescale else det_bboxes)
                        mask_rois = bbox2roi([_bboxes])
                        mask_feats = mask_roi_extractor(
                            x[:len(mask_roi_extractor.featmap_strides)],
                            mask_rois)
                        if self.with_shared_head:
                            # NOTE(review): only this branch passes the stage
                            # index to the shared head — confirm intentional.
                            mask_feats = self.shared_head(mask_feats, i)
                        mask_pred = mask_head(mask_feats)
                        segm_result = mask_head.get_seg_masks(
                            mask_pred, _bboxes, det_labels, rcnn_test_cfg,
                            ori_shape, scale_factor, rescale)
                    ms_segm_result['stage{}'.format(i)] = segm_result
            if i < self.num_stages - 1:
                # Refine the RoIs with this stage's regression before the
                # next stage consumes them.
                bbox_label = cls_score.argmax(dim=1)
                rois = bbox_head.regress_by_class(rois, bbox_label, bbox_pred,
                                                  img_meta[0])
        # Ensemble: average class scores over stages; bbox_pred here is the
        # last stage's regression output.
        cls_score = sum(ms_scores) / self.num_stages
        det_bboxes, det_labels = self.bbox_head[-1].get_det_bboxes(
            rois,
            cls_score,
            bbox_pred,
            img_shape,
            scale_factor,
            rescale=rescale,
            cfg=rcnn_test_cfg)
        bbox_result = bbox2result(det_bboxes, det_labels,
                                  self.bbox_head[-1].num_classes)
        ms_bbox_result['ensemble'] = bbox_result
        if self.with_mask:
            if det_bboxes.shape[0] == 0:
                mask_classes = self.mask_head[-1].num_classes - 1
                segm_result = [[] for _ in range(mask_classes)]
            else:
                _bboxes = (
                    det_bboxes[:, :4] *
                    scale_factor if rescale else det_bboxes)
                mask_rois = bbox2roi([_bboxes])
                # Run every stage's mask head on the ensembled boxes and
                # merge the sigmoid masks across stages.
                aug_masks = []
                for i in range(self.num_stages):
                    mask_roi_extractor = self.mask_roi_extractor[i]
                    mask_feats = mask_roi_extractor(
                        x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
                    if self.with_shared_head:
                        mask_feats = self.shared_head(mask_feats)
                    mask_pred = self.mask_head[i](mask_feats)
                    aug_masks.append(mask_pred.sigmoid().cpu().numpy())
                merged_masks = merge_aug_masks(aug_masks,
                                               [img_meta] * self.num_stages,
                                               self.test_cfg.rcnn)
                segm_result = self.mask_head[-1].get_seg_masks(
                    merged_masks, _bboxes, det_labels, rcnn_test_cfg,
                    ori_shape, scale_factor, rescale)
            ms_segm_result['ensemble'] = segm_result
        # Shape the return value depending on keep_all_stages / with_mask.
        if not self.test_cfg.keep_all_stages:
            if self.with_mask:
                results = (ms_bbox_result['ensemble'],
                           ms_segm_result['ensemble'])
            else:
                results = ms_bbox_result['ensemble']
        else:
            if self.with_mask:
                results = {
                    stage: (ms_bbox_result[stage], ms_segm_result[stage])
                    for stage in ms_bbox_result
                }
            else:
                results = ms_bbox_result
        return results
    def aug_test(self, img, img_meta, proposals=None, rescale=False):
        """Test-time augmentation is not implemented for this detector."""
        raise NotImplementedError
def show_result(self, data, result, img_norm_cfg, **kwargs):
if self.with_mask:
ms_bbox_result, ms_segm_result = result
if isinstance(ms_bbox_result, dict):
result = (ms_bbox_result['ensemble'],
ms_segm_result['ensemble'])
else:
if isinstance(result, dict):
result = result['ensemble']
super(CascadeRCNN, self).show_result(data, result, img_norm_cfg,
**kwargs)
| [
"nguyenkh@oregonstate.edu"
] | nguyenkh@oregonstate.edu |
c1a835cb1801ffcaeac3c32f72535c16876255c1 | 161fd6370ffa0b35ecd50719d6266224da597ee0 | /Python/Flask_Fundamentals/Ninja/server.py | f182eceadfc0fa9291b6b6c33e43ff646036d848 | [] | no_license | ebergstein/DojoAssignments | a30fd8b36442bff2a4253902a591ad11f191fc12 | 3ad9ac65073c733ead32b93ce4be19af5369fccf | refs/heads/master | 2021-06-19T09:48:23.100713 | 2017-06-30T04:24:35 | 2017-06-30T04:24:35 | 82,743,546 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | from flask import Flask, render_template, request, redirect, session
app = Flask(__name__)
# Required for session support.  NOTE(review): hard-coded secret is fine for
# a class exercise, but load it from the environment for any real deployment.
app.secret_key = 'ThisIsSecret'
@app.route('/')
def index():
    """Landing page with the ninja color links."""
    return render_template("index.html")
@app.route('/ninja/')
def none():
    """No color given: remember "all" and redirect to the result page."""
    session['image'] = "all"
    return redirect('/result')
@app.route('/result')
def show():
    """Render whichever image the session currently points at."""
    return render_template('show.html')
@app.route('/ninja/<vararg>')
def one(vararg):
    """Store the requested ninja color in the session, or "none" for an
    unrecognized color, then redirect to the result page."""
    # A recognized color maps straight through; anything else gets "none".
    if vararg in ("blue", "orange", "red", "purple"):
        session['image'] = vararg
    else:
        session['image'] = "none"
    print(vararg)  # debug trace of the requested color
    return redirect('/result')
app.run(debug=True) | [
"ebergstein@sbcglobal.net"
] | ebergstein@sbcglobal.net |
37a6cf00779213d897f049130db765b418d9d2df | 6be59c81f3f6a17c14b812be0de3346a82eb33dd | /cv/image_similarity/cnn/deep_ranking.py | 96d8025db491549a135a0a262ce980ed7011dad4 | [] | no_license | chunhuizhang/bilibili_vlogs | 6851fdcd43f08fcf7195e345b0bc85d99c0b9128 | 0efd921b24f2af43f5972ea6909deb2fc069d305 | refs/heads/master | 2023-08-17T15:47:04.299072 | 2023-08-14T13:46:31 | 2023-08-14T13:46:31 | 220,612,967 | 170 | 70 | null | null | null | null | UTF-8 | Python | false | false | 1,806 | py |
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications import Xception, ResNet50
from tensorflow.keras.utils import plot_model
from tensorflow.keras.layers import *
from tensorflow.keras import backend as K
from keras.applications.vgg16 import VGG16
from keras.applications import Xception
# Instantiate stock ImageNet architectures (weights are downloaded on first
# use).  NOTE(review): the later `from keras...` imports shadow the
# tensorflow.keras versions of VGG16/Xception — confirm mixing the two
# packages is intended.
vgg_with_top = VGG16(include_top=True)
# plot_model(vgg_with_top, to_file='vgg16_with_top.png', show_shapes=True)
vgg_without_top = VGG16(include_top=False)
# plot_model(vgg_without_top, to_file='vgg16_without_top.png', show_shapes=True)
inception = InceptionV3()
# plot_model(inception, to_file='inception_v3_withtop.png', show_shapes=True)
xception = Xception()
# plot_model(xception, to_file='xception_with_top.png', show_shapes=True)
resnet = ResNet50()
# Only the ResNet50 diagram is currently rendered; the others stay commented.
plot_model(resnet, to_file='resnet_with_top.png', show_shapes=True)
# first_input = Input(shape=(224, 224, 3))
# first_conv = Conv2D(96, kernel_size=(8, 8), strides=(16, 16), padding='same')(first_input)
# print(first_conv)
# first_max = MaxPool2D(pool_size=(3, 3), strides=(4, 4), padding='same')(first_conv)
# print(first_max)
# first_max = Flatten()(first_max)
# first_max = Lambda(lambda x: K.l2_normalize(x, axis=1))(first_max)
#
# second_input = Input(shape=(224, 224, 3))
# second_conv = Conv2D(96, kernel_size=(8, 8), strides=(32, 32), padding='same')(second_input)
# print(second_conv)
# second_max = MaxPool2D(pool_size=(7, 7), strides=(2, 2), padding='same')(second_conv)
# print(second_max)
# second_max = Flatten()(second_max)
# second_max = Lambda(lambda x: K.l2_normalize(x, axis=1))(second_max)
# merge_one = concatenate([first_max, second_max])
# print(first_max)
# print(second_max)
# print(merge_one) | [
"zch921005@126.com"
] | zch921005@126.com |
543f1382f2f9737cded78261595edeb2e5d82885 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/12135000.py | 899b37e0cb4bf6dced24a52398718e191cd12450 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,777 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/12135000.py generated: Wed, 25 Jan 2017 15:25:32
#
# Event Type: 12135000
#
# ASCII decay Descriptor: [B+ -> (D- => K+ pi- pi-) pi+ pi+ ]cc
#
# Configure the standard Generation sequence for event type 12135000.
from Configurables import Generation
Generation().EventType = 12135000
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
# Decay model for B+ -> D- pi+ pi+ with D- -> K+ pi- pi- (see file header).
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bu_D-pi+pi+,Kpipi=DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 521,-521 ]
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 521
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
# Sample the B+/B- momentum from the h_pteta histogram, binned in (pT, eta).
pgun.MomentumSpectrum.PdgCodes = [ 521,-521 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_521.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 12135000
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
a498f6e74476c80f37f9a23e7ce25b3099d4ae25 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-6223.py | e684d511e215eb2534e0c570c9e4aa081bc71b72 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,288 | py | # Binary-search trees
class TreeNode(object):
    # One node of an int binary-search tree.  Children are created lazily
    # through makeNode(); duplicate keys are rejected.
    value:int = 0
    left:"TreeNode" = None
    right:"TreeNode" = None
    def insert(self:"TreeNode", x:int) -> bool:
        # True when x was added, False when it already exists in this subtree.
        if x == self.value:
            return False
        if x < self.value:
            if not (self.left is None):
                return self.left.insert(x)
            self.left = makeNode(x)
            return True
        if not (self.right is None):
            return self.right.insert(x)
        self.right = makeNode(x)
        return True
    def contains(self:"TreeNode", x:int) -> bool:
        # Descend toward the only subtree that could hold x.
        child:"TreeNode" = None
        if x == self.value:
            return True
        if x < self.value:
            child = self.left
        else:
            child = self.right
        if child is None:
            return False
        return child.contains(x)
class TreeNode2(object):
    # Binary-search-tree node.  The 2-suffixed fields/methods are benchmark
    # padding: insert2/contains2 ignore their extra arguments, so they now
    # delegate to the canonical insert/contains instead of repeating them.
    value:int = 0
    value2:int = 0
    left:"TreeNode2" = None
    left2:"TreeNode2" = None
    right:"TreeNode2" = None
    right2:"TreeNode2" = None
    def insert(self:"TreeNode2", x:int) -> bool:
        # True when x was added, False for duplicates.
        if x < self.value:
            if self.left is None:
                self.left = makeNode2(x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode2(x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
        # x2 is unused; identical behavior to insert().
        return self.insert(x)
    def contains(self:"TreeNode2", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
        # x2 is unused; identical behavior to contains().
        return self.contains(x)
class TreeNode3(object):
    # Binary-search-tree node.  The numbered fields/methods are benchmark
    # padding: every insertN/containsN ignores its extra arguments, so they
    # now delegate to the canonical insert/contains.
    value:int = 0
    value2:int = 0
    value3:int = 0
    left:"TreeNode3" = None
    left2:"TreeNode3" = None
    left3:"TreeNode3" = None
    right:"TreeNode3" = None
    right2:"TreeNode3" = None
    right3:"TreeNode3" = None
    def insert(self:"TreeNode3", x:int) -> bool:
        # True when x was added, False for duplicates.
        if x < self.value:
            if self.left is None:
                self.left = makeNode3(x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode3(x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
        # Extra arguments unused; same behavior as insert().
        return self.insert(x)
    def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
        # Extra arguments unused; same behavior as insert().
        return self.insert(x)
    def contains(self:"TreeNode3", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
        # Extra arguments unused; same behavior as contains().
        return self.contains(x)
    def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
        # Extra arguments unused; same behavior as contains().
        return self.contains(x)
class TreeNode4(object):
    # Binary-search-tree node.  The numbered fields/methods are benchmark
    # padding: every insertN/containsN ignores its extra arguments, so they
    # now delegate to the canonical insert/contains.
    value:int = 0
    value2:int = 0
    value3:int = 0
    value4:int = 0
    left:"TreeNode4" = None
    left2:"TreeNode4" = None
    left3:"TreeNode4" = None
    left4:"TreeNode4" = None
    right:"TreeNode4" = None
    right2:"TreeNode4" = None
    right3:"TreeNode4" = None
    right4:"TreeNode4" = None
    def insert(self:"TreeNode4", x:int) -> bool:
        # True when x was added, False for duplicates.
        if x < self.value:
            if self.left is None:
                self.left = makeNode4(x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode4(x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
        # Extra arguments unused; same behavior as insert().
        return self.insert(x)
    def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
        # Extra arguments unused; same behavior as insert().
        return self.insert(x)
    def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
        # Extra arguments unused; same behavior as insert().
        return self.insert(x)
    def contains(self:"TreeNode4", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
        # Extra arguments unused; same behavior as contains().
        return self.contains(x)
    def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
        # Extra arguments unused; same behavior as contains().
        return self.contains(x)
    def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
        # Extra arguments unused; same behavior as contains().
        return self.contains(x)
class TreeNode5(object):
    # Binary-search-tree node.  The numbered fields/methods are benchmark
    # padding: every insertN/containsN ignores its extra arguments, so they
    # now delegate to the canonical insert/contains.
    value:int = 0
    value2:int = 0
    value3:int = 0
    value4:int = 0
    value5:int = 0
    left:"TreeNode5" = None
    left2:"TreeNode5" = None
    left3:"TreeNode5" = None
    left4:"TreeNode5" = None
    left5:"TreeNode5" = None
    right:"TreeNode5" = None
    right2:"TreeNode5" = None
    right3:"TreeNode5" = None
    right4:"TreeNode5" = None
    right5:"TreeNode5" = None
    def insert(self:"TreeNode5", x:int) -> bool:
        # True when x was added, False for duplicates.
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
        # Extra arguments unused; same behavior as insert().
        return self.insert(x)
    def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
        # Extra arguments unused; same behavior as insert().
        return self.insert(x)
    def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
        # Extra arguments unused; same behavior as insert().
        return self.insert(x)
    def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
        # Extra arguments unused; same behavior as insert().
        return self.insert(x)
    def contains(self:"TreeNode5", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
        # Extra arguments unused; same behavior as contains().
        return self.contains(x)
    def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
        # Extra arguments unused; same behavior as contains().
        return self.contains(x)
    def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
        # Extra arguments unused; same behavior as contains().
        return self.contains(x)
    def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
        # Extra arguments unused; same behavior as contains().
        return self.contains(x)
class Tree(object):
    # Thin wrapper that owns the root TreeNode and tracks the element count.
    root:TreeNode = None
    size:int = 0
    def insert(self:"Tree", x:int) -> object:
        # First value becomes the root; afterwards the size grows only on a
        # successful (non-duplicate) insert.
        if self.root is None:
            self.root = makeNode(x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def contains(self:"Tree", x:int) -> bool:
        # An empty tree contains nothing.
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
$ID.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
    # Allocate a leaf TreeNode holding x.
    b:TreeNode = None
    b = TreeNode()
    b.value = x
    return b
def makeNode2(x: int, x2: int) -> TreeNode2:
    # Allocate a leaf TreeNode2 holding x; x2 is accepted for signature
    # parity but unused (removed the dead local b2).
    b:TreeNode2 = None
    b = TreeNode2()
    b.value = x
    return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
    # Allocate a leaf TreeNode3 holding x; the other arguments are accepted
    # for signature parity but unused (removed the dead locals b2/b3).
    b:TreeNode3 = None
    b = TreeNode3()
    b.value = x
    return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
    # Allocate a leaf TreeNode4 holding x; the other arguments are accepted
    # for signature parity but unused (removed the dead locals b2..b4).
    b:TreeNode4 = None
    b = TreeNode4()
    b.value = x
    return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
    # Allocate a leaf TreeNode5 holding x; the other arguments are accepted
    # for signature parity but unused (removed the dead locals b2..b5).
    b:TreeNode5 = None
    b = TreeNode5()
    b.value = x
    return b
# Input parameters
# Only the unsuffixed variables (n, c, t, i, k) are actually used below;
# the numbered copies are benchmark padding.
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
t = Tree()
while i < n:
    # k walks a multiplicative pseudo-random sequence modulo 37831.
    t.insert(k)
    k = (k * 37813) % 37831
    # Additionally insert most small sequential values (3 of every c=4).
    if i % c != 0:
        t.insert(i)
    i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
    if t.contains(i):
        print(i)
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
1287f543ed12683499d95fffd74dafee2c0cf86b | 4f3a4c194451eae32f1ff7cf3b0db947e3892365 | /342/main.py | c7002a28e418d4ff9b0c3ff3d06e037f88380d8f | [] | no_license | szhongren/leetcode | 84dd848edbfd728b344927f4f3c376b89b6a81f4 | 8cda0518440488992d7e2c70cb8555ec7b34083f | refs/heads/master | 2021-12-01T01:34:54.639508 | 2021-11-30T05:54:45 | 2021-11-30T05:54:45 | 83,624,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | """
Given an integer (signed 32 bits), write a function to check whether it is a power of 4.
Example:
Given num = 16, return true. Given num = 5, return false.
Follow up: Could you solve it without loops/recursion?
"""
# TODO: NEEDS CODE FROM BEFORE | [
"shao.zhongren@gmail.com"
] | shao.zhongren@gmail.com |
7a8904d160c29ba67ffed19d22eeb72699e2fc0b | 32df6d2f71bf8fbc1d2e6db834d3a4980c8b1348 | /backend/settings.py | 6d7c01c2e412980117508a419b42f273fef820ee | [] | no_license | allilk/milkbox-js | e9fb458b32db127d6f048b6b07f596d562d8a0d3 | 998a958ea9d92f9391e2437b97677b9a582686cb | refs/heads/master | 2023-01-29T17:20:38.375678 | 2020-04-16T20:01:38 | 2020-04-16T20:01:38 | 256,255,410 | 0 | 0 | null | 2020-12-12T14:20:07 | 2020-04-16T15:29:06 | Python | UTF-8 | Python | false | false | 3,709 | py | """
Django settings for milkbox project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from frontend.config import DB_PSW, DEBUG, SECRET_KEY
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY WARNING: don't run with debug turned on in production!
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'rest_framework',
'rest_framework_api_key',
'django_filters',
'corsheaders',
'knox',
'frontend.apps.FrontendConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        # Local PostgreSQL instance; the password is kept out of version
        # control in frontend/config.py (imported above as DB_PSW).
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'milkbox',
        'USER': 'postgres',
        'PASSWORD': DB_PSW,
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
REST_FRAMEWORK = {
    # django-filter integration for filterable list endpoints.
    'DEFAULT_FILTER_BACKENDS': ['django_filters.rest_framework.DjangoFilterBackend',],
    # Token authentication via django-rest-knox.
    'DEFAULT_AUTHENTICATION_CLASSES':('knox.auth.TokenAuthentication',)
    # "DEFAULT_PERMISSION_CLASSES": [
    #     "rest_framework_api_key.permissions.HasAPIKey",
    # ],
}
# NOTE(review): this permits cross-origin requests from any origin; restrict
# via CORS_ORIGIN_WHITELIST before a production deployment.
CORS_ORIGIN_ALLOW_ALL = True
"milk@developer.lgbt"
] | milk@developer.lgbt |
1eedc5acffffa6daed86d008c97fc137d4004b7b | 482e28dccb663459e50ac601e8cc376f2441b0f7 | /src/ch05/instructions/stores/Istore.py | 4c7115d1daea3912011fa3f5f1214933164872fe | [] | no_license | wlj5240/JVMByPython | 1487d4f4b8367e9e31d71b60a9d06ff4996ad1b7 | 53502f10f4f19741646d871c46014e023ccad4a5 | refs/heads/master | 2022-10-05T19:55:50.381307 | 2019-10-20T05:03:38 | 2019-10-20T05:03:38 | 270,003,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: HuRuiFeng
@file: Istore.py
@time: 2019/9/15 19:25
@desc: int类型变量存储指令
"""
from instructions.base.Instruction import Index8Instruction, NoOperandsInstruction
def _istore(frame, index):
    """Pop an int off the frame's operand stack and store it into the
    local variable table at *index* (shared helper for all istore ops)."""
    val = frame.operand_stack.pop_numeric()
    frame.local_vars.set_numeric(index, val)
class ISTORE(Index8Instruction):
    """istore: store an int into the local variable addressed by the
    instruction's 8-bit operand (self.index -- presumably decoded by
    Index8Instruction)."""
    def execute(self, frame):
        _istore(frame, self.index)
class ISTORE_0(NoOperandsInstruction):
    """istore_0: store an int into local variable slot 0 (no operands)."""
    def execute(self, frame):
        _istore(frame, 0)
class ISTORE_1(NoOperandsInstruction):
    """istore_1: store an int into local variable slot 1 (no operands)."""
    def execute(self, frame):
        _istore(frame, 1)
class ISTORE_2(NoOperandsInstruction):
    """istore_2: store an int into local variable slot 2 (no operands)."""
    def execute(self, frame):
        _istore(frame, 2)
class ISTORE_3(NoOperandsInstruction):
    """istore_3: store an int into local variable slot 3 (no operands)."""
    def execute(self, frame):
        _istore(frame, 3)
| [
"huruifeng1202@163.com"
] | huruifeng1202@163.com |
dff218fc396c0c3506b94d5ed333a84f16c3c22c | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /pvn/networks.py | 18d517d58d9cb1babb6b29af43e7a6360dc961a9 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 5,984 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common networks."""
import chex
from flax import linen as nn
from jax import numpy as jnp
from pvn.utils import mesh_utils
class NatureDqnEncoder(nn.Module):
  """An encoder network for use with Atari.

  Conv torso matching the Nature DQN layout (8x8/4, 4x4/2, 3x3/1 convs,
  then a dense projection).

  Attributes:
    num_features: Output width of the final dense layer.
    width_multiplier: Scales the channel count of every conv layer.
    dtype: Computation dtype; inputs must already have this dtype.
    param_dtype: Dtype of the parameters.
    apply_final_relu: Whether to apply a ReLU after the final dense layer.
  """
  num_features: int = 512
  width_multiplier: float = 1.0
  dtype: jnp.dtype = jnp.float32
  param_dtype: jnp.dtype = jnp.float32
  apply_final_relu: bool = True
  @nn.compact
  def __call__(self, x):
    # Fail fast if the caller did not cast inputs to `dtype`.
    chex.assert_type(x, self.dtype)
    initializer = nn.initializers.xavier_uniform()
    x = nn.Conv(
        features=int(32 * self.width_multiplier),
        kernel_size=(8, 8),
        strides=(4, 4),
        kernel_init=initializer,
        dtype=self.dtype,
        param_dtype=self.param_dtype)(x)
    x = nn.relu(x)
    x = nn.Conv(
        features=int(64 * self.width_multiplier),
        kernel_size=(4, 4),
        strides=(2, 2),
        kernel_init=initializer,
        dtype=self.dtype,
        param_dtype=self.param_dtype)(x)
    x = nn.relu(x)
    x = nn.Conv(
        features=int(64 * self.width_multiplier),
        kernel_size=(3, 3),
        strides=(1, 1),
        kernel_init=initializer,
        dtype=self.dtype,
        param_dtype=self.param_dtype)(x)
    x = nn.relu(x)
    # NOTE(review): reshape(-1) flattens everything, so this module assumes
    # an unbatched input (batching presumably handled via vmap) -- confirm.
    x = x.reshape((-1)) # flatten
    x = nn.Dense(
        features=self.num_features,
        kernel_init=initializer,
        dtype=self.dtype,
        param_dtype=self.param_dtype)(x)
    if self.apply_final_relu:
      x = nn.relu(x)
    return x
class NatureRndNetwork(nn.Module):
  """A modified Nature DQN network that outputs a single scalar value.

  NOTE(review): the output width is actually `features`; it is a scalar only
  when features == 1.

  Attributes:
    features: Width of the final dense head.
    dtype: Computation dtype; inputs must already have this dtype.
    param_dtype: Dtype of the parameters.
  """
  features: int
  dtype: jnp.dtype = jnp.float32
  param_dtype: jnp.dtype = jnp.float32
  @nn.compact
  def __call__(self, x):
    chex.assert_type(x, self.dtype)
    initializer = nn.initializers.xavier_uniform()
    # Shared Nature-DQN conv torso followed by a linear head.
    x = NatureDqnEncoder(dtype=self.dtype, param_dtype=self.param_dtype)(x)
    x = nn.Dense(
        features=self.features,
        kernel_init=initializer,
        dtype=self.dtype,
        param_dtype=self.param_dtype)(x)
    return x
class ResidualBlock(nn.Module):
  """Stack of pooling and convolutional blocks with residual connections.

  One 3x3 conv (optionally followed by 3x3/2 max-pooling), then
  `num_blocks` two-conv residual units.

  Attributes:
    num_channels: Channel count for every conv in the stack.
    num_blocks: Number of residual units after the initial conv.
    use_max_pooling: Whether to downsample after the initial conv.
    dtype: Computation dtype; inputs must already have this dtype.
    param_dtype: Dtype of the parameters.
  """
  num_channels: int
  num_blocks: int
  use_max_pooling: bool = True
  dtype: jnp.dtype = jnp.float32
  param_dtype: jnp.dtype = jnp.float32
  @nn.compact
  def __call__(self, x):
    chex.assert_type(x, self.dtype)
    initializer = nn.initializers.xavier_uniform()
    conv_out = nn.Conv(
        features=self.num_channels,
        kernel_init=initializer,
        kernel_size=(3, 3),
        strides=1,
        padding='SAME',
        dtype=self.dtype,
        param_dtype=self.param_dtype)(x)
    if self.use_max_pooling:
      conv_out = nn.max_pool(
          conv_out, window_shape=(3, 3), padding='SAME', strides=(2, 2))
    for _ in range(self.num_blocks):
      # Pre-activation residual unit: relu-conv-relu-conv plus skip.
      block_input = conv_out
      conv_out = nn.relu(conv_out)
      conv_out = nn.Conv(
          features=self.num_channels,
          kernel_init=initializer,
          kernel_size=(3, 3),
          strides=1,
          padding='SAME',
          dtype=self.dtype,
          param_dtype=self.param_dtype)(conv_out)
      conv_out = nn.relu(conv_out)
      conv_out = nn.Conv(
          features=self.num_channels,
          kernel_init=initializer,
          kernel_size=(3, 3),
          strides=1,
          padding='SAME',
          dtype=self.dtype,
          param_dtype=self.param_dtype)(conv_out)
      conv_out += block_input
    return conv_out
class ImpalaEncoder(nn.Module):
  """Impala Network which also outputs penultimate representation layers.

  Stacks ResidualBlocks (one per entry of `stack_sizes`), then flattens and
  projects to `num_features`.

  Attributes:
    width_multiplier: Scales the channel count of every residual stack.
    stack_sizes: Base channel counts, one per residual stack.
    num_blocks: Residual units inside each stack.
    num_features: Output width of the final dense layer.
    dtype: Computation dtype; inputs must already have this dtype.
    param_dtype: Dtype of the parameters.
  """
  width_multiplier: float = 1.0
  stack_sizes: tuple[int, Ellipsis] = (16, 32, 32)
  num_blocks: int = 2
  num_features: int = 512
  dtype: jnp.dtype = jnp.float32
  param_dtype: jnp.dtype = jnp.float32
  @nn.compact
  def __call__(self, x):
    chex.assert_type(x, self.dtype)
    initializer = nn.initializers.xavier_uniform()
    for stack_size in self.stack_sizes:
      x = ResidualBlock(
          num_channels=int(stack_size * self.width_multiplier),
          num_blocks=self.num_blocks,
          dtype=self.dtype,
          param_dtype=self.param_dtype)(x)
    x = nn.relu(x)
    # NOTE(review): full flatten -- assumes unbatched input like
    # NatureDqnEncoder above; confirm batching is done via vmap.
    x = x.reshape(-1)
    x = nn.Dense(
        features=int(self.num_features),
        kernel_init=initializer,
        dtype=self.dtype,
        param_dtype=self.param_dtype)(x)
    x = nn.relu(x)
    return x
class DsmNetwork(nn.Module):
  """A network that predicts DSM action-values and thresholds DSM rewards.

  Runs a shared encoder over uint8 observations and applies
  `num_auxiliary_tasks` independent linear heads (one vmapped Dense with a
  leading task axis), each producing `num_actions` predictions.

  Attributes:
    num_actions: Output width of every task head.
    num_auxiliary_tasks: Number of vmapped Dense heads.
    encoder: Module mapping an observation to a feature vector.
    dtype: Computation dtype.
    param_dtype: Dtype of the parameters.
    input_dtype: Dtype observations are cast to before the /255 rescale.
  """
  num_actions: int
  num_auxiliary_tasks: int
  encoder: nn.Module
  dtype: jnp.dtype = jnp.float32
  param_dtype: jnp.dtype = jnp.float32
  input_dtype: jnp.dtype = jnp.float32
  @nn.compact
  def __call__(self, obs):
    initializer = nn.initializers.xavier_uniform()
    # Rescale pixel observations to [0, 1].
    obs = obs.astype(self.input_dtype) / 255.0
    phi = self.encoder(obs)
    # One Dense per auxiliary task, sharing the input (in_axes=None) and
    # stacking outputs along a new leading task axis (out_axes=0).
    vmap_action_preds = nn.vmap(
        nn.Dense,
        variable_axes={'params': 0},
        split_rngs={'params': True},
        in_axes=None,
        out_axes=0,
        axis_size=self.num_auxiliary_tasks)
    action_preds = vmap_action_preds(
        features=self.num_actions,
        kernel_init=initializer,
        param_dtype=self.param_dtype,
        name='aux_tasks')(phi)
    # Shard the per-task predictions along the 'model' mesh axis.
    action_preds = mesh_utils.with_sharding_constraint(
        action_preds, mesh_utils.create_partition_spec('model'))
    return action_preds
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
a8f47240d377c2a56a6e5d98886f69e10546b8b7 | 550a1121d81a9ca2fff4855b1d07da7479246917 | /module/functions.py | b508de3452b2e83d8892e29463d8e55c145b8bf0 | [] | no_license | omribahumi/pycharm-bug | a018f0fe1a93fd47172023898c1650da6140b1c3 | 683cfde7892de945953a3131b2d5ccbab8fdc3b9 | refs/heads/master | 2020-07-25T15:48:00.606443 | 2014-11-19T14:49:13 | 2014-11-19T14:49:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32 | py | def dummy_function():
pass
| [
"omrib@everything.me"
] | omrib@everything.me |
2df4b279ab495b5dbe6dce329409cf7046330746 | 6689e6e7993e28ae189da07ecc965bd3e22a699c | /AsynchronousWrite.py | c5ac37907067fd819f7832b7ece7f78129825a35 | [] | no_license | abhishekSen999/getblock_implimentation | b723b0490de1c010fa71e3a0b2968a20d5a9865b | 83217f200b44e3e1670d794a7af35668744acd89 | refs/heads/master | 2020-05-17T11:36:19.127705 | 2019-08-06T05:16:42 | 2019-08-06T05:16:42 | 183,688,830 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | import multiprocessing
import BufferHeader
import time
import os
def _writeAsynchronously(lock,bufferDataStructure,blockNumber):
#locking as this is supposed to be a
#lock.acquire()
print("************ Asynchronous Writing of Block number-",blockNumber," ***************")
time.sleep(4) #sleep for 4 seconds to simulate writing to disk
bufferDataStructure.clearDelayedWriteBit(blockNumber)
#lock.release()
print("************ Asynchronous Writing of Block Number-",blockNumber," over ***************")
#adding buffer to head of free list, to follow the LRU algorithm
lock.acquire()
bufferDataStructure.addToFreeListFirst(blockNumber)
lock.release()
#print("reached",buffer.isDelayedWrite(),"pid ",os.getpid())
def asynchronousWrite(lock,bufferDataStructure,blockNumber):
writingProcess=multiprocessing.Process(target=_writeAsynchronously,args=(lock,bufferDataStructure,blockNumber,))
writingProcess.start()
return 1
| [
"abhishek.sen999@gmail.com"
] | abhishek.sen999@gmail.com |
9ae5354fde676db014c198da65188091357a2463 | 310829324890c4ae4fabe36181038f121ee9ee53 | /test_place.py | 365e0a2849c23e9d7061e4241780011a9a0dadb3 | [] | no_license | kreshezy/Travel-Tracker-Final | ba8458ab63f72882a0cb38946f38331df3e8fde1 | fa97dcbce75883b0556a44e69f2fb6de3b4c74f7 | refs/heads/master | 2022-12-26T22:15:17.958426 | 2020-09-27T06:06:34 | 2020-09-27T06:06:34 | 298,151,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | from place import Place
def run_tests():
"""Test Place class."""
print("Test empty place:")
default_place = Place()
print(default_place)
assert default_place.name == ""
assert default_place.country == ""
assert default_place.priority == 0
assert not default_place.is_visited
# Test initial-value place
print("Test initial-value place:")
new_place = Place("Malagar", "Spain", 1, False)
# TODO: Write tests to show this initialisation works
assert new_place.name == 'Malagar'
assert new_place.country == 'Spain'
assert new_place.priority == 1
assert not new_place.is_visited
# TODO: Add more tests, as appropriate, for each method
new_place.mark_visited()
assert new_place.is_visited
assert new_place.is_important()
print(str(new_place))
assert str(new_place) == 'Name: Malagar, Country: Spain, Priority: 1, Visited.'
run_tests()
| [
"noreply@github.com"
] | kreshezy.noreply@github.com |
c876c16d0a0ea296e728c6933d5b894de063bf2a | 45aea658416c2d00bcb5d57eb05c793fa5d660fa | /sql_queries.py | 6d35f136985b9d9eb4f60e62a4c25419c8d98ce0 | [] | no_license | pseudogram/udacity-data-modeling-postgres | fb7c4c83a4ce41793742cd78c04ee3c8fe5d6f32 | 9393d1180e1ea64dd8bf26d2f6d891630b1b2990 | refs/heads/master | 2022-04-19T13:03:49.931631 | 2020-04-13T19:47:30 | 2020-04-13T19:47:30 | 255,244,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,910 | py | # DROP TABLES
# Drop statements. songplays must be dropped first: it holds foreign keys
# into the other four tables (see drop_table_queries at the bottom).
songplay_table_drop = "DROP TABLE IF EXISTS songplays;"
user_table_drop = "DROP TABLE IF EXISTS users;"
song_table_drop = "DROP TABLE IF EXISTS songs;"
artist_table_drop = "DROP TABLE IF EXISTS artists;"
time_table_drop = "DROP TABLE IF EXISTS time;"
# CREATE TABLES
# Fact table of song-play events. Fix: `SERIAL` alone only auto-increments;
# declaring the surrogate key PRIMARY KEY additionally enforces uniqueness
# and indexes the column.
songplay_table_create = ("""
CREATE TABLE IF NOT EXISTS songplays(
    songplay_id SERIAL PRIMARY KEY,
    start_time TIMESTAMP REFERENCES time(start_time),
    user_id INTEGER REFERENCES users(user_id),
    level VARCHAR(50),
    song_id VARCHAR(18) REFERENCES songs(song_id),
    artist_id VARCHAR(18) REFERENCES artists(artist_id),
    session_id VARCHAR(255),
    location VARCHAR(255),
    user_agent VARCHAR(255)
);
""")
user_table_create = ("""
CREATE TABLE IF NOT EXISTS users(
user_id INTEGER PRIMARY KEY,
first_name VARCHAR(100),
last_name VARCHAR(100),
gender VARCHAR(50),
level VARCHAR(50)
);
""")
song_table_create = ("""
CREATE TABLE IF NOT EXISTS songs(
song_id VARCHAR(18) PRIMARY KEY ,
title VARCHAR(255),
artist_id VARCHAR(255),
year SMALLINT,
duration NUMERIC(11,5)
);
""")
artist_table_create = ("""
CREATE TABLE IF NOT EXISTS artists(
artist_id VARCHAR(18) PRIMARY KEY,
name VARCHAR(100),
location VARCHAR(255),
latitude NUMERIC(8,5),
longitude NUMERIC(19,15)
);
""")
time_table_create = ("""
CREATE TABLE IF NOT EXISTS time(
start_time TIMESTAMP PRIMARY KEY,
hour SMALLINT,
day SMALLINT,
week SMALLINT,
month SMALLINT,
year SMALLINT,
weekday SMALLINT
);
""")
# INSERT RECORDS
songplay_table_insert = ("""
INSERT INTO songplays(start_time, user_id,level,song_id,artist_id,session_id,location, user_agent)
VALUES(%s,%s,%s,%s,%s,%s,%s,%s)
ON CONFLICT
DO NOTHING;
""")
user_table_insert = ("""
INSERT INTO users(user_id, first_name, last_name, gender, level)
VALUES(%s,%s,%s,%s,%s)
ON CONFLICT (user_id)
DO
UPDATE
SET
first_name = %s,
last_name = %s,
gender = %s,
level = %s;
""")
song_table_insert = ("""
INSERT INTO songs (song_id, title, artist_id, year, duration)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT (song_id)
DO NOTHING;
""")
artist_table_insert = ("""
INSERT INTO artists (artist_id, name, location, latitude, longitude)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT (artist_id)
DO NOTHING;
""")
time_table_insert = ("""
INSERT INTO time(start_time, hour, day, week, month, year, weekday)
values(%s, %s, %s, %s, %s, %s, %s)
ON CONFLICT (start_time)
DO NOTHING;
""")
# FIND SONGS
# Resolve (song_id, artist_id) by exact match on song title, artist name,
# and duration -- presumably used when loading songplay events; verify
# against the ETL caller.
song_select = ("""
SELECT song_id, a.artist_id
FROM
songs AS s JOIN artists AS a
ON s.artist_id = a.artist_id
WHERE title = %s AND name = %s AND duration = %s;
""")
# QUERY LISTS
# Creation order satisfies the FK dependencies (songplays created last);
# the drop order is the reverse (songplays dropped first).
create_table_queries = [user_table_create, song_table_create, artist_table_create, time_table_create, songplay_table_create]
drop_table_queries = [songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
"pseudogrammer@gmail.com"
] | pseudogrammer@gmail.com |
1a68df4f2c767747d4962ef0255bcfaff0ec6a26 | fd27390a80a64de82b0505dff5b7a8f656dc6000 | /mrBlue/base_F.py | 3e3f8d90e49fe2683a40122e59ed6cb755734452 | [] | no_license | t3fox/HighHBOT | 0cc6bfade9a9b79359761e3af8bbc0f37decad7a | 12ef074716eb77d03943d231c31054d491ba7f5c | refs/heads/master | 2022-12-08T13:46:51.057105 | 2020-08-07T02:17:55 | 2020-08-07T02:17:55 | 285,690,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py |
def executeBot():
    """Log in to Facebook by driving a Selenium browser session.

    NOTE(review): `driver` is used but never defined in this file's visible
    code -- it is presumably a module-level Selenium WebDriver; this
    function fails if it has not been initialized.
    NOTE(review): credentials are hard-coded in source; move them to
    environment variables or a secrets store.
    """
    input_email_id = "benitocamelo4vcs@gmail.com"
    input_passw = "desig654?"
    driver.get('https://www.facebook.com/login.php?login_attempt=1&lwv=110')
    print("...Facebook is open...")
    # Old absolute-XPath locator kept for reference; the id-based one below
    # is less brittle.
    #email = driver.find_element_by_xpath("/html/body/div[1]/div[3]/div[1]/div/div/div/div[2]/form/div/div[1]/input")
    email = driver.find_element_by_xpath('//*[@id="email"]')
    email.send_keys(input_email_id)
    print("Email OK")
    password = driver.find_element_by_name('pass')
    password.send_keys(input_passw)
    print("Password OK")
    #(//*[@id="loginbutton"])
    button =driver.find_element_by_xpath("//*[@id='loginbutton']")
    button.click()
    print("Cuenta de facebook abierta...")
| [
"noreply@github.com"
] | t3fox.noreply@github.com |
0c3ba08e7a764797288a746b8455a0129df23cbe | 72611197a42ef6cba75a2ad682dea5e2c8506548 | /HBM/examples/chlorophyl_layers.py | 3a749d82c3beadf324c9a76a45ecbd94a022881e | [] | no_license | asbjorn-christensen/GridWetData | 6e11247faed414d86371f1d5c40c3126582afabc | c94bc71c2e32ea834d2d1902c3b37ade29ac44d5 | refs/heads/master | 2020-05-21T23:19:21.979715 | 2019-09-06T13:40:35 | 2019-09-06T13:40:35 | 56,060,298 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,505 | py | #!/usr/bin/env python
#######################################################################################
#
# Extract misc chlorophyl attributes (strat front, bottom, surface, vertical average)
# by looping with GridWetData.HBM.DataManager.loop_over(...)
#
# If GridWetData is not in default search path, then (in bash) apply:
# export PYTHONPATH=<PATH_TO_GridWetData>:${PYTHONPATH}
#######################################################################################
from GridWetData import * # import the GridWetData environment
from GridWetData.HBM import DataManager, HBMGrid_3D
# Data manager over the preprocessed HBM output directory (hard-coded path).
dmg = DataManager("/home/data/GUDP-VIND_test/preprocessed_data", HBMGrid_3D) # could be sys.argv[1]
import netCDF4 as netcdf
# Output file collecting one 2D layer variable per attribute per time frame.
ncChlorofyl = netcdf.Dataset("out.nc", "w") # could be sys.argv[2]
times = []
i = 0
# Loop over chlorophyl ("chl") 3D fields in time and derive 2D layers:
# stratification front, bottom, surface, and vertical average.
for (tim, data3d) in dmg.loop_over(GridData_3D, "chl", "ns"):
    times.append(tim)
    dt = tim-times[0]  # elapsed time since the first frame (currently unused)
    fLayer = derived_layers.StratificationFront(data3d)
    bLayer = data3d.get_bottom_layer()
    sLayer = data3d.get_surface_layer()
    aLayer = data3d.get_vertical_average()
    # NOTE(review): "buttom" is a typo, but it names the variable written to
    # out.nc -- renaming it would change the output file's schema.
    fLayer.write_data_as_netCDF(ncChlorofyl, index=i, dataParam = "front")
    bLayer.write_data_as_netCDF(ncChlorofyl, index=i, dataParam = "buttom")
    sLayer.write_data_as_netCDF(ncChlorofyl, index=i, dataParam = "surface")
    aLayer.write_data_as_netCDF(ncChlorofyl, index=i, dataParam = "average")
    i += 1
| [
"asc@aqua.dtu.dk"
] | asc@aqua.dtu.dk |
d7c5fc8f4f253d5933902ec0333e9a30f480a39f | 8ef36062d9215d53afbc3584fb0eccae8a48c670 | /C to F converter.py | 85a406ed574d89d73d81314de65cddc3cb3590d2 | [] | no_license | sbo97t/BootCamp | c1bcf2ee3b570e4d42e5c231126e81451c862848 | 3bf27ba2e51d45cdf52af3412b80eefedd8bc501 | refs/heads/master | 2022-06-16T17:09:53.337368 | 2020-05-10T02:42:54 | 2020-05-10T02:42:54 | 252,709,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | # C to F converter
# Interactive temperature converter: Celsius -> Fahrenheit, then an
# optional Fahrenheit -> Celsius round.
print("Enter a temp in Celsius to convert to Fahrenheit")
celsius = float(input("Celsius "))
print(celsius * 9 / 5 + 32)

print("Would you like to convert from Farhneheit to Celsius? (Y or N) ")
answer = input()
if answer != "Y":
    # Anything other than exactly "Y" ends the program.
    print("Thanks for playing!")
else:
    print("Enter a temp in Fahrenheit to convert to Celsius")
    fahrenheit = float(input("Fahrenheit "))
    print((fahrenheit - 32) * 5 / 9)
| [
"noreply@github.com"
] | sbo97t.noreply@github.com |
35794bba74cad8910ae9c4a15b5203edb84dcfb6 | b352178349036ad5cccc4cb973d7240e99c0fa3f | /ProduceAZ_1/goodsApp/migrations/0015_contactus.py | 06e5ab386989e412229c443878406edb5003e017 | [] | no_license | RaminCH/final_proj_techacademy | 8467729d9acabf6dab26dde6efee857c282cf7fd | af7b763c23c698aea1bb7d3b38a5607bc9c9916f | refs/heads/master | 2022-12-02T02:04:25.866305 | 2020-02-04T17:50:45 | 2020-02-04T17:50:45 | 238,266,063 | 1 | 0 | null | 2022-11-22T05:17:27 | 2020-02-04T17:37:17 | CSS | UTF-8 | Python | false | false | 706 | py | # Generated by Django 3.0.2 on 2020-01-25 15:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the ContactUs model (presumably backing
    # a contact form) with name/email/phone_number/message fields.
    dependencies = [
        ('goodsApp', '0014_backgroundimage_headermodel'),
    ]
    operations = [
        migrations.CreateModel(
            name='ContactUs',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('email', models.EmailField(max_length=254)),
                ('phone_number', models.CharField(max_length=200)),
                ('message', models.TextField()),
            ],
        ),
    ]
| [
"ramin.chopurov@gmail.com"
] | ramin.chopurov@gmail.com |
462b4661effed19ab49cf3cae360488074cec925 | 71ed7467f0ee53a6589867a9b54eedf8f4d27cf1 | /unit6/unit6.2.py | 41207679011fa2cfb741951f1f8ac15f14b053db | [] | no_license | EricSeokgon/dojangPython | 46f7c7ee8762749f79eb0981cde901cacb0c5439 | 4b81cef2738f4f5457eb968bd9d99aea2241a29a | refs/heads/master | 2020-04-13T13:59:23.501146 | 2019-01-01T23:47:15 | 2019-01-01T23:47:15 | 163,248,266 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | a = 10
b = 20
c = a + b
print(c)  # 30
# Using `a` inside an expression does not modify it.
a = 10
print(a + 20)  # 30
print(a)  # still 10
# Explicit re-assignment stores the new value back into `a`.
a = 10
a = a + 20
print(a)  # 30
# Augmented assignment is shorthand for the re-assignment above.
a = 10
a += 20
print(a)  # 30
# Unary plus keeps the sign; unary minus negates it.
x = -10
print(+x)  # -10
print(-x)  # 10
| [
"leesk55@gmail.com"
] | leesk55@gmail.com |
f56516ce958b9557cfa1faae1b3efa4f89917394 | 3d5062883ea4fc446f51146adee8970ce9e6d2ae | /ProjectWeb/wsgi.py | 014019eee07a58f19e1d63c1ce68a40589acefb5 | [] | no_license | Marvin-Fiedacan/ProjectWeb | b32f564b81aa5bf8e11318221e5dd8c28c4e3fd9 | e104f72672384d01f5b764ae8ea5d7ae624c8c03 | refs/heads/main | 2023-07-03T13:53:42.738377 | 2021-08-06T04:13:46 | 2021-08-06T04:13:46 | 392,547,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for ProjectWeb project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings (only if not already configured),
# then expose the WSGI callable that application servers look for.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ProjectWeb.settings')
application = get_wsgi_application()
| [
"marvinfiedacan@gmail.com"
] | marvinfiedacan@gmail.com |
87c85a9c698b6e3970fa33ac7f05bc41cd3236b7 | 4d50855fd42b4ea585b2f4f7fd56b1966e0fd42f | /cms/apps/settings/migrations/0004_settings_left_column_text.py | 31b27c2054ac2aa7eb364f02016558fc7ea660dc | [] | no_license | C6H6/django-cms | d1b43a6bc7ae441ea78e4071c906af50114f985a | 2e8e05b5c5276514af67773bc017f6a1354596c0 | refs/heads/master | 2020-04-03T07:27:03.724286 | 2019-03-05T20:14:35 | 2019-03-05T20:14:35 | 155,103,237 | 1 | 0 | null | 2019-03-05T20:14:36 | 2018-10-28T18:39:04 | Python | UTF-8 | Python | false | false | 434 | py | # Generated by Django 2.1.2 on 2019-01-12 16:14
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds a non-null `left_column_text` TextField
    # to the settings model (empty-string default backfills existing rows).
    dependencies = [
        ('settings', '0003_settings_mourning_mode'),
    ]
    operations = [
        migrations.AddField(
            model_name='settings',
            name='left_column_text',
            field=models.TextField(default=''),
            preserve_default=False,
        ),
    ]
| [
"c3c3h3h3@gmail.com"
] | c3c3h3h3@gmail.com |
d4f0b74ea6a91c0ea9ec118c096f874f88485adc | 4424017efe8265f2e304619b179e03082a5e0833 | /server/grok/db/mongo.py | 44f919b67eea74af8ab46848627bc1740a8481f3 | [] | no_license | rganchev/grok | d26bc0670538aa239ca1883263fe79e259a817be | ad9dfb81519ac0d1ad18bcf809ef60ef00b5721d | refs/heads/master | 2021-01-20T02:29:19.120062 | 2017-06-10T10:46:01 | 2017-06-10T10:46:01 | 83,817,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | import json
from pymongo import MongoClient
from bson.objectid import ObjectId
class DB(object):
    """Represents a Mongo database with the given name"""
    # Fix: this docstring previously sat *after* the `client` assignment, so
    # it was a discarded expression statement and DB.__doc__ was None.

    # Single MongoClient shared by every DB instance.
    client = MongoClient()

    def __init__(self, name):
        """Bind this instance to the database called `name`."""
        self._db = self.client[name]

    def __getitem__(self, key):
        """Return the collection named `key` (``db['users']`` style access)."""
        return self._db[key]

    def drop(self):
        """Delete the wrapped database from the Mongo server."""
        self.client.drop_database(self._db)
class DBJSONEncoder(json.JSONEncoder):
    """JSON encoder that renders BSON ObjectIds as plain strings."""

    def default(self, o):
        # ObjectId is not natively JSON-serializable; everything else is
        # delegated to the base implementation (which raises TypeError).
        if not isinstance(o, ObjectId):
            return json.JSONEncoder.default(self, o)
        return str(o)
| [
"radan.ganchev@gmail.com"
] | radan.ganchev@gmail.com |
f392d1cadd9417da7f977acdd9a94adcc2f31a67 | c6a959125c09ff0d1ae8ceae5d66849b9879805d | /chapter_04/4-12 . More Loops.py | a59ac7ee58124f3999f8a9d590328e6e21684bf8 | [] | no_license | schartun/pythoncrashcourse | 44775977916cd60d9d36634d70c748f3f3f1ccf4 | 0f4238f78c507911bd83d7c049dc2015764ba729 | refs/heads/master | 2020-04-28T01:18:57.470300 | 2019-08-21T21:49:16 | 2019-08-21T21:49:16 | 174,850,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | # All versions of foods.py in this section have avoided using for loops when printing to save space .
# Choose a version of foods.py, and write two for loops to print each list of foods .
my_foods = ['pizza', 'falafel', 'carrot cake']
friend_foods = my_foods[:]
print("My favorite foods are:")
for pizza in my_foods:
print(pizza)
print("\nMy friend's favorite foods are:")
for friend in friend_foods:
print(friend) | [
"schartun@gmail.com"
] | schartun@gmail.com |
6c09095197e24db6aa4f6e31283dd1213e9a9520 | f463414357cf70d156205b189f93121bb3c25a44 | /pet_ct/learn/dataloaders.py | 5764d844562a867fe4607d90cbdf899b92cc202f | [
"Apache-2.0"
] | permissive | geoffreyangus/pet-ct | 6e83e146e56a79a6c7f889c4fbb650b214ed1075 | fa96a07734afade475f6a1e1587ec14965fe2de3 | refs/heads/master | 2021-04-11T21:31:11.022315 | 2020-03-21T21:14:17 | 2020-03-21T21:14:17 | 249,056,432 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,599 | py | """
Defines DataLoader classes to use.
"""
import logging
from collections import defaultdict
from itertools import groupby
import torch
import torch.nn as nn
import numpy as np
from torch._six import int_classes as _int_classes
from torch.utils.data import DataLoader
from torch.utils.data.sampler import WeightedRandomSampler, RandomSampler, Sampler
import torch.nn.functional as F
from pet_ct.util.util import flex_stack, get_batch_size, soft_to_hard
def pad_inputs(inputs, max_length):
    """Zero-pad a 4-D tensor along its first dimension up to `max_length`.

    F.pad consumes pad widths starting from the last dimension, so only the
    final (0, deficit) pair touches dim 0.
    """
    deficit = max_length - inputs.size(0)
    return F.pad(inputs, pad=(0, 0, 0, 0, 0, 0, 0, deficit))
def pad_scans(scan_inputs, value=0):
    """Pad every 4-D tensor in `scan_inputs` along dim 0 to the longest one.

    Args:
        scan_inputs (list): tensors whose trailing three dims already match.
        value: fill value for the appended rows.

    Returns:
        list: padded tensors, all sharing the same first-dim length.
    """
    target_len = max(scan.shape[0] for scan in scan_inputs)
    return [
        F.pad(scan, pad=(0, 0, 0, 0, 0, 0, 0, target_len - scan.size(0)), value=value)
        for scan in scan_inputs
    ]
def pad_targets(targets, value=-1):
    """Pad each tensor in `targets` along its last dimension so they all
    match the longest one; `value` (default -1) fills the new positions."""
    longest = max(tgt.shape[-1] for tgt in targets)
    return [F.pad(tgt, pad=(0, longest - tgt.shape[-1]), value=value) for tgt in targets]
def exam_collate(batch_list):
    """ Collate function for a single-task exam dataset.

    Pads each exam's inputs along the first (slice) dimension to the
    longest exam in the batch, then stacks inputs and targets.

    args:
        batch_list (list) list of (inputs, targets, info) tuples
    returns:
        (inputs, targets, info) with inputs/targets stacked across the
        batch and info a list of per-exam entries
    """
    # Fix: key on the slice count directly. The old key used the full
    # size() tuple, whose lexicographic comparison only incidentally agreed
    # with the first dimension; this also matches mt_exam_collate below.
    max_length = max(batch_list, key=lambda x: x[0].size(0))[0].size(0)
    all_inputs = []
    all_targets = []
    all_info = []
    for inputs, targets, info in batch_list:
        all_inputs.append(pad_inputs(inputs, max_length))
        all_info.append(info)
        all_targets.append(targets)
    # stack targets and inputs
    all_targets = flex_stack(all_targets, dim=0)
    all_inputs = flex_stack(all_inputs, dim=0)
    return all_inputs, all_targets, all_info
def mt_exam_collate(batch_list):
    """ Collate function for a multi-task dataset.

    Pads every exam to the longest slice count in the batch, stacks the
    inputs, and stacks each task's targets separately.

    args:
        batch_list (list) list of (inputs, targets, info) tuples, where
            targets maps task name -> target tensor
    """
    longest = max(item[0].size(0) for item in batch_list)
    padded_inputs = [pad_inputs(inputs, longest) for inputs, _, _ in batch_list]
    infos = [info for _, _, info in batch_list]

    # Group targets by task across the batch before stacking.
    per_task = defaultdict(list)
    for _, targets, _ in batch_list:
        for task, target in targets.items():
            per_task[task].append(target)

    stacked_targets = {task: flex_stack(tgts, dim=0) for task, tgts in per_task.items()}
    return flex_stack(padded_inputs, dim=0), stacked_targets, infos
def mt_mi_exam_collate(batch_list):
    """ Collate function for a multi-task multi-input dataset.

    Args:
        batch_list (list): list of (inputs, targets, info) tuples where
            inputs maps input name -> tensor and targets maps task -> tensor.

    Returns:
        tuple: (inputs, targets, info) with each named input / task target
        stacked across the batch and info a list of per-exam entries.
    """
    all_inputs = defaultdict(list)
    all_targets = defaultdict(list)
    all_info = []
    for inputs, targets, info in batch_list:
        all_info.append(info)
        for name, inpt in inputs.items():
            all_inputs[name].append(inpt)
        for task, target in targets.items():
            all_targets[task].append(target)
    # Variable-length entries must be padded to a common length before
    # stacking; -1 presumably marks padded positions to be ignored
    # downstream -- TODO confirm against the loss/masking code.
    if "scan" in all_inputs:
        all_inputs["scan"] = pad_scans(all_inputs["scan"], value=-1)
    # NOTE(review): the elif means at most one of "mlm"/"fdg_abnorm" is
    # padded per batch -- confirm that both never co-occur.
    if "mlm" in all_targets:
        all_targets["mlm"] = pad_targets(all_targets["mlm"], value=-1)
    elif "fdg_abnorm" in all_targets:
        all_targets["fdg_abnorm"] = pad_targets(all_targets["fdg_abnorm"], value=-1)
    # stack targets and inputs
    all_targets = {task: flex_stack(targets, dim=0) for task, targets in all_targets.items()}
    all_inputs = {name: flex_stack(inputs, dim=0) for name, inputs in all_inputs.items()}
    return all_inputs, all_targets, all_info
class ExamBatchSampler(Sampler):
    """Batch sampler that groups exams with an identical number of slices so
    every yielded batch can be stacked into a single tensor."""

    def __init__(self, batch_size, num_slices, sampler=None,
                 weights=None, num_samples=None, replacement=None, shuffle=None,
                 drop_last=False):
        """
        Creates batches of exams with same number of slices.

        Args:
            batch_size (int): maximum number of exams per batch.
            num_slices: per-example slice counts, indexable by example index.
            sampler: if not None, indices are drawn from `weights` via
                torch.multinomial (the sampler object itself is only used as
                a flag here).
            weights: per-example sampling weights; defaults to uniform.
            num_samples (int): number of example indices drawn per epoch.
            replacement (bool): sample with replacement when drawing.
            shuffle (bool): shuffle the index order when no sampler is set;
                mutually exclusive with `sampler`.
            drop_last (bool): accepted but not implemented yet.

        TODO: Implement `drop_last`.
        """
        if not isinstance(num_samples, _int_classes) or isinstance(num_samples, bool) or \
                num_samples <= 0:
            raise ValueError("num_samples should be a positive integer "
                             "value, but got num_samples={}".format(num_samples))
        if not isinstance(replacement, bool):
            raise ValueError("replacement should be a boolean value, but got "
                             "replacement={}".format(replacement))
        if sampler is not None and shuffle:
            raise ValueError("sampler option is mutually exclusive with shuffle")
        if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
                batch_size <= 0:
            raise ValueError("batch_size should be a positive integer value, "
                             "but got batch_size={}".format(batch_size))
        self.batch_size = batch_size
        self.num_samples = num_samples
        self.num_slices = num_slices
        self.sampler = sampler
        # Uniform weights unless a sampler with explicit weights was given.
        if self.sampler is not None:
            self.weights = weights if weights is not None else torch.ones(len(num_slices))
        else:
            self.weights = torch.ones(len(num_slices))
        self.replacement = replacement
        self.shuffle = shuffle
        self.drop_last = drop_last

    def __iter__(self):
        """Yield batches (lists of example indices) in random order, each
        batch containing only exams with equal slice counts."""
        # Draw this epoch's example indices.
        if self.sampler is not None:
            samples = torch.multinomial(self.weights, self.num_samples,
                                        replacement=self.replacement)
        else:
            if self.shuffle:
                samples = torch.multinomial(self.weights, self.num_samples,
                                            replacement=False)
            else:
                samples = torch.tensor(range(self.num_samples))
        # Sort by slice count so equal-length exams end up adjacent.
        samples = sorted(samples, key=lambda idx: self.num_slices[idx])
        curr_iter = 0
        batches = []
        while curr_iter < self.num_samples:
            # Start a batch and extend it while the slice count matches.
            batch = [samples[curr_iter]]
            batch_slices = self.num_slices[samples[curr_iter]]
            offset = self.batch_size
            for i in range(1, self.batch_size):
                if curr_iter + i < self.num_samples and \
                        batch_slices == self.num_slices[samples[curr_iter + i]]:
                    batch.append(samples[curr_iter + i])
                else:
                    offset = i
                    break
            batches.append(batch)
            curr_iter = curr_iter + offset
        # Shuffle batch order so batches are not served sorted by length.
        batch_idxs = torch.randperm(len(batches)).tolist()
        for batch_idx in batch_idxs:
            yield batches[batch_idx]

    def __len__(self):
        """
        This is approximate because we cannot know number of batches a priori.
        """
        if self.drop_last:
            return self.num_samples // self.batch_size
        else:
            return (self.num_samples + self.batch_size - 1) // self.batch_size
class ExamDataLoader(DataLoader):
    """DataLoader for exam datasets.

    Supports class-balanced ("WeightedRandomSampler") or uniform
    ("RandomSampler") example sampling and, when batch_size > 1, groups
    exams of equal slice count via ExamBatchSampler so they can be stacked.
    """
    def __init__(self,
                 dataset,
                 batch_size=1,
                 shuffle=False,
                 num_workers=6,
                 sampler=None,
                 num_samples=1000,
                 replacement=False,
                 class_probs=None,
                 pin_memory=False):
        """
        Args:
            dataset: exam dataset exposing get_targets() and
                get_num_slices().
            sampler (str): None, "WeightedRandomSampler", or
                "RandomSampler".
            num_samples (int): examples drawn per epoch when sampling.
            class_probs: desired per-class sampling probabilities
                (WeightedRandomSampler only).
        """
        # get example weights so examples are sampled according to class_probs
        if sampler in {"WeightedRandomSampler", "RandomSampler"}:
            if sampler == "WeightedRandomSampler":
                classes = dataset.get_targets()
                counts = torch.bincount(classes)
                # weight = class_prob / class_count, so each class is drawn
                # with overall probability class_probs[class].
                weights = torch.zeros_like(classes, dtype=torch.float)
                for example_idx, class_idx in enumerate(classes):
                    class_prob = class_probs[class_idx] / float(counts[class_idx])
                    weights[example_idx] = class_prob
                sampler = WeightedRandomSampler(weights=weights, num_samples=num_samples,
                                                replacement=replacement)
            elif sampler == "RandomSampler":
                weights = None
                # NOTE(review): replacement is hard-coded True here, unlike
                # the weighted branch -- confirm this is intentional.
                sampler = RandomSampler(data_source=dataset, num_samples=num_samples,
                                        replacement=True)
        elif sampler is not None:
            raise ValueError(f"Sampler {sampler} not supported.")
        else:
            num_samples = len(dataset)
            weights = None
        if batch_size > 1:
            # Equal-slice batching; the (string-replaced) sampler objects
            # above are not passed to torch directly but drive
            # ExamBatchSampler's multinomial draw.
            num_slices = dataset.get_num_slices()
            batch_sampler = ExamBatchSampler(batch_size, num_slices,
                                             sampler=sampler,
                                             weights=weights,
                                             num_samples=num_samples,
                                             replacement=replacement,
                                             shuffle=shuffle,
                                             drop_last=False)
            super().__init__(dataset=dataset, num_workers=num_workers,
                             batch_sampler=batch_sampler, pin_memory=pin_memory,
                             collate_fn=mt_exam_collate)
        else:
            super().__init__(dataset=dataset, batch_size=batch_size, shuffle=shuffle,
                             num_workers=num_workers, sampler=sampler, pin_memory=pin_memory,
                             collate_fn=mt_exam_collate)
class MTExamDataLoader(DataLoader):
    """Multi-task ExamDataLoader: class balancing is computed from one
    designated task's targets (`weight_task`)."""
    def __init__(self,
                 dataset,
                 batch_size=1,
                 shuffle=False,
                 num_workers=6,
                 sampler=None,
                 num_samples=1000,
                 replacement=False,
                 weight_task=None,
                 class_probs=None,
                 pin_memory=False):
        """
        Args:
            dataset: multi-task exam dataset exposing
                get_targets(tasks=..., hard=True) and get_num_slices().
            sampler (str): None, "WeightedRandomSampler", or
                "RandomSampler".
            weight_task (str): task whose targets drive the class balancing
                (WeightedRandomSampler only).
            class_probs: desired per-class sampling probabilities.
        """
        if sampler in {"WeightedRandomSampler", "RandomSampler"}:
            # get example weights so examples are sampled according to class_probs
            if sampler == "WeightedRandomSampler":
                classes = []
                for target in dataset.get_targets(tasks=[weight_task], hard=True):
                    target_class = target[weight_task]
                    classes.append(target_class)
                classes = torch.stack(classes)
                # Soft (multi-column) targets are collapsed to hard labels.
                if classes.shape[-1] > 1:
                    classes = soft_to_hard(classes, break_ties="random").long()
                classes = torch.LongTensor(classes)
                counts = torch.bincount(classes)
                # weight = class_prob / class_count, so each class is drawn
                # with overall probability class_probs[class].
                weights = torch.zeros_like(classes, dtype=torch.float)
                for example_idx, class_idx in enumerate(classes):
                    class_prob = class_probs[class_idx] / float(counts[class_idx])
                    weights[example_idx] = class_prob
                sampler = WeightedRandomSampler(weights=weights, num_samples=num_samples,
                                                replacement=replacement)
            elif sampler == "RandomSampler":
                weights = None
                # NOTE(review): replacement is hard-coded True here, unlike
                # the weighted branch -- confirm this is intentional.
                sampler = RandomSampler(data_source=dataset, num_samples=num_samples,
                                        replacement=True)
        elif sampler is not None:
            raise ValueError(f"Sampler {sampler} not supported.")
        else:
            num_samples = len(dataset)
            weights = None
        if batch_size > 1:
            # Equal-slice batching via ExamBatchSampler (see ExamDataLoader).
            num_slices = dataset.get_num_slices()
            batch_sampler = ExamBatchSampler(batch_size, num_slices,
                                             sampler=sampler,
                                             weights=weights,
                                             num_samples=num_samples,
                                             replacement=replacement,
                                             shuffle=shuffle,
                                             drop_last=False)
            super().__init__(dataset=dataset, num_workers=num_workers,
                             batch_sampler=batch_sampler, pin_memory=pin_memory,
                             collate_fn=mt_exam_collate)
        else:
            super().__init__(dataset=dataset, batch_size=batch_size, shuffle=shuffle,
                             num_workers=num_workers, sampler=sampler, pin_memory=pin_memory,
                             collate_fn=mt_exam_collate)
class MTAdaptiveDataloader(DataLoader):
def __init__(self, dataset,
batch_size=1,
shuffle=False,
num_workers=6,
num_samples=1000,
weight_task=None,
pin_memory=False):
"""
"""
self.num_samples = num_samples
sampler = RandomSampler(data_source=dataset, num_samples=num_samples,
replacement=True)
super().__init__(dataset=dataset, batch_size=batch_size, shuffle=shuffle,
num_workers=num_workers, sampler=sampler)
def update(self, metrics, metric="roc_auc"):
"""
"""
for task in self.dataset.tasks:
value = metrics.get_metric(metric=metric, task=task)
weights = torch.zeros(len(dataset))
for example_idx, targets in enumerate(self.dataset.get_targets(hard=True)):
weights[example_idx] = 0
sampler = WeightedRandomSampler(weights=weights, num_samples=self.num_samples, replacement=True)
self.batch_sampler = BatchSampler(sampler, self.batch_size, self.drop_last)
class MTMIExamDataLoader(DataLoader):
def __init__(self,
dataset,
batch_size=1,
shuffle=False,
num_workers=6,
sampler=None,
num_samples=1000,
replacement=False,
weight_task=None,
class_probs=None,
pin_memory=False):
"""
"""
# get example weights so examples are sampled according to class_probs
if sampler == "WeightedRandomSampler":
self.num_samples = int(round(num_samples / batch_size))
classes = torch.LongTensor([target[weight_task]
for target in dataset.get_targets([weight_task])])
counts = torch.bincount(classes)
weights = torch.zeros_like(classes, dtype=torch.float)
for example_idx, class_idx in enumerate(classes):
class_prob = class_probs[class_idx] / float(counts[class_idx])
weights[example_idx] = class_prob
sampler = WeightedRandomSampler(weights=weights, num_samples=num_samples,
replacement=replacement)
elif sampler == "RandomSampler":
self.num_samples = int(round(num_samples / batch_size))
sampler = RandomSampler(data_source=dataset, num_samples=num_samples,
replacement=True)
elif sampler is not None:
raise ValueError(f"Sampler {sampler} not supported.")
super().__init__(dataset=dataset, batch_size=batch_size, shuffle=shuffle,
num_workers=num_workers, sampler=sampler, pin_memory=pin_memory,
collate_fn=mt_mi_exam_collate)
def __len__(self):
"""
"""
if hasattr(self, "num_samples"):
return self.num_samples
else:
return super().__len__()
class DynamicRandomSampler(Sampler):
def __init__(self, weights, num_samples, replacement=False):
if not isinstance(num_samples, int) or isinstance(num_samples, bool) or \
num_samples <= 0:
raise ValueError("num_samples should be a positive integer "
"value, but got num_samples={}".format(num_samples))
if not isinstance(replacement, bool):
raise ValueError("replacement should be a boolean value, but got "
"replacement={}".format(replacement))
self.weights = torch.as_tensor(weights, dtype=torch.double)
self.num_samples = num_samples
self.replacement = replacement
if not replacement:
self.mask = torch.ones_like(self.weights, dtype=torch.double)
def __iter__(self):
for _ in range(self.num_samples):
weights = self.weights if self.replacement else self.weights * self.mask
idx = int(torch.multinomial(weights, 1, self.replacement))
if self.replacement:
self.mask[idx] = 0.0
yield idx
def __len__(self):
return self.num_samples
class MTDynamicDataLoader(DataLoader):
def __init__(self, dataset,
batch_size=1,
num_workers=6,
num_samples=1000,
update_period=10,
replacement=False,
priority_metric="roc_auc",
priority_scale=100,
pin_memory=False):
# initialize uniform weights
self.exams = list(dataset.get_targets(hard=True))
weights = torch.ones(len(self.exams))
self.sampler = DynamicRandomSampler(weights, num_samples, replacement=replacement)
self.priority_metric = priority_metric
self.priority_scale = priority_scale
self.task_to_priority = defaultdict(lambda: 1)
self.task_to_samples = defaultdict(list)
self.sample_idxs = []
self.tasks = [{"name": task,
"cardinality": len(self.exams[0][task])}
for task in dataset.tasks]
self._init_demands()
# support for periodic updates
self.update_counter = 0
self.update_period = update_period
self.queued_idxs = []
super().__init__(dataset=dataset, batch_size=batch_size,
num_workers=num_workers, sampler=self.sampler, pin_memory=pin_memory,
collate_fn=mt_exam_collate)
def update_epoch(self, metrics):
# reset
self.task_to_samples = defaultdict(list)
self.sample_idxs = []
self._init_demands()
self.update_counter = 0
self.queued_idxs = []
# update priorities
task_to_metric = {}
total = 0
for task in self.tasks:
metric = metrics.get_metric(self.priority_metric, task["name"])
task_to_metric[task["name"]] = metric
total += metric
self.task_to_priority = {
task: self.priority_scale * (1 - metric/total) for task, metric in task_to_metric.items()
}
logging.info(task_to_metric)
logging.info(self.task_to_priority)
def update_batch(self, idxs):
"""
"""
self.queued_idxs.extend(idxs)
self.update_counter += len(idxs)
if self.update_counter % self.update_period == 0:
self.update_demands()
self.update_weights()
def _init_demands(self):
"""
"""
self.task_to_demands = {}
for task in self.tasks:
self.task_to_demands[task["name"]] = np.full((1, task["cardinality"]), fill_value=1/task["cardinality"])
self.task_to_samples[task["name"]] = []
def get_loss_weights(self, targets):
"""
"""
task_to_weight = {}
for task, tgt in targets.items():
tgt = int(torch.argmax(tgt).cpu().numpy())
task_to_weight[task] = self.task_to_demands[task][-1, tgt]
return task_to_weight
def update_demands(self):
"""
"""
for idx in self.queued_idxs:
self.sample_idxs.append(idx)
exam = self.exams[idx]
for task, tgt in exam.items():
self.task_to_samples[task].append(int(torch.argmax(tgt)))
for task in self.tasks:
samples = np.array(self.task_to_samples[task["name"]])
total = np.sum(self.task_to_demands[task["name"]][np.arange(samples.size), samples])
new_demands = []
for tgt in range(task["cardinality"]):
tgt_total = np.sum(self.task_to_demands[task["name"]]
[np.where(samples == tgt), tgt])
new_demand = (1 - (tgt_total + 1) / (total + task["cardinality"]))
new_demand *= self.task_to_priority[task["name"]]
new_demands.append(new_demand)
self.task_to_demands[task["name"]] = np.append(self.task_to_demands[task["name"]],
np.array(new_demands)[None, :], axis=0)
self.queued_idxs = []
def update_weights(self):
weights = []
for exam in self.exams:
weight = self._get_weight(exam)
weights.append(weight)
self.sampler.weights = torch.nn.functional.softmax(torch.tensor(weights), dim=0).double()
def _get_weight(self, exam):
"""
"""
weight = 0
for task in self.tasks:
tgt = torch.argmax(exam[task["name"]])
weight += self.task_to_demands[task["name"]][-1, tgt]
return weight
| [
"gdlangus@gmail.com"
] | gdlangus@gmail.com |
2e02acf34dd5d480147613f233ac22a65a757b24 | a43dd2a0d87cc3344b4878ce10473f9a04c90ab4 | /Lib/ConfParams/__init__.py | f2b2766638cb1fdaed6bf055a34cae397ccc28e9 | [] | no_license | spigad/GangaCRAB3 | ad027549418b5fd513f0bce48223dab727bff7bf | 19280afdbdd8fc6e0f85e85ad896cedcf09b4c2e | refs/heads/master | 2020-12-07T13:32:37.855684 | 2014-03-07T09:58:01 | 2014-03-07T09:58:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | from CMSSW import *
from CRAB import *
from GRID import *
from USER import *
from TASK import *
| [
"vale.mancy@gmail.com"
] | vale.mancy@gmail.com |
e1f0588ca4007cacbd8e69dbf5e288398f24eb62 | f95d2646f8428cceed98681f8ed2407d4f044941 | /a-T-biji/day19/code/mylist.py | 6af18978dfed1c59323153a37b1218cb56b4ec9e | [] | no_license | q2806060/python-note | 014e1458dcfa896f2749c7ebce68b2bbe31a3bf8 | fbe107d668b44b78ae0094dbcc7e8ff8a4f8c983 | refs/heads/master | 2020-08-18T01:12:31.227654 | 2019-10-17T07:40:40 | 2019-10-17T07:40:40 | 215,731,114 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py |
class MyList:
def __init__(self, iterable=()):
self.data = [x for x in iterable]
def __repr__(self):
return "MyList(%s)" % self.data
def __len__(self):
return len(self.data)
L = []
L.append({1, 2, 3})
L.append(("A", "B", "C"))
L.append(MyList([4, 5, 6]))
s = 0
for x in L:
s += len(x) # x.__len__()
print("元素个数是:", s) # 6
print(L) | [
"C8916BA958F57D5A740E38E94644A3F8@i-search.com.cn"
] | C8916BA958F57D5A740E38E94644A3F8@i-search.com.cn |
df192875fb351a853e6859bc87d918027df660a8 | d37a19ab3bcaba6e808a18df411c653c644d27db | /Year2/CA268/Week06/student.py | c2189723899ed611ef2287bccb76956591250ba5 | [] | no_license | Andrew-Finn/DCU | 9e7009dac9a543aaade17e9e94116259dcc1de20 | 013789e8150d80d3b3ce2c0c7ba968b2c69a7ce0 | refs/heads/master | 2023-02-21T05:13:42.731828 | 2022-02-14T12:39:20 | 2022-02-14T12:39:20 | 157,438,470 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,557 | py | from Node import Node
#
# Function to add an item to a tree.
#
# This is not good object oriented coding. It's not even very polite. It directly interferes with the tree's innards.
#
def add(tree, item):
""" Add this item to its correct position on the tree """
# This is a non recursive add method. A recursive method would be cleaner.
if tree.root == None: # ... Empty tree ...
tree.root = Node(item, None, None) # ... so, make this the root
else:
lst = []
# Find where to put the item
child_tree = tree.root
while child_tree != None:
parent = child_tree
lst.append(parent)
if item < child_tree.item: # If smaller ...
child_tree = child_tree.left # ... move to the left
elif item > child_tree.item:
child_tree = child_tree.right
# child_tree should be pointing to the new node, but we've gone too far
# we need to modify the parent nodes
if item < parent.item:
parent.left = Node(item, None, None)
elif item > parent.item:
parent.right = Node(item, None, None)
# Ignore the case where the item is equal.
for items in lst[-2::-1]:
if abs(tree.recurse_height(items.left) - tree.recurse_height(items.right)) > 1:
return items.item
#
# Note that you can get the height of a node by calling tree.recurse_height().
# For example, the height of the root is tree.recurse_height(tree.root)
#
| [
"git@afinn.me"
] | git@afinn.me |
f90db0d1572923a61ff028d06924ad3675160b02 | 9d9220ac3abc0aa316d1ce3653afe2c6d1a0593e | /sqlalchemy/schema.py | ebcc9a7ed373cbe00d20adb25547332f2e57f2f3 | [
"MIT"
] | permissive | sauloal/PiCastPy | 016733b597f8b15f2bc2fb6e6bc5f0f9aef95e70 | c907a5ba72ccd576b2c7ae78af25abb741327cee | refs/heads/master | 2020-06-04T21:34:45.015707 | 2013-08-09T17:18:29 | 2013-08-09T17:18:29 | 11,934,825 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135,458 | py | # sqlalchemy/schema.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The schema module provides the building blocks for database metadata.
Each element within this module describes a database entity which can be
created and dropped, or is otherwise part of such an entity. Examples include
tables, columns, sequences, and indexes.
All entities are subclasses of :class:`~sqlalchemy.schema.SchemaItem`, and as
defined in this module they are intended to be agnostic of any vendor-specific
constructs.
A collection of entities are grouped into a unit called
:class:`~sqlalchemy.schema.MetaData`. MetaData serves as a logical grouping of
schema elements, and can also be associated with an actual database connection
such that operations involving the contained elements can contact the database
as needed.
Two of the elements here also build upon their "syntactic" counterparts, which
are defined in :class:`~sqlalchemy.sql.expression.`, specifically
:class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.Column`.
Since these objects are part of the SQL expression language, they are usable
as components in SQL expressions.
"""
import re
import inspect
from . import exc, util, dialects, event, events, inspection
from .sql import expression, visitors
import collections
ddl = util.importlater("sqlalchemy.engine", "ddl")
sqlutil = util.importlater("sqlalchemy.sql", "util")
url = util.importlater("sqlalchemy.engine", "url")
sqltypes = util.importlater("sqlalchemy", "types")
__all__ = ['SchemaItem', 'Table', 'Column', 'ForeignKey', 'Sequence', 'Index',
'ForeignKeyConstraint', 'PrimaryKeyConstraint', 'CheckConstraint',
'UniqueConstraint', 'DefaultGenerator', 'Constraint', 'MetaData',
'ThreadLocalMetaData', 'SchemaVisitor', 'PassiveDefault',
'DefaultClause', 'FetchedValue', 'ColumnDefault', 'DDL',
'CreateTable', 'DropTable', 'CreateSequence', 'DropSequence',
'AddConstraint', 'DropConstraint',
]
__all__.sort()
RETAIN_SCHEMA = util.symbol('retain_schema')
class SchemaItem(events.SchemaEventTarget, visitors.Visitable):
"""Base class for items that define a database schema."""
__visit_name__ = 'schema_item'
quote = None
def _init_items(self, *args):
"""Initialize the list of child items for this SchemaItem."""
for item in args:
if item is not None:
item._set_parent_with_dispatch(self)
def get_children(self, **kwargs):
"""used to allow SchemaVisitor access"""
return []
def __repr__(self):
return util.generic_repr(self)
@util.memoized_property
def info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.SchemaItem`.
The dictionary is automatically generated when first accessed.
It can also be specified in the constructor of some objects,
such as :class:`.Table` and :class:`.Column`.
"""
return {}
def _get_table_key(name, schema):
if schema is None:
return name
else:
return schema + "." + name
def _validate_dialect_kwargs(kwargs, name):
# validate remaining kwargs that they all specify DB prefixes
for k in kwargs:
m = re.match('^(.+?)_.*', k)
if m is None:
raise TypeError("Additional arguments should be "
"named <dialectname>_<argument>, got '%s'" % k)
inspection._self_inspects(SchemaItem)
class Table(SchemaItem, expression.TableClause):
"""Represent a table in a database.
e.g.::
mytable = Table("mytable", metadata,
Column('mytable_id', Integer, primary_key=True),
Column('value', String(50))
)
The :class:`.Table` object constructs a unique instance of itself based
on its name and optional schema name within the given
:class:`.MetaData` object. Calling the :class:`.Table`
constructor with the same name and same :class:`.MetaData` argument
a second time will return the *same* :class:`.Table` object - in this way
the :class:`.Table` constructor acts as a registry function.
See also:
:ref:`metadata_describing` - Introduction to database metadata
Constructor arguments are as follows:
:param name: The name of this table as represented in the database.
This property, along with the *schema*, indicates the *singleton
identity* of this table in relation to its parent :class:`.MetaData`.
Additional calls to :class:`.Table` with the same name, metadata,
and schema name will return the same :class:`.Table` object.
Names which contain no upper case characters
will be treated as case insensitive names, and will not be quoted
unless they are a reserved word. Names with any number of upper
case characters will be quoted and sent exactly. Note that this
behavior applies even for databases which standardize upper
case names as case insensitive such as Oracle.
:param metadata: a :class:`.MetaData` object which will contain this
table. The metadata is used as a point of association of this table
with other tables which are referenced via foreign key. It also
may be used to associate this table with a particular
:class:`.Connectable`.
:param \*args: Additional positional arguments are used primarily
to add the list of :class:`.Column` objects contained within this
table. Similar to the style of a CREATE TABLE statement, other
:class:`.SchemaItem` constructs may be added here, including
:class:`.PrimaryKeyConstraint`, and :class:`.ForeignKeyConstraint`.
:param autoload: Defaults to False: the Columns for this table should
be reflected from the database. Usually there will be no Column
objects in the constructor if this property is set.
:param autoload_replace: If ``True``, when using ``autoload=True``
and ``extend_existing=True``,
replace ``Column`` objects already present in the ``Table`` that's
in the ``MetaData`` registry with
what's reflected. Otherwise, all existing columns will be
excluded from the reflection process. Note that this does
not impact ``Column`` objects specified in the same call to ``Table``
which includes ``autoload``, those always take precedence.
Defaults to ``True``.
.. versionadded:: 0.7.5
:param autoload_with: If autoload==True, this is an optional Engine
or Connection instance to be used for the table reflection. If
``None``, the underlying MetaData's bound connectable will be used.
:param extend_existing: When ``True``, indicates that if this
:class:`.Table` is already present in the given :class:`.MetaData`,
apply further arguments within the constructor to the existing
:class:`.Table`.
If ``extend_existing`` or ``keep_existing`` are not set, an error is
raised if additional table modifiers are specified when
the given :class:`.Table` is already present in the :class:`.MetaData`.
.. versionchanged:: 0.7.4
``extend_existing`` will work in conjunction
with ``autoload=True`` to run a new reflection operation against
the database; new :class:`.Column` objects will be produced
from database metadata to replace those existing with the same
name, and additional :class:`.Column` objects not present
in the :class:`.Table` will be added.
As is always the case with ``autoload=True``, :class:`.Column`
objects can be specified in the same :class:`.Table` constructor,
which will take precedence. I.e.::
Table("mytable", metadata,
Column('y', Integer),
extend_existing=True,
autoload=True,
autoload_with=engine
)
The above will overwrite all columns within ``mytable`` which
are present in the database, except for ``y`` which will be used as is
from the above definition. If the ``autoload_replace`` flag
is set to False, no existing columns will be replaced.
:param implicit_returning: True by default - indicates that
RETURNING can be used by default to fetch newly inserted primary key
values, for backends which support this. Note that
create_engine() also provides an implicit_returning flag.
:param include_columns: A list of strings indicating a subset of
columns to be loaded via the ``autoload`` operation; table columns who
aren't present in this list will not be represented on the resulting
``Table`` object. Defaults to ``None`` which indicates all columns
should be reflected.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param keep_existing: When ``True``, indicates that if this Table
is already present in the given :class:`.MetaData`, ignore
further arguments within the constructor to the existing
:class:`.Table`, and return the :class:`.Table` object as
originally created. This is to allow a function that wishes
to define a new :class:`.Table` on first call, but on
subsequent calls will return the same :class:`.Table`,
without any of the declarations (particularly constraints)
being applied a second time. Also see extend_existing.
If extend_existing or keep_existing are not set, an error is
raised if additional table modifiers are specified when
the given :class:`.Table` is already present in the :class:`.MetaData`.
:param listeners: A list of tuples of the form ``(<eventname>, <fn>)``
which will be passed to :func:`.event.listen` upon construction.
This alternate hook to :func:`.event.listen` allows the establishment
of a listener function specific to this :class:`.Table` before
the "autoload" process begins. Particularly useful for
the :meth:`.DDLEvents.column_reflect` event::
def listen_for_reflect(table, column_info):
"handle the column reflection event"
# ...
t = Table(
'sometable',
autoload=True,
listeners=[
('column_reflect', listen_for_reflect)
])
:param mustexist: When ``True``, indicates that this Table must already
be present in the given :class:`.MetaData` collection, else
an exception is raised.
:param prefixes:
A list of strings to insert after CREATE in the CREATE TABLE
statement. They will be separated by spaces.
:param quote: Force quoting of this table's name on or off, corresponding
to ``True`` or ``False``. When left at its default of ``None``,
the column identifier will be quoted according to whether the name is
case sensitive (identifiers with at least one upper case character are
treated as case sensitive), or if it's a reserved word. This flag
is only needed to force quoting of a reserved word which is not known
by the SQLAlchemy dialect.
:param quote_schema: same as 'quote' but applies to the schema identifier.
:param schema: The *schema name* for this table, which is required if
the table resides in a schema other than the default selected schema
for the engine's database connection. Defaults to ``None``.
:param useexisting: Deprecated. Use extend_existing.
"""
__visit_name__ = 'table'
def __new__(cls, *args, **kw):
if not args:
# python3k pickle seems to call this
return object.__new__(cls)
try:
name, metadata, args = args[0], args[1], args[2:]
except IndexError:
raise TypeError("Table() takes at least two arguments")
schema = kw.get('schema', None)
if schema is None:
schema = metadata.schema
keep_existing = kw.pop('keep_existing', False)
extend_existing = kw.pop('extend_existing', False)
if 'useexisting' in kw:
msg = "useexisting is deprecated. Use extend_existing."
util.warn_deprecated(msg)
if extend_existing:
msg = "useexisting is synonymous with extend_existing."
raise exc.ArgumentError(msg)
extend_existing = kw.pop('useexisting', False)
if keep_existing and extend_existing:
msg = "keep_existing and extend_existing are mutually exclusive."
raise exc.ArgumentError(msg)
mustexist = kw.pop('mustexist', False)
key = _get_table_key(name, schema)
if key in metadata.tables:
if not keep_existing and not extend_existing and bool(args):
raise exc.InvalidRequestError(
"Table '%s' is already defined for this MetaData "
"instance. Specify 'extend_existing=True' "
"to redefine "
"options and columns on an "
"existing Table object." % key)
table = metadata.tables[key]
if extend_existing:
table._init_existing(*args, **kw)
return table
else:
if mustexist:
raise exc.InvalidRequestError(
"Table '%s' not defined" % (key))
table = object.__new__(cls)
table.dispatch.before_parent_attach(table, metadata)
metadata._add_table(name, schema, table)
try:
table._init(name, metadata, *args, **kw)
table.dispatch.after_parent_attach(table, metadata)
return table
except:
metadata._remove_table(name, schema)
raise
def __init__(self, *args, **kw):
"""Constructor for :class:`~.schema.Table`.
This method is a no-op. See the top-level
documentation for :class:`~.schema.Table`
for constructor arguments.
"""
# __init__ is overridden to prevent __new__ from
# calling the superclass constructor.
def _init(self, name, metadata, *args, **kwargs):
super(Table, self).__init__(name)
self.metadata = metadata
self.schema = kwargs.pop('schema', None)
if self.schema is None:
self.schema = metadata.schema
self.quote_schema = kwargs.pop(
'quote_schema', metadata.quote_schema)
else:
self.quote_schema = kwargs.pop('quote_schema', None)
self.indexes = set()
self.constraints = set()
self._columns = expression.ColumnCollection()
PrimaryKeyConstraint()._set_parent_with_dispatch(self)
self.foreign_keys = set()
self._extra_dependencies = set()
self.kwargs = {}
if self.schema is not None:
self.fullname = "%s.%s" % (self.schema, self.name)
else:
self.fullname = self.name
autoload = kwargs.pop('autoload', False)
autoload_with = kwargs.pop('autoload_with', None)
# this argument is only used with _init_existing()
kwargs.pop('autoload_replace', True)
include_columns = kwargs.pop('include_columns', None)
self.implicit_returning = kwargs.pop('implicit_returning', True)
self.quote = kwargs.pop('quote', None)
if 'info' in kwargs:
self.info = kwargs.pop('info')
if 'listeners' in kwargs:
listeners = kwargs.pop('listeners')
for evt, fn in listeners:
event.listen(self, evt, fn)
self._prefixes = kwargs.pop('prefixes', [])
self._extra_kwargs(**kwargs)
# load column definitions from the database if 'autoload' is defined
# we do it after the table is in the singleton dictionary to support
# circular foreign keys
if autoload:
self._autoload(metadata, autoload_with, include_columns)
# initialize all the column, etc. objects. done after reflection to
# allow user-overrides
self._init_items(*args)
def _autoload(self, metadata, autoload_with, include_columns,
exclude_columns=()):
if self.primary_key.columns:
PrimaryKeyConstraint(*[
c for c in self.primary_key.columns
if c.key in exclude_columns
])._set_parent_with_dispatch(self)
if autoload_with:
autoload_with.run_callable(
autoload_with.dialect.reflecttable,
self, include_columns, exclude_columns
)
else:
bind = _bind_or_error(metadata,
msg="No engine is bound to this Table's MetaData. "
"Pass an engine to the Table via "
"autoload_with=<someengine>, "
"or associate the MetaData with an engine via "
"metadata.bind=<someengine>")
bind.run_callable(
bind.dialect.reflecttable,
self, include_columns, exclude_columns
)
@property
def _sorted_constraints(self):
"""Return the set of constraints as a list, sorted by creation
order.
"""
return sorted(self.constraints, key=lambda c: c._creation_order)
def _init_existing(self, *args, **kwargs):
autoload = kwargs.pop('autoload', False)
autoload_with = kwargs.pop('autoload_with', None)
autoload_replace = kwargs.pop('autoload_replace', True)
schema = kwargs.pop('schema', None)
if schema and schema != self.schema:
raise exc.ArgumentError(
"Can't change schema of existing table from '%s' to '%s'",
(self.schema, schema))
include_columns = kwargs.pop('include_columns', None)
if include_columns is not None:
for c in self.c:
if c.name not in include_columns:
self._columns.remove(c)
for key in ('quote', 'quote_schema'):
if key in kwargs:
setattr(self, key, kwargs.pop(key))
if 'info' in kwargs:
self.info = kwargs.pop('info')
if autoload:
if not autoload_replace:
exclude_columns = [c.name for c in self.c]
else:
exclude_columns = ()
self._autoload(
self.metadata, autoload_with, include_columns, exclude_columns)
self._extra_kwargs(**kwargs)
self._init_items(*args)
def _extra_kwargs(self, **kwargs):
# validate remaining kwargs that they all specify DB prefixes
_validate_dialect_kwargs(kwargs, "Table")
self.kwargs.update(kwargs)
def _init_collections(self):
pass
@util.memoized_property
def _autoincrement_column(self):
for col in self.primary_key:
if col.autoincrement and \
col.type._type_affinity is not None and \
issubclass(col.type._type_affinity, sqltypes.Integer) and \
(not col.foreign_keys or col.autoincrement == 'ignore_fk') and \
isinstance(col.default, (type(None), Sequence)) and \
(col.server_default is None or col.server_default.reflected):
return col
@property
def key(self):
return _get_table_key(self.name, self.schema)
def __repr__(self):
return "Table(%s)" % ', '.join(
[repr(self.name)] + [repr(self.metadata)] +
[repr(x) for x in self.columns] +
["%s=%s" % (k, repr(getattr(self, k))) for k in ['schema']])
def __str__(self):
return _get_table_key(self.description, self.schema)
@property
def bind(self):
"""Return the connectable associated with this Table."""
return self.metadata and self.metadata.bind or None
def add_is_dependent_on(self, table):
"""Add a 'dependency' for this Table.
This is another Table object which must be created
first before this one can, or dropped after this one.
Usually, dependencies between tables are determined via
ForeignKey objects. However, for other situations that
create dependencies outside of foreign keys (rules, inheriting),
this method can manually establish such a link.
"""
self._extra_dependencies.add(table)
def append_column(self, column):
"""Append a :class:`~.schema.Column` to this :class:`~.schema.Table`.
The "key" of the newly added :class:`~.schema.Column`, i.e. the
value of its ``.key`` attribute, will then be available
in the ``.c`` collection of this :class:`~.schema.Table`, and the
column definition will be included in any CREATE TABLE, SELECT,
UPDATE, etc. statements generated from this :class:`~.schema.Table`
construct.
Note that this does **not** change the definition of the table
as it exists within any underlying database, assuming that
table has already been created in the database. Relational
databases support the addition of columns to existing tables
using the SQL ALTER command, which would need to be
emitted for an already-existing table that doesn't contain
the newly added column.
"""
column._set_parent_with_dispatch(self)
def append_constraint(self, constraint):
"""Append a :class:`~.schema.Constraint` to this
:class:`~.schema.Table`.
This has the effect of the constraint being included in any
future CREATE TABLE statement, assuming specific DDL creation
events have not been associated with the given
:class:`~.schema.Constraint` object.
Note that this does **not** produce the constraint within the
relational database automatically, for a table that already exists
in the database. To add a constraint to an
existing relational database table, the SQL ALTER command must
be used. SQLAlchemy also provides the
:class:`.AddConstraint` construct which can produce this SQL when
invoked as an executable clause.
"""
constraint._set_parent_with_dispatch(self)
def append_ddl_listener(self, event_name, listener):
"""Append a DDL event listener to this ``Table``.
.. deprecated:: 0.7
See :class:`.DDLEvents`.
"""
def adapt_listener(target, connection, **kw):
listener(event_name, target, connection)
event.listen(self, "" + event_name.replace('-', '_'), adapt_listener)
def _set_parent(self, metadata):
metadata._add_table(self.name, self.schema, self)
self.metadata = metadata
def get_children(self, column_collections=True,
schema_visitor=False, **kw):
if not schema_visitor:
return expression.TableClause.get_children(
self, column_collections=column_collections, **kw)
else:
if column_collections:
return list(self.columns)
else:
return []
def exists(self, bind=None):
"""Return True if this table exists."""
if bind is None:
bind = _bind_or_error(self)
return bind.run_callable(bind.dialect.has_table,
self.name, schema=self.schema)
def create(self, bind=None, checkfirst=False):
"""Issue a ``CREATE`` statement for this
:class:`.Table`, using the given :class:`.Connectable`
for connectivity.
See also :meth:`.MetaData.create_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaGenerator,
self,
checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=False):
"""Issue a ``DROP`` statement for this
:class:`.Table`, using the given :class:`.Connectable`
for connectivity.
See also :meth:`.MetaData.drop_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaDropper,
self,
checkfirst=checkfirst)
def tometadata(self, metadata, schema=RETAIN_SCHEMA):
"""Return a copy of this :class:`.Table` associated with a different
:class:`.MetaData`.
E.g.::
some_engine = create_engine("sqlite:///some.db")
# create two metadata
meta1 = MetaData()
meta2 = MetaData()
# load 'users' from the sqlite engine
users_table = Table('users', meta1, autoload=True,
autoload_with=some_engine)
# create the same Table object for the plain metadata
users_table_2 = users_table.tometadata(meta2)
:param metadata: Target :class:`.MetaData` object.
:param schema: Optional string name of a target schema, or
``None`` for no schema. The :class:`.Table` object will be
given this schema name upon copy. Defaults to the special
symbol :attr:`.RETAIN_SCHEMA` which indicates no change should be
made to the schema name of the resulting :class:`.Table`.
"""
if schema is RETAIN_SCHEMA:
schema = self.schema
elif schema is None:
schema = metadata.schema
key = _get_table_key(self.name, schema)
if key in metadata.tables:
util.warn("Table '%s' already exists within the given "
"MetaData - not copying." % self.description)
return metadata.tables[key]
args = []
for c in self.columns:
args.append(c.copy(schema=schema))
table = Table(
self.name, metadata, schema=schema,
*args, **self.kwargs
)
for c in self.constraints:
table.append_constraint(c.copy(schema=schema, target_table=table))
for index in self.indexes:
# skip indexes that would be generated
# by the 'index' flag on Column
if len(index.columns) == 1 and \
list(index.columns)[0].index:
continue
Index(index.name,
unique=index.unique,
*[table.c[col] for col in index.columns.keys()],
**index.kwargs)
table.dispatch._update(self.dispatch)
return table
class Column(SchemaItem, expression.ColumnClause):
    """Represents a column in a database table."""
    __visit_name__ = 'column'
    def __init__(self, *args, **kwargs):
        """
        Construct a new ``Column`` object.
        :param name: The name of this column as represented in the database.
            This argument may be the first positional argument, or specified
            via keyword.
            Names which contain no upper case characters
            will be treated as case insensitive names, and will not be quoted
            unless they are a reserved word.  Names with any number of upper
            case characters will be quoted and sent exactly.  Note that this
            behavior applies even for databases which standardize upper
            case names as case insensitive such as Oracle.
            The name field may be omitted at construction time and applied
            later, at any time before the Column is associated with a
            :class:`.Table`.  This is to support convenient
            usage within the :mod:`~sqlalchemy.ext.declarative` extension.
        :param type\_: The column's type, indicated using an instance which
            subclasses :class:`~sqlalchemy.types.TypeEngine`.  If no arguments
            are required for the type, the class of the type can be sent
            as well, e.g.::
                # use a type with arguments
                Column('data', String(50))
                # use no arguments
                Column('level', Integer)
            The ``type`` argument may be the second positional argument
            or specified by keyword.
            If the ``type`` is ``None`` or is omitted, it will first default to the special
            type :class:`.NullType`.  If and when this :class:`.Column` is
            made to refer to another column using :class:`.ForeignKey`
            and/or :class:`.ForeignKeyConstraint`, the type of the remote-referenced
            column will be copied to this column as well, at the moment that
            the foreign key is resolved against that remote :class:`.Column`
            object.
            .. versionchanged:: 0.9.0
                Support for propagation of type to a :class:`.Column` from its
                :class:`.ForeignKey` object has been improved and should be
                more reliable and timely.
        :param \*args: Additional positional arguments include various
            :class:`.SchemaItem` derived constructs which will be applied
            as options to the column.  These include instances of
            :class:`.Constraint`, :class:`.ForeignKey`, :class:`.ColumnDefault`,
            and :class:`.Sequence`.  In some cases an equivalent keyword
            argument is available such as ``server_default``, ``default``
            and ``unique``.
        :param autoincrement: This flag may be set to ``False`` to
            indicate an integer primary key column that should not be
            considered to be the "autoincrement" column, that is
            the integer primary key column which generates values
            implicitly upon INSERT and whose value is usually returned
            via the DBAPI cursor.lastrowid attribute.  It defaults
            to ``True`` to satisfy the common use case of a table
            with a single integer primary key column.  If the table
            has a composite primary key consisting of more than one
            integer column, set this flag to True only on the
            column that should be considered "autoincrement".
            The setting *only* has an effect for columns which are:
            * Integer derived (i.e. INT, SMALLINT, BIGINT).
            * Part of the primary key
            * Are not referenced by any foreign keys, unless
              the value is specified as ``'ignore_fk'``
              .. versionadded:: 0.7.4
            * have no server side or client side defaults (with the exception
              of Postgresql SERIAL).
            The setting has these two effects on columns that meet the
            above criteria:
            * DDL issued for the column will include database-specific
              keywords intended to signify this column as an
              "autoincrement" column, such as AUTO INCREMENT on MySQL,
              SERIAL on Postgresql, and IDENTITY on MS-SQL.  It does
              *not* issue AUTOINCREMENT for SQLite since this is a
              special SQLite flag that is not required for autoincrementing
              behavior. See the SQLite dialect documentation for
              information on SQLite's AUTOINCREMENT.
            * The column will be considered to be available as
              cursor.lastrowid or equivalent, for those dialects which
              "post fetch" newly inserted identifiers after a row has
              been inserted (SQLite, MySQL, MS-SQL).  It does not have
              any effect in this regard for databases that use sequences
              to generate primary key identifiers (i.e. Firebird, Postgresql,
              Oracle).
            .. versionchanged:: 0.7.4
                ``autoincrement`` accepts a special value ``'ignore_fk'``
                to indicate that autoincrementing status regardless of foreign
                key references.  This applies to certain composite foreign key
                setups, such as the one demonstrated in the ORM documentation
                at :ref:`post_update`.
        :param default: A scalar, Python callable, or
            :class:`.ColumnElement` expression representing the
            *default value* for this column, which will be invoked upon insert
            if this column is otherwise not specified in the VALUES clause of
            the insert.  This is a shortcut to using :class:`.ColumnDefault` as
            a positional argument; see that class for full detail on the
            structure of the argument.
            Contrast this argument to ``server_default`` which creates a
            default generator on the database side.
        :param doc: optional String that can be used by the ORM or similar
            to document attributes.   This attribute does not render SQL
            comments (a future attribute 'comment' will achieve that).
        :param key: An optional string identifier which will identify this
            ``Column`` object on the :class:`.Table`. When a key is provided,
            this is the only identifier referencing the ``Column`` within the
            application, including ORM attribute mapping; the ``name`` field
            is used only when rendering SQL.
        :param index: When ``True``, indicates that the column is indexed.
            This is a shortcut for using a :class:`.Index` construct on the
            table. To specify indexes with explicit names or indexes that
            contain multiple columns, use the :class:`.Index` construct
            instead.
        :param info: Optional data dictionary which will be populated into the
            :attr:`.SchemaItem.info` attribute of this object.
        :param nullable: If set to the default of ``True``, indicates the
            column will be rendered as allowing NULL, else it's rendered as
            NOT NULL. This parameter is only used when issuing CREATE TABLE
            statements.
        :param onupdate: A scalar, Python callable, or
            :class:`~sqlalchemy.sql.expression.ClauseElement` representing a
            default value to be applied to the column within UPDATE
            statements, which will be invoked upon update if this column is not
            present in the SET clause of the update. This is a shortcut to
            using :class:`.ColumnDefault` as a positional argument with
            ``for_update=True``.
        :param primary_key: If ``True``, marks this column as a primary key
            column. Multiple columns can have this flag set to specify
            composite primary keys. As an alternative, the primary key of a
            :class:`.Table` can be specified via an explicit
            :class:`.PrimaryKeyConstraint` object.
        :param server_default: A :class:`.FetchedValue` instance, str, Unicode
            or :func:`~sqlalchemy.sql.expression.text` construct representing
            the DDL DEFAULT value for the column.
            String types will be emitted as-is, surrounded by single quotes::
                Column('x', Text, server_default="val")
                x TEXT DEFAULT 'val'
            A :func:`~sqlalchemy.sql.expression.text` expression will be
            rendered as-is, without quotes::
                Column('y', DateTime, server_default=text('NOW()'))
                y DATETIME DEFAULT NOW()
            Strings and text() will be converted into a :class:`.DefaultClause`
            object upon initialization.
            Use :class:`.FetchedValue` to indicate that an already-existing
            column will generate a default value on the database side which
            will be available to SQLAlchemy for post-fetch after inserts. This
            construct does not specify any DDL and the implementation is left
            to the database, such as via a trigger.
        :param server_onupdate: A :class:`.FetchedValue` instance
            representing a database-side default generation function. This
            indicates to SQLAlchemy that a newly generated value will be
            available after updates. This construct does not specify any DDL
            and the implementation is left to the database, such as via a
            trigger.
        :param quote: Force quoting of this column's name on or off,
            corresponding to ``True`` or ``False``. When left at its default
            of ``None``, the column identifier will be quoted according to
            whether the name is case sensitive (identifiers with at least one
            upper case character are treated as case sensitive), or if it's a
            reserved word. This flag is only needed to force quoting of a
            reserved word which is not known by the SQLAlchemy dialect.
        :param unique: When ``True``, indicates that this column contains a
            unique constraint, or if ``index`` is ``True`` as well, indicates
            that the :class:`.Index` should be created with the unique flag.
            To specify multiple columns in the constraint/index or to specify
            an explicit name, use the :class:`.UniqueConstraint` or
            :class:`.Index` constructs explicitly.
        """
        # name and type_ may arrive positionally or by keyword, but not both;
        # remaining positional args are SchemaItem constructs handled below.
        name = kwargs.pop('name', None)
        type_ = kwargs.pop('type_', None)
        args = list(args)
        if args:
            if isinstance(args[0], util.string_types):
                if name is not None:
                    raise exc.ArgumentError(
                        "May not pass name positionally and as a keyword.")
                name = args.pop(0)
        if args:
            coltype = args[0]
            if (isinstance(coltype, sqltypes.TypeEngine) or
                    (isinstance(coltype, type) and
                     issubclass(coltype, sqltypes.TypeEngine))):
                if type_ is not None:
                    raise exc.ArgumentError(
                        "May not pass type_ positionally and as a keyword.")
                type_ = args.pop(0)
        super(Column, self).__init__(name, None, type_)
        self.key = kwargs.pop('key', name)
        self.primary_key = kwargs.pop('primary_key', False)
        # primary key columns default to NOT NULL
        self.nullable = kwargs.pop('nullable', not self.primary_key)
        self.default = kwargs.pop('default', None)
        self.server_default = kwargs.pop('server_default', None)
        self.server_onupdate = kwargs.pop('server_onupdate', None)
        self.index = kwargs.pop('index', None)
        self.unique = kwargs.pop('unique', None)
        self.quote = kwargs.pop('quote', None)
        self.doc = kwargs.pop('doc', None)
        self.onupdate = kwargs.pop('onupdate', None)
        self.autoincrement = kwargs.pop('autoincrement', True)
        self.constraints = set()
        self.foreign_keys = set()
        # check if this Column is proxying another column
        if '_proxies' in kwargs:
            self._proxies = kwargs.pop('_proxies')
        # otherwise, add DDL-related events
        elif isinstance(self.type, sqltypes.SchemaType):
            self.type._set_parent_with_dispatch(self)
        # normalize the default/onupdate/server_* keyword shortcuts into the
        # equivalent SchemaItem constructs and append them to args so that
        # _init_items() attaches them uniformly.
        if self.default is not None:
            if isinstance(self.default, (ColumnDefault, Sequence)):
                args.append(self.default)
            else:
                if getattr(self.type, '_warn_on_bytestring', False):
                    if isinstance(self.default, util.binary_type):
                        util.warn("Unicode column received non-unicode "
                                  "default value.")
                args.append(ColumnDefault(self.default))
        if self.server_default is not None:
            if isinstance(self.server_default, FetchedValue):
                args.append(self.server_default._as_for_update(False))
            else:
                args.append(DefaultClause(self.server_default))
        if self.onupdate is not None:
            if isinstance(self.onupdate, (ColumnDefault, Sequence)):
                args.append(self.onupdate)
            else:
                args.append(ColumnDefault(self.onupdate, for_update=True))
        if self.server_onupdate is not None:
            if isinstance(self.server_onupdate, FetchedValue):
                args.append(self.server_onupdate._as_for_update(True))
            else:
                args.append(DefaultClause(self.server_onupdate,
                                          for_update=True))
        self._init_items(*args)
        util.set_creation_order(self)
        if 'info' in kwargs:
            self.info = kwargs.pop('info')
        if kwargs:
            raise exc.ArgumentError(
                "Unknown arguments passed to Column: " + repr(list(kwargs)))
    def __str__(self):
        if self.name is None:
            return "(no name)"
        elif self.table is not None:
            if self.table.named_with_column:
                return (self.table.description + "." + self.description)
            else:
                return self.description
        else:
            return self.description
    def references(self, column):
        """Return True if this Column references the given column via foreign
        key."""
        for fk in self.foreign_keys:
            if fk.column.proxy_set.intersection(column.proxy_set):
                return True
        else:
            # for/else: loop completed without a match
            return False
    def append_foreign_key(self, fk):
        fk._set_parent_with_dispatch(self)
    def __repr__(self):
        # only include keyword flags that differ from their defaults
        kwarg = []
        if self.key != self.name:
            kwarg.append('key')
        if self.primary_key:
            kwarg.append('primary_key')
        if not self.nullable:
            kwarg.append('nullable')
        if self.onupdate:
            kwarg.append('onupdate')
        if self.default:
            kwarg.append('default')
        if self.server_default:
            kwarg.append('server_default')
        return "Column(%s)" % ', '.join(
            [repr(self.name)] + [repr(self.type)] +
            [repr(x) for x in self.foreign_keys if x is not None] +
            [repr(x) for x in self.constraints] +
            [(self.table is not None and "table=<%s>" %
                    self.table.description or "table=None")] +
            ["%s=%s" % (k, repr(getattr(self, k))) for k in kwarg])
    def _set_parent(self, table):
        """Attach this Column to the given Table, replacing any existing
        column of the same key and wiring up index/unique shortcuts."""
        if not self.name:
            raise exc.ArgumentError(
                "Column must be constructed with a non-blank name or "
                "assign a non-blank .name before adding to a Table.")
        if self.key is None:
            self.key = self.name
        existing = getattr(self, 'table', None)
        if existing is not None and existing is not table:
            raise exc.ArgumentError(
                "Column object already assigned to Table '%s'" %
                existing.description)
        if self.key in table._columns:
            col = table._columns.get(self.key)
            if col is not self:
                # replacing a previous column: detach its foreign keys
                # and their constraints from the table first
                for fk in col.foreign_keys:
                    table.foreign_keys.remove(fk)
                    if fk.constraint in table.constraints:
                        # this might have been removed
                        # already, if it's a composite constraint
                        # and more than one col being replaced
                        table.constraints.remove(fk.constraint)
        table._columns.replace(self)
        if self.primary_key:
            table.primary_key._replace(self)
            # invalidate the memoized autoincrement column choice
            Table._autoincrement_column._reset(table)
        elif self.key in table.primary_key:
            raise exc.ArgumentError(
                "Trying to redefine primary-key column '%s' as a "
                "non-primary-key column on table '%s'" % (
                    self.key, table.fullname))
        self.table = table
        if self.index:
            if isinstance(self.index, util.string_types):
                raise exc.ArgumentError(
                    "The 'index' keyword argument on Column is boolean only. "
                    "To create indexes with a specific name, create an "
                    "explicit Index object external to the Table.")
            Index(expression._truncated_label('ix_%s' % self._label),
                  self, unique=self.unique)
        elif self.unique:
            if isinstance(self.unique, util.string_types):
                raise exc.ArgumentError(
                    "The 'unique' keyword argument on Column is boolean "
                    "only. To create unique constraints or indexes with a "
                    "specific name, append an explicit UniqueConstraint to "
                    "the Table's list of elements, or create an explicit "
                    "Index object external to the Table.")
            table.append_constraint(UniqueConstraint(self.key))
        # resolve any ForeignKey objects that were waiting for this
        # (table, column) pair to appear in the MetaData
        fk_key = (table.key, self.key)
        if fk_key in self.table.metadata._fk_memos:
            for fk in self.table.metadata._fk_memos[fk_key]:
                fk._set_remote_table(table)
    def _on_table_attach(self, fn):
        # run fn immediately if already attached, else defer via event
        if self.table is not None:
            fn(self, self.table)
        event.listen(self, 'after_parent_attach', fn)
    def copy(self, **kw):
        """Create a copy of this ``Column``, uninitialized.
        This is used in ``Table.tometadata``.
        """
        # Constraint objects plus non-constraint-bound ForeignKey objects
        args = \
            [c.copy(**kw) for c in self.constraints] + \
            [c.copy(**kw) for c in self.foreign_keys if not c.constraint]
        type_ = self.type
        if isinstance(type_, sqltypes.SchemaType):
            type_ = type_.copy(**kw)
        c = self._constructor(
            name=self.name,
            type_=type_,
            key=self.key,
            primary_key=self.primary_key,
            nullable=self.nullable,
            unique=self.unique,
            quote=self.quote,
            index=self.index,
            autoincrement=self.autoincrement,
            default=self.default,
            server_default=self.server_default,
            onupdate=self.onupdate,
            server_onupdate=self.server_onupdate,
            info=self.info,
            doc=self.doc,
            *args
            )
        c.dispatch._update(self.dispatch)
        return c
    def _make_proxy(self, selectable, name=None, key=None,
                    name_is_truncatable=False, **kw):
        """Create a *proxy* for this column.
        This is a copy of this ``Column`` referenced by a different parent
        (such as an alias or select statement).  The column should
        be used only in select scenarios, as its full DDL/default
        information is not transferred.
        """
        fk = [ForeignKey(f.column, _constraint=f.constraint)
              for f in self.foreign_keys]
        if name is None and self.name is None:
            raise exc.InvalidRequestError("Cannot initialize a sub-selectable"
                    " with this Column object until it's 'name' has "
                    "been assigned.")
        try:
            c = self._constructor(
                expression._as_truncated(name or self.name) if \
                    name_is_truncatable else (name or self.name),
                self.type,
                key=key if key else name if name else self.key,
                primary_key=self.primary_key,
                nullable=self.nullable,
                quote=self.quote,
                _proxies=[self], *fk)
        except TypeError:
            util.raise_from_cause(
                TypeError(
                    "Could not create a copy of this %r object.  "
                    "Ensure the class includes a _constructor() "
                    "attribute or method which accepts the "
                    "standard Column constructor arguments, or "
                    "references the Column class itself." % self.__class__)
            )
        c.table = selectable
        selectable._columns.add(c)
        if selectable._is_clone_of is not None:
            c._is_clone_of = selectable._is_clone_of.columns[c.key]
        if self.primary_key:
            selectable.primary_key.add(c)
        c.dispatch.after_parent_attach(c, selectable)
        return c
    def get_children(self, schema_visitor=False, **kwargs):
        if schema_visitor:
            return [x for x in (self.default, self.onupdate)
                    if x is not None] + \
                list(self.foreign_keys) + list(self.constraints)
        else:
            return expression.ColumnClause.get_children(self, **kwargs)
class ForeignKey(SchemaItem):
    """Defines a dependency between two columns.
    ``ForeignKey`` is specified as an argument to a :class:`.Column` object,
    e.g.::
        t = Table("remote_table", metadata,
            Column("remote_id", ForeignKey("main_table.id"))
        )
    Note that ``ForeignKey`` is only a marker object that defines
    a dependency between two columns.   The actual constraint
    is in all cases represented by the :class:`.ForeignKeyConstraint`
    object.   This object will be generated automatically when
    a ``ForeignKey`` is associated with a :class:`.Column` which
    in turn is associated with a :class:`.Table`.   Conversely,
    when :class:`.ForeignKeyConstraint` is applied to a :class:`.Table`,
    ``ForeignKey`` markers are automatically generated to be
    present on each associated :class:`.Column`, which are also
    associated with the constraint object.
    Note that you cannot define a "composite" foreign key constraint,
    that is a constraint between a grouping of multiple parent/child
    columns, using ``ForeignKey`` objects.   To define this grouping,
    the :class:`.ForeignKeyConstraint` object must be used, and applied
    to the :class:`.Table`.   The associated ``ForeignKey`` objects
    are created automatically.
    The ``ForeignKey`` objects associated with an individual
    :class:`.Column` object are available in the `foreign_keys` collection
    of that column.
    Further examples of foreign key configuration are in
    :ref:`metadata_foreignkeys`.
    """
    __visit_name__ = 'foreign_key'
    def __init__(self, column, _constraint=None, use_alter=False, name=None,
                 onupdate=None, ondelete=None, deferrable=None,
                 schema=None,
                 initially=None, link_to_name=False, match=None):
        """
        Construct a column-level FOREIGN KEY.
        The :class:`.ForeignKey` object when constructed generates a
        :class:`.ForeignKeyConstraint` which is associated with the parent
        :class:`.Table` object's collection of constraints.
        :param column: A single target column for the key relationship. A
            :class:`.Column` object or a column name as a string:
            ``tablename.columnkey`` or ``schema.tablename.columnkey``.
            ``columnkey`` is the ``key`` which has been assigned to the column
            (defaults to the column name itself), unless ``link_to_name`` is
            ``True`` in which case the rendered name of the column is used.
            .. versionadded:: 0.7.4
                Note that if the schema name is not included, and the
                underlying :class:`.MetaData` has a "schema", that value will
                be used.
        :param name: Optional string. An in-database name for the key if
            `constraint` is not provided.
        :param onupdate: Optional string. If set, emit ON UPDATE <value> when
            issuing DDL for this constraint. Typical values include CASCADE,
            DELETE and RESTRICT.
        :param ondelete: Optional string. If set, emit ON DELETE <value> when
            issuing DDL for this constraint. Typical values include CASCADE,
            DELETE and RESTRICT.
        :param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
            DEFERRABLE when issuing DDL for this constraint.
        :param initially: Optional string. If set, emit INITIALLY <value> when
            issuing DDL for this constraint.
        :param link_to_name: if True, the string name given in ``column`` is
            the rendered name of the referenced column, not its locally
            assigned ``key``.
        :param use_alter: passed to the underlying
            :class:`.ForeignKeyConstraint` to indicate the constraint should be
            generated/dropped externally from the CREATE TABLE/ DROP TABLE
            statement. See that class's constructor for details.
        :param match: Optional string. If set, emit MATCH <value> when issuing
            DDL for this constraint. Typical values include SIMPLE, PARTIAL
            and FULL.
        """
        self._colspec = column
        # the linked ForeignKeyConstraint.
        # ForeignKey will create this when parent Column
        # is attached to a Table, *or* ForeignKeyConstraint
        # object passes itself in when creating ForeignKey
        # markers.
        self.constraint = _constraint
        self.parent = None
        self.use_alter = use_alter
        self.name = name
        self.onupdate = onupdate
        self.ondelete = ondelete
        self.deferrable = deferrable
        self.initially = initially
        self.link_to_name = link_to_name
        self.match = match
        # NOTE(review): the ``schema`` parameter is accepted but never
        # stored or used in this constructor -- confirm whether it is
        # intentionally ignored.
    def __repr__(self):
        return "ForeignKey(%r)" % self._get_colspec()
    def copy(self, schema=None):
        """Produce a copy of this :class:`.ForeignKey` object.
        The new :class:`.ForeignKey` will not be bound
        to any :class:`.Column`.
        This method is usually used by the internal
        copy procedures of :class:`.Column`, :class:`.Table`,
        and :class:`.MetaData`.
        :param schema: The returned :class:`.ForeignKey` will
          reference the original table and column name, qualified
          by the given string schema name.
        """
        fk = ForeignKey(
            self._get_colspec(schema=schema),
            use_alter=self.use_alter,
            name=self.name,
            onupdate=self.onupdate,
            ondelete=self.ondelete,
            deferrable=self.deferrable,
            initially=self.initially,
            link_to_name=self.link_to_name,
            match=self.match
        )
        fk.dispatch._update(self.dispatch)
        return fk
    def _get_colspec(self, schema=None):
        """Return a string based 'column specification' for this
        :class:`.ForeignKey`.
        This is usually the equivalent of the string-based "tablename.colname"
        argument first passed to the object's constructor.
        """
        if schema:
            return schema + "." + self.column.table.name + \
                "." + self.column.key
        elif isinstance(self._colspec, util.string_types):
            return self._colspec
        elif hasattr(self._colspec, '__clause_element__'):
            _column = self._colspec.__clause_element__()
        else:
            _column = self._colspec
        return "%s.%s" % (_column.table.fullname, _column.key)
    target_fullname = property(_get_colspec)
    def references(self, table):
        """Return True if the given :class:`.Table` is referenced by this
        :class:`.ForeignKey`."""
        return table.corresponding_column(self.column) is not None
    def get_referent(self, table):
        """Return the :class:`.Column` in the given :class:`.Table`
        referenced by this :class:`.ForeignKey`.
        Returns None if this :class:`.ForeignKey` does not reference the given
        :class:`.Table`.
        """
        return table.corresponding_column(self.column)
    @util.memoized_property
    def _column_tokens(self):
        """parse a string-based _colspec into its component parts."""
        m = self._colspec.split('.')
        # NOTE(review): str.split never returns None, so this branch
        # appears to be unreachable.
        if m is None:
            raise exc.ArgumentError(
                "Invalid foreign key column specification: %s" %
                self._colspec)
        if (len(m) == 1):
            tname = m.pop()
            colname = None
        else:
            colname = m.pop()
            tname = m.pop()
        # A FK between column 'bar' and table 'foo' can be
        # specified as 'foo', 'foo.bar', 'dbo.foo.bar',
        # 'otherdb.dbo.foo.bar'. Once we have the column name and
        # the table name, treat everything else as the schema
        # name. Some databases (e.g. Sybase) support
        # inter-database foreign keys. See tickets#1341 and --
        # indirectly related -- Ticket #594. This assumes that '.'
        # will never appear *within* any component of the FK.
        if (len(m) > 0):
            schema = '.'.join(m)
        else:
            schema = None
        return schema, tname, colname
    def _table_key(self):
        # MetaData key of the referenced table, or None if not yet known
        if isinstance(self._colspec, util.string_types):
            schema, tname, colname = self._column_tokens
            return _get_table_key(tname, schema)
        elif hasattr(self._colspec, '__clause_element__'):
            _column = self._colspec.__clause_element__()
        else:
            _column = self._colspec
        if _column.table is None:
            return None
        else:
            return _column.table.key
    def _resolve_col_tokens(self):
        """Resolve a string _colspec into (parenttable, tablekey, colname),
        raising if this ForeignKey is not yet attached to a parent
        Column/Table."""
        if self.parent is None:
            raise exc.InvalidRequestError(
                "this ForeignKey object does not yet have a "
                "parent Column associated with it.")
        elif self.parent.table is None:
            raise exc.InvalidRequestError(
                "this ForeignKey's parent column is not yet associated "
                "with a Table.")
        parenttable = self.parent.table
        # assertion, can be commented out.
        # basically Column._make_proxy() sends the actual
        # target Column to the ForeignKey object, so the
        # string resolution here is never called.
        for c in self.parent.base_columns:
            if isinstance(c, Column):
                assert c.table is parenttable
                break
        else:
            assert False
        ######################
        schema, tname, colname = self._column_tokens
        if schema is None and parenttable.metadata.schema is not None:
            schema = parenttable.metadata.schema
        tablekey = _get_table_key(tname, schema)
        return parenttable, tablekey, colname
    def _link_to_col_by_colstring(self, parenttable, table, colname):
        """Locate the target Column on ``table`` by name/key and assign it
        as this ForeignKey's target."""
        if not hasattr(self.constraint, '_referred_table'):
            self.constraint._referred_table = table
        else:
            assert self.constraint._referred_table is table
        _column = None
        if colname is None:
            # colname is None in the case that ForeignKey argument
            # was specified as table name only, in which case we
            # match the column name to the same column on the
            # parent.
            key = self.parent
            _column = table.c.get(self.parent.key, None)
        elif self.link_to_name:
            key = colname
            for c in table.c:
                if c.name == colname:
                    _column = c
        else:
            key = colname
            _column = table.c.get(colname, None)
        if _column is None:
            raise exc.NoReferencedColumnError(
                "Could not initialize target column for ForeignKey '%s' on table '%s': "
                "table '%s' has no column named '%s'" % (
                self._colspec, parenttable.name, table.name, key),
                table.name, key)
        self._set_target_column(_column)
    def _set_target_column(self, column):
        # propagate TypeEngine to parent if it didn't have one
        if isinstance(self.parent.type, sqltypes.NullType):
            self.parent.type = column.type
        # super-edgy case, if other FKs point to our column,
        # they'd get the type propagated out also.
        if isinstance(self.parent.table, Table):
            fk_key = (self.parent.table.key, self.parent.key)
            if fk_key in self.parent.table.metadata._fk_memos:
                for fk in self.parent.table.metadata._fk_memos[fk_key]:
                    if isinstance(fk.parent.type, sqltypes.NullType):
                        fk.parent.type = column.type
        # overwrites the memoized 'column' property below
        self.column = column
    @util.memoized_property
    def column(self):
        """Return the target :class:`.Column` referenced by this
        :class:`.ForeignKey`.
        If no target column has been established, an exception
        is raised.
        .. versionchanged:: 0.9.0
            Foreign key target column resolution now occurs as soon as both
            the ForeignKey object and the remote Column to which it refers
            are both associated with the same MetaData object.
        """
        # Successful resolution assigns self.column directly in
        # _set_target_column(); if this memoized property is still being
        # invoked for a string spec, resolution failed and we raise.
        if isinstance(self._colspec, util.string_types):
            parenttable, tablekey, colname = self._resolve_col_tokens()
            if tablekey not in parenttable.metadata:
                raise exc.NoReferencedTableError(
                    "Foreign key associated with column '%s' could not find "
                    "table '%s' with which to generate a "
                    "foreign key to target column '%s'" %
                    (self.parent, tablekey, colname),
                    tablekey)
            elif parenttable.key not in parenttable.metadata:
                raise exc.InvalidRequestError(
                    "Table %s is no longer associated with its "
                    "parent MetaData" % parenttable)
            else:
                raise exc.NoReferencedColumnError(
                    "Could not initialize target column for "
                    "ForeignKey '%s' on table '%s': "
                    "table '%s' has no column named '%s'" % (
                    self._colspec, parenttable.name, tablekey, colname),
                    tablekey, colname)
        elif hasattr(self._colspec, '__clause_element__'):
            _column = self._colspec.__clause_element__()
            return _column
        else:
            _column = self._colspec
            return _column
    def _set_parent(self, column):
        if self.parent is not None and self.parent is not column:
            raise exc.InvalidRequestError(
                "This ForeignKey already has a parent !")
        self.parent = column
        self.parent.foreign_keys.add(self)
        self.parent._on_table_attach(self._set_table)
    def _set_remote_table(self, table):
        parenttable, tablekey, colname = self._resolve_col_tokens()
        self._link_to_col_by_colstring(parenttable, table, colname)
        self.constraint._validate_dest_table(table)
    def _remove_from_metadata(self, metadata):
        parenttable, table_key, colname = self._resolve_col_tokens()
        fk_key = (table_key, colname)
        if self in metadata._fk_memos[fk_key]:
            # TODO: no test coverage for self not in memos
            metadata._fk_memos[fk_key].remove(self)
    def _set_table(self, column, table):
        # standalone ForeignKey - create ForeignKeyConstraint
        # on the hosting Table when attached to the Table.
        if self.constraint is None and isinstance(table, Table):
            self.constraint = ForeignKeyConstraint(
                [], [], use_alter=self.use_alter, name=self.name,
                onupdate=self.onupdate, ondelete=self.ondelete,
                deferrable=self.deferrable, initially=self.initially,
                match=self.match,
            )
            self.constraint._elements[self.parent] = self
            self.constraint._set_parent_with_dispatch(table)
        table.foreign_keys.add(self)
        # set up remote ".column" attribute, or a note to pick it
        # up when the other Table/Column shows up
        if isinstance(self._colspec, util.string_types):
            parenttable, table_key, colname = self._resolve_col_tokens()
            fk_key = (table_key, colname)
            if table_key in parenttable.metadata.tables:
                table = parenttable.metadata.tables[table_key]
                try:
                    self._link_to_col_by_colstring(parenttable, table, colname)
                except exc.NoReferencedColumnError:
                    # this is OK, we'll try later
                    pass
            parenttable.metadata._fk_memos[fk_key].append(self)
        elif hasattr(self._colspec, '__clause_element__'):
            _column = self._colspec.__clause_element__()
            self._set_target_column(_column)
        else:
            _column = self._colspec
            self._set_target_column(_column)
class _NotAColumnExpr(object):
    """Mixin that forbids use of a schema item as a column expression.
    Any attempt to treat the object as a clause element, group it, or
    enumerate its FROM objects raises ``InvalidRequestError``.
    """
    def _not_a_column_expr(self):
        raise exc.InvalidRequestError(
            "This %s cannot be used directly "
            "as a column expression." % self.__class__.__name__)
    def __clause_element__(self):
        return self._not_a_column_expr()
    def self_group(self):
        return self._not_a_column_expr()
    @property
    def _from_objects(self):
        return self._not_a_column_expr()
class DefaultGenerator(_NotAColumnExpr, SchemaItem):
    """Base class for column *default* values."""
    __visit_name__ = 'default_generator'
    is_sequence = False
    is_server_default = False
    column = None
    def __init__(self, for_update=False):
        self.for_update = for_update
    def _set_parent(self, column):
        # register on the column as its ON UPDATE or INSERT default
        self.column = column
        slot = 'onupdate' if self.for_update else 'default'
        setattr(self.column, slot, self)
    def execute(self, bind=None, **kwargs):
        """Execute this default against the given (or bound) connectable."""
        target = bind if bind is not None else _bind_or_error(self)
        return target._execute_default(self, **kwargs)
    @property
    def bind(self):
        """Return the connectable associated with this default."""
        col = getattr(self, 'column', None)
        return col.table.bind if col is not None else None
class ColumnDefault(DefaultGenerator):
    """A plain default value on a column.
    This could correspond to a constant, a callable function,
    or a SQL clause.
    :class:`.ColumnDefault` is generated automatically
    whenever the ``default``, ``onupdate`` arguments of
    :class:`.Column` are used.  A :class:`.ColumnDefault`
    can be passed positionally as well.
    For example, the following::
        Column('foo', Integer, default=50)
    Is equivalent to::
        Column('foo', Integer, ColumnDefault(50))
    """
    def __init__(self, arg, **kwargs):
        """Construct a new :class:`.ColumnDefault`.
        :param arg: argument representing the default value.
         May be one of the following:
         * a plain non-callable Python value, such as a
           string, integer, boolean, or other simple type.
           The default value will be used as is each time.
         * a SQL expression, that is one which derives from
           :class:`.ColumnElement`.  The SQL expression will
           be rendered into the INSERT or UPDATE statement,
           or in the case of a primary key column when
           RETURNING is not used may be
           pre-executed before an INSERT within a SELECT.
         * A Python callable.  The function will be invoked for each
           new row subject to an INSERT or UPDATE.
           The callable must accept exactly
           zero or one positional arguments.  The one-argument form
           will receive an instance of the :class:`.ExecutionContext`,
           which provides contextual information as to the current
           :class:`.Connection` in use as well as the current
           statement and parameters.
        """
        super(ColumnDefault, self).__init__(**kwargs)
        if isinstance(arg, FetchedValue):
            raise exc.ArgumentError(
                "ColumnDefault may not be a server-side default type.")
        if util.callable(arg):
            arg = self._maybe_wrap_callable(arg)
        self.arg = arg
    @util.memoized_property
    def is_callable(self):
        return util.callable(self.arg)
    @util.memoized_property
    def is_clause_element(self):
        return isinstance(self.arg, expression.ClauseElement)
    @util.memoized_property
    def is_scalar(self):
        return not self.is_callable and \
            not self.is_clause_element and \
            not self.is_sequence
    def _maybe_wrap_callable(self, fn):
        """Wrap callables that don't accept a context.
        The alternative here is to require that
        a simple callable passed to "default" would need
        to be of the form "default=lambda ctx: datetime.now".
        That is the more "correct" way to go, but the case
        of using a zero-arg callable for "default" is so
        much more prominent than the context-specific one
        I'm having trouble justifying putting that inconvenience
        on everyone.
        """
        # locate the actual function object to introspect: plain function,
        # class constructor, or __call__ of a callable instance
        if inspect.isfunction(fn):
            inspectable = fn
        elif inspect.isclass(fn):
            inspectable = fn.__init__
        elif hasattr(fn, '__call__'):
            inspectable = fn.__call__
        else:
            # probably not inspectable, try anyways.
            inspectable = fn
        try:
            argspec = inspect.getargspec(inspectable)
        except TypeError:
            # builtins etc. that can't be introspected; assume zero-arg
            return lambda ctx: fn()
        # old-style "cond and x or y": number of defaulted args, or 0
        defaulted = argspec[3] is not None and len(argspec[3]) or 0
        positionals = len(argspec[0]) - defaulted
        # Py3K compat - no unbound methods
        if inspect.ismethod(inspectable) or inspect.isclass(fn):
            positionals -= 1
        if positionals == 0:
            return lambda ctx: fn()
        elif positionals == 1:
            return fn
        else:
            raise exc.ArgumentError(
                "ColumnDefault Python function takes zero or one "
                "positional arguments")
    def _visit_name(self):
        if self.for_update:
            return "column_onupdate"
        else:
            return "column_default"
    __visit_name__ = property(_visit_name)
    def __repr__(self):
        return "ColumnDefault(%r)" % self.arg
class Sequence(DefaultGenerator):
    """Represents a named database sequence.

    The :class:`.Sequence` object represents the name and configurational
    parameters of a database sequence.   It also represents
    a construct that can be "executed" by a SQLAlchemy :class:`.Engine`
    or :class:`.Connection`, rendering the appropriate "next value" function
    for the target database and returning a result.

    The :class:`.Sequence` is typically associated with a primary key column::

        some_table = Table('some_table', metadata,
            Column('id', Integer, Sequence('some_table_seq'), primary_key=True)
        )

    When CREATE TABLE is emitted for the above :class:`.Table`, if the
    target platform supports sequences, a CREATE SEQUENCE statement will
    be emitted as well.   For platforms that don't support sequences,
    the :class:`.Sequence` construct is ignored.

    See also: :class:`.CreateSequence` :class:`.DropSequence`

    """

    __visit_name__ = 'sequence'

    # distinguishes a Sequence from other DefaultGenerator subclasses
    is_sequence = True

    def __init__(self, name, start=None, increment=None, schema=None,
                 optional=False, quote=None, metadata=None,
                 quote_schema=None,
                 for_update=False):
        """Construct a :class:`.Sequence` object.

        :param name: The name of the sequence.

        :param start: the starting index of the sequence.  This value is
         used when the CREATE SEQUENCE command is emitted to the database
         as the value of the "START WITH" clause.   If ``None``, the
         clause is omitted, which on most platforms indicates a starting
         value of 1.

        :param increment: the increment value of the sequence.  This
         value is used when the CREATE SEQUENCE command is emitted to
         the database as the value of the "INCREMENT BY" clause.  If ``None``,
         the clause is omitted, which on most platforms indicates an
         increment of 1.

        :param schema: Optional schema name for the sequence, if located
         in a schema other than the default.

        :param optional: boolean value, when ``True``, indicates that this
         :class:`.Sequence` object only needs to be explicitly generated
         on backends that don't provide another way to generate primary
         key identifiers.  Currently, it essentially means, "don't create
         this sequence on the Postgresql backend, where the SERIAL keyword
         creates a sequence for us automatically".

        :param quote: boolean value, when ``True`` or ``False``, explicitly
         forces quoting of the schema name on or off.  When left at its
         default of ``None``, normal quoting rules based on casing and reserved
         words take place.

        :param metadata: optional :class:`.MetaData` object which will be
         associated with this :class:`.Sequence`.  A :class:`.Sequence`
         that is associated with a :class:`.MetaData` gains access to the
         ``bind`` of that :class:`.MetaData`, meaning the
         :meth:`.Sequence.create` and :meth:`.Sequence.drop` methods will
         make usage of that engine automatically.

         .. versionchanged:: 0.7
             Additionally, the appropriate CREATE SEQUENCE/
             DROP SEQUENCE DDL commands will be emitted corresponding to this
             :class:`.Sequence` when :meth:`.MetaData.create_all` and
             :meth:`.MetaData.drop_all` are invoked.

         Note that when a :class:`.Sequence` is applied to a :class:`.Column`,
         the :class:`.Sequence` is automatically associated with the
         :class:`.MetaData` object of that column's parent :class:`.Table`,
         when that association is made.   The :class:`.Sequence` will then
         be subject to automatic CREATE SEQUENCE/DROP SEQUENCE corresponding
         to when the :class:`.Table` object itself is created or dropped,
         rather than that of the :class:`.MetaData` object overall.

        :param for_update: Indicates this :class:`.Sequence`, when associated
         with a :class:`.Column`, should be invoked for UPDATE statements
         on that column's table, rather than for INSERT statements, when
         no value is otherwise present for that column in the statement.

        """
        super(Sequence, self).__init__(for_update=for_update)
        self.name = name
        self.start = start
        self.increment = increment
        self.optional = optional
        self.quote = quote
        if metadata is not None and schema is None and metadata.schema:
            # no explicit schema given: inherit the schema (and its
            # quoting flag) from the associated MetaData
            self.schema = schema = metadata.schema
            self.quote_schema = metadata.quote_schema
        else:
            self.schema = schema
            self.quote_schema = quote_schema
        self.metadata = metadata
        # key used to register this sequence in MetaData._sequences
        self._key = _get_table_key(name, schema)
        if metadata:
            self._set_metadata(metadata)

    @util.memoized_property
    def is_callable(self):
        # a Sequence is never a Python callable default
        return False

    @util.memoized_property
    def is_clause_element(self):
        # a Sequence is not itself a SQL expression; see next_value()
        return False

    def next_value(self):
        """Return a :class:`.next_value` function element
        which will render the appropriate increment function
        for this :class:`.Sequence` within any SQL expression.

        """
        return expression.func.next_value(self, bind=self.bind)

    def _set_parent(self, column):
        super(Sequence, self)._set_parent(column)
        # defer metadata association until the column joins a Table
        column._on_table_attach(self._set_table)

    def _set_table(self, column, table):
        # invoked once the owning column is attached to a Table
        self._set_metadata(table.metadata)

    def _set_metadata(self, metadata):
        self.metadata = metadata
        self.metadata._sequences[self._key] = self

    @property
    def bind(self):
        # delegate to the associated MetaData's bind, if any
        if self.metadata:
            return self.metadata.bind
        else:
            return None

    def create(self, bind=None, checkfirst=True):
        """Creates this sequence in the database."""

        if bind is None:
            bind = _bind_or_error(self)
        bind._run_visitor(ddl.SchemaGenerator,
                          self,
                          checkfirst=checkfirst)

    def drop(self, bind=None, checkfirst=True):
        """Drops this sequence from the database."""

        if bind is None:
            bind = _bind_or_error(self)
        bind._run_visitor(ddl.SchemaDropper,
                          self,
                          checkfirst=checkfirst)

    def _not_a_column_expr(self):
        # raised when a Sequence is used where a column expression is
        # expected; directs users toward func.next_value()
        raise exc.InvalidRequestError(
            "This %s cannot be used directly "
            "as a column expression.  Use func.next_value(sequence) "
            "to produce a 'next value' function that's usable "
            "as a column element."
            % self.__class__.__name__)
class FetchedValue(_NotAColumnExpr, events.SchemaEventTarget):
    """Marker indicating a value produced transparently on the server side.

    Attach a :class:`.FetchedValue` to a column when the database itself
    supplies the value, e.g. via a trigger or other server mechanism::

        Column('foo', Integer, FetchedValue())

    The marker tells SQLAlchemy that ``foo`` receives a new value during
    an INSERT without any client-supplied default.

    .. seealso::

        :ref:`triggered_columns`

    """
    is_server_default = True
    reflected = False
    has_argument = False

    def __init__(self, for_update=False):
        self.for_update = for_update

    def _as_for_update(self, for_update):
        # reuse this instance when the flag already matches;
        # otherwise hand back a flipped copy
        if for_update != self.for_update:
            return self._clone(for_update)
        return self

    def _clone(self, for_update):
        duplicate = self.__class__.__new__(self.__class__)
        duplicate.__dict__.update(self.__dict__)
        # the copy is not yet attached to any column
        duplicate.__dict__.pop('column', None)
        duplicate.for_update = for_update
        return duplicate

    def _set_parent(self, column):
        self.column = column
        target = 'server_onupdate' if self.for_update else 'server_default'
        setattr(self.column, target, self)

    def __repr__(self):
        return util.generic_repr(self)
inspection._self_inspects(FetchedValue)
class DefaultClause(FetchedValue):
    """A server-side DEFAULT value expressed in DDL.

    Like :class:`.FetchedValue`, but additionally renders a "DEFAULT"
    clause when "CREATE TABLE" is emitted.

    :class:`.DefaultClause` is generated automatically
    whenever the ``server_default``, ``server_onupdate`` arguments of
    :class:`.Column` are used.  A :class:`.DefaultClause`
    can be passed positionally as well.

    For example, the following::

        Column('foo', Integer, server_default="50")

    Is equivalent to::

        Column('foo', Integer, DefaultClause("50"))

    """
    has_argument = True

    def __init__(self, arg, for_update=False, _reflected=False):
        # only strings and SQL expression constructs are acceptable
        accepted = (util.string_types[0],
                    expression.ClauseElement,
                    expression.TextClause)
        util.assert_arg_type(arg, accepted, 'arg')
        super(DefaultClause, self).__init__(for_update)
        self.arg = arg
        self.reflected = _reflected

    def __repr__(self):
        return "DefaultClause(%r, for_update=%r)" % \
            (self.arg, self.for_update)
class PassiveDefault(DefaultClause):
    """A DDL-specified DEFAULT column value.

    .. deprecated:: 0.6
        :class:`.PassiveDefault` is deprecated.
        Use :class:`.DefaultClause`.
    """
    @util.deprecated("0.6",
                     ":class:`.PassiveDefault` is deprecated. "
                     "Use :class:`.DefaultClause`.",
                     False)
    def __init__(self, *arg, **kw):
        # emit the deprecation warning (via the decorator above), then
        # delegate construction entirely to DefaultClause.
        DefaultClause.__init__(self, *arg, **kw)
class Constraint(SchemaItem):
    """A table-level SQL constraint."""

    __visit_name__ = 'constraint'

    def __init__(self, name=None, deferrable=None, initially=None,
                 _create_rule=None,
                 **kw):
        """Create a SQL constraint.

        :param name:
          Optional, the in-database name of this ``Constraint``.

        :param deferrable:
          Optional bool.  If set, emit DEFERRABLE or NOT DEFERRABLE when
          issuing DDL for this constraint.

        :param initially:
          Optional string.  If set, emit INITIALLY <value> when issuing DDL
          for this constraint.

        :param _create_rule:
          a callable which is passed the DDLCompiler object during
          compilation. Returns True or False to signal inline generation of
          this Constraint.

          The AddConstraint and DropConstraint DDL constructs provide
          DDLElement's more comprehensive "conditional DDL" approach that is
          passed a database connection when DDL is being issued. _create_rule
          is instead called during any CREATE TABLE compilation, where there
          may not be any transaction/connection in progress. However, it
          allows conditional compilation of the constraint even for backends
          which do not support addition of constraints through ALTER TABLE,
          which currently includes SQLite.

          _create_rule is used by some types to create constraints.
          Currently, its call signature is subject to change at any time.

        :param \**kwargs:
          Dialect-specific keyword parameters, see the documentation
          for various dialects and constraints regarding options here.

        """

        self.name = name
        self.deferrable = deferrable
        self.initially = initially
        self._create_rule = _create_rule
        # record creation order so constraints render deterministically
        util.set_creation_order(self)
        _validate_dialect_kwargs(kw, self.__class__.__name__)
        self.kwargs = kw

    @property
    def table(self):
        # ``parent`` may be absent (unattached) or a Column rather than a
        # Table; both cases fall through to the error below.
        try:
            if isinstance(self.parent, Table):
                return self.parent
        except AttributeError:
            pass
        raise exc.InvalidRequestError(
            "This constraint is not bound to a table.  Did you "
            "mean to call table.append_constraint(constraint) ?")

    def _set_parent(self, parent):
        self.parent = parent
        parent.constraints.add(self)

    def copy(self, **kw):
        # subclasses are responsible for providing a copy implementation
        raise NotImplementedError()
class ColumnCollectionMixin(object):
    """Mixin maintaining a ``ColumnCollection`` built from column names
    and/or :class:`.Column` objects, resolved against a parent table."""

    def __init__(self, *columns):
        self.columns = expression.ColumnCollection()
        pending = [_to_schema_column_or_string(c) for c in columns]
        self._pending_colargs = pending
        if pending:
            first = pending[0]
            # if we were handed Column objects already bound to a Table,
            # attach ourselves to that table right away
            if isinstance(first, Column) and isinstance(first.table, Table):
                self._set_parent_with_dispatch(first.table)

    def _set_parent(self, table):
        for item in self._pending_colargs:
            # string names are resolved against the table's columns
            resolved = table.c[item] \
                if isinstance(item, util.string_types) else item
            self.columns.add(resolved)
class ColumnCollectionConstraint(ColumnCollectionMixin, Constraint):
    """A constraint that proxies a ColumnCollection."""

    def __init__(self, *columns, **kw):
        """
        :param \*columns:
          A sequence of column names or Column objects.

        :param name:
          Optional, the in-database name of this constraint.

        :param deferrable:
          Optional bool.  If set, emit DEFERRABLE or NOT DEFERRABLE when
          issuing DDL for this constraint.

        :param initially:
          Optional string.  If set, emit INITIALLY <value> when issuing DDL
          for this constraint.

        """
        ColumnCollectionMixin.__init__(self, *columns)
        Constraint.__init__(self, **kw)

    def _set_parent(self, table):
        # attach both halves: resolve pending columns, then register the
        # constraint on the table
        ColumnCollectionMixin._set_parent(self, table)
        Constraint._set_parent(self, table)

    def __contains__(self, x):
        # membership is delegated to the underlying ColumnCollection
        return x in self.columns

    def copy(self, **kw):
        # columns are carried over by key; the copy is expected to be
        # re-associated with a table afterwards
        c = self.__class__(name=self.name, deferrable=self.deferrable,
                           initially=self.initially, *self.columns.keys())
        c.dispatch._update(self.dispatch)
        return c

    def contains_column(self, col):
        # object-based check against the collection, distinct from the
        # key-based __contains__ above
        return self.columns.contains_column(col)

    def __iter__(self):
        # inlining of
        # return iter(self.columns)
        # ColumnCollection->OrderedProperties->OrderedDict
        ordered_dict = self.columns._data
        return (ordered_dict[key] for key in ordered_dict._list)

    def __len__(self):
        return len(self.columns._data)
class CheckConstraint(Constraint):
    """A table- or column-level CHECK constraint.

    Can be included in the definition of a Table or Column.
    """

    def __init__(self, sqltext, name=None, deferrable=None,
                 initially=None, table=None, _create_rule=None,
                 _autoattach=True):
        """Construct a CHECK constraint.

        :param sqltext:
          A string containing the constraint definition, which will be used
          verbatim, or a SQL expression construct.

        :param name:
          Optional, the in-database name of the constraint.

        :param deferrable:
          Optional bool.  If set, emit DEFERRABLE or NOT DEFERRABLE when
          issuing DDL for this constraint.

        :param initially:
          Optional string.  If set, emit INITIALLY <value> when issuing DDL
          for this constraint.

        """

        super(CheckConstraint, self).\
            __init__(name, deferrable, initially, _create_rule)
        self.sqltext = expression._literal_as_text(sqltext)
        if table is not None:
            self._set_parent_with_dispatch(table)
        elif _autoattach:
            # no explicit table: if the expression references columns of
            # exactly one Table, attach to that table automatically
            cols = sqlutil.find_columns(self.sqltext)
            tables = set([c.table for c in cols
                          if isinstance(c.table, Table)])
            if len(tables) == 1:
                self._set_parent_with_dispatch(
                    tables.pop())

    def __visit_name__(self):
        # dispatch name depends on whether this constraint was attached
        # at the table level or at the column level
        if isinstance(self.parent, Table):
            return "check_constraint"
        else:
            return "column_check_constraint"
    __visit_name__ = property(__visit_name__)

    def copy(self, target_table=None, **kw):
        if target_table is not None:
            # rewrite column references in the SQL text so they point at
            # the corresponding columns of the target table
            def replace(col):
                if self.table.c.contains_column(col):
                    return target_table.c[col.key]
                else:
                    return None
            sqltext = visitors.replacement_traverse(self.sqltext, {}, replace)
        else:
            sqltext = self.sqltext
        # _autoattach=False: attachment is controlled by the table argument
        c = CheckConstraint(sqltext,
                            name=self.name,
                            initially=self.initially,
                            deferrable=self.deferrable,
                            _create_rule=self._create_rule,
                            table=target_table,
                            _autoattach=False)
        c.dispatch._update(self.dispatch)
        return c
class ForeignKeyConstraint(Constraint):
    """A table-level FOREIGN KEY constraint.

    Defines a single column or composite FOREIGN KEY ... REFERENCES
    constraint. For a no-frills, single column foreign key, adding a
    :class:`.ForeignKey` to the definition of a :class:`.Column` is a shorthand
    equivalent for an unnamed, single column :class:`.ForeignKeyConstraint`.

    Examples of foreign key configuration are in :ref:`metadata_foreignkeys`.

    """
    __visit_name__ = 'foreign_key_constraint'

    def __init__(self, columns, refcolumns, name=None, onupdate=None,
                 ondelete=None, deferrable=None, initially=None, use_alter=False,
                 link_to_name=False, match=None, table=None):
        """Construct a composite-capable FOREIGN KEY.

        :param columns: A sequence of local column names. The named columns
          must be defined and present in the parent Table. The names should
          match the ``key`` given to each column (defaults to the name) unless
          ``link_to_name`` is True.

        :param refcolumns: A sequence of foreign column names or Column
          objects. The columns must all be located within the same Table.

        :param name: Optional, the in-database name of the key.

        :param onupdate: Optional string. If set, emit ON UPDATE <value> when
          issuing DDL for this constraint. Typical values include CASCADE,
          DELETE and RESTRICT.

        :param ondelete: Optional string. If set, emit ON DELETE <value> when
          issuing DDL for this constraint. Typical values include CASCADE,
          DELETE and RESTRICT.

        :param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
          DEFERRABLE when issuing DDL for this constraint.

        :param initially: Optional string. If set, emit INITIALLY <value> when
          issuing DDL for this constraint.

        :param link_to_name: if True, the string name given in ``column`` is
          the rendered name of the referenced column, not its locally assigned
          ``key``.

        :param use_alter: If True, do not emit the DDL for this constraint as
          part of the CREATE TABLE definition. Instead, generate it via an
          ALTER TABLE statement issued after the full collection of tables
          have been created, and drop it via an ALTER TABLE statement before
          the full collection of tables are dropped. This is shorthand for the
          usage of :class:`.AddConstraint` and :class:`.DropConstraint` applied
          as "after-create" and "before-drop" events on the MetaData object.
          This is normally used to generate/drop constraints on objects that
          are mutually dependent on each other.

        :param match: Optional string. If set, emit MATCH <value> when issuing
          DDL for this constraint. Typical values include SIMPLE, PARTIAL
          and FULL.

        """
        super(ForeignKeyConstraint, self).\
            __init__(name, deferrable, initially)

        self.onupdate = onupdate
        self.ondelete = ondelete
        self.link_to_name = link_to_name
        if self.name is None and use_alter:
            raise exc.ArgumentError("Alterable Constraint requires a name")
        self.use_alter = use_alter
        self.match = match

        # maps local column (or its string name) -> ForeignKey element
        self._elements = util.OrderedDict()

        # standalone ForeignKeyConstraint - create
        # associated ForeignKey objects which will be applied to hosted
        # Column objects (in col.foreign_keys), either now or when attached
        # to the Table for string-specified names
        for col, refcol in zip(columns, refcolumns):
            self._elements[col] = ForeignKey(
                refcol,
                _constraint=self,
                name=self.name,
                onupdate=self.onupdate,
                ondelete=self.ondelete,
                use_alter=self.use_alter,
                link_to_name=self.link_to_name,
                match=self.match
            )

        if table is not None:
            self._set_parent_with_dispatch(table)
        elif columns and \
                isinstance(columns[0], Column) and \
                columns[0].table is not None:
            # Column objects already bound to a table: attach immediately
            self._set_parent_with_dispatch(columns[0].table)

    def _validate_dest_table(self, table):
        # all referenced columns must belong to a single remote table;
        # None entries (unresolved string specs) are tolerated
        table_keys = set([elem._table_key() for elem in self._elements.values()])
        if None not in table_keys and len(table_keys) > 1:
            elem0, elem1 = sorted(table_keys)[0:2]
            raise exc.ArgumentError(
                'ForeignKeyConstraint on %s(%s) refers to '
                'multiple remote tables: %s and %s' % (
                    table.fullname,
                    self._col_description,
                    elem0,
                    elem1
                ))

    @property
    def _col_description(self):
        # comma-separated local column names, for error messages
        return ", ".join(self._elements)

    @property
    def columns(self):
        # local columns (or their string names), in declaration order
        return list(self._elements)

    @property
    def elements(self):
        # the ForeignKey objects comprising this constraint
        return list(self._elements.values())

    def _set_parent(self, table):
        super(ForeignKeyConstraint, self)._set_parent(table)

        self._validate_dest_table(table)

        for col, fk in self._elements.items():
            # string-specified column names now get
            # resolved to Column objects
            if isinstance(col, util.string_types):
                try:
                    col = table.c[col]
                except KeyError:
                    raise exc.ArgumentError(
                        "Can't create ForeignKeyConstraint "
                        "on table '%s': no column "
                        "named '%s' is present." % (table.description, col))

            if not hasattr(fk, 'parent') or \
                    fk.parent is not col:
                fk._set_parent_with_dispatch(col)

        if self.use_alter:
            # emit this constraint as ALTER TABLE after table creation,
            # and drop it before tables are dropped - only on dialects
            # that support ALTER
            def supports_alter(ddl, event, schema_item, bind, **kw):
                return table in set(kw['tables']) and \
                    bind.dialect.supports_alter

            event.listen(table.metadata, "after_create",
                         AddConstraint(self, on=supports_alter))
            event.listen(table.metadata, "before_drop",
                         DropConstraint(self, on=supports_alter))

    def copy(self, schema=None, **kw):
        fkc = ForeignKeyConstraint(
            [x.parent.key for x in self._elements.values()],
            [x._get_colspec(schema=schema) for x in self._elements.values()],
            name=self.name,
            onupdate=self.onupdate,
            ondelete=self.ondelete,
            use_alter=self.use_alter,
            deferrable=self.deferrable,
            initially=self.initially,
            link_to_name=self.link_to_name,
            match=self.match
        )
        fkc.dispatch._update(self.dispatch)
        return fkc
class PrimaryKeyConstraint(ColumnCollectionConstraint):
    """A table-level PRIMARY KEY constraint.

    Represents a single-column or composite PRIMARY KEY.  Marking one or
    more ``Column`` definitions with ``primary_key=True`` is shorthand
    for an unnamed PrimaryKeyConstraint over those columns.

    """

    __visit_name__ = 'primary_key_constraint'

    def _set_parent(self, table):
        super(PrimaryKeyConstraint, self)._set_parent(table)

        # displace any previously-installed primary key on the table
        existing = table.primary_key
        if existing in table.constraints:
            table.constraints.remove(existing)
        table.primary_key = self
        table.constraints.add(self)

        # flag each member column as participating in the primary key
        for col in self.columns:
            col.primary_key = True

    def _replace(self, col):
        self.columns.replace(col)
class UniqueConstraint(ColumnCollectionConstraint):
    """A table-level UNIQUE constraint.

    Defines a single column or composite UNIQUE constraint. For a no-frills,
    single column constraint, adding ``unique=True`` to the ``Column``
    definition is a shorthand equivalent for an unnamed, single column
    UniqueConstraint.
    """

    # all behavior is inherited from ColumnCollectionConstraint; only
    # the compiler dispatch name differs
    __visit_name__ = 'unique_constraint'
class Index(ColumnCollectionMixin, SchemaItem):
    """A table-level INDEX.

    Defines a composite (one or more column) INDEX. For a no-frills, single
    column index, adding ``index=True`` to the ``Column`` definition is
    a shorthand equivalent for an unnamed, single column :class:`.Index`.

    .. seealso::

        :ref:`schema_indexes` - General information on :class:`.Index`.

        :ref:`postgresql_indexes` - PostgreSQL-specific options available for the
        :class:`.Index` construct.

        :ref:`mysql_indexes` - MySQL-specific options available for the
        :class:`.Index` construct.

        :ref:`mssql_indexes` - MSSQL-specific options available for the
        :class:`.Index` construct.

    """

    __visit_name__ = 'index'

    def __init__(self, name, *expressions, **kw):
        """Construct an index object.

        :param name:
          The name of the index

        :param \*expressions:
          Column expressions to include in the index.   The expressions
          are normally instances of :class:`.Column`, but may also
          be arbitrary SQL expressions which ultmately refer to a
          :class:`.Column`.

          .. versionadded:: 0.8 :class:`.Index` supports SQL expressions as
             well as plain columns.

        :param unique:
            Defaults to False: create a unique index.

        :param \**kw:
            Other keyword arguments may be interpreted by specific dialects.

        """
        self.table = None

        # extract one representative Column from each expression; plain
        # (non-ClauseElement) arguments such as strings pass through as-is
        columns = []
        for expr in expressions:
            if not isinstance(expr, expression.ClauseElement):
                columns.append(expr)
            else:
                cols = []
                visitors.traverse(expr, {}, {'column': cols.append})
                if cols:
                    columns.append(cols[0])
                else:
                    # expression references no Column; keep it verbatim
                    columns.append(expr)

        self.expressions = expressions

        # will call _set_parent() if table-bound column
        # objects are present
        ColumnCollectionMixin.__init__(self, *columns)

        self.name = name
        self.unique = kw.pop('unique', False)
        self.kwargs = kw

    def _set_parent(self, table):
        ColumnCollectionMixin._set_parent(self, table)

        # an Index can only belong to a single table
        if self.table is not None and table is not self.table:
            raise exc.ArgumentError(
                "Index '%s' is against table '%s', and "
                "cannot be associated with table '%s'." % (
                    self.name,
                    self.table.description,
                    table.description
                )
            )
        self.table = table
        for c in self.columns:
            if c.table != self.table:
                raise exc.ArgumentError(
                    "Column '%s' is not part of table '%s'." %
                    (c, self.table.description)
                )
        table.indexes.add(self)

        # now that string column names are resolved, substitute the
        # resolved Column for each non-ClauseElement expression entry
        self.expressions = [
            expr if isinstance(expr, expression.ClauseElement)
            else colexpr
            for expr, colexpr in zip(self.expressions, self.columns)
        ]

    @property
    def bind(self):
        """Return the connectable associated with this Index."""

        return self.table.bind

    def create(self, bind=None):
        """Issue a ``CREATE`` statement for this
        :class:`.Index`, using the given :class:`.Connectable`
        for connectivity.

        See also :meth:`.MetaData.create_all`.

        """
        if bind is None:
            bind = _bind_or_error(self)
        bind._run_visitor(ddl.SchemaGenerator, self)
        return self

    def drop(self, bind=None):
        """Issue a ``DROP`` statement for this
        :class:`.Index`, using the given :class:`.Connectable`
        for connectivity.

        See also :meth:`.MetaData.drop_all`.

        """
        if bind is None:
            bind = _bind_or_error(self)
        bind._run_visitor(ddl.SchemaDropper, self)

    def __repr__(self):
        return 'Index(%s)' % (
            ", ".join(
                [repr(self.name)] +
                [repr(c) for c in self.columns] +
                (self.unique and ["unique=True"] or [])
            ))
class MetaData(SchemaItem):
"""A collection of :class:`.Table` objects and their associated schema
constructs.
Holds a collection of :class:`.Table` objects as well as
an optional binding to an :class:`.Engine` or
:class:`.Connection`. If bound, the :class:`.Table` objects
in the collection and their columns may participate in implicit SQL
execution.
The :class:`.Table` objects themselves are stored in the
``metadata.tables`` dictionary.
The ``bind`` property may be assigned to dynamically. A common pattern is
to start unbound and then bind later when an engine is available::
metadata = MetaData()
# define tables
Table('mytable', metadata, ...)
# connect to an engine later, perhaps after loading a URL from a
# configuration file
metadata.bind = an_engine
MetaData is a thread-safe object after tables have been explicitly defined
or loaded via reflection.
See also:
:ref:`metadata_describing` - Introduction to database metadata
.. index::
single: thread safety; MetaData
"""
__visit_name__ = 'metadata'
def __init__(self, bind=None, reflect=False, schema=None,
quote_schema=None):
"""Create a new MetaData object.
:param bind:
An Engine or Connection to bind to. May also be a string or URL
instance, these are passed to create_engine() and this MetaData will
be bound to the resulting engine.
:param reflect:
Optional, automatically load all tables from the bound database.
Defaults to False. ``bind`` is required when this option is set.
.. deprecated:: 0.8
Please use the :meth:`.MetaData.reflect` method.
:param schema:
The default schema to use for the :class:`.Table`,
:class:`.Sequence`, and other objects associated with this
:class:`.MetaData`. Defaults to ``None``.
:param quote_schema:
Sets the ``quote_schema`` flag for those :class:`.Table`,
:class:`.Sequence`, and other objects which make usage of the
local ``schema`` name.
.. versionadded:: 0.7.4
``schema`` and ``quote_schema`` parameters.
"""
self.tables = util.immutabledict()
self.schema = schema
self.quote_schema = quote_schema
self._schemas = set()
self._sequences = {}
self._fk_memos = collections.defaultdict(list)
self.bind = bind
if reflect:
util.warn("reflect=True is deprecate; please "
"use the reflect() method.")
if not bind:
raise exc.ArgumentError(
"A bind must be supplied in conjunction "
"with reflect=True")
self.reflect()
def __repr__(self):
return 'MetaData(bind=%r)' % self.bind
def __contains__(self, table_or_key):
if not isinstance(table_or_key, util.string_types):
table_or_key = table_or_key.key
return table_or_key in self.tables
def _add_table(self, name, schema, table):
key = _get_table_key(name, schema)
dict.__setitem__(self.tables, key, table)
if schema:
self._schemas.add(schema)
def _remove_table(self, name, schema):
key = _get_table_key(name, schema)
removed = dict.pop(self.tables, key, None)
if removed is not None:
for fk in removed.foreign_keys:
fk._remove_from_metadata(self)
if self._schemas:
self._schemas = set([t.schema
for t in self.tables.values()
if t.schema is not None])
def __getstate__(self):
return {'tables': self.tables,
'schema': self.schema,
'quote_schema': self.quote_schema,
'schemas': self._schemas,
'sequences': self._sequences,
'fk_memos': self._fk_memos}
def __setstate__(self, state):
self.tables = state['tables']
self.schema = state['schema']
self.quote_schema = state['quote_schema']
self._bind = None
self._sequences = state['sequences']
self._schemas = state['schemas']
self._fk_memos = state['fk_memos']
def is_bound(self):
"""True if this MetaData is bound to an Engine or Connection."""
return self._bind is not None
def bind(self):
"""An :class:`.Engine` or :class:`.Connection` to which this
:class:`.MetaData` is bound.
Typically, a :class:`.Engine` is assigned to this attribute
so that "implicit execution" may be used, or alternatively
as a means of providing engine binding information to an
ORM :class:`.Session` object::
engine = create_engine("someurl://")
metadata.bind = engine
.. seealso::
:ref:`dbengine_implicit` - background on "bound metadata"
"""
return self._bind
def _bind_to(self, bind):
"""Bind this MetaData to an Engine, Connection, string or URL."""
if isinstance(bind, util.string_types + (url.URL, )):
from sqlalchemy import create_engine
self._bind = create_engine(bind)
else:
self._bind = bind
bind = property(bind, _bind_to)
def clear(self):
"""Clear all Table objects from this MetaData."""
dict.clear(self.tables)
self._schemas.clear()
self._fk_memos.clear()
def remove(self, table):
"""Remove the given Table object from this MetaData."""
self._remove_table(table.name, table.schema)
@property
def sorted_tables(self):
"""Returns a list of :class:`.Table` objects sorted in order of
foreign key dependency.
The sorting will place :class:`.Table` objects that have dependencies
first, before the dependencies themselves, representing the
order in which they can be created. To get the order in which
the tables would be dropped, use the ``reversed()`` Python built-in.
.. seealso::
:meth:`.Inspector.sorted_tables`
"""
return sqlutil.sort_tables(self.tables.values())
def reflect(self, bind=None, schema=None, views=False, only=None):
"""Load all available table definitions from the database.
Automatically creates ``Table`` entries in this ``MetaData`` for any
table available in the database but not yet present in the
``MetaData``. May be called multiple times to pick up tables recently
added to the database, however no special action is taken if a table
in this ``MetaData`` no longer exists in the database.
:param bind:
A :class:`.Connectable` used to access the database; if None, uses
the existing bind on this ``MetaData``, if any.
:param schema:
Optional, query and reflect tables from an alterate schema.
If None, the schema associated with this :class:`.MetaData`
is used, if any.
:param views:
If True, also reflect views.
:param only:
Optional. Load only a sub-set of available named tables. May be
specified as a sequence of names or a callable.
If a sequence of names is provided, only those tables will be
reflected. An error is raised if a table is requested but not
available. Named tables already present in this ``MetaData`` are
ignored.
If a callable is provided, it will be used as a boolean predicate to
filter the list of potential table names. The callable is called
with a table name and this ``MetaData`` instance as positional
arguments and should return a true value for any table to reflect.
"""
if bind is None:
bind = _bind_or_error(self)
with bind.connect() as conn:
reflect_opts = {
'autoload': True,
'autoload_with': conn
}
if schema is None:
schema = self.schema
if schema is not None:
reflect_opts['schema'] = schema
available = util.OrderedSet(bind.engine.table_names(schema,
connection=conn))
if views:
available.update(
bind.dialect.get_view_names(conn, schema)
)
if schema is not None:
available_w_schema = util.OrderedSet(["%s.%s" % (schema, name)
for name in available])
else:
available_w_schema = available
current = set(self.tables)
if only is None:
load = [name for name, schname in
zip(available, available_w_schema)
if schname not in current]
elif util.callable(only):
load = [name for name, schname in
zip(available, available_w_schema)
if schname not in current and only(name, self)]
else:
missing = [name for name in only if name not in available]
if missing:
s = schema and (" schema '%s'" % schema) or ''
raise exc.InvalidRequestError(
'Could not reflect: requested table(s) not available '
'in %s%s: (%s)' %
(bind.engine.url, s, ', '.join(missing)))
load = [name for name in only if name not in current]
for name in load:
Table(name, self, **reflect_opts)
    def append_ddl_listener(self, event_name, listener):
        """Append a DDL event listener to this ``MetaData``.
        .. deprecated:: 0.7
            See :class:`.DDLEvents`.
        """
        # Adapt the legacy listener signature onto the modern event system.
        def adapt_listener(target, connection, **kw):
            tables = kw['tables']
            # NOTE(review): the legacy positional slot receives the ``event``
            # module object here, matching the historical call convention —
            # confirm against callers before changing.
            listener(event, target, connection, tables=tables)
        # Legacy names used hyphens ('before-create'); modern event names use
        # underscores ('before_create').
        event.listen(self, "" + event_name.replace('-', '_'), adapt_listener)
    def create_all(self, bind=None, tables=None, checkfirst=True):
        """Create all tables stored in this metadata.
        Conditional by default, will not attempt to recreate tables already
        present in the target database.
        :param bind:
          A :class:`.Connectable` used to access the
          database; if None, uses the existing bind on this ``MetaData``, if
          any.
        :param tables:
          Optional list of ``Table`` objects, which is a subset of the total
          tables in the ``MetaData`` (others are ignored).
        :param checkfirst:
          Defaults to True, don't issue CREATEs for tables already present
          in the target database.
        """
        if bind is None:
            bind = _bind_or_error(self)
        # SchemaGenerator emits the CREATE statements for this MetaData
        # against the resolved bind.
        bind._run_visitor(ddl.SchemaGenerator,
                          self,
                          checkfirst=checkfirst,
                          tables=tables)
    def drop_all(self, bind=None, tables=None, checkfirst=True):
        """Drop all tables stored in this metadata.
        Conditional by default, will not attempt to drop tables not present in
        the target database.
        :param bind:
          A :class:`.Connectable` used to access the
          database; if None, uses the existing bind on this ``MetaData``, if
          any.
        :param tables:
          Optional list of ``Table`` objects, which is a subset of the
          total tables in the ``MetaData`` (others are ignored).
        :param checkfirst:
          Defaults to True, only issue DROPs for tables confirmed to be
          present in the target database.
        """
        if bind is None:
            bind = _bind_or_error(self)
        # SchemaDropper mirrors create_all(): it emits the DROP statements.
        bind._run_visitor(ddl.SchemaDropper,
                          self,
                          checkfirst=checkfirst,
                          tables=tables)
class ThreadLocalMetaData(MetaData):
    """A MetaData variant that presents a different ``bind`` in every thread.
    Makes the ``bind`` property of the MetaData a thread-local value, allowing
    this collection of tables to be bound to different ``Engine``
    implementations or connections in each thread.
    The ThreadLocalMetaData starts off bound to None in each thread.  Binds
    must be made explicitly by assigning to the ``bind`` property or using
    ``connect()``.  You can also re-bind dynamically multiple times per
    thread, just like a regular ``MetaData``.
    """
    __visit_name__ = 'metadata'
    def __init__(self):
        """Construct a ThreadLocalMetaData."""
        # ``context`` holds the per-thread engine; ``__engines`` (name-mangled)
        # caches engines across threads, keyed by URL string or engine object.
        self.context = util.threading.local()
        self.__engines = {}
        super(ThreadLocalMetaData, self).__init__()
    def bind(self):
        """The bound Engine or Connection for this thread.
        This property may be assigned an Engine or Connection, or assigned a
        string or URL to automatically create a basic Engine for this bind
        with ``create_engine()``."""
        return getattr(self.context, '_engine', None)
    def _bind_to(self, bind):
        """Bind to a Connectable in the caller's thread."""
        if isinstance(bind, util.string_types + (url.URL, )):
            # String/URL binds reuse a cached engine when one exists.
            try:
                self.context._engine = self.__engines[bind]
            except KeyError:
                from sqlalchemy import create_engine
                e = create_engine(bind)
                self.__engines[bind] = e
                self.context._engine = e
        else:
            # TODO: this is squirrely.  we shouldnt have to hold onto engines
            # in a case like this
            if bind not in self.__engines:
                self.__engines[bind] = bind
            self.context._engine = bind
    # Assemble the two functions above into a read/write property.
    bind = property(bind, _bind_to)
    def is_bound(self):
        """True if there is a bind for this thread."""
        return (hasattr(self.context, '_engine') and
                self.context._engine is not None)
    def dispose(self):
        """Dispose all bound engines, in all thread contexts."""
        for e in self.__engines.values():
            # Raw Connections cached here have no dispose(); skip them.
            if hasattr(e, 'dispose'):
                e.dispose()
class SchemaVisitor(visitors.ClauseVisitor):
    """Define the visiting for ``SchemaItem`` objects."""
    # Flags traversal as schema-level so ClauseVisitor dispatches to the
    # schema-item visit methods rather than SQL-expression ones.
    __traverse_options__ = {'schema_visitor': True}
class _DDLCompiles(expression.ClauseElement):
    """Mixin routing compilation through the dialect's DDL compiler."""
    def _compiler(self, dialect, **kw):
        """Return a compiler appropriate for this ClauseElement, given a
        Dialect."""
        # DDL constructs compile via ddl_compiler, not statement_compiler.
        return dialect.ddl_compiler(dialect, self, **kw)
class DDLElement(expression.Executable, _DDLCompiles):
    """Base class for DDL expression constructs.
    This class is the base for the general purpose :class:`.DDL` class,
    as well as the various create/drop clause constructs such as
    :class:`.CreateTable`, :class:`.DropTable`, :class:`.AddConstraint`,
    etc.
    :class:`.DDLElement` integrates closely with SQLAlchemy events,
    introduced in :ref:`event_toplevel`.  An instance of one is
    itself an event receiving callable::
        event.listen(
            users,
            'after_create',
            AddConstraint(constraint).execute_if(dialect='postgresql')
        )
    See also:
    :class:`.DDL`
    :class:`.DDLEvents`
    :ref:`event_toplevel`
    :ref:`schema_ddl_sequences`
    """
    # DDL statements execute with autocommit enabled by default.
    _execution_options = expression.Executable.\
        _execution_options.union({'autocommit': True})
    # Defaults for the generative state set by against()/execute_if() and the
    # deprecated 'on' criteria.
    target = None
    on = None
    dialect = None
    callable_ = None
    def execute(self, bind=None, target=None):
        """Execute this DDL immediately.
        Executes the DDL statement in isolation using the supplied
        :class:`.Connectable` or
        :class:`.Connectable` assigned to the ``.bind``
        property, if not supplied. If the DDL has a conditional ``on``
        criteria, it will be invoked with None as the event.
        :param bind:
          Optional, an ``Engine`` or ``Connection``. If not supplied, a valid
          :class:`.Connectable` must be present in the
          ``.bind`` property.
        :param target:
          Optional, defaults to None. The target SchemaItem for the
          execute call. Will be passed to the ``on`` callable if any,
          and may also provide string expansion data for the
          statement. See ``execute_at`` for more information.
        """
        if bind is None:
            bind = _bind_or_error(self)
        if self._should_execute(target, bind):
            return bind.execute(self.against(target))
        else:
            bind.engine.logger.info(
                "DDL execution skipped, criteria not met.")
    @util.deprecated("0.7", "See :class:`.DDLEvents`, as well as "
                     ":meth:`.DDLElement.execute_if`.")
    def execute_at(self, event_name, target):
        """Link execution of this DDL to the DDL lifecycle of a SchemaItem.
        Links this ``DDLElement`` to a ``Table`` or ``MetaData`` instance,
        executing it when that schema item is created or dropped. The DDL
        statement will be executed using the same Connection and transactional
        context as the Table create/drop itself. The ``.bind`` property of
        this statement is ignored.
        :param event:
          One of the events defined in the schema item's ``.ddl_events``;
          e.g. 'before-create', 'after-create', 'before-drop' or 'after-drop'
        :param target:
          The Table or MetaData instance for which this DDLElement will
          be associated with.
        A DDLElement instance can be linked to any number of schema items.
        ``execute_at`` builds on the ``append_ddl_listener`` interface of
        :class:`.MetaData` and :class:`.Table` objects.
        Caveat: Creating or dropping a Table in isolation will also trigger
        any DDL set to ``execute_at`` that Table's MetaData.  This may change
        in a future release.
        """
        # Bridge the deprecated 'on' criteria onto the modern event system.
        def call_event(target, connection, **kw):
            if self._should_execute_deprecated(event_name,
                                               target, connection, **kw):
                return connection.execute(self.against(target))
        event.listen(target, "" + event_name.replace('-', '_'), call_event)
    @expression._generative
    def against(self, target):
        """Return a copy of this DDL against a specific schema item."""
        self.target = target
    @expression._generative
    def execute_if(self, dialect=None, callable_=None, state=None):
        """Return a callable that will execute this
        DDLElement conditionally.
        Used to provide a wrapper for event listening::
            event.listen(
                        metadata,
                        'before_create',
                        DDL("my_ddl").execute_if(dialect='postgresql')
                    )
        :param dialect: May be a string, tuple or a callable
          predicate.  If a string, it will be compared to the name of the
          executing database dialect::
            DDL('something').execute_if(dialect='postgresql')
          If a tuple, specifies multiple dialect names::
            DDL('something').execute_if(dialect=('postgresql', 'mysql'))
        :param callable_: A callable, which will be invoked with
          four positional arguments as well as optional keyword
          arguments:
            :ddl:
              This DDL element.
            :target:
              The :class:`.Table` or :class:`.MetaData` object which is the
              target of this event. May be None if the DDL is executed
              explicitly.
            :bind:
              The :class:`.Connection` being used for DDL execution
            :tables:
              Optional keyword argument - a list of Table objects which are to
              be created/ dropped within a MetaData.create_all() or drop_all()
              method call.
            :state:
              Optional keyword argument - will be the ``state`` argument
              passed to this function.
            :checkfirst:
              Keyword argument, will be True if the 'checkfirst' flag was
              set during the call to ``create()``, ``create_all()``,
              ``drop()``, ``drop_all()``.
          If the callable returns a true value, the DDL statement will be
          executed.
        :param state: any value which will be passed to the callable_
          as the ``state`` keyword argument.
        See also:
            :class:`.DDLEvents`
            :ref:`event_toplevel`
        """
        self.dialect = dialect
        self.callable_ = callable_
        self.state = state
    def _should_execute(self, target, bind, **kw):
        # Deprecated 'on' criteria is consulted first, then the execute_if()
        # dialect filter (string or collection), then the user callable.
        if self.on is not None and \
            not self._should_execute_deprecated(None, target, bind, **kw):
            return False
        if isinstance(self.dialect, util.string_types):
            if self.dialect != bind.engine.name:
                return False
        elif isinstance(self.dialect, (tuple, list, set)):
            if bind.engine.name not in self.dialect:
                return False
        if self.callable_ is not None and \
            not self.callable_(self, target, bind, state=self.state, **kw):
            return False
        return True
    def _should_execute_deprecated(self, event, target, bind, **kw):
        # Legacy 'on' criteria: dialect name, collection of names, or callable.
        if self.on is None:
            return True
        elif isinstance(self.on, util.string_types):
            return self.on == bind.engine.name
        elif isinstance(self.on, (tuple, list, set)):
            return bind.engine.name in self.on
        else:
            return self.on(self, event, target, bind, **kw)
    def __call__(self, target, bind, **kw):
        """Execute the DDL as a ddl_listener."""
        if self._should_execute(target, bind, **kw):
            return bind.execute(self.against(target))
    def _check_ddl_on(self, on):
        # Validate the deprecated 'on' argument eagerly at construction time.
        if (on is not None and
            (not isinstance(on, util.string_types + (tuple, list, set)) and
             not util.callable(on))):
            raise exc.ArgumentError(
                "Expected the name of a database dialect, a tuple "
                "of names, or a callable for "
                "'on' criteria, got type '%s'." % type(on).__name__)
    # Getter half of the read/write ``bind`` property assembled below.
    def bind(self):
        if self._bind:
            return self._bind
    def _set_bind(self, bind):
        self._bind = bind
    bind = property(bind, _set_bind)
    def _generate(self):
        # Shallow copy used by the @_generative methods (against, execute_if)
        # so each call returns a new, independently-configured element.
        s = self.__class__.__new__(self.__class__)
        s.__dict__ = self.__dict__.copy()
        return s
class DDL(DDLElement):
    """A literal DDL statement.
    Specifies literal SQL DDL to be executed by the database.  DDL objects
    function as DDL event listeners, and can be subscribed to those events
    listed in :class:`.DDLEvents`, using either :class:`.Table` or
    :class:`.MetaData` objects as targets.  Basic templating support allows
    a single DDL instance to handle repetitive tasks for multiple tables.
    Examples::
        from sqlalchemy import event, DDL
        tbl = Table('users', metadata, Column('uid', Integer))
        event.listen(tbl, 'before_create', DDL('DROP TRIGGER users_trigger'))
        spow = DDL('ALTER TABLE %(table)s SET secretpowers TRUE')
        event.listen(tbl, 'after_create', spow.execute_if(dialect='somedb'))
        drop_spow = DDL('ALTER TABLE users SET secretpowers FALSE')
        connection.execute(drop_spow)
    When operating on Table events, the following ``statement``
    string substitutions are available::
        %(table)s  - the Table name, with any required quoting applied
        %(schema)s - the schema name, with any required quoting applied
        %(fullname)s - the Table name including schema, quoted if needed
    The DDL's "context", if any, will be combined with the standard
    substitutions noted above.  Keys present in the context will override
    the standard substitutions.
    """
    __visit_name__ = "ddl"
    def __init__(self, statement, on=None, context=None, bind=None):
        """Create a DDL statement.
        :param statement:
          A string or unicode string to be executed.  Statements will be
          processed with Python's string formatting operator.  See the
          ``context`` argument and the ``execute_at`` method.
          A literal '%' in a statement must be escaped as '%%'.
          SQL bind parameters are not available in DDL statements.
        :param on:
          .. deprecated:: 0.7
            See :meth:`.DDLElement.execute_if`.
          Optional filtering criteria.  May be a string, tuple or a callable
          predicate.  If a string, it will be compared to the name of the
          executing database dialect::
            DDL('something', on='postgresql')
          If a tuple, specifies multiple dialect names::
            DDL('something', on=('postgresql', 'mysql'))
          If a callable, it will be invoked with four positional arguments
          as well as optional keyword arguments:
            :ddl:
              This DDL element.
            :event:
              The name of the event that has triggered this DDL, such as
              'after-create' Will be None if the DDL is executed explicitly.
            :target:
              The ``Table`` or ``MetaData`` object which is the target of
              this event. May be None if the DDL is executed explicitly.
            :connection:
              The ``Connection`` being used for DDL execution
            :tables:
              Optional keyword argument - a list of Table objects which are to
              be created/ dropped within a MetaData.create_all() or drop_all()
              method call.
          If the callable returns a true value, the DDL statement will be
          executed.
        :param context:
          Optional dictionary, defaults to None.  These values will be
          available for use in string substitutions on the DDL statement.
        :param bind:
          Optional. A :class:`.Connectable`, used by
          default when ``execute()`` is invoked without a bind argument.
        See also:
            :class:`.DDLEvents`
            :mod:`sqlalchemy.event`
        """
        # Reject non-string statements early with a descriptive error.
        if not isinstance(statement, util.string_types):
            raise exc.ArgumentError(
                "Expected a string or unicode SQL statement, got '%r'" %
                statement)
        self.statement = statement
        self.context = context or {}
        self._check_ddl_on(on)
        self.on = on
        self._bind = bind
    def __repr__(self):
        # Include 'on'/'context' only when they carry non-default values.
        return '<%s@%s; %s>' % (
            type(self).__name__, id(self),
            ', '.join([repr(self.statement)] +
                      ['%s=%r' % (key, getattr(self, key))
                       for key in ('on', 'context')
                       if getattr(self, key)]))
def _to_schema_column(element):
    """Coerce *element* to a :class:`.Column`, unwrapping any
    ``__clause_element__`` proxy, or raise ``ArgumentError``."""
    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()
    if isinstance(element, Column):
        return element
    raise exc.ArgumentError("schema.Column object expected")
def _to_schema_column_or_string(element):
    """Coerce *element* to a string name or ``ColumnElement``, unwrapping any
    ``__clause_element__`` proxy, or raise ``ArgumentError``."""
    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()
    accepted = util.string_types + (expression.ColumnElement, )
    if isinstance(element, accepted):
        return element
    raise exc.ArgumentError(
        "Element %r is not a string name or column element" % element)
class _CreateDropBase(DDLElement):
    """Base class for DDL constructs that represent CREATE and DROP or
    equivalents.
    The common theme of _CreateDropBase is a single
    ``element`` attribute which refers to the element
    to be created or dropped.
    """
    def __init__(self, element, on=None, bind=None):
        # ``element`` is the schema item (Table, Sequence, Constraint, ...)
        # the CREATE/DROP applies to.
        self.element = element
        self._check_ddl_on(on)
        self.on = on
        self.bind = bind
    def _create_rule_disable(self, compiler):
        """Allow disable of _create_rule using a callable.
        Pass to _create_rule using
        util.portable_instancemethod(self._create_rule_disable)
        to retain serializability.
        """
        return False
class CreateSchema(_CreateDropBase):
    """Represent a CREATE SCHEMA statement.
    .. versionadded:: 0.7.4
    The argument here is the string name of the schema.
    """
    __visit_name__ = "create_schema"
    def __init__(self, name, quote=None, **kw):
        """Create a new :class:`.CreateSchema` construct."""
        # ``quote`` optionally forces/suppresses identifier quoting.
        self.quote = quote
        super(CreateSchema, self).__init__(name, **kw)
class DropSchema(_CreateDropBase):
    """Represent a DROP SCHEMA statement.
    The argument here is the string name of the schema.
    .. versionadded:: 0.7.4
    """
    __visit_name__ = "drop_schema"
    def __init__(self, name, quote=None, cascade=False, **kw):
        """Create a new :class:`.DropSchema` construct."""
        self.quote = quote
        # When True, render CASCADE so contained objects are dropped too.
        self.cascade = cascade
        super(DropSchema, self).__init__(name, **kw)
class CreateTable(_CreateDropBase):
    """Represent a CREATE TABLE statement."""
    __visit_name__ = "create_table"
    def __init__(self, element, on=None, bind=None):
        """Create a :class:`.CreateTable` construct.
        :param element: a :class:`.Table` that's the subject
         of the CREATE
        :param on: See the description for 'on' in :class:`.DDL`.
        :param bind: See the description for 'bind' in :class:`.DDL`.
        """
        super(CreateTable, self).__init__(element, on=on, bind=bind)
        # Snapshot each column as a CreateColumn so its rendering can be
        # customized via the compiler extension.
        self.columns = [CreateColumn(column)
                        for column in element.columns
                        ]
class _DropView(_CreateDropBase):
    """Semi-public 'DROP VIEW' construct.
    Used by the test suite for dialect-agnostic drops of views.
    This object will eventually be part of a public "view" API.
    """
    __visit_name__ = "drop_view"
class CreateColumn(_DDLCompiles):
    """Represent a :class:`.Column` as rendered in a CREATE TABLE statement,
    via the :class:`.CreateTable` construct.
    This is provided to support custom column DDL within the generation
    of CREATE TABLE statements, by using the
    compiler extension documented in :ref:`sqlalchemy.ext.compiler_toplevel`
    to extend :class:`.CreateColumn`.
    Typical integration is to examine the incoming :class:`.Column`
    object, and to redirect compilation if a particular flag or condition
    is found::
        from sqlalchemy import schema
        from sqlalchemy.ext.compiler import compiles
        @compiles(schema.CreateColumn)
        def compile(element, compiler, **kw):
            column = element.element
            if "special" not in column.info:
                return compiler.visit_create_column(element, **kw)
            text = "%s SPECIAL DIRECTIVE %s" % (
                    column.name,
                    compiler.type_compiler.process(column.type)
                )
            default = compiler.get_column_default_string(column)
            if default is not None:
                text += " DEFAULT " + default
            if not column.nullable:
                text += " NOT NULL"
            if column.constraints:
                text += " ".join(
                            compiler.process(const)
                            for const in column.constraints)
            return text
    The above construct can be applied to a :class:`.Table` as follows::
        from sqlalchemy import Table, MetaData, Column, Integer, String
        from sqlalchemy import schema
        metadata = MetaData()
        table = Table('mytable', metadata,
                Column('x', Integer, info={"special":True}, primary_key=True),
                Column('y', String(50)),
                Column('z', String(20), info={"special":True})
            )
        metadata.create_all(conn)
    Above, the directives we've added to the :attr:`.Column.info` collection
    will be detected by our custom compilation scheme::
        CREATE TABLE mytable (
                x SPECIAL DIRECTIVE INTEGER NOT NULL,
                y VARCHAR(50),
                z SPECIAL DIRECTIVE VARCHAR(20),
            PRIMARY KEY (x)
        )
    .. versionadded:: 0.8 The :class:`.CreateColumn` construct was added
       to support custom column creation styles.
    """
    __visit_name__ = 'create_column'
    def __init__(self, element):
        # ``element`` is the Column to be rendered.
        self.element = element
class DropTable(_CreateDropBase):
    """Represent a DROP TABLE statement."""
    # Dispatched to the dialect's visit_drop_table() compiler method.
    __visit_name__ = "drop_table"
class CreateSequence(_CreateDropBase):
    """Represent a CREATE SEQUENCE statement."""
    # Dispatched to the dialect's visit_create_sequence() compiler method.
    __visit_name__ = "create_sequence"
class DropSequence(_CreateDropBase):
    """Represent a DROP SEQUENCE statement."""
    # Dispatched to the dialect's visit_drop_sequence() compiler method.
    __visit_name__ = "drop_sequence"
class CreateIndex(_CreateDropBase):
    """Represent a CREATE INDEX statement."""
    # Dispatched to the dialect's visit_create_index() compiler method.
    __visit_name__ = "create_index"
class DropIndex(_CreateDropBase):
    """Represent a DROP INDEX statement."""
    # Dispatched to the dialect's visit_drop_index() compiler method.
    __visit_name__ = "drop_index"
class AddConstraint(_CreateDropBase):
    """Represent an ALTER TABLE ADD CONSTRAINT statement."""
    __visit_name__ = "add_constraint"
    def __init__(self, element, *args, **kw):
        super(AddConstraint, self).__init__(element, *args, **kw)
        # The constraint will be emitted via this explicit ALTER, so disable
        # its inline rendering inside CREATE TABLE.
        element._create_rule = util.portable_instancemethod(
            self._create_rule_disable)
class DropConstraint(_CreateDropBase):
    """Represent an ALTER TABLE DROP CONSTRAINT statement."""
    __visit_name__ = "drop_constraint"
    def __init__(self, element, cascade=False, **kw):
        # When True, render CASCADE so dependent objects are dropped too.
        self.cascade = cascade
        super(DropConstraint, self).__init__(element, **kw)
        # Dropping explicitly also suppresses inline rendering of the
        # constraint within CREATE TABLE.
        element._create_rule = util.portable_instancemethod(
            self._create_rule_disable)
def _bind_or_error(schemaitem, msg=None):
    """Return *schemaitem*'s bind, or raise ``UnboundExecutionError`` with a
    descriptive message naming the item and the attribute to assign."""
    bind = schemaitem.bind
    if not bind:
        name = schemaitem.__class__.__name__
        # Prefer the fully-qualified name, then the plain name, for the error.
        label = getattr(schemaitem, 'fullname',
                        getattr(schemaitem, 'name', None))
        if label:
            item = '%s %r' % (name, label)
        else:
            item = name
        # MetaData/DDL carry their own .bind; other items bind via metadata.
        if isinstance(schemaitem, (MetaData, DDL)):
            bindable = "the %s's .bind" % name
        else:
            bindable = "this %s's .metadata.bind" % name
        if msg is None:
            msg = "The %s is not bound to an Engine or Connection. "\
                "Execution can not proceed without a database to execute "\
                "against. Either execute with an explicit connection or "\
                "assign %s to enable implicit execution." % \
                (item, bindable)
        raise exc.UnboundExecutionError(msg)
    return bind
| [
"sauloal@gmail.com"
] | sauloal@gmail.com |
10e2f5cde32a9b9a30b9c539d66cde75c7c68ada | 3365672d406501563b28b2c797408b3b5118780f | /scripts/utils.py | 91aff96518474cffee4359e905afb7212095c9e6 | [] | no_license | KKrajevska/ComputerVision | bd5f74f0500c5c044fc25be66587e8b7256532e0 | 89a341257a2bdca604bfc32c67f4f59e7a1290ef | refs/heads/master | 2022-12-08T14:30:43.226733 | 2020-08-28T14:08:07 | 2020-08-28T14:08:07 | 289,524,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,529 | py | from __future__ import print_function
from collections import defaultdict, deque
import datetime
import pickle
import time
import torch
import torch.distributed as dist
import errno
import os
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """
    def __init__(self, window_size=20, fmt=None):
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        # Bounded window backing median/avg; total/count track the whole run.
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt
    def update(self, value, n=1):
        # ``n`` is a sample weight (e.g. batch size); the window stores the
        # raw value once while total/count are weighted.
        self.deque.append(value)
        self.count += n
        self.total += value * n
    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        # Sum count/total across all ranks; requires a CUDA-capable backend
        # since the reduction tensor is placed on "cuda".
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]
    @property
    def median(self):
        # Median over the current window only.
        d = torch.tensor(list(self.deque))
        return d.median().item()
    @property
    def avg(self):
        # Mean over the current window only.
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()
    @property
    def global_avg(self):
        # Weighted mean over the entire run (raises ZeroDivisionError before
        # the first update()).
        return self.total / self.count
    @property
    def max(self):
        return max(self.deque)
    @property
    def value(self):
        # Most recently recorded value.
        return self.deque[-1]
    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value,
        )
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]
    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")
    # obtain Tensor size of each rank
    local_size = torch.tensor([tensor.numel()], device="cuda")
    size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
    if local_size != max_size:
        padding = torch.empty(
            size=(max_size - local_size,), dtype=torch.uint8, device="cuda"
        )
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    # Strip each rank's padding (using its true size) before unpickling.
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that all processes
    have the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        # NOTE(review): values are stacked, so all entries must be tensors of
        # identical shape/device — confirm at the call sites.
        values = torch.stack(values, dim=0)
        dist.all_reduce(values)
        if average:
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
class MetricLogger(object):
    """Collect named scalar metrics into :class:`SmoothedValue` meters,
    pretty-print them, and iterate a data loader while reporting progress.
    """
    def __init__(self, delimiter="\t"):
        # Unknown metric names lazily create a SmoothedValue with defaults.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
    def update(self, epoch=None, tb_writer=None, **kwargs):
        """Record one sample per named metric.
        :param epoch: optional global step for tensorboard logging; may be 0.
        :param tb_writer: optional ``SummaryWriter``; values are mirrored to
            tensorboard only when both it and ``epoch`` are supplied.
        """
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)
            # BUGFIX: the previous ``if epoch:`` test silently skipped logging
            # when epoch == 0, and crashed when tb_writer was omitted.
            if epoch is not None and tb_writer is not None:
                tb_writer.add_scalar(f"{k}/train", v, epoch)
    def __getattr__(self, attr):
        # Expose meters as attributes, e.g. ``logger.loss.avg``.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError(
            "'{}' object has no attribute '{}'".format(type(self).__name__, attr)
        )
    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append("{}: {}".format(name, str(meter)))
        return self.delimiter.join(loss_str)
    def synchronize_between_processes(self):
        """Reduce every meter's count/total across distributed workers."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()
    def add_meter(self, name, meter):
        """Register *meter* under *name*, overriding the lazy default."""
        self.meters[name] = meter
    def log_every(self, iterable, print_freq, header=None):
        """Yield items from *iterable*, printing timing/metric stats every
        *print_freq* iterations and a final summary line.
        """
        i = 0
        if not header:
            header = ""
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt="{avg:.4f}")
        data_time = SmoothedValue(fmt="{avg:.4f}")
        space_fmt = ":" + str(len(str(len(iterable)))) + "d"
        log_msg = self.delimiter.join(
            [
                header,
                "[{0" + space_fmt + "}/{1}]",
                "eta: {eta}",
                "{meters}",
                "time: {time}",
                "data: {data}",
                "max mem: {memory:.0f}",
            ]
        )
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # data_time measures the gap between iterations, i.e. loading.
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                print(
                    log_msg.format(
                        i,
                        len(iterable),
                        eta=eta_string,
                        meters=str(self),
                        time=str(iter_time),
                        data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB,
                    )
                )
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        # BUGFIX: guard against ZeroDivisionError when *iterable* is empty.
        print(
            "{} Total time: {} ({:.4f} s / it)".format(
                header, total_time_str, total_time / max(len(iterable), 1)
            )
        )
def collate_fn(batch):
    """Transpose a batch of sample tuples into per-field tuples."""
    transposed = zip(*batch)
    return tuple(transposed)
def warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor):
    """Build a ``LambdaLR`` implementing linear learning-rate warmup.
    The LR multiplier ramps linearly from ``warmup_factor`` at step 0 toward
    1 at ``warmup_iters``, and stays at 1 afterwards.
    """
    def warmup_scale(step):
        if step < warmup_iters:
            alpha = float(step) / warmup_iters
            # Linear interpolation between warmup_factor and 1.
            return warmup_factor * (1 - alpha) + alpha
        return 1
    return torch.optim.lr_scheduler.LambdaLR(optimizer, warmup_scale)
def mkdir(path):
    """Create *path* (including missing parents), tolerating the case where
    the directory already exists (e.g. racing distributed workers).
    """
    # os.makedirs(..., exist_ok=True) replaces the manual errno/EEXIST dance
    # and, unlike the old code, still raises if *path* exists but is not a
    # directory.
    os.makedirs(path, exist_ok=True)
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print
    # Replacement print: silent on non-master ranks unless the caller passes
    # force=True, which is popped before delegating to the real print.
    def print(*args, **kwargs):
        force = kwargs.pop("force", False)
        if is_master or force:
            builtin_print(*args, **kwargs)
    # Monkey-patch the builtin so the whole process is affected.
    __builtin__.print = print
def is_dist_avail_and_initialized():
    """Report whether the default process group can actually be used."""
    # Short-circuit: is_initialized() is only consulted when the distributed
    # package is compiled in.
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """Size of the default process group, or 1 when not distributed."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
def get_rank():
    """Rank of this process in the default group, or 0 when not distributed."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
def is_main_process():
    # Rank 0 is the coordinating process, responsible for logging and saving.
    return get_rank() == 0
def save_on_master(*args, **kwargs):
    # Only rank 0 writes checkpoints so distributed workers don't all clobber
    # the same file; arguments are forwarded verbatim to torch.save.
    if is_main_process():
        torch.save(*args, **kwargs)
def init_distributed_mode(args):
    """Initialize torch.distributed from environment variables, mutating
    *args* in place (sets ``rank``, ``world_size``/``gpu`` as available,
    ``distributed``, ``dist_backend``)."""
    # RANK/WORLD_SIZE/LOCAL_RANK are the launcher-provided variables
    # (torchrun / torch.distributed.launch style); SLURM_PROCID covers SLURM.
    if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ["WORLD_SIZE"])
        args.gpu = int(os.environ["LOCAL_RANK"])
    elif "SLURM_PROCID" in os.environ:
        args.rank = int(os.environ["SLURM_PROCID"])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        # No launcher detected: fall back to single-process mode.
        print("Not using distributed mode")
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = "nccl"
    print(
        "| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True
    )
    torch.distributed.init_process_group(
        backend=args.dist_backend,
        init_method=args.dist_url,
        world_size=args.world_size,
        rank=args.rank,
    )
    torch.distributed.barrier()
    # Silence print() on all ranks except rank 0.
    setup_for_distributed(args.rank == 0)
| [
"krajevska.kristina98@gmail.com"
] | krajevska.kristina98@gmail.com |
34fad3d4b52079c5e76aa19962797d9e983fc97c | bd741a6f3c3e1d09ee903a8025460f44c76497c9 | /venv/bin/django-admin.py | 6c750b890b673ec76de6690b718ea7d033bba763 | [] | no_license | kosuke-taniguchi/MY-SNS-APP | 3add5478b6cc04433f995a6127fc176164677eb3 | 33d71c305b05e91ba8b27b1c334cdbed5e77636c | refs/heads/main | 2023-02-04T18:35:13.714107 | 2020-12-26T11:45:48 | 2020-12-26T11:45:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | #!/home/kosuke/work/boardproject/venv/bin/python3
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"applekkttegg@yahoo.co.jp"
] | applekkttegg@yahoo.co.jp |
13f2511761bcf657cdb23154f404dad6b8ed9891 | 1445f86cb460d4dc17a14ba1ccd666e8cd9ac976 | /ThunderViper/program_mapper.py | d58d0ea9195526f5f0126948264ff7675e164564 | [] | no_license | DinoZAR/Dinosaur-OS | d8a606ef5cc6aa365641a587b445758f31e0b894 | 52ca12bb2e31e91efb46966de55b49b832e2a212 | refs/heads/master | 2016-09-05T21:54:58.567898 | 2012-02-14T22:59:56 | 2012-02-14T22:59:56 | 2,552,318 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 433 | py |
class ProgramMap:
    """Accumulates parsed source lines into a flat, parent-referenced list of
    program objects from which a tree structure can be generated.
    """
    def __init__(self):
        # BUGFIX: these were plain locals, silently discarded when __init__
        # returned; they must be instance attributes to hold any state.
        # Current line in question
        self.currLine = ''
        # List of program objects
        # Each object will be represented as [parent, obj]
        # The list will not be nested, but instead, everything is referenced to another, from
        # which the tree structure can be generated.
        #
        # The root is defined as 0.
        self.progObjs = []
    def parse(self, line):
        """Consume one source line into the map (not yet implemented)."""
        pass
    def dump(self, filename):
        """Write the accumulated map to *filename* (not yet implemented)."""
        pass
| [
"sgraffe@gmail.com"
] | sgraffe@gmail.com |
5eec5ecb79cf809a75874db406c4748b9d76491e | 13ce77f9beb41aee1d23f51d18c690776ed02d6d | /TPSoporte.pyde | 187a35a641fab495676bae50cc6301100b9f296b | [] | no_license | brunocaracini/SpotiPy | cec35e12b54e3537fd2cd04f24bfbebdf72c3c47 | 17d4236203a0ad9ff44b27fb6ea40ddf27400e3e | refs/heads/master | 2023-08-19T06:39:58.365677 | 2021-09-20T13:30:48 | 2021-09-20T13:30:48 | 216,396,489 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,251 | pyde | import json
import os
import sys
import math
import re
from collections import Counter
mousex = 0
mousey= 0
refresh = False
config = False
salir = False
busca_recomend = False
artista_recomendaciones = ''
recomend = False
op = 0
scroll = 0
estado = 0
usuario = ''
password = ''
password2 = ''
uri = ''
nombre = ''
name = False
sp_user = False
user = False
passw = False
passw2 = False
log_in = False
sign_up = False
op1 = False
op2 = False
op3 = False
op4 = False
op5 = False
op6 = False
error_window = False
error = ''
error_window_stroke = False
plsel = ''
ingresapl = False
ingresapl2 = False
len_data = 0
playlist = []
youtube = True
canciones = []
class Presentacion():
    def login(self):
        # Draw the login form only while both credential globals are still
        # empty, i.e. no session has been established yet.
        if usuario == '' and password == '':
            self.login_form()
    def login_form(self):
        """Draw the sign-in screen; a focused field gets a green outline."""
        image(back, 0, 0)
        # Sign-in heading
        textSize(50)
        fill(250, 230)
        text('Inicie Sesion', 522 , 290)
        # Username input box
        noStroke()
        photo = loadImage("user_icon.png")
        image(photo, 460, 397)
        fill(240, 50)
        if user:
            stroke(117,231,193)
        rect(445, 380, 450, 70)
        # Password input box
        noStroke()
        photo = loadImage("password_icon.png")
        image(photo, 460, 487)
        fill(240, 50)
        if passw:
            stroke(117,231,193)
        rect(445, 470, 450, 70)
        # Log-in button: greyed out until both fields are non-empty,
        # brighter while hovered (log_in flag).
        noStroke()
        if len(usuario) == 0 or len(password) == 0:
            fill(90)
        elif log_in:
            fill (117,231,193, 250)
        else:
            fill (117,231,193, 150)
        rect(600,590,150,50,20)
        textSize(18)
        fill(255, 200)
        text('Log in', 650,620)
        # Sign-up link (highlighted while hovered)
        if sign_up:
            fill(117,231,193)
        else:
            fill(255, 180)
        textSize(17)
        text('No estas registrado aun? Resgistrate aqui',490,567)
        # Error popup overlay
        if error_window:
            self.error_window_form()
def signup(self):
if usuario == '' and password == '' and password2 == '' and uri == '' and nombre == '':
self.signup_form()
def signup_form(self):
image(back, 0, 0)
#Cartel Registrese
textSize(50)
fill(250, 230)
text('Registrese', 545 , 80)
#Recuadro ingreso usuario
noStroke()
photo = loadImage("user_icon.png")
image(photo, 460, 147)
fill(240, 50)
if name:
stroke(117,231,193)
rect(445, 130, 450, 70)
#Recuadro ingreso mail
noStroke()
photo = loadImage("mail_icon.png")
tint(230, 120)
image(photo, 460, 244)
noTint()
fill(240, 50)
if user:
stroke(117,231,193)
rect(445, 220, 450, 70)
#Recuadro ingreso uri Spotify
noStroke()
fill(240, 50)
photo = loadImage("spotify_icon.png")
image(photo, 464, 329)
if sp_user:
stroke(117,231,193)
rect(445, 310, 450, 70)
#Recuadro ingreso contraseña
noStroke()
photo = loadImage("password_icon.png")
image(photo, 460, 417)
fill(240, 50)
if passw:
stroke(117,231,193)
rect(445, 400, 450, 70)
#Recuadro ingreso contraseña 2
noStroke()
photo = loadImage("password_icon.png")
image(photo, 460, 507)
fill(240, 50)
if passw2:
stroke(117,231,193)
rect(445, 490, 450, 70)
#boton Registrarse
noStroke()
if sign_up:
fill (117,231,193, 250)
else:
fill (117,231,193, 150)
rect(600,610,150,50,20)
textSize(18)
fill(255, 200)
text('Sign up', 643,640)
#Log In
if log_in:
fill(117,231,193)
else:
fill(255,180)
textSize(17)
text('Ya posee una cuenta? Inicie sesion aqui',500,587)
#Error window
if error_window:
self.error_window_form()
def menu_form(self):
image(back, 0, 0)
#Barra Lateral del menu
fill(20)
rect(0,0,350,height)
#Texto menu lateral
fill(240,210)
textSize(20)
#Icono usuario barra lateral
textAlign(CENTER)
text(nombre + " | " + uri, 175, 180)
textAlign(LEFT)
tint(255,240)
photo = loadImage("usermenu_icon.png")
image(photo, 105, 20)
#Informacion usuario:
textSize(40)
fill(117,231,193, 220)
if canciones <10:
text(canciones, 55,420)
elif canciones <100:
text(canciones, 42,420)
else:
text(canciones, 30,420)
if artistas <10:
text(len(artistas), 161,420)
elif len(artistas) <100:
text(len(artistas), 149,420)
else:
text(len(artistas), 137,420)
if cant_playlist <10:
text(cant_playlist, 258,420)
elif cant_playlist <100:
text(cant_playlist, 247,420)
else:
text(cant_playlist, 236,420)
fill(255,210)
textSize(25)
textSize(15)
text("Canciones", 30,450)
text("Artistas", 145,450)
text("Playlist", 245,450)
stroke(240,100)
line(20,215,330,215)
line(20,490,330,490)
noStroke()
#Imagen estadisticas
photo = loadImage("stats_icon.png")
image(photo, 105, 240)
#Opcion de salir en barra lateral
if salir:
tint(117,231,193, 170)
fill(117,231,193, 170)
else:
tint(240,210)
fill(240,210)
photo = loadImage("exit_icon.png")
image(photo, 20, 630)
textSize(20)
text("Cerrar sesion", 85, 665)
#Opcion de configuracion en barra lateral
if refresh:
tint(117,231,193, 170)
fill(117,231,193, 170)
else:
tint(240,210)
fill(240,210)
photo = loadImage("refresh_icon.png")
image(photo, 25, 580)
textSize(20)
text("Actualizar", 85, 605)
#Opcion 1: Mostrar tracks de una playlist en Spotify
if op1:
tint(117,231,193, 170)
fill(117,231,193, 170)
else:
tint(250,210)
fill(240,210)
photo = loadImage("playlist_icon.png")
image(photo, 602, 25)
textSize(22)
text("Spotify", 618, 150)
text("Playlists", 611, 180)
#Opcion 2: Buscar Videos de una PlayList
if op2:
tint(117,231,193, 170)
fill(117,231,193, 170)
else:
tint(250,210)
fill(255,210)
photo = loadImage("videoSearch_icon.png")
image(photo, 613, 263)
textSize(22)
text("Buscar videos", 587, 388)
text("desde playlist", 585, 418)
#Opcion 3: Buscar video Lyrics desde una Playlist
if op3:
tint(117,231,193, 170)
fill(117,231,193, 170)
else:
tint(250,210)
fill(255,210)
photo = loadImage("videoSearch_icon.png")
image(photo, 613, 490)
textSize(22)
text("Video lyric", 600, 615)
text("desde playlist", 584, 645)
#Opcion 4: Buscar Canales de Artistas de mis Playlist
if op4:
tint(117,231,193, 170)
fill(117,231,193, 170)
else:
tint(250,210)
fill(255,210)
photo = loadImage("channel_icon.png")
image(photo, 1011, 263)
textSize(22)
text("Buscar Canales", 976, 388)
text("de artistas", 1001, 418)
#Opcion 5: Buscar Canales de Artistas de mis Playlist
if op5:
tint(117,231,193, 170)
fill(117,231,193, 170)
else:
tint(250,210)
fill(255,210)
photo = loadImage("favourite_icon.png")
image(photo, 1011, 33)
textSize(22)
text("Artistas", 1011, 150)
text("Favoritos", 1006, 180)
#Opcion 6: Recomendar artistas
photo = loadImage("music_icon.png")
if op6:
tint(117,231,193, 170)
fill(117,231,193, 170)
else:
tint(250,210)
fill(255,210)
image(photo, 1013, 487)
textSize(22)
text("Artistas", 1013, 615)
text("Recomendados", 980, 645)
#Default Settings:
tint(255,180)
fill(255,180)
def showPlaylist(self):
image(back, 0, 0)
with open('user_playlists.txt') as json_file:
user_playlists = json.load(json_file)
#Botón de atrás
image(back_button, 75 , height/2)
tint(255, 230)
#Rectangulo negro de fondo
noStroke()
fill(40)
rect(200,250 + scroll, width - 400, len(user_playlists)*50 + 75, 20)
#Imprimir playlists
vposition = 270
hposition = 350
len_playlists = len(user_playlists)
for i in range(0, len_playlists):
vposition +=50
stroke(70, 240)
line(220, vposition + 17 + scroll, width - 240, vposition + 15 + scroll)
fill (220, 230)
textSize(18)
text(str(i + 1) + ".", hposition - 40, vposition + scroll)
text(user_playlists[i][0:50], hposition, vposition + scroll)
#Recuadro ingresa numero playlist
noStroke()
fill(40)
if ingresapl:
stroke(117,231,193)
rect(200, 200 + scroll, 295, 40, 20)
#Recuadro buscar
if ingresapl2:
fill(117,231,193,250)
else:
fill(117,231,193,150)
noStroke()
rect(510, 200 + scroll, 80, 40, 20)
fill(220, 230)
text('Ingrese numero de Playlist:', 210, 227 + scroll)
text('Buscar', 520, 227 + scroll)
fill(117,231,193)
text(plsel, 460, 227)
#Error window
if error_window:
self.error_window_form()
def showTracks(self):
image(back, 0, 0)
with open('data.txt') as json_file:
data = json.load(json_file)
#Botón de atrás
image(back_button, 75 , height/2)
tint(255, 230)
#Rectángulo Título de Playlist:
noStroke()
fill(40)
rect(200,120 + scroll, width - 400, 100, 20)
textSize(50)
fill (220, 230)
text("Playlist: " + playlist_name, 310, 185 + scroll)
#Rectangulo negro de fondo
noStroke()
fill(40)
rect(200,250 + scroll, width - 400, len(playlist)*50 + 75, 20)
#Imprimir playlists
vposition = 300
hposition = 350
i = 0
fill(255)
for track in playlist:
vposition +=50
stroke(70, 240)
line(220, vposition + 17 + scroll, width - 240, vposition + 15 + scroll)
fill (220, 230)
textSize(18)
text(str(i + 1) + ".", hposition - 40, vposition + scroll)
text(track['name'][0:40], hposition, vposition + scroll)
text(track['artist'][0:50], hposition + 400, vposition + scroll)
if op == 2 or op == 3:
image(youtube_icon, hposition + 680, vposition - 21 + scroll)
i+=1
def showArtists(self):
image(back, 0, 0)
with open('data.txt') as json_file:
data = json.load(json_file)
#Botón de atrás
image(back_button, 75 , height/2)
tint(255, 230)
#Rectángulo Título de Playlist:
noStroke()
fill(40)
rect(200,120 + scroll, width - 400, 100, 20)
textSize(50)
fill (220, 230)
text("Artistas: " , 310, 185 + scroll)
#Rectangulo negro de fondo
noStroke()
fill(40)
rect(200,250 + scroll, width - 400, len(artistas)*50 + 75, 20)
#Imprimir Artistas
vposition = 300
hposition = 350
i = 0
fill(255)
for artist in artistas:
vposition +=50
stroke(70, 240)
line(220, vposition + 17 + scroll, width - 240, vposition + 15 + scroll)
fill (220, 230)
textSize(18)
text(str(i + 1) + ".", hposition - 40, vposition + scroll)
text(artist[0:40], hposition, vposition + scroll)
if op == 2 or op == 3 or op == 4:
image(youtube_icon, hposition + 680, vposition - 21 + scroll)
i+=1
def showFavs(self):
image(back, 0, 0)
with open('data.txt') as json_file:
data = json.load(json_file)
#Botón de atrás
image(back_button, 75 , height/2)
tint(255, 230)
#Rectángulo Título de Playlist:
noStroke()
fill(40)
rect(200,120 + scroll, width - 400, 100, 20)
textSize(50)
fill (220, 230)
text("Artistas Favoritos: " , 310, 185 + scroll)
#Rectangulo negro de fondo
noStroke()
fill(40)
rect(200,250 + scroll, width - 400, len(lista)*50 + 75, 20)
#Imprimir Artistas
vposition = 300
hposition = 350
i = 0
fill(255)
for artist in lista:
vposition +=50
stroke(70, 240)
line(220, vposition + 17 + scroll, width - 240, vposition + 15 + scroll)
fill (220, 230)
textSize(18)
text(str(i + 1) + ".", hposition - 40, vposition + scroll)
text(artist[0:40], hposition, vposition + scroll)
image(youtube_icon, hposition + 680, vposition - 21 + scroll)
i+=1
def showRecomendaciones(self):
image(back, 0, 0)
#Botón de atrás
image(back_button, 75 , height/2)
tint(255, 230)
#Rectángulo Título de Playlist:
noStroke()
fill(40)
rect(200,120 + scroll, width - 400, 100, 20)
textSize(50)
fill (220, 230)
text("Artistas recomendados: " , 310, 185 + scroll)
#Rectangulo negro de fondo
noStroke()
fill(40)
rect(200,250 + scroll, width - 400, len(recomendaciones)*50 + 75, 20)
#Imprimir playlists
vposition = 300
hposition = 350
i = 0
fill(255)
for artist in recomendaciones:
vposition +=50
stroke(70, 240)
line(220, vposition + 17 + scroll, width - 240, vposition + 15 + scroll)
fill (220, 230)
textSize(18)
text(str(i + 1) + ".", hposition - 40, vposition + scroll)
text(artist[0:40], hposition, vposition + scroll)
if op== 6:
image(youtube_icon, hposition + 680, vposition - 21 + scroll)
i+=1
def busqueda_artista_form(self):
image(back, 0, 0)
#cartel
textSize(50)
fill(250, 230)
text('Ingrese artista', 490 , 245)
#Rectangulo ingreso artista
textSize(18)
noStroke()
fill(40)
if recomend:
stroke(117,231,193)
rect(445, 290, 450, 70)
fill(245,230)
text(artista_recomendaciones, 530 , 332)
#Boton atras
image(back_button, 75 , height/2)
tint(255, 230)
#Boton buscar
noStroke()
if len(artista_recomendaciones) == 0:
fill(90)
elif busca_recomend:
fill (117,231,193, 250)
else:
fill (117,231,193, 150)
rect(590,390,150,50,20)
textSize(18)
fill(255, 200)
text('Buscar', 640,420)
def error_window_form(self):
global error
#Fondo negro
fill(10,230)
rect(0,0,width,height)
#Ventana de error
fill(20,230)
stroke(117,231,193)
rect(400, 300, 535, 190)
#Boton Aceptar
fill(117,231,193,20)
if error_window_stroke:
fill(117,231,193)
rect(570,447,200,30, 20)
fill(255, 200)
text('Aceptar', 640,468)
#signo advertencia
photo = loadImage("warning_icon.png")
image(photo, 619, 317)
tint(255,200)
#Mensaje del error
fill(240, 240)
textAlign(CENTER)
text("Error: "+ error, 670, 430)
textAlign(LEFT)
app = Presentacion()
def reset():
    """Clear all form fields and focus/hover flags (used on logout and form switch).

    Fix: the original assigned ``name = False`` without declaring
    ``global name``, so the assignment created a local and the name-field
    focus flag was never actually reset.
    """
    global usuario
    global password
    global password2
    global uri
    global user
    global passw2
    global sp_user
    global passw
    global log_in
    global sign_up
    global nombre
    global name  # was missing: `name = False` below silently created a local
    sign_up = False
    log_in = False
    user = False
    passw = False
    passw2 = False
    sp_user = False
    name = False
    usuario = ''
    password = ''
    password2 = ''
    uri = ''
    nombre = ''
def verifica_input(key_):
    """Return True when *key_* is acceptable for the input field of the current screen.

    Screen 3 (playlist number) accepts digits only.  All other screens accept
    '@'-'Z', 'a'-'z', digits, '_' '-' '+' '.', plus the space bar on screen 8
    (artist name).  Processing's coded keys (65535) are always rejected.
    """
    # Playlist-number field: digits only.
    if estado == 3:
        return 48 <= int(ord(key_)) <= 57
    # Special (coded) keys are never valid text input.
    if key_ == 65535:
        return False
    code = int(ord(key_))
    if 64 <= code <= 90 or 97 <= code <= 122:   # '@'..'Z' (covers e-mail '@'), 'a'..'z'
        return True
    if code in (95, 45, 43, 46):                # '_' '-' '+' '.'
        return True
    if 48 <= code <= 57:                        # digits
        return True
    if estado == 8 and code == 32:              # space allowed in artist names
        return True
    return False
def printText():
    """Redraw the current form and overlay the typed field contents.

    Called from keyPressed/mousePressed so the text the user types shows up
    immediately on the login (estado 0), signup (estado 1) and playlist-picker
    (estado 3) screens.  Text is suppressed while the error dialog is open.
    """
    global usuario
    global password
    global uri
    global password2
    global error_window
    global error
    global ingresapl
    global ingresapl2
    global plsel
    global nombre
    if estado == 0:
        app.login_form()
        fill(240)
        if error_window == False:
            text(usuario, 530 , 422)
            text(password, 530 , 512)
    elif estado == 1:
        app.signup_form()
        fill(240)
        if error_window == False:
            text(nombre, 530, 172)
            text(usuario, 530 , 262)
            text(uri, 530 , 352)
            text(password, 530 , 442)
            text(password2, 530 , 532)
    elif estado == 3:
        # showPlaylist draws plsel itself
        app.showPlaylist()
def setup():
    """Processing entry point: window size, frame rate, and shared images."""
    global back
    global youtube_icon
    global back_button
    size (1365, 700)
    frameRate(20)
    youtube_icon = loadImage("youtube_icon.png")
    back_button = loadImage("back_icon.png")
    back = loadImage("fondo2.jpg")
    image(back, 0, 0)
    strokeWeight(2)
def mouseWheel(event):
    """Scroll list screens with the mouse wheel.

    Only the list-style screens scroll.  When already at the top
    (scroll == 0) only a downward wheel movement (positive count) is
    accepted, so the user cannot scroll above the first row.
    """
    global scroll
    if estado == 5 or estado == 4 or estado == 6 or estado == 7 or estado == 9 or estado == 10:
        if scroll == 0:
            if event.getCount() >=0:
                scroll -= event.getCount() * 50
        else:
            scroll -= event.getCount() * 50
def keyPressed():
    """Route the typed key to whichever field currently has focus.

    Fixes over the original:
    * estado 3: deleting from the playlist-number field was gated on
      ``len(password) > 0`` instead of ``len(plsel) > 0``.
    * estado 8: typing into the artist-name field was gated on
      ``len(plsel) < 2`` (copy-pasted from the playlist field), which blocked
      input whenever a two-digit playlist number had been entered earlier.
    """
    global estado
    global usuario
    global password
    global password2
    global uri
    global user
    global passw
    global passw2
    global sp_user
    global ingresapl
    global plsel
    global recomend
    global artista_recomendaciones
    global nombre
    global name
    # Login screen: username / password fields
    if estado == 0:
        textSize(18)
        if user and verifica_input(key):
            usuario += str(key)
        elif user and (key == BACKSPACE) and len(usuario)>0:
            usuario = usuario[:-1]
        if passw and verifica_input(key):
            password += str(key)
        elif passw and (key == BACKSPACE) and len(password)>0:
            password = password[:-1]
    # Signup screen: name / mail / Spotify URI / password x2
    if estado == 1:
        textSize(18)
        if name and verifica_input(key):
            nombre += str(key)
        elif name and (key == BACKSPACE) and len(nombre)>0:
            nombre = nombre[:-1]
        if user and verifica_input(key):
            usuario += str(key)
        elif user and (key == BACKSPACE) and len(usuario)>0:
            usuario = usuario[:-1]
        if passw and verifica_input(key):
            password += str(key)
        elif passw and (key == BACKSPACE) and len(password)>0:
            password = password[:-1]
        if passw2 and verifica_input(key):
            password2 += str(key)
        elif passw2 and (key == BACKSPACE) and len(password2)>0:
            password2 = password2[:-1]
        if sp_user and verifica_input(key):
            uri += str(key)
        elif sp_user and (key == BACKSPACE) and len(uri)>0:
            uri = uri[:-1]
    # Playlist picker: number field, capped at 2 digits
    if estado ==3:
        if ingresapl and verifica_input(key) and len(plsel) < 2:
            plsel += str(key)
        elif ingresapl and (key == BACKSPACE) and len(plsel)>0:  # fixed: was len(password)
            plsel = plsel[:-1]
    # Recommendation screen: artist-name field (no length cap)
    if estado == 8:
        if recomend and verifica_input(key):  # fixed: dropped stray `len(plsel) < 2` gate
            artista_recomendaciones += str(key)
        elif recomend and (key == BACKSPACE) and len(artista_recomendaciones)>0:
            artista_recomendaciones = artista_recomendaciones[:-1]
    printText()
def mousePressed():
    """Main click handler: hit-tests the current screen's widgets and drives
    the estado state machine (focus changes, navigation, external helper
    scripts launched via os.system).
    """
    global user
    global passw
    global passw2
    global estado
    global log_in
    global sign_up
    global sp_user
    global error_window
    global uri
    global ingresapl
    global op
    global scroll
    global youtube
    global recomend
    global name
    global error
    if estado == 0:
        # Focus the username box
        if mouseX >= 445 and mouseX <=895 and mouseY >=380 and mouseY <= 450:
            passw = False
            log_in = False
            user = True
            printText()
        # Focus the password box
        elif mouseX >= 445 and mouseX <=895 and mouseY >=470 and mouseY <= 540:
            user = False
            log_in = False
            passw = True
            printText()
        # Log in: validate credentials via capa_interfaz.py, then load all
        # user data and go to the menu (estado 2)
        elif mouseX >= 600 and mouseX <=750 and mouseY >=590 and mouseY <= 640 and len(usuario) > 0 and len(password) > 0:
            '''
            estado = 2
            obtener_artistas()
            obtener_playlist()
            obtener_canciones()
            '''
            printText()
            exporta_json()
            if importa_json():
                estado = 2
                exporta_json()
                importa_json()
                os.system("Python Obtener_canciones.py {0}".format(uri))
                obtener_artistas()
                obtener_playlist()
                obtener_canciones()
        # "Register here" link -> signup screen
        elif mouseX>= 470 and mouseX<= 835 and mouseY>= 545 and mouseY <= 579:
            estado = 1
            sign_up = False
            reset()
    elif estado == 1:
        # Focus the name box
        if mouseX >= 445 and mouseX <=895 and mouseY >=130 and mouseY <= 240:
            passw = False
            passw2 = False
            sp_user = False
            user = False
            name = True
            printText()
        # Focus the e-mail box
        if mouseX >= 445 and mouseX <=895 and mouseY >=220 and mouseY <= 310:
            passw = False
            passw2 = False
            sp_user = False
            name = False
            user = True
            printText()
        # Focus the Spotify-URI box
        if mouseX >= 445 and mouseX <=895 and mouseY >=310 and mouseY <= 380:
            sp_user = True
            passw = False
            passw2 = False
            name = False
            user = False
            printText()
        # Focus the first password box
        if mouseX >= 445 and mouseX <=895 and mouseY >=400 and mouseY <= 470:
            passw = True
            passw2 = False
            user = False
            sp_user = False
            name = False
            printText()
        # Focus the confirmation password box
        # NOTE(review): `nombre = False` (twice) assigns a local, probably
        # meant the focus flag `name = False`.
        elif mouseX >= 445 and mouseX <=895 and mouseY >=490 and mouseY <= 560:
            user = False
            passw = False
            nombre = False
            sp_user = False
            nombre = False
            passw2 = True
            printText()
        # Sign up: run registration, back to login on success
        elif mouseX >= 600 and mouseX <=750 and mouseY >=610 and mouseY <= 660:
            exporta_json()
            if importa_json():
                estado = 0
        # "Log in here" link -> login screen
        elif mouseX>= 470 and mouseX<= 835 and mouseY>= 565 and mouseY <= 599:
            estado = 0
            reset()
    elif estado == 2:
        # Option 1: show Spotify playlists
        if (mouseX>= 600.5 and mouseX<= 700.5 and mouseY>= 25 and mouseY <= 125) or (mouseX>= 607 and mouseX<= 700 and mouseY>= 130 and mouseY <= 200):
            #os.system("Python Obtener_canciones.py {0}".format(uri))
            op = 1
            estado = 3
        # Option 2: search videos for a playlist
        if (mouseX>= 600.5 and mouseX<= 700.5 and mouseY>= 263 and mouseY <= 348) or (mouseX>= 590 and mouseX<= 740 and mouseY>= 368 and mouseY <= 438):
            #os.system("Python Obtener_canciones.py {0}".format(uri))
            op = 2
            estado = 3
        # Option 3: search lyric videos for a playlist
        if (mouseX>= 600.5 and mouseX<= 700.5 and mouseY>= 470 and mouseY <= 563) or (mouseX>= 590 and mouseX<= 740 and mouseY>= 572 and mouseY <= 655):
            #os.system("Python Obtener_canciones.py {0}".format(uri))
            op = 3
            estado = 3
        # Option 5 tile: favourite artists
        if (mouseX>= 1000 and mouseX<= 1103 and mouseY>= 25 and mouseY <= 125) or (mouseX>= 970 and mouseX<= 1140 and mouseY>= 130 and mouseY <= 200):
            artistasMasEscuchados()
            estado = 10
        # Option 4 tile: artist channels
        if (mouseX>= 1000 and mouseX<= 1103 and mouseY>= 263 and mouseY <= 348) or (mouseX>= 970 and mouseX<= 1140 and mouseY>= 368 and mouseY <= 438):
            obtener_artistas()
            op = 4
            estado = 7
        # Option 6 tile: artist recommendations
        if (mouseX>= 1000 and mouseX<= 1103 and mouseY>= 470 and mouseY <= 563) or (mouseX>= 993 and mouseX<= 1120 and mouseY>= 572 and mouseY <= 620):
            op = 6
            cursor(ARROW)
            estado = 8
        # Sidebar: log out
        # NOTE(review): `salir = False` assigns a local (no global decl).
        if (mouseX>= 20 and mouseX<= 220 and mouseY>= 640 and mouseY <= 685):
            salir = False
            cursor(ARROW)
            reset()
            estado = 0
        # Sidebar: refresh cached Spotify data
        if (mouseX>= 20 and mouseX<= 235 and mouseY>= 580 and mouseY <= 625):
            os.system("Python Obtener_canciones.py {0}".format(uri))
            obtener_canciones()
            obtener_artistas()
            obtener_playlist()
    elif estado == 3:
        # Back button -> menu
        if mouseX>= 75 and mouseX<= 145 and mouseY>= 372.5 and mouseY <= 452.5:
            estado = 2
        # Focus the playlist-number box
        if (mouseX>= 200 and mouseX<= 550 and mouseY>= 200 and mouseY <= 250):
            ingresapl = True
        # Search button: next screen depends on which menu option was chosen
        if (mouseX>= 510 and mouseX<= 600 and mouseY>= 200 and mouseY <= 250):
            if busca_index():
                obtener_playlist()
                if op == 1:
                    estado = 6
                elif op == 2:
                    estado = 4
                elif op == 3:
                    estado = 5
    elif estado == 4:
        # YouTube icon column: search video for the clicked track
        if mouseX>=1025 and mouseX<=1075:
            with open('data.txt') as json_file:
                data = json.load(json_file)
            # Map click y-coordinate (plus scroll) back to a row index
            numero = math.ceil((mouseY - scroll + 340)/50) - 13
            arg = playlist[int(numero)]['name'] + " " + playlist[int(numero)]['artist']
            arg = re.sub('[^a-zA-Z.\d\s]', '', arg)
            with open('song.txt', 'w') as outfile:
                json.dump(arg, outfile)
            os.system("Python busqueda_videos.py")
        # Back button -> playlist picker
        if mouseX>= 75 and mouseX<= 145 and mouseY>= height/2 and mouseY <= height/2 + 70:
            estado = 3
            scroll = 0
    elif estado == 5:
        # YouTube icon column: search lyric video for the clicked track
        if mouseX>=1025 and mouseX<=1075:
            with open('data.txt') as json_file:
                data = json.load(json_file)
            numero = math.ceil((mouseY - scroll + 340)/50) - 13
            arg = "Lyrics video " + playlist[int(numero)]['name'] + " " + playlist[int(numero)]['artist']
            arg = re.sub('[^a-zA-Z.\d\s]', '', arg)
            arg = re.sub('Remastered', '', arg)
            with open('song.txt', 'w') as outfile:
                json.dump(arg, outfile)
            os.system("Python busqueda_videos.py")
        # Back button -> playlist picker
        if mouseX>= 75 and mouseX<= 145 and mouseY>= height/2 and mouseY <= height/2 + 70:
            estado = 3
            scroll = 0
    elif estado == 6:
        # Back button -> playlist picker
        if mouseX>= 75 and mouseX<= 145 and mouseY>= height/2 and mouseY <= height/2 + 70:
            estado = 3
            scroll = 0
    elif estado == 7:
        # YouTube icon column: search the clicked artist's channel
        if mouseX>=1025 and mouseX<=1075:
            with open('data.txt') as json_file:
                data = json.load(json_file)
            numero = math.ceil((mouseY - scroll + 340)/50) - 13
            arg = artistas[int(numero)]
            arg = re.sub('[^a-zA-Z.\d\s]', '', arg)
            with open('channel.txt', 'w') as outfile:
                json.dump(arg, outfile)
            os.system("Python busqueda_canales.py")
        # Back button -> menu
        if mouseX>= 75 and mouseX<= 145 and mouseY>= height/2 and mouseY <= height/2 + 70:
            estado = 2
            scroll = 0
    elif estado == 8:
        # Focus the artist-name box
        if mouseX >= 445 and mouseX <=895 and mouseY >=290 and mouseY <= 360:
            recomend = True
        # Back button -> menu
        if mouseX>= 75 and mouseX<= 145 and mouseY>= height/2 and mouseY <= height/2 + 70:
            estado = 2
            scroll = 0
        # Search button -> fetch and show recommendations
        if mouseX>= 580 and mouseX<= 750 and mouseY>= 380 and mouseY <= 450 and len(artista_recomendaciones) >0:
            obtener_recomendaciones1()
            estado = 9
    elif estado == 9:
        # YouTube icon column: search the clicked recommended artist's channel
        if mouseX>=1025 and mouseX<=1075:
            with open('data.txt') as json_file:
                data = json.load(json_file)
            numero = math.ceil((mouseY - scroll + 340)/50) - 13
            arg = recomendaciones[int(numero)]
            arg = re.sub('[^a-zA-Z.\d\s]', '', arg)
            with open('channel.txt', 'w') as outfile:
                json.dump(arg, outfile)
            os.system("Python busqueda_canales.py")
        # Back button -> recommendation input screen
        if mouseX>= 75 and mouseX<= 145 and mouseY>= height/2 and mouseY <= height/2 + 70:
            estado = 8
            scroll = 0
    elif estado == 10:
        # YouTube icon column: search the clicked favourite artist's channel
        if mouseX>=1025 and mouseX<=1075:
            with open('data.txt') as json_file:
                data = json.load(json_file)
            numero = math.ceil((mouseY - scroll + 340)/50) - 13
            arg = lista[int(numero)]
            arg = re.sub('[^a-zA-Z.\d\s]', '', arg)
            with open('channel.txt', 'w') as outfile:
                json.dump(arg, outfile)
            os.system("Python busqueda_canales.py")
        # Back button -> menu
        if mouseX>= 75 and mouseX<= 145 and mouseY>= height/2 and mouseY <= height/2 + 70:
            estado = 2
            scroll = 0
    # Error dialog: Accept button clears the error state
    if error_window:
        if (mouseX>= 570 and mouseX<= 770 and mouseY>= 447 and mouseY<= 467):
            error_window = False
            error = ''
def importa_json():
    """Read the result file written by capa_interfaz.py for the current screen.

    estado 0 -> login result, estado 1 -> signup result, estado 2 -> the
    logged-in user's Spotify URI and display name.  Each file is deleted
    after reading.  Returns True when the (global) ``error`` is 'ok', and
    opens the error dialog otherwise.

    Fix: the original deleted the files with ``os.system("DEL ...")``, which
    only works on Windows and spawns a shell; ``os.remove`` does the same job
    portably and without a subprocess.
    """
    global estado
    global error
    global error_window
    global uri
    global nombre
    global cant_playlist
    if estado == 0:
        with open('user_login_result.txt') as json_file:
            user_data = json.load(json_file)
        error = user_data
        os.remove('user_login_result.txt')  # was: os.system("DEL user_login_result.txt")
    elif estado == 1:
        with open('user_signup_result.txt') as json_file:
            user_data_signup = json.load(json_file)
        error = user_data_signup
        os.remove('user_signup_result.txt')  # was: os.system("DEL user_signup_result.txt")
    elif estado == 2:
        with open('user_uri.txt') as json_file:
            user_uri = json.load(json_file)
        uri = user_uri['uri']
        nombre = user_uri['nombre']
        os.remove('user_uri.txt')  # was: os.system("DEL user_uri.txt")
    # NOTE: for estado 2 this re-checks the `error` left by the previous call.
    if error == 'ok':
        error_window = False
        return True
    else:
        error_window = True
        return False
def exporta_json():
    """Write the current form data to a JSON file and invoke capa_interfaz.py.

    estado 0 -> login request, estado 1 -> signup request, estado 2 -> URI
    lookup request.  The helper script writes a matching result file that
    importa_json() reads back.
    """
    global usuario
    global password
    global password2
    global uri
    global estado
    global nombre
    if estado == 0:
        # Login: [user, password]
        user_data = [usuario, password]
        with open('user_data.txt', 'w') as outfile:
            json.dump(user_data, outfile)
        os.system("Python capa_interfaz.py login")
    elif estado == 1:
        # Signup: full registration record ('estado' is filled in by the helper)
        user_data_signup = {
            'usuario': usuario,
            'password': password,
            'uri' : uri,
            'estado': '',
            'nombre': nombre
        }
        with open('user_data_signup.txt', 'w') as outfile:
            json.dump(user_data_signup, outfile)
        os.system("Python capa_interfaz.py signup")
    elif estado == 2:
        # URI lookup: helper fills in 'uri' and 'nombre'
        user_uri = {
            'usuario': usuario,
            'uri': '',
            'nombre':''
        }
        with open('user_uri.txt', 'w') as outfile:
            json.dump(user_uri, outfile)
        os.system("Python capa_interfaz.py geturi")
def checkMousePosition():
    """Per-frame hover handling: updates hover flags and the cursor shape
    (HAND over clickable widgets, ARROW elsewhere) for the current screen.
    """
    global estado
    global sign_up
    global log_in
    global ingresapl2
    global op1
    global op2
    global op3
    global op4
    global op5
    global op6
    global salir
    global error_window_stroke
    global busca_recomend
    global config
    global refresh
    global error_window
    if estado == 0:
        # Highlight the signup link on hover
        if mouseX>= 470 and mouseX<= 835 and mouseY>= 545 and mouseY <= 579:
            sign_up = True
            printText()
        elif estado == 0:
            sign_up = False
            printText()
        # Highlight the Log in button (only once both fields have text)
        if mouseX >= 600 and mouseX <=750 and mouseY >=600 and mouseY <= 640 and len(password)>=1 and len(usuario)>=1:
            log_in = True
            printText()
        elif estado == 0:
            log_in = False
            printText()
        if (sign_up or log_in) and error_window == False:
            cursor(HAND)
        elif error_window == False:
            cursor(ARROW)
    if estado == 1:
        # Highlight the log-in link on hover
        if mouseX>= 470 and mouseX<= 835 and mouseY>= 565 and mouseY <= 599:
            log_in = True
            printText()
        elif estado == 1:
            log_in = False
            printText()
        # Highlight the Sign up button on hover
        if mouseX >= 600 and mouseX <=750 and mouseY >=605 and mouseY <= 660:
            sign_up = True
            printText()
        elif estado == 1:
            sign_up = False
            printText()
        if (sign_up or log_in) and error_window == False:
            cursor(HAND)
        elif error_window == False:
            cursor(ARROW)
    if estado == 2:
        # Option 1 tile (Spotify playlists)
        if (mouseX>= 600.5 and mouseX<= 700.5 and mouseY>= 25 and mouseY <= 125) or (mouseX>= 607 and mouseX<= 700 and mouseY>= 130 and mouseY <= 200):
            op1 = True
        elif estado == 2:
            op1 = False
        # Option 2 tile (videos from playlist)
        if (mouseX>= 600.5 and mouseX<= 700.5 and mouseY>= 263 and mouseY <= 348) or (mouseX>= 590 and mouseX<= 740 and mouseY>= 368 and mouseY <= 438):
            op2 = True
        elif estado == 2:
            op2 = False
        # Option 3 tile (lyric videos from playlist)
        if (mouseX>= 600.5 and mouseX<= 700.5 and mouseY>= 470 and mouseY <= 577) or (mouseX>= 590 and mouseX<= 740 and mouseY>= 600 and mouseY <= 655):
            op3 = True
        elif estado == 2:
            op3 = False
        # Option 4 tile (artist channels)
        if (mouseX>= 1000 and mouseX<= 1103 and mouseY>= 263 and mouseY <= 348) or (mouseX>= 970 and mouseX<= 1140 and mouseY>= 368 and mouseY <= 438):
            op4 = True
        elif estado == 2:
            op4 = False
        # Option 6 tile (recommended artists)
        if (mouseX>= 1000 and mouseX<= 1103 and mouseY>= 470 and mouseY <= 577) or (mouseX>= 977 and mouseX<= 1160 and mouseY>= 600 and mouseY <= 660):
            op6 = True
        elif estado == 2:
            op6 = False
        # Option 5 tile (favourite artists)
        if (mouseX>= 1000 and mouseX<= 1103 and mouseY>= 25 and mouseY <= 125) or (mouseX>= 970 and mouseX<= 1140 and mouseY>= 130 and mouseY <= 200):
            op5 = True
        elif estado == 2:
            op5 = False
        # Sidebar: log out
        if (mouseX>= 20 and mouseX<= 220 and mouseY>= 640 and mouseY <= 685):
            salir = True
        elif estado == 2:
            salir = False
        # Sidebar: refresh
        if (mouseX>= 20 and mouseX<= 235 and mouseY>= 580 and mouseY <= 625):
            refresh = True
        elif estado == 2:
            refresh = False
        # Hand cursor over any clickable widget
        if op1 or op2 or op3 or op4 or op5 or op6 or salir or config or refresh:
            cursor(HAND)
        else:
            cursor(ARROW)
    if estado == 3:
        # Search button: highlight + hand cursor
        if mouseX>= 510 and mouseX<= 600 and mouseY>= 200 and mouseY <= 245:
            ingresapl2 = True
            cursor(HAND)
        # Back button
        elif mouseX>= 75 and mouseX<= 145 and mouseY>= 372.5 and mouseY <= 452.5:
            cursor(HAND)
        # Playlist-number entry box
        elif (mouseX>= 200 and mouseX<= 550 and mouseY>= 200 and mouseY <= 250):
            cursor(HAND)
        # Anywhere else: reset
        elif estado == 3:
            ingresapl2 = False
            cursor(ARROW)
    if estado == 4 or estado == 5 or estado == 7 or estado == 9 or estado == 10:
        # YouTube icon column or back button: hand cursor
        if mouseX>=1025 and mouseX<=1075:
            cursor(HAND)
        elif mouseX>= 75 and mouseX<= 145 and mouseY>= 372.5 and mouseY <= 452.5:
            cursor(HAND)
        else:
            cursor(ARROW)
    if estado == 6:
        if mouseX>= 75 and mouseX<= 145 and mouseY>= 372.5 and mouseY <= 452.5:
            cursor(HAND)
        else:
            cursor(ARROW)
    if estado == 8:
        # Highlight the recommendation search button
        if mouseX>= 580 and mouseX<= 750 and mouseY>= 380 and mouseY <= 450:
            busca_recomend = True
        elif estado == 8:
            busca_recomend = False
        # Hand cursor over back button, or whenever the input box has focus
        if mouseX>= 75 and mouseX<= 145 and mouseY>= 372.5 and mouseY <= 452.5 or recomend:
            cursor(HAND)
        else:
            cursor(ARROW)
    if error_window:
        # Highlight the Accept button of the error dialog
        if (mouseX>= 560 and mouseX<= 780 and mouseY>= 437 and mouseY<= 487):
            error_window_stroke = True
            cursor(HAND)
        else:
            error_window_stroke = False
            cursor(ARROW)
def obtener_playlist():
    """Refresh playlist globals from the cached JSON files.

    On the menu (estado 2) only updates the playlist count; on the playlist
    picker (estado 3) resolves the 1-based index typed in ``plsel`` to a
    playlist name and collects its tracks into the global ``playlist``.
    """
    global playlist
    global playlist_name
    global cant_playlist
    playlist = []
    with open('user_playlists.txt') as json_file:
        user_playlists = json.load(json_file)
    with open('data.txt') as json_file:
        data = json.load(json_file)
    if estado == 2:
        cant_playlist = len(user_playlists)
    elif estado == 3:
        # plsel is 1-based on screen
        playlist_name = user_playlists[int(plsel) - 1]
        for track in data['tracks']:
            if track['playlist'] == playlist_name:
                playlist.append(track)
def obtener_artistas():
    """Rebuild the global ``artistas`` list: distinct artists from data.txt,
    in first-appearance order.

    Uses a ``seen`` set for O(1) membership tests instead of the original
    ``artist not in artistas`` list scan, which was O(n) per track.
    """
    global artistas
    artistas = []
    with open('data.txt') as json_file:
        data = json.load(json_file)
    seen = set()
    for track in data['tracks']:
        artist = track['artist']
        if artist not in seen:
            seen.add(artist)
            artistas.append(artist)
def obtener_canciones():
    """Refresh the global song count from the cached Spotify dump (data.txt)."""
    global canciones
    with open('data.txt') as fh:
        canciones = len(json.load(fh)['tracks'])
def obtener_recomendaciones1():
    """Fetch artist recommendations for ``artista_recomendaciones``.

    Writes the query to artista_recomendaciones.txt, shells out to
    buscar_reco.py (which produces reco.txt), then loads the result into the
    global ``recomendaciones`` list.
    """
    global recomendaciones
    global artista_recomendaciones
    with open('artista_recomendaciones.txt', 'w') as outfile:
        json.dump(artista_recomendaciones, outfile)
    os.system("Python buscar_reco.py")
    with open('reco.txt') as json_file:
        reco = json.load(json_file)
    recomendaciones = reco
def busca_index():
    """Validate the playlist number typed in ``plsel``.

    Returns True when plsel names an existing 1-based playlist index;
    otherwise opens the error dialog and returns False.

    Fix: the original called ``int(plsel)`` unconditionally, which raised
    ValueError when the user clicked Buscar with an empty field; an empty
    (or otherwise non-numeric) value is now treated as "playlist not found".
    """
    global error_window
    global error
    with open('user_playlists.txt') as json_file:
        user_playlists = json.load(json_file)
    index = len(user_playlists)
    # plsel may be '' before the user types anything; isdigit() guards int().
    if plsel.isdigit() and int(plsel) >= 1 and int(plsel) <= index:
        error_window = False
        return True
    else:
        error = 'Playlist inexistente'
        error_window = True
        return False
def artistasMasEscuchados():
    """Recompute the global ``lista``: the user's top-10 artists by track count.

    Counts how many cached tracks each artist has in data.txt and keeps the
    ten most frequent names (Counter.most_common order).
    """
    global lista
    with open('data.txt') as fh:
        data = json.load(fh)
    plays = Counter(track['artist'] for track in data['tracks'])
    lista = [artist for artist, _ in plays.most_common(10)]
def draw():
    """Processing frame loop: run hover handling then render the screen for
    the current ``estado`` (see the state ids documented on the globals).
    """
    if estado == 0:
        checkMousePosition()
        app.login()
    elif estado == 1:
        checkMousePosition()
        app.signup()
    if estado == 2:
        checkMousePosition()
        app.menu_form()
    if estado == 3:
        checkMousePosition()
        app.showPlaylist()
    # Estados 4/5/6 all show a track list (videos / lyrics / plain)
    if estado == 4 or estado == 5 or estado == 6:
        checkMousePosition()
        app.showTracks()
    if estado == 7:
        checkMousePosition()
        app.showArtists()
    if estado == 8:
        checkMousePosition()
        app.busqueda_artista_form()
    if estado == 9:
        checkMousePosition()
        app.showRecomendaciones()
    if estado == 10:
        checkMousePosition()
        app.showFavs()
| [
"noreply@github.com"
] | brunocaracini.noreply@github.com |
ca81bbb63f589c7380edf18d24d9a69bc95f3f91 | 560a4b3673137d10f46062ed20e6a5e976889a9f | /indexacion.py | 64459c6265fb4f7d1cd20c5cf0499e34bd539cde | [] | no_license | PiaNgg/t08.chunga_huatay | 87f853b838df706dc2c8d68aa12973798e7b5abb | ce818f73e67f89e2740c0444eae35d14f9192507 | refs/heads/master | 2020-09-30T11:01:18.990114 | 2019-12-12T01:02:52 | 2019-12-12T01:02:52 | 227,274,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,425 | py | #ejercicio 01
tipo_arbol="MADERA ROBLE" #assign the value to the tree-type variable
print(tipo_arbol[0]) #print the first letter of the string
print(tipo_arbol[5]) #print the sixth letter
print(tipo_arbol[9]) #print the tenth letter
print(tipo_arbol[0]+tipo_arbol[11]) #print a combination of letters from the string
#exercise 02
colores="EL COLOR PREFERIDO ES AMARILLO" #assign the value to the variable colores
print(colores[-4]) #print the fourth letter counting from the end of the string
print(colores[-1]) #print the last letter counting from the end
print(colores[-12]+colores[-13]) #print a combination of letters taken from the end of the string
#exercise 03
sabor="A CARLA LE GUSTA LA FRESA" #assign the value to the variable sabor
print(sabor[3]+sabor[7]+sabor[11]) #print the concatenation of those letters
#exercise 04
marca="\aLA MARCA SANSUMG" #assign a value to the variable marca
print(marca[0]+marca[-1]) #print the selected letters of the variable marca
#exercise 05
curso="ME GUSTA EL CURSO DE PROGRAMACION" #assign a value to the variable curso
print(curso[3]+"\n"+curso[6]) #print the selected letters of the variable curso
#exercise 06
zapatillas="LAS ZAPATILLAS NIKE SON LAS MEJORES" #assign a value to the variable zapatillas
print(curso[-2]+curso[-15]+"\b") #NOTE(review): indexes `curso`, not `zapatillas` -- looks unintentional; confirm
#exercise 07
utiles="\"REGALAME UN BORRADOr\"" #assign a value to the variable utiles
print(utiles[-22]+"\n"+utiles[0]) #print the selected letters of the variable utiles
#exercise 08
arcoiris="EL ROJO ES EL COLOR MAS HERMOSO DEL ARCOIRIS" #assign a value to the variable arcoiris
print(arcoiris[-3]+"\n"+arcoiris[0]) #print the selected letters of the variable arcoiris
#exercise 09
transporte="A CARLA LE GUSTA VIAJAR EN AVION" #assign a value to the variable transporte
print("HOLA "+"\n"+transporte[2]+transporte[3]+transporte[4]+transporte[5]+transporte[6]) #greeting plus letters 2-6 of transporte
#exercise 10
planetas="LA TIERRA ES UN NUESTRO HOGAR CUIDALO" #assign a value to the variable planetas
print(planetas[0]+planetas[1]+"\n"+planetas[-37]+planetas[-36]) #the same two letters via positive and negative indices
#exercise 11
instrumentos="LILI TOCA EL PIANO" #assign a value to the variable instrumentos
print(instrumentos[-5]+instrumentos[-4]+"\n"+instrumentos[-10]+instrumentos[-11]) #print values selected from the string
#exercise 12
peluqueria ="MAYRA ES EXPERTA CORTANDO CON MAQUINA" #assign a value to the variable peluqueria
print("\b"+peluqueria[-12]+peluqueria[12]+"\"") #print values selected from the string
#exercise 13
grados="MI HERMANO ESTA EN 2 AÑO DE SECUNDARIA" #assign a value to the variable grados
print("\n"+grados[-1]+grados[1]+grados[15]+"?\n") #print values selected from the string
#exercise 14
notas="ANABEL APRENDIO EL SI MAYOR" #assign a value to the variable notas
print(notas[0]+notas[2]+"\n"+notas[19]+notas[20]+"\a") #print values selected from the string
#exercise 15
latops="YO TENGO UNA LATOP LENOVA" #assign a value to the variable latops
print("\t"+latops[-11]+latops[5]+latops[6]+latops[-5]+latops[-6]+"\n"+"es mi nombre") #print values selected from the string
| [
"noreply@github.com"
] | PiaNgg.noreply@github.com |
67c0e243b2247c0dc834c42885c6269733ffb4c0 | 0539dc99967bb279a30e3d463d871c8843fe2b62 | /mysite/urls.py | bc7368917da892bd0201a1490b1f642154f9350b | [] | no_license | helloreuben/Django-Tutorial-1.11-mysite | 29c84b08cb35980b5ee93a8fbfbafc73107e1135 | e55f868ecfc6d9d7185daf65ca4abdb3e0fbf2b5 | refs/heads/master | 2021-05-01T12:08:30.212963 | 2018-02-10T22:17:13 | 2018-02-10T22:17:13 | 121,059,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
    # url(r'^',include('polls.urls')),  # NOTE(review): dead commented-out route; safe to delete
    url(r'^polls/', include('polls.urls')),  # delegate /polls/... to the polls app's URLconf
    url(r'^admin/', admin.site.urls),  # built-in Django admin site
]
"Sharlene@MacBookPro.home"
] | Sharlene@MacBookPro.home |
acc9cd8c5c022c9eae765a729d6837248be3ddfe | 06fcc38b7907e6e32bf7a20418c31b5346199b69 | /catalog/urls.py | 6dc828553e0ef925f1fe8d73a1893078588d42d2 | [] | no_license | kmoniker/Spark-Mowers | 2dac709b75c54d11ab386dd4163edc873723bac0 | 91881e3c68e098aceb10e4e826eea5f70cf34e13 | refs/heads/master | 2020-03-20T20:42:00.108449 | 2018-12-01T19:20:33 | 2018-12-01T19:20:33 | 137,701,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,448 | py | from django.urls import path
from . import views
urlpatterns = [
    # Public storefront pages.
    path('', views.index, name='index'),
    path('services/', views.ServiceTypeListView.as_view(), name='services'),
    path('contact-us/', views.contactus, name='contact-us'),
    path('about-us/', views.aboutus, name='about-us'),
    path('for-sale/', views.SaleListingListView.as_view(), name='for-sale'),
    # NOTE(review): no .as_view() here, unlike the sibling class-based views --
    # presumably a function-based view in views.py; confirm against catalog/views.
    path('for-sale/<int:pk>', views.SaleListingDetailView, name='salelisting-detail'),
    # Internal CRM pages: customers, their lawn mowers and service records.
    path('crm/', views.CustomerListView.as_view(), name='crm'),
    path('crm/customer/<int:pk>', views.CustomerDetailView.as_view(), name='view-customer'),
    path('crm/customer/edit/<int:pk>', views.CustomerUpdate.as_view(), name='customer-form'),
    path('crm/customer/create/', views.CustomerCreate.as_view(), name='create-customer'),
    path('crm/lawn-mower/<int:pk>', views.LawnMowerDetailView.as_view(), name='mower-detail'),
    path('crm/lawn-mower/edit/<int:pk>', views.LawnMowerUpdate.as_view(), name='mower-form'),
    path('crm/lawn-mower/create/', views.LawnMowerCreate.as_view(), name='create-mower'),
    # Same create view, but seeded with the owning customer's pk.
    path('crm/lawn-mower/add/<int:custfk>', views.LawnMowerCreate.as_view(), name='add-mower'),
    path('crm/lawn-mower/service/edit/<int:pk>', views.ServiceRecordUpdate.as_view(), name='edit-service'),
    path('crm/lawn-mower/service/add/<int:mowerfk>', views.ServiceRecordCreate.as_view(), name='add-service'),
    path('hours/', views.hours, name='hours'),
]
| [
"caseywchristensen@gmail.com"
] | caseywchristensen@gmail.com |
dccad00638cd7e0f33bd1ad3134664488f3e91a9 | c861d2aea4f7207373f95e42093b2baa15f102ef | /muswarmlogger/__main__.py | 654eb6c4bb714a89396b7642e38a69f8a454c295 | [] | no_license | big-data-europe/mu-swarm-logger-service | 04190ffa7f7fc1d6744220ad5756b6f38765d7ab | bf526d6ad2cf3296daf7d9d7f3438463f1c2dceb | refs/heads/master | 2021-01-19T03:48:45.619298 | 2017-11-23T08:53:58 | 2017-11-23T08:53:58 | 84,416,441 | 1 | 3 | null | 2017-11-23T08:53:59 | 2017-03-09T08:20:01 | Python | UTF-8 | Python | false | false | 390 | py | import asyncio
from muswarmlogger.main import run
# Drive the logger's main coroutine to completion on the default event
# loop, making sure the task is cancelled and awaited before the loop
# is closed.
loop = asyncio.get_event_loop()
task = loop.create_task(run())
try:
    loop.run_until_complete(task)
except (SystemExit, KeyboardInterrupt):
    # Treat an explicit exit / Ctrl+C as a clean shutdown.
    exit(0)
finally:
    if not task.done():
        # Cancel the still-pending task and let it unwind fully before
        # the loop is torn down.
        task.cancel()
        try:
            loop.run_until_complete(task)
        except asyncio.CancelledError:
            pass
    loop.close()
| [
"cecile.tonglet@gmail.com"
] | cecile.tonglet@gmail.com |
5c633f8440a2ae0e0fd1de149e56467ea186482b | 27c29ffc06c57e63ae39b5ffefd5b60258b2e454 | /P2P-Decentralized-Network/tracker.py | eb3537fdd71d1a8ac90558c00aeb147beb3c2227 | [] | no_license | yogeskc/P2P-Transmitter | 621654d02aa92e4762c5a0d4bd470fe67329a0c0 | 817a7b9b2432a638692f39ff53ff1d14d962ee24 | refs/heads/master | 2023-07-15T19:14:57.221940 | 2021-08-15T09:59:16 | 2021-08-15T09:59:16 | 396,293,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,701 | py | from server import Server
from torrent import Torrent
import socket
import bencodepy
import threading
class Tracker:
    """UDP-broadcast tracker that maintains a tiny DHT for one torrent.

    The DHT maps the torrent's info hash to the set of known peers:
    ``{info_hash: {node_id: "ip/port"}}``.  Peers announce themselves via
    UDP broadcast on ``dht_port``; this tracker answers every announce
    with its current DHT so the sender can discover the swarm.
    """

    def __init__(self, server, torrent, uuid, announce=True, dht_port=6000):
        self.DHT_PORT = dht_port
        self.server = server
        self.torrent = Torrent(torrent)
        self.torrent_info_hash = self._get_torrent_info_hash()
        self._is_announce = announce
        # One UDP socket used both to broadcast and to listen for
        # announces from other peers on the shared DHT port.
        self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.udp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.udp_socket.bind(("", self.DHT_PORT))
        # Lazily created socket for direct (unicast) replies.
        self.non_broadcast_socket = None
        self.peer_id = {"info_hash": self.torrent_info_hash, "node_id": uuid,
                        "server_ip": self.server.ip_address, "server_port": self.server.port}
        # Seed the DHT with ourselves as the only known peer of this swarm.
        self._DHT = {str(self.torrent_info_hash): {str(uuid): str(self.server.ip_address + "/" + self.server.port)}}

    def _get_torrent_info_hash(self):
        """Return the SHA1 info hash of the torrent's info section."""
        return self.torrent.info_hash()

    def add_peer_to_swarm(self, peer_id, peer_ip, peer_port):
        """Register a newly connected peer in the swarm of its torrent.

        :param peer_id: dict with at least ``info_hash`` and ``node_id``
        :param peer_ip: IP address of the peer's server
        :param peer_port: port of the peer's server
        """
        self._DHT[peer_id["info_hash"]][peer_id["node_id"]] = str(peer_ip + "/" + peer_port)

    def remove_peer_from_swarm(self, peer_id):
        """Drop a peer from its swarm when it leaves the network.

        Tolerates peers that disconnected abruptly (or were never
        registered): a missing swarm or node is ignored instead of
        raising KeyError, as the original TODO requested.
        """
        swarm = self._DHT.get(peer_id["info_hash"])
        if swarm is not None:
            swarm.pop(peer_id["node_id"], None)

    def broadcast(self, message, self_broadcast_enable=False):
        """Broadcast a bencoded message to every peer on the DHT port."""
        try:
            encoded_message = self.encode(message)
            self.udp_socket.sendto(encoded_message, ("<broadcast>", self.DHT_PORT))
            print("Broadcasting.....")
        except socket.error as error:
            print(error)

    def send_udp_message(self, message, ip, port):
        """Send a bencoded message directly to a single peer (unicast)."""
        try:
            self.non_broadcast_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.non_broadcast_socket.sendto(self.encode(message), (ip, port))
        except Exception as error:
            # Keep the original best-effort behavior, but show the cause.
            print("Error UDP Message:", error)

    def broadcast_listener(self):
        """Blocking loop that answers each announce with the current DHT."""
        try:
            print("Listening at DHT PORT: ", self.DHT_PORT)
            while True:
                raw_data, sender_ip_and_port = self.udp_socket.recvfrom(4096)
                if not raw_data:
                    continue
                data = self.decode(raw_data)
                ip_sender = sender_ip_and_port[0]
                # BUG FIX: the sender's port is the second element of the
                # (ip, port) address tuple; the original read the IP twice.
                port_sender = sender_ip_and_port[1]
                # bencoded dicts may decode with byte-string keys, so look
                # for both spellings; an absent flag means "announce", and
                # a plain lookup would have raised KeyError and killed the
                # listener thread.
                if data.get("disconnect") or data.get(b"disconnect"):
                    self.remove_peer_from_swarm(data)
                else:
                    # Reply with our DHT so the announcer learns the swarm.
                    self.send_udp_message(self._DHT, ip_sender, port_sender)
                print("data recieved by sender", data, ip_sender, port_sender)
        except Exception as error:
            print("Error listening for Broadcast DHT PORT:", error)

    def encode(self, message):
        """
        bencodes a message
        :param message: a dictionary representing the message
        :return: the bencoded message
        """
        return bencodepy.encode(message)

    def decode(self, bencoded_message):
        """
        Decodes a bencoded message
        :param bencoded_message: the bencoded message
        :return: the original message
        """
        return bencodepy.decode(bencoded_message)

    def set_total_uploaded(self, peer_id):
        """
        TODO: sets the total data uploaded so far by the peer passed as a parameter
        :param peer_id:
        :return: VOID
        """
        pass  # not implemented yet

    def total_downloaded(self, peer_id):
        """
        TODO: sets the total data downloaded so far by the peer passed as a parameter
        :param peer_id:
        :return: VOID
        """
        pass  # not implemented yet

    def validate_torrent_info_hash(self, peer_torrent_info_hash):
        """Return True when another peer's info hash matches ours, i.e.
        both peers agree to share the same file."""
        return peer_torrent_info_hash == self.torrent_info_hash

    def run(self, peer_id, start_with_broadcast=True):
        """Start the tracker: spawn the listener thread and, optionally,
        announce ourselves to the network right away.

        :param peer_id: the announce payload (our identity dict)
        :param start_with_broadcast: broadcast immediately when True
        """
        if self._is_announce:
            threading.Thread(target=self.broadcast_listener).start()
            if start_with_broadcast:
                self.broadcast(peer_id, self_broadcast_enable=True)
        else:
            print("Error Tracker DHT Protocol")
"ycasey71@gmail.com"
] | ycasey71@gmail.com |
c632f2cc5d52b6e234ad5f4a3a9f929cdce8a71c | b98e52565574c1e270cef787db46db34be982aea | /srwr/normalizer.py | 1dfdc6a4140aa74a3a13465fab9582fc6f17c966 | [
"MIT"
] | permissive | feng-123/srwr | a4e8a254ce3f60dc324fac766ce9db8ff92f0cc0 | 025a4bf35aaa8c2f6885aa11cc274315ebb97fc3 | refs/heads/master | 2020-11-29T11:42:11.013641 | 2019-04-14T15:46:57 | 2019-04-14T15:46:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | import numpy as np
from scipy.sparse import csr_matrix, spdiags, find
def semi_row_normalize(A):
    '''
    Perform the semi row-normalization for given adjacency matrix

    inputs
        A: csr_matrix
            adjacency matrix of given graph

    outputs
        nAp: csr_matrix
            positive semi row-normalized adjacency matrix
        nAn: csr_matrix
            negative semi row-normalized adjacency matrix
    '''
    m, n = A.shape
    # row-wise sum, d is out-degree (sum of |weights|) for each node
    d = np.asarray(abs(A).sum(axis=1)).flatten()
    # Clamp zero degrees to 1 so dangling rows do not divide by zero.
    # BUG FIX: degrees are per-row quantities of length m, so the clamp
    # and the scaling matrix must use m; the original used n / (m, n),
    # which only worked because adjacency matrices happen to be square.
    d = np.maximum(d, np.ones(m))
    invd = 1.0 / d
    invD = spdiags(invd, 0, m, m)
    # Scale every row of A by 1/degree, then split by sign.
    snA = invD * A
    I, J, K = find(snA)
    pos = K > 0
    neg = K < 0
    nAp = csr_matrix((abs(K[pos]), (I[pos], J[pos])), shape=(m, n))
    nAn = csr_matrix((abs(K[neg]), (I[neg], J[neg])), shape=(m, n))
    return nAp, nAn
| [
"jinhongjung@snu.ac.kr"
] | jinhongjung@snu.ac.kr |
a683c8325a452af35737ced132f309b3faf98ed9 | e967ded85d4062380e7d5af249874c6e944be775 | /products/views.py | 3a2a52237d9712d27dbb66e57d631dd9743bb3fd | [] | no_license | russel-coder/Pyshop | a85865b41c1e97b508a9cb18313b66848981e9d3 | 8142d9b4a643cc8563531d542372d0c170647a09 | refs/heads/master | 2023-03-06T19:59:04.316230 | 2021-02-17T01:42:12 | 2021-02-17T01:42:12 | 339,567,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py |
from django.shortcuts import render
from .models import Product, New
def index(request):
    """Render the product catalogue landing page."""
    # Fetch every Product row and hand the queryset to the template.
    catalogue = Product.objects.all()
    return render(request, 'index.html', {'products': catalogue})
def new(request):
    """Render the "new arrivals" page."""
    # Use a local name that does not shadow this view function itself.
    new_items = New.objects.all()
    return render(request, 'new.html', {'new': new_items})
| [
"rassyxmk0240@gmail.com"
] | rassyxmk0240@gmail.com |
539d4f8e768214e82aa97dbd762103324402686f | cda47cf8f49427be28f02d7a6aa0d2b43a41b5a2 | /MainFile.py | 393d165af89bbe1aa7e7e90f41d844b341a5e053 | [] | no_license | WIA2005-Algorithm/Delivery-GENIX-Project---Algo | d1264116b6eae653f4c73da615878d314f1bc0f9 | 37efc0977b8bbe099f1f97c93214c5afda0f272b | refs/heads/main | 2023-06-04T14:20:15.256797 | 2021-06-20T14:04:39 | 2021-06-20T14:04:39 | 359,398,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,511 | py | import collections
import time
import RawData
from Algorithms import NormaliseDataRanking
from ConclusionPlot import PlotSentimentConclusion
from CusHubsMap import HubDeliveryMap, PreProcess
from RawData import CourierCompanies, CustomerData, Articles
from Sentimental_Analysis import AnalyseArticles, AnalyseWordsCategories, Conclusion
from Matplot import plotBarGraphs
from prettytable import PrettyTable
def PrettyPrint(FieldNames, iterableRows, keys):
    """Build a PrettyTable whose columns are FieldNames and whose cells
    come from applying each extractor in `keys` to every row.

    Returns the populated table, or an error string when the number of
    column names does not match the number of extractors.
    """
    if len(FieldNames) != len(keys):
        return "Not Sufficient Keys to Operate"
    table = PrettyTable()
    table.field_names = FieldNames
    for record in iterableRows:
        table.add_row([extract(record) for extract in keys])
    return table
def PrintCustomer_Hubs():
    """Print the customer-requirements table and the available-hubs table,
    both built from the module-level raw data."""
    print("\nFirst of all let us analysis our customers & their requirements:")
    print(PrettyPrint(['Customer', 'Origin Location', 'Destination Location'], CustomerData.items(),
                      [lambda x: 'Customer ' + x[0], lambda x: x[1]['Origin']['name'],
                       lambda x: x[1]['Destination']['name']]))
    print("\nFollowing are the available Hubs: ")
    print(PrettyPrint(['Hub', 'Location'], CourierCompanies.items(), [lambda x: x[0], lambda x: x[1]['name']]))
def AddDirectDistanceMap():
    """Print each customer's direct origin-to-destination distance and
    show the interactive map view."""
    print("Please wait while the resources load...APIs do take time sometime due to internet connection problems...")
    PreProcess()
    H = HubDeliveryMap()
    print("\nFollowing is the customer direct distance:- ")
    # Presumably fills CustomerData[...]['DirectDistance'], which the table
    # below reads -- TODO confirm against CusHubsMap.
    H.MarkLeastDistantPath()
    print(PrettyPrint(['Customer', 'Origin', 'Destination', 'Distance (Meters)', 'Distance (KM)'],
                      CustomerData.items(),
                      [lambda x: 'Customer ' + x[0], lambda x: x[1]['Origin']['name'],
                       lambda x: x[1]['Destination']['name'], lambda x: str(x[1]['DirectDistance']['value']) + ' m',
                       lambda x: x[1]['DirectDistance']['text']]
                      ))
    print("""
    Alright, Time for Some Visuals... Let's Goo
    Hint: Click on the symbols to view more details
    """)
    input("Press enter to visualize it...")
    print(H)
    input("Press enter to continue...")
def AvailableOptions():
    """Print every customer's per-hub route options, the distance-based
    recommendations, and show them on a second map; then kick off the
    article analysis used later for the sentiment ranking."""
    for Customer, CusDetails in CustomerData.items():
        print(f"\nCustomer {Customer} has these available options:- ")
        print(PrettyPrint(['Hub', 'Distance through Hub', 'Recommended Choice'], CusDetails['RouteRank'],
                          [lambda x: x['Hub'], lambda x: str(x['DistanceTravelled']) + ' Km',
                           lambda x: x['Recommended']]))
    H2 = HubDeliveryMap(False) # I am going to mark route through hub
    H2.MarkRoutesHubs()
    print("\nBelow are the recomended choices based on the distance:-")
    print(PrettyPrint(['Customer', 'Origin', 'Destination', 'Hub Recommended', 'Distance (Km)'],
                      CustomerData.items(),
                      [lambda x: 'Customer ' + x[0], lambda x: x[1]['Origin']['name'],
                       lambda x: x[1]['Destination']['name'], lambda x: x[1]['route']['Hub'],
                       lambda x: str(x[1]['route']['DistanceTravelled']) + ' Km']
                      ))
    print("""
            *** Let's Have a Look at it on the Map ***
    """)
    input("Press enter to visualize it...")
    print(H2)
    AnalyseArticles()
    AnalyseWordsCategories()
    input("Press enter to continue...")
def FrequencyAnalysis():
    """Print the 40 most frequent words of every hub's review article."""
    print("\nHere's the frequency count for top 40 words in each article: - \n")
    for Name, file in Articles.items():
        print(f"Review Word Frequency for Hub {Name}")
        print(PrettyPrint(['word', 'Frequncy'], collections.Counter(file["wordFrequency"]).most_common(40),
                          [lambda x: x[0], lambda x: x[1]]))
def SentimentalPrint():
    """Publish the per-hub sentiment ranking into RawData.RankedSentiments,
    print it, and show the concluding sentiment plot."""
    print(
        "After analysing the words for its count, We rank them based on their positive & negative review.\nHere are the total results :-->")
    RawData.RankedSentiments = []
    # Conclusion() yields {hub: (value, rank, recommended)}; value <= 0 is
    # treated as a positive review in the table below.
    for name, rank in Conclusion().items():
        RawData.RankedSentiments.append({'Hub': name, 'rank': rank[1], 'value': rank[0], 'Recommended': rank[2]})
    print(PrettyPrint(['Hub', 'Rank', 'Review', 'Recomended'],
                      RawData.RankedSentiments,
                      [lambda x: x['Hub'], lambda x: x['rank'], lambda x: 'POSITIVE' if x['value'] <= 0 else 'NEGATIVE',
                       lambda x: x['Recommended']]
                      ))
    print("Enter to initialise concluding graph...")
    PlotSentimentConclusion()
    input('Enter to continue..')
def FinalConclusion():
    """Combine the distance ranking and the sentiment ranking into a final
    per-customer recommendation and print the summary tables.

    Side effect: for every customer, the previously chosen route is moved
    to `prev_route` and `route` is rebound to the top combined choice.
    """
    print("Based on the distance Analysis & Sentimental Analysis,\nFollowing is the data otained:-")
    for customer, detail in CustomerData.items():
        print(f"Summary Table for Customer - {customer}")
        Final = NormaliseDataRanking(detail['RouteRank'], RawData.RankedSentiments, lambda x: x['Hub'],
                                     lambda x: x['DistanceTravelled'], lambda x: x['value'])
        # Keep the old (distance-only) pick for the comparison table below.
        detail['prev_route'], detail['route'] = detail['route'], Final[0]
        print(PrettyPrint(
            ['Hub', 'Distance Normalised', 'Reviews Normalised', 'Final Conclusion', 'Rank', 'Recomendation based'],
            Final,
            [lambda x: x['Hub'], lambda x: "{:.2f}".format(x['FinalDetails'][0]),
             lambda x: "{:.2f}".format(x['FinalDetails'][1]),
             lambda x: "{:.2f}".format(x['FinalDetails'][2]), lambda x: x['FinalDetails'][3],
             lambda x: x['FinalDetails'][4]]
        ))
    print("""
        Below is the Customer Summary Table to choose the best hub
        Distance is given weight 2, while reviews have been given 1
    """)
    print(PrettyPrint(
        ['Customer', 'Origin', 'Destination', 'Direct Distance', 'Hub (Distance Based)', 'Hub (Review Based)',
         'Hub (Final Recomendation)'],
        CustomerData.items(),
        [lambda x: 'Customer ' + x[0],
         lambda x: x[1]['Origin']['name'],
         lambda x: x[1]['Destination']['name'],
         lambda x: x[1]['DirectDistance']['text'],
         lambda x: x[1]['prev_route']['Hub'],
         lambda x: RawData.RankedSentiments[0]['Hub'],
         lambda x: x[1]['route']['Hub']]
    ))
print("""
\t\t\t*** WELCOME TO HUBS DELIVERY SERVICE APPLICATION ***
\t\t\tThe best customer service in town
Our Mission is to provide the best possible hub service to choose from so your parcel reach faster & in good health.
Well, don't believe it?
Let me take you through our application... \nPlease wait...\nDrum roll in 3..2..1
""")
PrintCustomer_Hubs()
time.sleep(1)
print("""
*** Okay, this all looks pretty complex to actually render out a solution. Let me simplify for you ***
Let us mark customer's origin as well as destination locations on map.
We also will be marking Hub locations for future Use.
""")
AddDirectDistanceMap()
print("""
\t\t Welcome Back, Let's Continue
Oki, Time to decide the best hub out there...hihi
""")
AvailableOptions()
print("""
Alright! Just to make sure, Making your parcel reach faster is not our only mission, We also ensure, your parcel reaches healthy & in good quality conditions
Let's look at companies, having the highest review rates
For an example, We took 3 articles from Internet about each company & collected the data to analyse it
""")
FrequencyAnalysis()
input("\nPress Enter to Plot Visualisation of bar graphs: ")
plotBarGraphs()
input("\nPress Enter to Continue: ")
SentimentalPrint()
FinalConclusion()
HNew = HubDeliveryMap(False)
HNew.MarkRoutesHubs()
print(HNew)
| [
"kamal20012011@hotmail.com"
] | kamal20012011@hotmail.com |
61fa05259f809321d6cd70f6ac7be3186250d9e5 | b6ea1c40b000e3d341d4627b31a1130525cf9e2f | /weather_app/migrations/0003_remove_city_user.py | a71479cc62d2b82381b218c51e119970d6e6e153 | [] | no_license | SlavaGolovatskyu/DjangoWeatherApp | 4baddac7d121f5491a87d34a22089c93fb07b2a5 | 850680354dcfd0e493fcb06beeb71585b025cc79 | refs/heads/main | 2023-08-15T21:52:18.188868 | 2021-09-27T18:32:37 | 2021-09-27T18:32:37 | 410,944,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | # Generated by Django 3.1.3 on 2021-09-27 10:29
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: removes the `user` field from the City model.

    dependencies = [
        ('weather_app', '0002_auto_20210927_1249'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='city',
            name='user',
        ),
    ]
| [
"slavik.golovatskyu@gmail.com"
] | slavik.golovatskyu@gmail.com |
87275507fe32f56b680e0a0ffd71b2cde87de45a | 6c816f19d7f4a3d89abbb00eeaf43dd818ecc34f | /apps/project/migrations/0018_task_showname_202101138_1632.py | a8a015cf95e1c0442bf6cf61678282606e464030 | [] | no_license | reo-dev/bolt | 29ee6aa7cfc96bd50fa7a7dae07fbaafc2125e54 | d1a7859dd1ebe2f5b0e6e295047b620f5afdb92e | refs/heads/master | 2023-07-13T04:05:57.856278 | 2021-08-27T09:07:03 | 2021-08-27T09:07:03 | 382,195,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | # Generated by Django 3.0.8 on 2021-01-18 07:30
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds Task.showName, a 256-char text field that
    # defaults to 'default' (verbose name kept in Korean as authored).

    dependencies = [
        ('project', '0017_comment_update_20210113_1741'),
    ]

    operations = [
        migrations.AddField(
            model_name='task',
            name='showName',
            field=models.CharField(default='default', max_length=256, verbose_name='세부일정보여주기용'),
        ),
    ]
| [
"75593016+reo-dev@users.noreply.github.com"
] | 75593016+reo-dev@users.noreply.github.com |
76498d6976b71f444f3aff14a25fad78cbeb2624 | 02c461d1e8c4dc69e79b4c51f798620b6d190b08 | /analyzer/linux/analyzer.py | e8052c417b63b419df9f22f778322d389e2a6c3a | [] | no_license | ksmaheshkumar/cuckoo | bd63ecba062da7777ec1a2cc8368efefd66c55b3 | d8bdd9c696ac4e34a6a7bd189c1a8ccd0983c1c3 | refs/heads/master | 2021-01-14T11:41:02.298149 | 2015-07-15T14:06:44 | 2015-07-15T14:06:44 | 39,197,757 | 1 | 0 | null | 2015-07-16T13:16:19 | 2015-07-16T13:16:18 | null | UTF-8 | Python | false | false | 13,942 | py | # Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import sys
import socket
import struct
import random
import pkgutil
import logging
import tempfile
import xmlrpclib
import traceback
import time
import datetime
from lib.api.process import Process
from lib.common.abstracts import Package, Auxiliary
from lib.common.constants import PATHS
from lib.common.exceptions import CuckooError, CuckooPackageError
from lib.common.hashing import hash_file
from lib.common.results import upload_to_host
from lib.core.config import Config
from lib.core.startup import create_folders, init_logging
from modules import auxiliary
# Module-wide analyzer state shared by the helpers and the Analyzer class.
log = logging.getLogger()

PID = os.getpid()       # PID of the analyzer process itself
FILES_LIST = set()      # dropped-file paths noticed during the analysis
DUMPED_LIST = set()     # files already dumped (kept to avoid duplicates)
PROCESS_LIST = set()    # PIDs currently being monitored
SEEN_LIST = set()       # every PID ever registered, monitored or not
PPID = Process(pid=PID).get_parent_pid()  # parent of the analyzer process
def add_pids(pids):
    """Register one PID (or an iterable of PIDs) for monitoring.

    A PID joins PROCESS_LIST only the first time it is seen; SEEN_LIST
    remembers every PID ever offered so re-announced PIDs are ignored.
    """
    if not isinstance(pids, (tuple, list, set)):
        pids = [pids]

    for pid in pids:
        pid = int(pid)
        if pid not in SEEN_LIST:
            # Only log (and monitor) genuinely new processes; the original
            # logged "Added new process" even for PIDs it then skipped.
            log.info("Added new process to list with pid: %s", pid)
            PROCESS_LIST.add(pid)
        SEEN_LIST.add(pid)
def dump_files():
    """Dump all the dropped files."""
    # Placeholder: uploading is not implemented for the Linux analyzer yet,
    # so this only logs which paths would have been dumped.
    for file_path in FILES_LIST:
        log.info("PLS IMPLEMENT DUMP, want to dump %s", file_path)
class Analyzer:
    """Cuckoo Linux Analyzer.

    This class handles the initialization and execution of the analysis
    procedure, including the auxiliary modules and the analysis packages.
    """

    def __init__(self):
        # Both are populated by prepare(): the parsed analysis.conf and the
        # file path or URL that is being analyzed.
        self.config = None
        self.target = None

    def prepare(self):
        """Prepare env for analysis."""
        # Create the folders used for storing the results.
        create_folders()

        # Initialize logging.
        init_logging()

        # Parse the analysis configuration file generated by the agent.
        self.config = Config(cfg="analysis.conf")

        if self.config.get("clock", None):
            # Set virtual machine clock.
            clock = datetime.datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S")
            # Setting date and time.
            os.system("date -s \"{0}\"".format(clock.strftime("%y-%m-%d %H:%M:%S")))

        # We update the target according to its category. If it's a file, then
        # we store the path.
        if self.config.category == "file":
            self.target = os.path.join(tempfile.gettempdir(), self.config.file_name)
        # If it's a URL, well.. we store the URL.
        else:
            self.target = self.config.target

    def complete(self):
        """End analysis."""
        # Dump all the notified files.
        dump_files()

        # Hell yeah.
        log.info("Analysis completed.")

    def run(self):
        """Run analysis.
        @return: operation status.
        """
        self.prepare()

        log.debug("Starting analyzer from: %s", os.getcwd())
        log.debug("Storing results at: %s", PATHS["root"])

        # If no analysis package was specified at submission, we try to select
        # one automatically.
        if not self.config.package:
            log.debug("No analysis package specified, trying to detect "
                      "it automagically.")

            if self.config.category == "file":
                package = "generic"
            else:
                package = "wget"

            # If we weren't able to automatically determine the proper package,
            # we need to abort the analysis.
            if not package:
                raise CuckooError("No valid package available for file "
                                  "type: {0}".format(self.config.file_type))

            log.info("Automatically selected analysis package \"%s\"", package)
        # Otherwise just select the specified package.
        else:
            package = self.config.package

        # Generate the package path.
        package_name = "modules.packages.%s" % package

        # Try to import the analysis package.
        try:
            __import__(package_name, globals(), locals(), ["dummy"], -1)
        # If it fails, we need to abort the analysis.
        except ImportError:
            raise CuckooError("Unable to import package \"{0}\", does "
                              "not exist.".format(package_name))

        # Initialize the package parent abstract.
        Package()

        # Enumerate the abstract subclasses.
        try:
            package_class = Package.__subclasses__()[0]
        except IndexError as e:
            raise CuckooError("Unable to select package class "
                              "(package={0}): {1}".format(package_name, e))

        # Initialize the analysis package.
        pack = package_class(self.config.get_options())

        # Initialize Auxiliary modules
        Auxiliary()
        prefix = auxiliary.__name__ + "."
        for loader, name, ispkg in pkgutil.iter_modules(auxiliary.__path__, prefix):
            if ispkg:
                continue

            # Import the auxiliary module.
            try:
                __import__(name, globals(), locals(), ["dummy"], -1)
            except ImportError as e:
                log.warning("Unable to import the auxiliary module "
                            "\"%s\": %s", name, e)

        # Walk through the available auxiliary modules.
        # aux_avail holds every instantiated module; aux_enabled only the
        # ones whose start() call went through.
        aux_enabled, aux_avail = [], []
        for module in sorted(Auxiliary.__subclasses__(), key=lambda x: x.priority, reverse=True):
            # Try to start the auxiliary module.
            try:
                aux = module()
                aux_avail.append(aux)
                aux.start()
            except (NotImplementedError, AttributeError):
                log.warning("Auxiliary module %s was not implemented",
                            aux.__class__.__name__)
                continue
            except Exception as e:
                log.warning("Cannot execute auxiliary module %s: %s",
                            aux.__class__.__name__, e)
                continue
            finally:
                log.debug("Started auxiliary module %s",
                          aux.__class__.__name__)
                aux_enabled.append(aux)

        # Start analysis package. If for any reason, the execution of the
        # analysis package fails, we have to abort the analysis.
        try:
            pids = pack.start(self.target)
        except NotImplementedError:
            raise CuckooError("The package \"{0}\" doesn't contain a run "
                              "function.".format(package_name))
        except CuckooPackageError as e:
            raise CuckooError("The package \"{0}\" start function raised an "
                              "error: {1}".format(package_name, e))
        except Exception as e:
            raise CuckooError("The package \"{0}\" start function encountered "
                              "an unhandled exception: "
                              "{1}".format(package_name, e))

        # If the analysis package returned a list of process IDs, we add them
        # to the list of monitored processes and enable the process monitor.
        if pids:
            add_pids(pids)
            pid_check = True
        # If the package didn't return any process ID (for example in the case
        # where the package isn't enabling any behavioral analysis), we don't
        # enable the process monitor.
        else:
            log.info("No process IDs returned by the package, running "
                     "for the full timeout.")
            pid_check = False

        # Check in the options if the user toggled the timeout enforce. If so,
        # we need to override pid_check and disable process monitor.
        if self.config.enforce_timeout:
            log.info("Enabled timeout enforce, running for the full timeout.")
            pid_check = False

        # Main watch loop: one iteration per second until the timeout hits
        # or (when pid_check is on) every monitored process has exited.
        time_counter = 0

        while True:
            time_counter += 1
            if time_counter == int(self.config.timeout):
                log.info("Analysis timeout hit, terminating analysis.")
                break

            try:
                # If the process monitor is enabled we start checking whether
                # the monitored processes are still alive.
                if pid_check:
                    for pid in list(PROCESS_LIST):
                        if not Process(pid=pid).is_alive():
                            log.info("Process with pid %s has terminated", pid)
                            PROCESS_LIST.remove(pid)

                    # ask the package if it knows any new pids
                    add_pids(pack.get_pids())

                    # also ask the auxiliaries
                    for aux in aux_avail:
                        add_pids(aux.get_pids())

                    # If none of the monitored processes are still alive, we
                    # can terminate the analysis.
                    if not PROCESS_LIST:
                        log.info("Process list is empty, "
                                 "terminating analysis.")
                        break

                    # Update the list of monitored processes available to the
                    # analysis package. It could be used for internal
                    # operations within the module.
                    pack.set_pids(PROCESS_LIST)

                try:
                    # The analysis packages are provided with a function that
                    # is executed at every loop's iteration. If such function
                    # returns False, it means that it requested the analysis
                    # to be terminate.
                    if not pack.check():
                        log.info("The analysis package requested the "
                                 "termination of the analysis.")
                        break

                # If the check() function of the package raised some exception
                # we don't care, we can still proceed with the analysis but we
                # throw a warning.
                except Exception as e:
                    log.warning("The package \"%s\" check function raised "
                                "an exception: %s", package_name, e)
            except Exception as e:
                log.exception("The PID watching loop raised an exception: %s", e)
            finally:
                # Zzz.
                time.sleep(1)

        try:
            # Before shutting down the analysis, the package can perform some
            # final operations through the finish() function.
            pack.finish()
        except Exception as e:
            log.warning("The package \"%s\" finish function raised an "
                        "exception: %s", package_name, e)

        try:
            # Upload files the package created to package_files in the results folder
            package_files = pack.package_files()
            if package_files != None:
                for package in package_files:
                    upload_to_host(package[0], os.path.join("package_files", package[1]));
        except Exception as e:
            log.warning("The package \"%s\" package_files function raised an "
                        "exception: %s", package_name, e)

        # Terminate the Auxiliary modules.
        for aux in sorted(aux_enabled, key=lambda x: x.priority):
            try:
                aux.stop()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Cannot terminate auxiliary module %s: %s",
                            aux.__class__.__name__, e)

        if self.config.terminate_processes:
            # Try to terminate remaining active processes. We do this to make sure
            # that we clean up remaining open handles (sockets, files, etc.).
            log.info("Terminating remaining processes before shutdown.")

            for pid in PROCESS_LIST:
                proc = Process(pid=pid)
                if proc.is_alive():
                    try:
                        proc.terminate()
                    except:
                        continue

        # Run the finish callback of every available Auxiliary module.
        for aux in aux_avail:
            try:
                aux.finish()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Exception running finish callback of auxiliary "
                            "module %s: %s", aux.__class__.__name__, e)

        # Let's invoke the completion procedure.
        self.complete()

        return True
if __name__ == "__main__":
    success = False
    error = ""

    try:
        # Initialize the main analyzer class.
        analyzer = Analyzer()

        # Run it and wait for the response.
        success = analyzer.run()

    # This is not likely to happen.
    except KeyboardInterrupt:
        error = "Keyboard Interrupt"

    # If the analysis process encountered a critical error, it will raise a
    # CuckooError exception, which will force the termination of the analysis.
    # Notify the agent of the failure. Also catch unexpected exceptions.
    except Exception as e:
        # Store the error.
        error_exc = traceback.format_exc()
        error = str(e)

        # Just to be paranoid.
        if len(log.handlers):
            log.exception(error_exc)
        else:
            sys.stderr.write("{0}\n".format(error_exc))

    # Once the analysis is completed or terminated for any reason, we report
    # back to the agent, notifying that it can report back to the host.
    finally:
        # Establish connection with the agent XMLRPC server.
        server = xmlrpclib.Server("http://127.0.0.1:8000")
        server.complete(success, error, PATHS["root"])
| [
"ms@mwcollect.org"
] | ms@mwcollect.org |
e0963f0ca71eba28fd07f500981d49ffd28ca696 | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/533135_Synchronizaticlasses_using/recipe-533135.py | 30da0a5e1771de474b7d1fb2b5babee4505086ac | [
"MIT",
"Python-2.0"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 5,885 | py | # Synchronization classes using decorators. Provides synchronized, semaphore
# and event classes which provide transparent decorator patterns for
# Lock, BoundedSemaphore and Event objects in Python.
from threading import Thread, Lock, BoundedSemaphore, Event, currentThread
from time import sleep
from random import random
class synchronized(object):
""" Class enapsulating a lock and a function
allowing it to be used as a synchronizing
decorator making the wrapped function
thread-safe """
def __init__(self, *args):
self.lock = Lock()
def __call__(self, f):
def lockedfunc(*args, **kwargs):
try:
self.lock.acquire()
print 'Acquired lock=>',currentThread()
try:
return f(*args, **kwargs)
except Exception, e:
raise
finally:
self.lock.release()
print 'Released lock=>',currentThread()
return lockedfunc
class semaphore(object):
""" Class encapsulating a semaphore to limit
number of resources """
def __init__(self, *args):
self.sem = BoundedSemaphore(args[0])
def __call__(self, f):
def semfunc(*args, **kwargs):
try:
print 'Trying to acquire sem=>',currentThread()
self.sem.acquire()
print 'Acquired sem=>',currentThread()
try:
return f(*args, **kwargs)
except Exception, e:
raise
finally:
self.sem.release()
print 'Released sem=>',currentThread()
return semfunc
class event(object):
""" Class encapsulating an event object to control
sequential access to a resource """
def __init__(self, *args):
self.evt = Event()
self.evt.set()
def __call__(self, f):
def eventfunc(*args, **kwargs):
try:
print 'Waiting on event =>',currentThread()
self.evt.wait()
# First thread will clear the event and
# make others wait, once it is done with the
# job, it sets the event which wakes up
# another thread, which does the same thing...
# This provides sequential access to a
# resource...
self.evt.clear()
print 'Cleared event =>',currentThread()
try:
return f(*args, **kwargs)
except Exception, e:
raise
finally:
# Wake up another thread...
self.evt.set()
print 'Set event=>',currentThread()
return eventfunc
##############################################################################
# Test Code #
##############################################################################
# Demonstrating the synchronization classes...
# Use a global list
l=range(10)
def reset():
global l
l = range(10)
# Not thread-safe
def func1(begin, end):
for x in range(begin, end):
sleep(random()*0.5)
l.append(x)
# Thread-safe!
@synchronized()
def func2(begin, end):
for x in range(begin, end):
sleep(random()*0.5)
l.append(x)
# Limited access, thread-safe
class DBConnection(object):
""" A dummy db connection class """
MAX = 5
# We want to limit the number of DB connections to MAX
# at a given time
@semaphore(MAX)
def connect(self, host):
print "Connecting...",currentThread()
# Sleep for some time
sleep(3.0)
pass
# We want sequential access to this function
@event()
def connect2(self, host):
print "Connecting...",currentThread()
# Sleep for some time
sleep(3.0)
pass
class PrintMsg(object):
def startmsg(self):
print '%s started...' % self.__class__.__name__
def endmsg(self):
print '%s ended...' % self.__class__.__name__
class BaseThread(Thread, PrintMsg):
pass
class MyThread1(BaseThread):
def run(self):
self.startmsg()
func1(10, 20)
self.endmsg()
class MyThread2(BaseThread):
def run(self):
self.startmsg()
func1(20, 30)
self.endmsg()
class MyThread3(BaseThread):
def run(self):
self.startmsg()
func2(10, 20)
self.endmsg()
class MyThread4(BaseThread):
def run(self):
self.startmsg()
func2(20, 30)
self.endmsg()
class DBThread(BaseThread):
def run(self):
db = DBConnection()
db.connect('localhost')
class DBThread2(BaseThread):
def run(self):
db = DBConnection()
db.connect2('localhost')
print 'Starting the lock test...'
t1 = MyThread1()
t2 = MyThread2()
t1.start(); t2.start()
t1.join(); t2.join()
# List will not have elements in order
print l
reset()
t3 = MyThread3()
t4 = MyThread4()
t3.start(); t4.start()
t3.join(); t4.join()
# List will have elements in order
print l
sleep(3.0)
print 'Starting the sem test...'
# Sem test, init 8 threads and call connect
# on the DBConnection object...
for x in range(8):
t = DBThread()
t.start()#
sleep(3.0)
print 'Starting event test..'
# Event test, init 8 threads and
# increment counter
for x in range(8):
t = DBThread2()
t.start()
print 'All tests completed.'
###############################################################################
# End of test code #
###############################################################################
| [
"betty@qburst.com"
] | betty@qburst.com |
1e64071508337af2ff756a094edaaaac397e1533 | 9d49c31d0200377c70303620036ecf0353455212 | /simplechat/__init__.py | 8faabc2f3ccea9d6dd724d4a8f057d7e6fb03e83 | [
"Apache-2.0"
] | permissive | drowolath/simplechat | abf8e59384aace20eab8dffedaf3555077f336bf | 8ae66e54ec12c022604463d4b1d0e2e0802eb6ee | refs/heads/master | 2021-01-19T23:44:16.360585 | 2019-05-08T15:02:04 | 2019-05-08T15:02:04 | 89,024,059 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | #!/usr/bin/env python
from core import *
# EOF
| [
"thomas.ayih-akakpo@gulfsat.mg"
] | thomas.ayih-akakpo@gulfsat.mg |
749da7ed5446bf3545755d8c752163c4a1e78d6f | 99c9f2fc9e91a18d770d6975a14a994994e1d487 | /main_model/src/const.py | ca959d7b10edd49c2109ebd148d733136341d8b0 | [] | no_license | kato-mahiro/arnold13 | ba37c4485199d0d9c0e6ca39d54ed818cf9a3760 | 0c3c5ff6463005909dcbcde26b5550187a54cde1 | refs/heads/master | 2022-02-01T02:08:31.321216 | 2019-04-25T07:15:21 | 2019-04-25T07:15:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | FIELD_RANGE = 20
VIEW_RANGE = 7
PREY_NUM = 40
INPUT_NUMBER = 39
ECHO_NUMBER = 39
HIDDEN_NUMBER = 16
OUTPUT_NUMBER = 4
MODURATORY_NUMBER = 6
| [
"katomasahiro10@gmail.com"
] | katomasahiro10@gmail.com |
4c5e166956aaa4f0b3bd3edb11e39eda37916b5f | 37a65dcdf94883df26d7fdbe93258fc7c7357f95 | /ops/settings.py | a7f8aeeb5e4fde4e6166afa000ffb6412d01f585 | [
"Unlicense"
] | permissive | rfukui/ops | 05ba7a9954e177e19d007b00b252de76f311c7e6 | 7e3324bd3d70c8da687ab4fca9a031a1c92b0ec1 | refs/heads/master | 2020-12-30T15:08:57.172620 | 2017-05-12T20:09:49 | 2017-05-12T20:09:49 | 91,112,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,085 | py | """
Django settings for ops project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'og$-6aby)+r(iqh1zy7r4k*^!26#g2l2ue4nggtx-7+2ubx%d='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ops.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ops.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
d802d7fc075b0b80d1629437c51bca824f4f8c7f | 25b9f27e2037d42b76699c76e3234e0be85463ca | /test1.py | 8242f6caa184cf87a7898fb2d3d3a0720db0e440 | [] | no_license | saputromfs/saputromfs | eaf9469bc8c632f1892838a6ee490c7a287a20bb | d782c8307b48236416c654da06f310db28f0313c | refs/heads/main | 2023-08-15T11:25:18.636782 | 2021-10-07T07:04:25 | 2021-10-07T07:04:25 | 414,475,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | a = 10
b = 5
c = a-b
print("Hasilnya adalah : ",c)
| [
"noreply@github.com"
] | saputromfs.noreply@github.com |
8de065b2041c6cc6c1b890e344fc958fbea1c0d7 | 2a9b0d4cb6f4676cc3d53105890a64767cc655fd | /src/test/__init__.py | 7df92765099e27a96abd3b8a91c95d601308e48c | [] | no_license | adrielyeung/job-ad-compare | dce11eb147ac7ea0abfa7ddd6647d8874bed1894 | ef4c7d4dc531705c71c075355b2890c59e7717e9 | refs/heads/main | 2023-03-29T23:08:07.403559 | 2021-04-07T13:25:50 | 2021-04-07T13:25:50 | 313,361,308 | 1 | 0 | null | 2021-04-07T13:21:25 | 2020-11-16T16:25:22 | Jupyter Notebook | UTF-8 | Python | false | false | 93 | py | # -*- coding: utf-8 -*-
ROOT_PATH = 'C:\\Users\\Adriel\\Documents\\PythonScripts\\Scraping\\' | [
"tsunyeung1997@gmail.com"
] | tsunyeung1997@gmail.com |
15b164e0717c4d81028e32a8bb630dfe1d31819a | 8aacef4d656d5cc133931947ba8ae06f04e27aa8 | /dit/other/__init__.py | 4bb7feedc0c504097593ca78f47ddcd38d154e01 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | kokokostation/dit | 6468c4f7e7bc5355f929111ef62e760706ee2884 | f4ed11d68b2667109c7eefe1cdcf598e38f5d6e3 | refs/heads/master | 2021-05-05T00:54:03.278706 | 2018-01-30T17:16:36 | 2018-01-30T17:16:36 | 119,562,080 | 0 | 0 | null | 2018-01-30T16:22:27 | 2018-01-30T16:22:27 | null | UTF-8 | Python | false | false | 317 | py | """
Esoteric measures of information, typically fairly divorced from Shannon's
measures.
"""
from .cumulative_residual_entropy import *
from .disequilibrium import *
from .extropy import extropy
from .perplexity import perplexity
from .renyi_entropy import renyi_entropy
from .tsallis_entropy import tsallis_entropy
| [
"ryangregoryjames@gmail.com"
] | ryangregoryjames@gmail.com |
faba04f593e203adb5e7da15b1781b7a3b4624d5 | 89b53495ee194003087a385993a599b3ba0dbcd1 | /src/participants/models.py | c2f5b2249148d0e8ee53f861f0590bb4e51067f2 | [
"MIT"
] | permissive | leopiresgoss/django-etest | 1b4657184a63ac1342965c7a4adb6daef135a479 | 55ec087999b53fc734c32e6b6a6d73885f224af5 | refs/heads/main | 2023-07-13T04:30:39.966382 | 2021-08-19T00:33:36 | 2021-08-19T00:33:36 | 396,576,436 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | from django.db import models
from tests.models import Test, Question, Option
# Create your models here.
class Participant(models.Model):
name = models.CharField(max_length=120)
email = models.EmailField(max_length=254)
test = models.ForeignKey(Test, on_delete=models.CASCADE)
finished = models.BooleanField(default=False)
score = models.FloatField(default=0.0)
def __str__(self):
return str(self.name)
class Participant_answer(models.Model):
test = models.ForeignKey(Test, on_delete=models.CASCADE)
participant = models.ForeignKey(Participant, on_delete=models.CASCADE)
question = models.ForeignKey(Question, on_delete=models.CASCADE)
answer = models.ForeignKey(Option, on_delete=models.CASCADE)
def __str__(self):
return f"{self.participant} answer of {self.question}"
| [
"61767582+leopiresgoss@users.noreply.github.com"
] | 61767582+leopiresgoss@users.noreply.github.com |
dcb373da6e790d29bcdee050c566e80757b0b0aa | 3515df48b3bc5a4049f2d23f386a18573048f3ef | /Harvard-CS50x/pset6/ceaser.py | 5516ad5f12bf55149b47707858dda0b3f8d96382 | [
"Apache-2.0"
] | permissive | Mudasirrr/Courses- | 5fe0662716e6afd1d1a228b6133d0dee432c9723 | fa566235d6b9751f73c69d0d46351a14fcdeb585 | refs/heads/master | 2021-05-19T03:51:02.033688 | 2020-03-29T16:17:32 | 2020-03-29T16:17:32 | 251,515,774 | 2 | 0 | Apache-2.0 | 2020-03-31T06:11:16 | 2020-03-31T06:11:15 | null | UTF-8 | Python | false | false | 1,076 | py | import sys
import cs50
def main():
if len(sys.argv) != 2:
print("missing command-line argument")
exit(0)
if sys.argv[1].isdigit() == False:
print("only digits allowed!")
exit(1)
key = int(sys.argv[1])
#key = sys.argv[1]
#keylen = len(sys.argv[1])
print("plainext: ", end="")
txt = cs50.get_string()
#print ("ciphertext: ", end="")
#jey = 0
ciphertext=[]
for i in range(len(txt)):
#j = ord(key[jey%keylen])
c = ord(txt[i])
if (65<=c<=90) == True:
c = ((c - 65 + key) % 26 ) + 65
ciphertext.append(chr(c))
#print(chr(c),end ="")
elif (97<=c<=122) == True:
c = ((c - 97 + key) % 26 ) + 97
ciphertext.append(chr(c))
#print(chr(c),end ="")
else:
#print(txt[i],end="")
ciphertext.append(chr(c))
#jey+=1
#print()
print("ciphertext: ", end="")
print("".join(ciphertext))
if __name__ == "__main__":
main() | [
"noreply@github.com"
] | Mudasirrr.noreply@github.com |
6e75f6dee4b2c27f33f3de6ef61836f7bb3a43e7 | 1f0026465e5f1c1e81def763ac508d8f9b353adf | /dictionaryPractice.py | e1a23324604ea5fd4c9c8fec51e9240d2f621410 | [] | no_license | ryanlott168/Scrabble-Game | 383c9f5d644fc36777cb972b8246c276cbb3cc6c | c42f2f78f855e93f10eec4e2146cf2e43f703151 | refs/heads/master | 2020-04-08T10:26:42.783732 | 2018-11-27T03:16:13 | 2018-11-27T03:16:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | letters = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
points = [1, 3, 3, 2, 1, 4, 2, 4, 1, 8, 5, 1, 3, 4, 1, 3, 10, 1, 1, 1, 1, 4, 4, 8, 4, 10]
letter_to_points = {key:value for key, value in zip(letters, points)}
letter_to_points[" "] = 0
def score_word(word):
point_total = 0
word = word.upper()
for letter in word:
point_total += letter_to_points.get(letter, 0)
return point_total
player_to_words ={"player1": ["blue", "tennis", "exit"], "wordNerd":["earth", "eyes", "machine"], "lexiCon":["eraser", "belly","husky"], "profReader":["zap", "coma", "period"]}
def play_word(player, word):
player_to_words[player] = (player_to_words[player])+[word]
def update_point_totals():
player_to_points = {}
for player, words in player_to_words.items():
player_points = 0
for word in words:
player_points += score_word(word)
player_to_points[player] = player_points
print(player_to_points)
update_point_totals()
play_word('lexiCon', 'corn')
play_word('player1', 'corn')
update_point_totals()
| [
"toranlyt@gmail.com"
] | toranlyt@gmail.com |
d12d85280708831eabbca47a35ae966c4f133fdd | 040477088d73d786411fe8e051163899eebdf1e2 | /tests/test_cookie_error.py | 03a84d42cf2a4575a97fde3d22589744f4381493 | [
"BSD-3-Clause"
] | permissive | Tessares/libconvert | e9f63767bcfc454667cd95d05087d1540d78b6b2 | 948053cdfe31ce4b3f5543663f2ce22bdfddb1c8 | refs/heads/master | 2022-03-10T05:40:16.687136 | 2022-01-11T19:15:23 | 2022-01-11T19:15:23 | 172,021,690 | 7 | 9 | BSD-3-Clause | 2022-06-07T07:49:46 | 2019-02-22T08:04:34 | C | UTF-8 | Python | false | false | 576 | py | #!/usr/bin/env python3
from converter_mock import *
from test_lib import *
class TestCookieError(TestInstance):
def run(self):
converter = Convert(tlvs=[ConvertTLV_Error(error_code=3)])
print(converter.build())
cookie = self.get_cookie()
class MockConverterCookieError(MockConverter):
actions = [RecvSyn(), SendSynAckCheckCookie(converter.build(), cookie), Wait(1), SendPkt(flags='R')]
MockConverterCookieError()
def validate(self):
self.assert_log_contains("received TLV error: 3")
TestCookieError()
| [
"pol.nicolai@tessares.net"
] | pol.nicolai@tessares.net |
074bcf9c2af5b96e81d964e97fe897ad73bd83d7 | 295efbd5b466d246ff51c3849cea5ff637df6211 | /read102715.py | ce7e0b76309b8309d84155630c77a7f0b1ef61a1 | [] | no_license | allenjianzhe/mip_heuristic | 75b15ce9d9735fdc0f5381bfef8cded4d5348a12 | 1365b63b2b3a3814b271e3bb95fb6671486e84fc | refs/heads/master | 2020-09-26T08:41:57.244645 | 2016-10-12T22:43:09 | 2016-10-12T22:43:09 | 66,394,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,219 | py | import csv
import sys
import math
def getArcC(nodes, modes, departure):
arc_C={}
for i in nodes:
if i != 4:
for k in modes:
for s in departure:
arc_C[i,1,s]=200
arc_C[i,2,s]=200
arc_C[i,3,s]=200
arc_C[i,4,s]=200
return arc_C
def getDistance():
Distance ={}
Distance[1]=413
Distance[2]=485
Distance[3]=695
Distance[4]=741
return Distance
def getTransTime(nodes,modes,Distance):
trans_time={}
for i in nodes:
if i==4:
continue
else:
for k in modes:
trans_time[i,1]=round(Distance[i]/60.0)
trans_time[i,2]=round(Distance[i]/100.0)
trans_time[i,3]=round(Distance[i]/250.0)
trans_time[i,4]=round(Distance[i]/500.0)
return trans_time
def getArcTransCost(customer,nodes,modes,Distance):
arc_trans_cost={}
for row in customer:
for i in nodes:
if i==4:
continue
else:
for k in modes:
arc_trans_cost[int(row[0]),i,1]=300*int(row[2])*Distance[i]/1000.0
arc_trans_cost[int(row[0]),i,2]=500*int(row[2])*Distance[i]/1000.0
arc_trans_cost[int(row[0]),i,3]=1500*int(row[2])*Distance[i]/1000.0
arc_trans_cost[int(row[0]),i,4]=2000*int(row[2])*Distance[i]/1000.0
return arc_trans_cost
def getdT():
dT={}
dT[1,1,1]=1
dT[1,1,2]=2
dT[1,2,1]=1
dT[1,2,2]=2
dT[1,3,1]=1
dT[1,3,2]=3
dT[1,4,1]=1
dT[1,4,2]=3
dT[2,1,1]=9
dT[2,1,2]=10
dT[2,2,1]=9
dT[2,2,2]=10
dT[2,3,1]=5
dT[2,3,2]=7
dT[2,4,1]=4
dT[2,4,2]=6
dT[3,1,1]=18
dT[3,1,2]=20
dT[3,2,1]=15
dT[3,2,2]=20
dT[3,3,1]=9
dT[3,3,2]=11
dT[3,4,1]=8
dT[3,4,2]=10
return dT
o=open(r'C:\Users\MacBook Air\Desktop\my research\cus_5.csv','r')
reader= csv.reader(o)
o.seek(0)
customer=[]
DD={}
for row in reader:
customer.append(row)
for row in customer:
DD[int(row[0])]=int(row[3])
| [
"allenjianzhe@yahoo.com"
] | allenjianzhe@yahoo.com |
cefd3787c7ef134f91fb967caf03cf84fb516757 | f9d6b975ba48ce4c390e9b6713e5f0080ba42231 | /django_app/settings.py | cde0895d281c0f0a1b740205b2d4db18505e04c6 | [] | no_license | nattesharan/django_app | fae16a6d4aba447bfe7bfc34131276c8d6cebfdf | b25041efd86bcc9b803be937ac23e575cacf6c5e | refs/heads/master | 2022-12-03T01:50:20.117259 | 2020-08-16T18:46:39 | 2020-08-16T18:46:39 | 175,052,229 | 4 | 2 | null | 2022-11-22T03:45:15 | 2019-03-11T17:38:46 | Python | UTF-8 | Python | false | false | 8,142 | py | """
Django settings for django_app project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import structlog
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '173(5poa^0-c0e2u6y&9ztf&7f3^nbkv3+an1l%#5jhejtaqic'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'haystack',
'accounts',
'home',
'api',
'extra_topics',
'rest_framework_swagger',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
'rest_framework',
'rest_framework.authtoken',
'django_extensions',
'django_filters',
'django_celery_beat',
'django_celery_results',
'graphene_django'
]
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django_app.middleware.LoginRequiredMiddleware'
]
ROOT_URLCONF = 'django_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'django_app',
'HOST': 'db' if 'USING_DOCKER' in os.environ else '127.0.0.1',
'PORT': '3306',
'USER': 'root',
'PASSWORD': 'Up123pU'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'django_app/media')
LOGIN_URL = '/account/login/'
LOGIN_REDIRECT_URL = '/home/'
LOGIN_EXEMPT_URLS = (
r'^pubsub/',
r'^account/register/$',
r'^account/reset-password/$',
r'^account/reset-password/done/$',
r'^account/reset-password/confirm/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$',
r'^account/reset-password/complete/$',
r'^home/searchview/$'
)
# EMAIL_USE_TLS = True
# EMAIL_HOST = 'smtp.gmail.com'
# EMAIL_PORT = 25
# EMAIL_HOST_USER = 'example@gmail.com'
# EMAIL_HOST_PASSWORD = 'app_password'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter':'standard'
},
'logstash': {
'class': 'logstash.TCPLogstashHandler',
'host': '35.240.149.42',
'level': 'DEBUG',
'port': 5959, # Default value: 5959
'version': 1, # Version of logstash event schema. Default value: 0 (for backward compatibility of the library)
'message_type': 'logstash', # 'type' field in logstash message. Default value: 'logstash'.
'fqdn': False, # Fully qualified domain name. Default value: false.
'tags': ['app']
}
},
'loggers': {
'': {
'handlers': ['console', 'logstash'],
'level': 'DEBUG',
'propagate': True,
},
},
}
def show_toolbar(request):
return DEBUG
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TOOLBAR_CALLBACK': show_toolbar
}
# django rest framework by default considers SessionAuthentication and session auth expects csrf token so change
# the default auth class
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
)
}
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_app.backends.EmailAuthBackend',
)
# for more info
# settings for jwt
# SIMPLE_JWT = {
# 'ACCESS_TOKEN_LIFETIME': timedelta(minutes=5),
# 'REFRESH_TOKEN_LIFETIME': timedelta(days=1),
# 'ROTATE_REFRESH_TOKENS': False,
# 'BLACKLIST_AFTER_ROTATION': True,
# 'ALGORITHM': 'HS256',
# 'SIGNING_KEY': settings.SECRET_KEY,
# 'VERIFYING_KEY': None,
# 'AUTH_HEADER_TYPES': ('Bearer',),
# 'USER_ID_FIELD': 'id',
# 'USER_ID_CLAIM': 'user_id',
# 'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),
# 'TOKEN_TYPE_CLAIM': 'token_type',
# 'JTI_CLAIM': 'jti',
# 'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',
# 'SLIDING_TOKEN_LIFETIME': timedelta(minutes=5),
# 'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=1),
# }
# Pub sub settings
GOOGLE_APPLICATION_CREDENTIALS=os.path.join(BASE_DIR, 'django_app/key.json')
PROJECT = 'personal-243717'
TOPIC = 'hello_topic'
SUBSCRIBER_1='subscription1'
SUBSCRIBER_2='subscription2'
# CELERY STUFF
CELERY_BROKER_URL = 'redis://34.93.180.96:6379'
CELERY_RESULT_BACKEND = 'django-db'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
#HAYSTACK SETTINGS
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.elasticsearch2_backend.Elasticsearch2SearchEngine',
'URL': 'http://35.240.149.42:9200/',
'INDEX_NAME': 'haystack',
},
}
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
# Graphene settings
GRAPHENE = {
'SCHEMA': 'django_app.schema.schema'
}
| [
"nattesharan@gmail.com"
] | nattesharan@gmail.com |
ae86bd205c7f858831151f1dffba292ecf6e87d1 | fceef863fe5ba229f01162c3fce7ba96f277fccf | /azure.py | d2e43af257fce03150872188b96c566f02096211 | [] | no_license | SureshBolla/Suresh-Dummyrepo | 13570c927c6d792104249f52fe0a69cdf2680564 | 33a73b8befb177fde7e9ea521c22f9835a7089eb | refs/heads/master | 2023-06-08T13:05:44.721077 | 2021-07-03T06:52:38 | 2021-07-03T06:52:38 | 359,087,343 | 0 | 0 | null | 2021-04-21T04:18:43 | 2021-04-18T08:31:39 | Groovy | UTF-8 | Python | false | false | 32 | py | azure.py azuremigration.groovy
| [
"sureshbolla4@gmail.com"
] | sureshbolla4@gmail.com |
52916b19582fc73b317267448b682e7796f91584 | 8a3dfe493abbddf222cc98efc61ba6b9917f1601 | /blender_implementations/perlin_ps_sca_decentralised/sca_brancher.py | 2fdd2679908a4a1785bd69bd774fdb6bc18cf7c0 | [] | no_license | lorentzo/Growth_models | fa95f80547a545374ab0fb738c5fddff9d5b4d19 | fbc4cab2fd4d5cdeade2492407a80fdc5d585a51 | refs/heads/master | 2020-04-28T22:41:07.006889 | 2019-07-04T11:20:33 | 2019-07-04T11:20:33 | 175,626,424 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,944 | py |
#
# This source file is part of Growth_models.
# Visit https://github.com/lorentzo/Growth_models for more information.
#
# This software is released under MIT licence.
#
# Copyright (c) Lovro Bosnar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# -*- coding: utf-8 -*-
""" Circular branching of SCA model.
This module uses the sca module and creates the circular growth.
Contains:
SCACircleBrancher class
"""
#Project specific imports.
from sca import SCA
# Blender imports
import bpy
import bmesh
# Standard imports.
import numpy as np
import os
class SCACircleBrancher:
""" Defines the circular growth of SCA model.
Attributes:
center (np.array): center of circular SCA.
n_sca_trees (int): number of SCA trees in circular SCA.
root_circle_radius (float): radius of circle where roots will be.
leaf_center_radius (float): radius of circle where leaves centers will be.
leaves_spread (np.array): spread of leaves in x,y,z direction.
n_leaves (int): number of leaves in every SCA tree.
branch_thickness_max (float): max thickness of a branch.
bevel_radius_delta (float): increase of thickness in every iteration.
name (string): name of circular SCA object.
color (np.array): color of SCA circular object.
Methods:
__init__()
initialize_sca_forest()
emerge_sca_volume()
"""
def __init__(self,
center,
n_sca_trees,
root_circle_radius,
leaf_center_radius,
leaves_spread,
n_leaves,
branch_thickness_max,
bevel_radius_delta,
name,
color):
# User defined.
self.center = center
self.n_sca_trees = n_sca_trees
self.root_circle_radius = root_circle_radius
self.leaf_center_radius = leaf_center_radius
self.leaves_spread = leaves_spread
self.n_leaves = n_leaves
self.name = name
self.color = color
# Additional.
self.sca_forest = []
self.bevel_radius = 0
self.bevel_radius_delta = bevel_radius_delta
self.bevel_radius_max = branch_thickness_max
self.bevel_object = None
def initialize_sca_forest(self, scene):
""" Confiure and grow the set of the SCA objects.
Args:
scene (Blender scene object): scene where SCA objects will be placed.
Yields:
list of Blender curve objects.
"""
segment = 2 * np.pi / self.n_sca_trees
# Create bevel object for volume (ini: 0 volume).
bpy.ops.curve.primitive_nurbs_circle_add()
bpy.context.object.scale = (0,0,0)
self.bevel_object = bpy.context.object
# For every SCA model.
for n in range(self.n_sca_trees):
# Configure SCA root position.
xr = self.center[0] + np.cos(segment * n) * self.root_circle_radius
yr = self.center[1] + np.sin(segment * n) * self.root_circle_radius
zr = self.center[2] + 0
# Configure SCA leaf center position.
xl = self.center[0] + np.cos(segment * n) * self.leaf_center_radius
yl = self.center[1] + np.sin(segment * n) * self.leaf_center_radius
zl = self.center[2] + 0
# Configure SCA and grow.
sca = SCA(root_position=[xr,yr,zr],
leaves_cloud_center=[xl, yl, zl],
leaves_spread=self.leaves_spread,
n_leaves=self.n_leaves)
sca.grow()
# Create mesh.
bm = bmesh.new()
for branch in sca.branches:
if branch.parent == None:
continue
v1 = bm.verts.new(branch.position)
v2 = bm.verts.new(branch.parent.position)
interpolated = self.__interpolate_nodes(v1, v2, 4, 0.5, bm)
for i in range(len(interpolated)-1):
bm.edges.new((interpolated[i], interpolated[i+1]))
# Add a new mesh data.
sca_data = bpy.data.meshes.new(self.name+str(n)+"_data")
# Add a new empty mesh object using the mesh data.
sca_object = bpy.data.objects.new(self.name+str(n)+"_object", sca_data)
# Make the bmesh the object's mesh.
# Transfer bmesh data do mesh data which is connected to empty mesh object.
bm.to_mesh(sca_data)
bm.free()
# Add sca object to scene, convert to curve, add bevel.
scene.objects.link(sca_object)
sca_object.select = True
bpy.context.scene.objects.active = sca_object
bpy.ops.object.convert(target='CURVE')
sca_object.data.bevel_object = self.bevel_object
# Add color.
material = bpy.data.materials.new(self.name+str(n)+"_material")
material.diffuse_color = self.color
sca_object.active_material = material
# Store
self.sca_forest.append(sca_object)
def __interpolate_nodes(self, v1, v2, n_nodes, rand_amplitude, bm):
""" Interpolates nodes between two existing nodes.
Given the two existing nodes, interpolate additional nodes with a small
amout of noise.
Args:
v1 (Blender vertex object): first node
v2 (Blender vertex object): second node
n_nodes (int): number of nodes to interpolate
rand_amlitude (float): noise amiplitude
bm (Blender bmesh object): container for created nodes
Yields:
list of Blender vertex objects: interpolated nodes
"""
helper_nodes = []
for t in range(n_nodes+1):
# Interpolate.
x = (1 - t / n_nodes) * v1.co[0] + (t / n_nodes) * v2.co[0]
y = (1 - t / n_nodes) * v1.co[1] + (t / n_nodes) * v2.co[1]
z = (1 - t / n_nodes) * v1.co[2] + (t / n_nodes) * v2.co[2]
# Add random noise.
x += np.random.rand() * rand_amplitude
y += np.random.rand() * rand_amplitude
#z += np.random.rand() * rand_amplitude
helper_nodes.append(bm.verts.new([x,y,z]))
return helper_nodes
def emerge_sca_volume(self):
""" Increases the radius of bevel object.
Increasing the radius of bevel object results in increasing the
branch radius.
"""
new_radius = self.bevel_object.scale[0] + self.bevel_radius_delta
if new_radius < self.bevel_radius_max:
self.bevel_object.scale = (new_radius, new_radius, new_radius)
return False # not finished
else:
return True # finished
| [
"lovro.bosnar1@gmail.com"
] | lovro.bosnar1@gmail.com |
df19e899be82303369d712875194a24a32db8150 | b9d7ef5f2985fa4c545af5f537d703c471602c42 | /apps/trade/migrations/0008_auto_20180122_1013.py | 48f780d86708f52d89a162e1492cd600ddda34be | [] | no_license | ZhangXiaoD/shop | 226fc3f37743c25f09cbb1879259b22014050622 | 35a0ec14f1fa35d019716b782db270f0e44efc05 | refs/heads/master | 2021-09-06T01:08:03.760340 | 2018-02-01T06:28:11 | 2018-02-01T06:28:11 | 111,665,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-01-22 10:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.7 (see file header). Auto-generated
    # migrations must stay in sync with the recorded migration state, so
    # only comments are added here.

    dependencies = [
        # Must run after the previous trade-app migration.
        ('trade', '0007_auto_20180122_0952'),
    ]

    operations = [
        # Re-declare the OrderGoods -> Goods FK with an explicit
        # on_delete=CASCADE.
        migrations.AlterField(
            model_name='ordergoods',
            name='goods',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.Goods', verbose_name='商品'),
        ),
        # Re-declare the OrderGoods -> OrderInfo FK with explicit
        # on_delete=CASCADE and the ``goods`` reverse accessor.
        migrations.AlterField(
            model_name='ordergoods',
            name='order',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='goods', to='trade.OrderInfo', verbose_name='订单'),
        ),
    ]
| [
"156678702@qq.com"
] | 156678702@qq.com |
b26f245123a85aed99869e51155650695eca1b5e | 21af59d1213528111b78c44dd3ac0be68d9e8880 | /day12-20/tuple.py | 815135877f08283e86c02f03a0d09a05dfaa4121 | [] | no_license | luchaoet/python-test | 68caf2bcf02d20ad99463a9d869e44f5ccc55362 | ceb60c8232027f8100ff8da625f6d8c07d756936 | refs/heads/master | 2022-12-17T22:18:31.181347 | 2020-09-23T09:13:37 | 2020-09-23T09:13:37 | 113,461,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | # tuple 将序列转化为元组
a = tuple([1,2,3,4])
b = tuple('abcd')
print(a,b)
# 元组基本操作
c = (1,2,3,5)
print(c[2])
print(c[1:3]) | [
"550502661@qq.com"
] | 550502661@qq.com |
ccec286398e46c7aabb41c1a2f32a9eb00292d55 | 76f43cf1276f96ba963d9d11b1a047bfef46bb86 | /code-guild/demo/blog/migrations/0008_remove_blogpost_comments.py | e9860f1e2e04972786b2323729b02610df869046 | [] | no_license | grantholly/code-guild | 1192e14380d7fcbb7b36f72f3eb5c898918b1982 | 85d5e6c2eefdcd458165cadaa087acd0a0744378 | refs/heads/master | 2021-01-10T22:04:08.339812 | 2015-08-25T05:18:09 | 2015-08-25T05:18:09 | 23,163,257 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration. Auto-generated migrations must stay
    # in sync with the recorded migration state, so only comments are
    # added here.

    dependencies = [
        # Must run after the blog-app merge migration.
        ('blog', '0007_merge'),
    ]

    operations = [
        # Drop the ``comments`` field from the blogpost model.
        migrations.RemoveField(
            model_name='blogpost',
            name='comments',
        ),
    ]
| [
"gdholly@gmail.com"
] | gdholly@gmail.com |
4af322a8843cb332539f410c45a104449c904e40 | 43e480685c9b872d1d68564d89e4befabbccaf3b | /client_src/security.py | dfdfb590601c5f30b8534cd2556f304921aeb54d | [
"MIT"
] | permissive | davendiy/QWERTY_messenger | d15d55affe6cfab9b8cd9e57eb551b71cbaf7700 | 6bfa5a6ceb7b63f3e57d3d7779a1cda26cd55616 | refs/heads/master | 2020-09-16T15:53:07.400698 | 2019-12-11T19:36:31 | 2019-12-11T19:36:31 | 223,820,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py | #!/usr/bin/env python3
# -*-encoding: utf-8-*-
# created: 02.12.2019
# by David Zashkolny
# 3 course, comp math
# Taras Shevchenko National University of Kyiv
# email: davendiy@gmail.com
from Crypto.PublicKey import RSA
from Crypto.Signature.pkcs1_15 import PKCS115_SigScheme
from Crypto.Hash import SHA256
from Crypto.Random import get_random_bytes
from .constants import RSA_PUBLIC_KEY_PATH
# Server RSA public key and its signature verifier; both start unset and
# are populated by read_keys() when the module is first imported.
RSA_KEY = None
RSA_SIGNER = None
class VerificationError(Exception):
    """Raised when a control message fails RSA signature verification."""
def read_keys():
    """Load the server's RSA public key from disk.

    Populates the module-level globals RSA_KEY and RSA_SIGNER, which are
    used afterwards for signature verification. Invoked once, lazily, at
    module import (see the bottom of this module).
    """
    global RSA_KEY, RSA_SIGNER
    with open(RSA_PUBLIC_KEY_PATH, 'rb') as key_file:
        key_data = key_file.read()
    RSA_KEY = RSA.import_key(key_data)
    RSA_SIGNER = PKCS115_SigScheme(RSA_KEY)
# noinspection PyTypeChecker
def verify_control_message(message: bytes, signature: bytes) -> None:
    """Verify that *message* was really signed by the server.

    Hashes the message with SHA-256 and checks *signature* against it
    using the server's RSA public key (PKCS#1 v1.5).

    :param message: any data of any length
    :param signature: RSA signature bytes (256 bytes for a 2048-bit key)
    :raises VerificationError: if the signature does not match.
    """
    digest = SHA256.new(message)
    try:
        RSA_SIGNER.verify(digest, signature)
    except ValueError as err:
        # Chain the underlying failure so debugging keeps the real cause;
        # the original docstring wrongly advertised ValueError here.
        raise VerificationError("control message signature mismatch") from err
def generate_control_message() -> bytes:
    """Return 2048 cryptographically secure random bytes as a challenge."""
    return get_random_bytes(2048)
# prepare the global variables
# (loads the server public key the first time this module is imported;
#  note this performs file I/O at import time)
if RSA_KEY is None:
    read_keys()
| [
"davendiy@gmail.com"
] | davendiy@gmail.com |
1f1ffd5eb27b1397ebb012d6a0a8b8c4621d6bd3 | e9b93ac365d88366d3e3ebf39214173e6457daa2 | /analizadorDeRegistros.py | ee9c1dda73d38723d897a307e005a4b04ecb2592 | [] | no_license | adevesa/stevetricks | 57a60279cb8cd36620d5d705b9d97f31b7444888 | ccc7823d47e531d20a7a8c586e25dc081535b0e2 | refs/heads/master | 2020-03-11T16:11:08.182579 | 2018-10-08T01:36:28 | 2018-10-08T01:36:28 | 130,108,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,820 | py | ''' Analizador de registros vía archivo
Dado un path de un archivo con multiples registros, los analiza y devuelve un string con:
el tamaño de sus requests y responses con su codigo de ResponseEnd y la cantidad de registros
ejemplo:
STRING = analisis_de_registros("C:\\Users\\adevesa\\Documents\\Certius Info\\HSBC\\Info\\TXs\\regs")
'''
def tamanio_payload_request(Paquete):
    """Return the length of the request payload of a record.

    The request payload is the second-to-last ';'-separated field of the
    record. (A stray leftover debug ``print`` that dumped records whose
    request payload was exactly 55 characters long has been removed.)
    """
    campos = Paquete.split(";")
    # Negative indexing replaces the manual len()-2 arithmetic.
    return len(campos[-2])
def tamanio_payload_response(Paquete):
    """Return (response code, response payload length) for a record.

    The code is field index 3 of the ';'-separated record and the
    response payload is the last field.
    """
    campos = Paquete.split(";")
    codigo = campos[3]
    return (codigo, len(campos[-1]))
def nombres_operaciones(Paquete):
    """Return fields 4 and 2 of a ';'-separated record as a pair.

    NOTE(review): presumably (operation name, operation id) — confirm
    against the actual record layout.
    """
    campos = Paquete.split(";")
    return (campos[4], campos[2])
def comparador(uno, otro):
    """Three-way comparison: 1 if uno > otro, -1 if smaller, 0 if equal.

    Incomparable values (none of the three relations holds) fall through
    and return None, matching the original behaviour.
    """
    if uno < otro:
        return -1
    if uno > otro:
        return 1
    if uno == otro:
        return 0
def analisis_de_registros(Ruta):
    """Analyse a file of ';'-separated records and summarise it.

    Builds a string with the number of records and the distinct request
    and response payload sizes found across all records.

    :param Ruta: path of the file containing one record per line.
    :return: summary string (three lines, no trailing newline).
    """
    # `with` guarantees the handle is closed even if a record is
    # malformed; the original leaked the file on any exception.
    with open(Ruta, 'r') as registros:
        lineas = registros.readlines()
    # NOTE(review): readlines() keeps the trailing '\n', so the response
    # payload length includes it — behaviour preserved; confirm intended.
    req_valores = {tamanio_payload_request(linea) for linea in lineas}
    res_valores = {tamanio_payload_response(linea) for linea in lineas}
    return (
        "Cantidad de registros: " + str(len(lineas)) + "\n"
        + "Tamanio Payloads Requests: " + str(req_valores) + "\n"
        + "Tamanio Payloads Responses: " + str(res_valores)
    )
| [
"noreply@github.com"
] | adevesa.noreply@github.com |
1109e548b2e556ab75ee86333919fff894bc4a80 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/layout/scene/annotation/_visible.py | b2c5d8326b717c6c3200d02290e1474c15b5ad94 | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 427 | py | import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.BooleanValidator):
    # Auto-generated Plotly validator for the boolean ``visible`` property
    # of ``layout.scene.annotation``. Generated code — only comments added.

    def __init__(
        self, plotly_name="visible", parent_name="layout.scene.annotation", **kwargs
    ):
        # edit_type defaults to "calc" unless explicitly overridden via
        # kwargs; pop() removes it so it is not passed twice.
        super(VisibleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs
        )
"noreply@github.com"
] | hugovk.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.