Dataset schema (one row per column):

| column | dtype |
|---|---|
| blob_id | string (40–40 chars) |
| directory_id | string (40–40 chars) |
| path | string (2–616 chars) |
| content_id | string (40–40 chars) |
| detected_licenses | list (0–69 items) |
| license_type | string (2 classes) |
| repo_name | string (5–118 chars) |
| snapshot_id | string (40–40 chars) |
| revision_id | string (40–40 chars) |
| branch_name | string (4–63 chars) |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 (2.91k–686M, nullable ⌀) |
| star_events_count | int64 (0–209k) |
| fork_events_count | int64 (0–110k) |
| gha_license_id | string (23 classes) |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string (213 classes) |
| src_encoding | string (30 classes) |
| language | string (1 class) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (2–10.3M) |
| extension | string (246 classes) |
| content | string (2–10.3M chars) |
| authors | list (1–1 items) |
| author_id | string (0–212 chars) |
| field | value |
|---|---|
| blob_id | 53d39e3c02a0b7669c421ea057be09d75910efd8 |
| directory_id | 2d0a363ff1b4f25eee426240c7daa4ecb8624ae2 |
| path | /worker/worker.py |
| content_id | fbed5b9d11a2a771edbac2224ab5e494215ea448 |
| detected_licenses | ["MIT"] |
| license_type | permissive |
| repo_name | JoaoOliveiracc/email-workers |
| snapshot_id | b0e03d2b254e7048740c3d7e8671a986db5db54b |
| revision_id | 01056a49fc3cb07f72229aa3302a69dbf3ce6b4b |
| branch_name | refs/heads/main |
| visit_date | 2023-07-10T23:48:01.527716 |
| revision_date | 2021-08-12T18:40:40 |
| committer_date | 2021-08-12T18:40:40 |
| github_id | 394,717,553 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 470 |
| extension | py |
```python
import redis
import json
import os
from time import sleep
from random import randint

if __name__ == '__main__':
    redis_host = os.getenv('REDIS_HOST', 'queue')
    r = redis.Redis(host=redis_host, port=6379, db=0)
    print('Aguardando mensagens ...')  # "Waiting for messages ..."
    while True:
        mensagem = json.loads(r.blpop('sender')[1])
        print('Mandando a mensagem:', mensagem['assunto'])  # "Sending the message: <subject>"
        sleep(randint(15, 45))
        print('Mensagem', mensagem['assunto'], 'enviada')  # "Message <subject> sent"
```
authors: ["ccjoao_monteiro@outlook.com"] · author_id: ccjoao_monteiro@outlook.com
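A quick companion sketch (not part of the record): the producer side this worker pairs with, assuming the same Redis instance and the `sender` list the worker blocks on, with `assunto` (subject) as the only field the worker reads.

```python
import json
import redis

# Enqueue a job on the 'sender' list that worker.py consumes with BLPOP.
r = redis.Redis(host='queue', port=6379, db=0)
r.rpush('sender', json.dumps({'assunto': 'Welcome email'}))
```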
| field | value |
|---|---|
| blob_id | c2bf56a3ceec33b4a9d53a9de7325c9bd31e5b8f |
| directory_id | 4add63c577afc3a59868270638c5eb0b9e0f06d3 |
| path | /tonyai.py |
| content_id | 04dda794f9e40d293f718fbfde69e2f3ae5514fa |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | iamkaushalkb/speech-ai.py |
| snapshot_id | 81bc60cf5ffd6b04176a6ebd2b374057890dbbed |
| revision_id | 107c602a76c50c67dc8b12e598362c1402658089 |
| branch_name | refs/heads/main |
| visit_date | 2023-04-19T14:01:45.842636 |
| revision_date | 2021-05-19T14:28:34 |
| committer_date | 2021-05-19T14:28:34 |
| github_id | 368,897,603 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 2,748 |
| extension | py |
```python
# importing modules
import pyttsx3  # pip install pyttsx3
import speech_recognition as sr  # pip install speechRecognition
import datetime
import wikipedia  # pip install wikipedia
import webbrowser
import os
import smtplib

# speech_voice
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)


# running audio
def speak(audio):
    engine.say(audio)
    engine.runAndWait()


# Wishing User According to the time
def wishMe():
    hour = int(datetime.datetime.now().hour)
    if hour >= 0 and hour < 12:
        speak("Good Morning!")
    elif hour >= 12 and hour < 18:
        speak("Good Afternoon!")
    else:
        speak("Good Evening!")
    speak("I am Tony Sir. Please tell me how may I help you")


# Creating Function to take command
def takeCommand():
    # It takes microphone input from the user and returns string output
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening...")
        r.pause_threshold = 1
        audio = r.listen(source)
    try:
        print("Recognizing...")
        query = r.recognize_google(audio, language='en-us')
        print(f"User said: {query}\n")
    except Exception as e:
        print("Say that again please...")
        return "None"
    return query


# Creating Function for sending email
def sendEmail(to, content):
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.ehlo()
    server.starttls()
    server.login('youremail@gmail.com', 'your-password')
    server.sendmail('youremail@gmail.com', to, content)
    server.close()


if __name__ == "__main__":
    wishMe()
    while True:
        query = takeCommand().lower()

        # Logic for executing tasks based on query
        if 'wikipedia' in query:
            speak('Searching Wikipedia...')
            query = query.replace("wikipedia", "")
            results = wikipedia.summary(query, sentences=2)
            speak("According to Wikipedia")
            print(results)
            speak(results)
        elif 'open youtube' in query:
            webbrowser.open("youtube.com")
        elif 'open google' in query:
            webbrowser.open("google.com")
        elif 'open stackoverflow' in query:
            webbrowser.open("stackoverflow.com")
        elif 'open pictures' in query:
            pic_dir = 'C://Users//uSer//Pictures'
            pic = os.listdir(pic_dir)
            print(pic)
            os.startfile(os.path.join(pic_dir, pic[0]))
        elif 'the time' in query:
            strTime = datetime.datetime.now().strftime("%H:%M")
            speak(f"Sir, the time is {strTime}")
```
authors: ["noreply@github.com"] · author_id: iamkaushalkb.noreply@github.com
| field | value |
|---|---|
| blob_id | 326ed5f20e169492c82dbb1a6a2055189f6db873 |
| directory_id | c7b303929bd02fbc750e34eafe918ee1962ea0b4 |
| path | /offline/signal_processing/preprocesssing_.py |
| content_id | 554d25b1d5a55283a2faad2b993c739b61ec91f0 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | io0/Milo |
| snapshot_id | 9d5fa9318748a2959e737694979619893881a4ad |
| revision_id | e58b17138c88975b9a631226b09ad5dcecbdb99c |
| branch_name | refs/heads/master |
| visit_date | 2022-12-10T01:07:19.547660 |
| revision_date | 2019-04-28T22:29:34 |
| committer_date | 2019-04-28T22:29:34 |
| github_id | 183,957,667 |
| star_events_count | 3 |
| fork_events_count | 1 |
| gha_license_id | null |
| gha_event_created_at | 2022-12-09T21:41:42 |
| gha_created_at | 2019-04-28T20:52:08 |
| gha_language | JavaScript |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 9,126 |
| extension | py |
```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 27 09:21:25 2019

@author: jenisha
"""
import numpy as np
import pandas as pd
import re

import matplotlib.pyplot as plt
import matplotlib.mlab as mlab

from scipy.signal import freqz
from scipy.signal import butter, lfilter


class Preprocessing():
    def __init__(self, path):
        self.path = path
        self.sample_rate = 250

    def load_data_BCI(self, list_channels):
        """
        Load the data from OpenBCI txt file
        and extract some parameters such as
        - number of channels
        - list of channels
        - raw data
        - timestamp

        Input:
            channel: lists of channels to use
        """
        self.list_channels = list_channels
        self.number_channels = len(list_channels)

        # load raw data into numpy array
        self.raw_eeg_data = np.loadtxt(self.path,
                                       delimiter=',',
                                       skiprows=7,
                                       usecols=list_channels)
        if self.number_channels == 1:
            self.raw_eeg_data = np.expand_dims(self.raw_eeg_data,
                                               axis=1)

        # extract time stamps
        self.time_stamps = pd.read_csv(self.path,
                                       delimiter=',',
                                       skiprows=7,
                                       usecols=[12],
                                       header=None)

    def initial_filtering(self, hp_cutoff_Hz=1.0, notch_freq_Hz=[60.0, 120.0],
                          order_high_pass=2, order_notch=3):
        """
        Filters the data by channel to remove DC components
        and line noise

        Input:
            hp_cutoff_Hz: cutoff frequency for highpass filter
            notch_freq_Hz: cutoff frequencies for notch filtering
            order_high_pass: order of highpass filter
            order_notch: order of notch filter
        """
        # filter the data to remove DC
        self.nyq = 0.5 * self.sample_rate
        b_high, a_high = butter(order_high_pass, hp_cutoff_Hz / self.nyq, 'highpass')
        self.filtered_eeg_data_dc = np.apply_along_axis(lambda l: lfilter(b_high, a_high, l), 0,
                                                        self.raw_eeg_data)

        # notch filter the data to remove line noise
        self.filtered_eeg_data_notch = self.filtered_eeg_data_dc
        for freq_Hz in notch_freq_Hz:
            bp_stop_Hz = freq_Hz + float(order_notch) * np.array([-1, 1])  # set the stop band
            b_notch, a_notch = butter(order_notch, bp_stop_Hz / self.nyq, 'bandstop')
            self.filtered_eeg_data_notch = np.apply_along_axis(lambda l: lfilter(b_notch, a_notch, l), 0,
                                                               self.filtered_eeg_data_notch)

    def convert_to_freq_domain(self, NFFT=500, FFTstep=125):
        """
        Do an FFT of each channel's data

        Input:
            - NFFT: The number of data points used in each block
            - FFTstep: number of data points between successive FFTs
        """
        # Todo: rewrite this so it's not the same thing twice
        self.FFTstep = FFTstep  # do a new FFT every FFTstep data points
        self.overlap = NFFT - FFTstep  # half-second steps

        self.raw_spec_PSDperHz, self.raw_spec_PSDperBin, self.raw_freqs, self.raw_t_spec = [], [], [], []
        for filtered in self.raw_eeg_data.T:
            spec_PSDperHz, freqs, t_spec = mlab.specgram(
                np.squeeze(filtered),
                NFFT=NFFT,
                window=mlab.window_hanning,
                Fs=self.sample_rate,
                noverlap=self.overlap
            )
            spec_PSDperBin = spec_PSDperHz * self.sample_rate / float(NFFT)  # convert to "per bin"
            self.raw_spec_PSDperHz.append(spec_PSDperHz)
            self.raw_spec_PSDperBin.append(spec_PSDperBin)
            self.raw_freqs.append(freqs)
            self.raw_t_spec.append(t_spec)

        self.spec_PSDperHz, self.spec_PSDperBin, self.freqs, self.t_spec = [], [], [], []
        if self.number_channels == 1:
            self.filtered_eeg_data_notch = np.expand_dims(self.filtered_eeg_data_notch,
                                                          axis=1)
        for filtered in self.filtered_eeg_data_notch.T:
            spec_PSDperHz, freqs, t_spec = mlab.specgram(
                np.squeeze(filtered),
                NFFT=NFFT,
                window=mlab.window_hanning,
                Fs=self.sample_rate,
                noverlap=self.overlap
            )
            spec_PSDperBin = spec_PSDperHz * self.sample_rate / float(NFFT)  # convert to "per bin"
            self.spec_PSDperHz.append(spec_PSDperHz)
            self.spec_PSDperBin.append(spec_PSDperBin)
            self.freqs.append(freqs)
            self.t_spec.append(t_spec)

    def plots(self, channel=0):
        """
        Plot the raw and filtered data of a channel as well as their spectrograms

        Input:
            - channel: channel whose data is to plot
        """
        fig = plt.figure()
        t_sec = np.array(range(0, self.filtered_eeg_data_notch.size)) / self.sample_rate
        print(self.raw_eeg_data.T.size)

        ax1 = plt.subplot(221)
        plt.plot(t_sec, self.raw_eeg_data[:, channel])
        plt.ylabel('EEG (uV)')
        plt.xlabel('Time (sec)')
        plt.title('Raw')
        #plt.xlim(t_sec[0], t_sec[-1])

        ax2 = plt.subplot(222)
        plt.pcolor(self.raw_t_spec[channel], self.raw_freqs[channel],
                   10 * np.log10(self.raw_spec_PSDperBin[channel]))
        plt.clim(25 - 5 + np.array([-40, 0]))
        plt.xlim(t_sec[0], t_sec[-1])
        #plt.ylim([0, self.freqs/2.0])  # show the full frequency content of the signal
        plt.xlabel('Time (sec)')
        plt.ylabel('Frequency (Hz)')
        plt.title('Spectrogram of Unfiltered')

        ax3 = plt.subplot(223)
        plt.plot(t_sec, self.filtered_eeg_data_notch[:, channel])
        plt.ylim(-100, 100)
        plt.ylabel('EEG (uV)')
        plt.xlabel('Time (sec)')
        plt.title('Filtered')
        plt.xlim(t_sec[0], t_sec[-1])

        ax4 = plt.subplot(224)
        plt.pcolor(self.t_spec[channel], self.freqs[channel],
                   10 * np.log10(self.spec_PSDperBin[channel]))
        plt.clim(25 - 5 + np.array([-40, 0]))
        plt.xlim(t_sec[0], t_sec[-1])
        #plt.ylim([0, 20])  # show the full frequency content of the signal
        plt.xlabel('Time (sec)')
        plt.ylabel('Frequency (Hz)')
        plt.title('Spectrogram of Filtered')

        plt.tight_layout()
        plt.show()

    def band_pass_filtering(self, lowcut, highcut, order=5):
        """
        Filters the data using a bandpass filter

        Input:
            lowcut: lower cutoff frequency
            highcut: higher cutoff frequency
            order: order of the bandpass filter
        """
        self.low = lowcut / self.nyq
        self.high = highcut / self.nyq
        b_bandpass, a_bandpass = butter(order, [self.low, self.high], btype='band')
        # fixed: the original referenced undefined names `b` and `a` here
        self.y = lfilter(b_bandpass, a_bandpass, self.filtered_eeg_data_notch)


#fname_ec = '/Users/jenisha/Desktop/NeuroTech-Workshop-Demo/EyesClosedNTXDemo.txt'
#fname_eo = '/Users/jenisha/Desktop/NeuroTech-Workshop-Demo/EyesOpenedNTXDemo.txt'
#test = Preprocessing(fname_ec)
#test.load_data_BCI([1])
#test.initial_filtering()
#test.convert_to_freq_domain()
#test.plots()
#
#test2 = Preprocessing(fname_eo)
#test2.load_data_BCI([1])
#test2.initial_filtering()
#test2.convert_to_freq_domain()
#test2.plots()

fname_20 = '/Users/jenisha/Desktop/NeuroTechX-McGill-2019/offline/data/March_4/6_SUCCESS_Rest_RightClench_JawClench_ImagineClench_10secs.txt'
test3 = Preprocessing(fname_20)
test3.load_data_BCI([1])
test3.initial_filtering()
test3.convert_to_freq_domain()
test3.plots()
```
authors: ["marleyxiong0@gmail.com"] · author_id: marleyxiong0@gmail.com
| field | value |
|---|---|
| blob_id | 4b77666f51cdd6605d73087ff62fc22b273bc31e |
| directory_id | 0da0173a046bc8f2ea67e553b2e4cf52619ae8b6 |
| path | /puq/adaptive.py |
| content_id | cdce1f14264c94239b9692dafc4b84b69b293067 |
| detected_licenses | ["MIT", "LicenseRef-scancode-public-domain"] |
| license_type | permissive |
| repo_name | dalg24/puq |
| snapshot_id | 7cb6e9ba487ad867a9ce1a5c9b1bc7986aedfd7b |
| revision_id | ea547cd80205f65d6227049868153b6ca154334b |
| branch_name | refs/heads/master |
| visit_date | 2020-12-26T02:32:08.149124 |
| revision_date | 2016-06-29T06:41:28 |
| committer_date | 2016-06-29T06:41:28 |
| github_id | 64,855,996 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | 2016-08-03T15:13:17 |
| gha_created_at | 2016-08-03T15:13:17 |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 15,541 |
| extension | py |

Note: this file is Python 2 source (bare `print` statements), preserved as such below.
"""
h-Adaptive Stochastic Collocation
"""
import numpy as np
from puq.hdf import get_result, get_params, get_param_names
from puq.options import options
from puq.psweep import APSweep
from adap import uqsolver
from logging import info, debug, exception, warning, critical
import h5py, sys
from puq.util import process_data
from puq.pdf import PDF
import matplotlib
from puq.meshgridn import meshgridn
from puq.response import SampledFunc
class AdapStocColl(APSweep):
"""
Class implementing h-Adaptive Stochastic Collocation.
- **params** : Input list of :class:`Parameter`\s
- **tol** : Tolerance. Try 0.1 first, then decrease if further\
accuracy is needed.
- **max_iterations** : Maximum number of iterations to perform.\
The method will loop, performaning additional calculations and\
refining its results until either the specified tolerance is met,\
or the number of iterations is *max_iterations*. Default\
is None.
- **level** : Interpolation level. Default is 2
- **sel** : Dimensional Selectivity. Default is 0.5.
- **callback** : Optional function that is called every iteration.
"""
def __init__(self, params, tol, max_iterations=None, level=2, sel=0.5, callback=None):
APSweep.__init__(self)
self.params = params
self.level = level
self.tol = tol
self.sel = sel
self.max_iter = max_iterations
self._callback = callback
self._uqsolver = uqsolver(params, level, tol, sel)
def reinit(self):
print "REINIT %s %s %s %s" % (self.params, self.level, self.tol, self.sel)
APSweep.reinit(self)
self._callback = None # FIXME
self._uqsolver = uqsolver(self.params, self.level, self.tol, self.sel)
for p in self.params:
del p.values
return True
def extend(self, h5, args):
from optparse import OptionParser
debug(args)
usage = "Usage: sweep extend [keyword args] hdf5_filename.\n"
parser = OptionParser(usage)
parser.add_option("--tol", type='float', default = self.tol)
parser.add_option("--max_iter", type='int', default = self.max_iter)
(opt, ar) = parser.parse_args(args=list(args))
if opt.tol > self.tol:
print "Error: Previous tolerance was %s. You cannot" % self.tol
print "increase the tolerance."
sys.exit(1)
if opt.max_iter == self.max_iter and opt.tol == self.tol:
print "Error: Tolerance and Iterations are unchanged."
print "Nothing to do here."
sys.exit(0)
if opt.max_iter and self.max_iter and opt.max_iter < self.max_iter \
and opt.tol == self.tol:
print "Error: Previous iterations was %s. You cannot" % self.iter_max
print "decrease the iterations."
sys.exit(1)
if opt.tol != self.tol:
print "Changing tol from %s to %s" % (self.tol, opt.tol)
if opt.max_iter != self.max_iter:
print "Changing max_iter from %s to %s" % (self.max_iter, opt.max_iter)
self.tol = opt.tol
self.max_iter = opt.max_iter
self._sweep._reinit = True
self.reinit()
# Remove old results
try:
del h5['output/data']
except:
pass
self._sweep.host.reinit()
# Returns a list of name,value tuples
# For example, [('t', 1.0), ('freq', 133862.0)]
def get_args(self):
par = self._uqsolver.iadaptiveparams()
plist = par.tolist()
if plist == []:
return
for i, p in enumerate(self.params):
pcol = par[:,i]
try:
p.values.append(pcol)
except AttributeError:
p.values = [pcol]
for row in plist:
yield zip([p.name for p in self.params], row)
def analyze(self, hf):
process_data(hf, 'AdapStocColl', self._do_pdf)
def iteration_cb(self, sw, iter):
"""
Callback for each iteration. The sweep method calls this for
every iteration. This method then calls its registered callback.
"""
z = sw.get_result(iteration=iter)
# fixme: z must be floats
m,v,e = self._uqsolver.doiadaptive(z)
"""
put mean, var, std, err, pdf in /AdapStocColl
These will be indexed for each iteration, so
/AdapStocColl/mean/1 will be the mean after iteration 1.
"""
hf = h5py.File(sw._fname)
try:
hf['/AdapStocColl/mean/%d' % iter] = m
hf['/AdapStocColl/variance/%d' % iter] = v
hf['/AdapStocColl/std/%d' % iter] = np.sqrt(v)
hf['/AdapStocColl/error/%d' % iter] = e
except:
pass
# Call the callback, if defined
if self._callback:
finished = self._callback(iter, hf, z, m, v, e)
else:
finished = False
if iter == 0:
print "Iter mean var dev errind points cached"
print "%d: %.4e %.4e %.4e %.4e %5d %5d" \
% (iter, m, v, np.sqrt(v), e, self._num_jobs, self._num_jobs_cached)
hf.close()
if self.max_iter and iter >= self.max_iter:
finished = True
return finished
# plot types:
# surface - resampled using interpolate()
# scatter - all points
# scatter - for each iteration
def plot_response(self, h5, ivars=''):
fmt = options['plot']['format']
if fmt == 'png' or fmt == 'i':
load = options['plot']['iformat']
else:
load = fmt
matplotlib.use(load, warn=False)
import matplotlib.pyplot as plt
if ivars:
num_params = len(ivars)
else:
ivars = get_param_names(h5)
num_params = len(ivars)
if num_params > 2:
print "Error: Cannot plot in more than three dimensions."
print "Use '-v' to select a subset of input parameters."
raise ValueError
if num_params > 1:
self.scatter3(h5, ivars)
self.scatter3(h5, ivars, iteration='sum')
else:
self.scatter2(h5, ivars[0])
self.scatter2(h5, ivars[0], iteration='sum')
if fmt == 'i':
try:
plt.show()
except KeyboardInterrupt :
pass
def _do_pdf(self, hf, data):
num = 10000
params = get_params(hf['/'])
ndims = len(params)
pts = np.empty((num, ndims + 1))
for i, p in enumerate(params):
pts[:,i] = p.pdf.ds(num)
self._uqsolver.interpolate(pts)
rs = self.response_func()
last_iter = self.iteration_num-1
mean = hf['/AdapStocColl/mean/%d' % last_iter].value
var = hf['/AdapStocColl/variance/%d' % last_iter].value
std = hf['/AdapStocColl/std/%d' % last_iter].value
error = hf['/AdapStocColl/error/%d' % last_iter].value
return [('sampled_pdf', pts[:,-1]),
('mean', mean),
('dev', std),
('var', var),
('error', error),
('response_func', rs)]
def response_func(self):
iters = self.iteration_num
ndims = len(self.params)
# calculate the optimal flat grid based on the hierarchal grid
vecs = []
for p in self.params:
x = []
for iteration in range(0, iters):
x = np.concatenate((x, p.values[iteration]))
last = None
mindist = 1e309
for v in sorted(x):
if v != last:
if last != None:
mindist = min(mindist, v-last)
last = v
debug("%s: %s %s grids" % (p.name, mindist,
(p.pdf.range[1] - p.pdf.range[0])/mindist))
vecs.append(np.arange(p.pdf.range[0], p.pdf.range[1] + mindist, mindist))
xx = meshgridn(*vecs)
pts = np.vstack(map(np.ndarray.flatten, xx)).T
# add column for results
pts = np.append(pts, np.zeros((len(pts),1)), axis=1)
# interpolate function requires array in contiguous memory
if pts.flags['C_CONTIGUOUS'] == False:
pts = np.ascontiguousarray(pts)
self._uqsolver.interpolate(pts)
return SampledFunc(pts, params=self.params)
"""
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
fig = plot_figure()
ax = Axes3D(fig, azim = 30, elev = 30)
X = pts[:,0].reshape(xx[0].shape)
Y = pts[:,1].reshape(xx[0].shape)
try:
Z = pts[:,2].reshape(xx[0].shape)
ax.plot_surface(X,Y,Z, rstride = 1, cstride = 1, cmap=cm.jet, alpha = 0.5)
except:
plt.plot(X, Y, color='green')
plt.show()
"""
"""
def scatter2(self, hf, input_var='', output_var='', iteration='all'):
import matplotlib.pyplot as plt
from matplotlib import cm
fmt = options['plot']['format']
parameters = hdf5_get_params(hf)
parameter_names = [p.name for p in parameters]
if input_var:
ivar = [p for p in parameters if p.name == input_var][0]
else:
ivar = parameters[0]
if not ivar:
print "Error: Unrecognized input variable: %s" % input_var
raise ValueError
num_iterations = hdf5_get_iterations(hf)
if iteration == 'all':
for iteration in range(0, num_iterations):
fig = plot_figure()
plt.xlabel(ivar.description)
data = hdf5_get_result(hf, var=output_var, iteration=iteration)
plt.scatter(ivar.values[iteration], data)
plt.suptitle("Iteration %s" % iteration)
fig.canvas.manager.set_window_title("Iteration %s" % iteration)
elif iteration == 'sum':
fig = plot_figure()
plt.xlabel(ivar.description)
x = []
y = []
iters = []
for iteration in range(0, num_iterations):
x = np.concatenate((x, ivar.values[iteration]))
tmp = np.empty((len(ivar.values[iteration])))
tmp[:] = float(iteration)
iters = np.concatenate((iters, tmp))
data = hdf5_get_result(hf, var=output_var, iteration='sum')
plt.scatter(x, data, c=iters, cmap=cm.jet)
plt.suptitle("All %s Iterations" % num_iterations)
fig.canvas.manager.set_window_title("All %s Iterations" % num_iterations)
else:
fig = plot_figure()
plt.xlabel(ivar.description)
plt.suptitle("Iteration %s" % iteration)
fig.canvas.manager.set_window_title("Iteration %s" % iteration)
data = hdf5_get_result(hf, var=output_var, iteration=iteration)
plt.scatter(ivar.values[iteration], data, color='blue', alpha=.5)
#plot_customize()
if fmt != 'i':
plt.savefig("%s-scatter[%s].%s" % (output_var, input_var, fmt))
# 3D scatter plot
# iteration='all', 'last', 'sum' or number
def scatter3(self, hf, input_vars=[], output_var='', iteration='all'):
print "scatter %s" % (input_vars)
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
input_vars = hdf5_get_params(hf, input_vars)
outvars = hdf5_get_output_names(hf)
outdesc = hdf5_prog_description(hf)
if output_var and not output_var in outvars:
print "Error: Unrecognized output variable: %s" % output_var
return
if not output_var:
output_var = outvars[0]
fmt = options['plot']['format']
num_iterations = hdf5_get_iterations(hf)
if iteration == 'all':
for iteration in range(0, num_iterations):
print "iteration: %s" % iteration
fig = plot_figure()
ax = Axes3D(fig, azim = 30, elev = 30)
plt.xlabel(param_description(input_vars[0]))
plt.ylabel(param_description(input_vars[1]))
plt.suptitle("Iteration %s" % iteration)
fig.canvas.manager.set_window_title("Iteration %s" % iteration)
x = np.array(input_vars[0].values[iteration])
y = np.array(input_vars[1].values[iteration])
odata = hdf5_get_result(hf, var=output_var, iteration=iteration)
ax.scatter(x, y, odata, linewidths=(2.,))
ax.set_zlabel(hdf5_data_description(hf, output_var))
elif iteration == 'sum':
fig = plot_figure()
ax = Axes3D(fig, azim = 30, elev = 30)
ax.set_zlabel(hdf5_data_description(hf, output_var))
x = []
y = []
iters = []
for iteration in range(0, num_iterations):
x = np.concatenate((x, input_vars[0].values[iteration]))
y = np.concatenate((y, input_vars[1].values[iteration]))
tmp = np.empty((len(input_vars[0].values[iteration])))
tmp[:] = float(iteration)
iters = np.concatenate((iters, tmp))
odata = hdf5_get_result(hf, var=output_var, iteration='sum')
ax.scatter(x, y, odata, c=iters, cmap=cm.jet)
plt.xlabel(param_description(input_vars[0]))
plt.ylabel(param_description(input_vars[1]))
plt.suptitle("All %s Iterations" % num_iterations)
fig.canvas.manager.set_window_title("All %s Iterations" % num_iterations)
else:
print "iteration: %s" % iteration
fig = plot_figure()
ax = Axes3D(fig, azim = 30, elev = 30)
plt.xlabel(param_description(input_vars[0]))
plt.ylabel(param_description(input_vars[1]))
plt.suptitle("Iteration %s" % iteration)
fig.canvas.manager.set_window_title("Iteration %s" % iteration)
x = np.array(input_vars[0].values[iteration])
y = np.array(input_vars[1].values[iteration])
odata = hdf5_get_result(hf, var=output_var, iteration=iteration)
ax.scatter(x, y, odata, linewidths=(2.,))
ax.set_zlabel(hdf5_data_description(hf, output_var))
#plot_customize()
if fmt != 'i':
plt.savefig("%s-scatter.%s" % ('test', fmt))
def plot_pdfs(self, h5, kde, hist, vars):
from plot import plot_pdf
fmt = options['plot']['format']
if fmt == 'png' or fmt == 'i':
load = options['plot']['iformat']
else:
load = fmt
matplotlib.use(load, warn=False)
import matplotlib.pyplot as plt
if vars:
print "Plotting PDFs with a subset of variables"
print "is not implemented yet."
return
title = hdf5_prog_description(h5)
var = hdf5_get_output_names(h5)[0]
xlabel = hdf5_data_description(h5, var)
data = h5['AdapStocColl/%s/sampled_pdf' % var].value
plot_pdf(data, kde, hist, title, xlabel, var)
if fmt == 'i':
try:
plt.show()
except KeyboardInterrupt :
pass
"""
authors: ["huntmartinm@gmail.com"] · author_id: huntmartinm@gmail.com
| field | value |
|---|---|
| blob_id | b54fd0bc290b3f5a82c4cad6ff829f7b399573f4 |
| directory_id | ded81a7568fe04f3227562cc5f67ffc675617cc0 |
| path | /cheer_app/migrations/0002_comment.py |
| content_id | a7803e53c60185ed5d941b24bfcce9f91293cac8 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | shin04/cheer |
| snapshot_id | 3e220afc1fb0a4329ff7c16bd4823da1c09ee0a9 |
| revision_id | da39bbc584350c0ac89c23dbbfaf1c96ab9148fd |
| branch_name | refs/heads/master |
| visit_date | 2020-07-02T16:07:44.280390 |
| revision_date | 2020-05-20T11:13:03 |
| committer_date | 2020-05-20T11:13:03 |
| github_id | 183,242,194 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 915 |
| extension | py |
```python
# Generated by Django 2.2 on 2019-08-05 04:29

from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone


class Migration(migrations.Migration):

    dependencies = [
        ('cheer_app', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('approved_comment', models.BooleanField(default=False)),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='cheer_app.Post')),
            ],
        ),
    ]
```
authors: ["daikon0413@gmail.com"] · author_id: daikon0413@gmail.com
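For readers skimming this record: the `CreateModel` above corresponds roughly to the model sketched below. This is an inference from the migration alone (the repo's `models.py` is not in this dump), and it assumes a `Post` model already exists in `cheer_app`.

```python
from django.db import models
from django.utils import timezone


class Comment(models.Model):
    # Fields inferred from the migration; the `id` AutoField is implicit.
    post = models.ForeignKey('cheer_app.Post', on_delete=models.CASCADE,
                             related_name='comments')
    author = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    approved_comment = models.BooleanField(default=False)
```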
| field | value |
|---|---|
| blob_id | 3492d580508e89a75e1d9318c8c9852f7db86ac5 |
| directory_id | 12ff1868e2fc926347b2eff819f88926d847b866 |
| path | /Plot processing tools/stitch_all_date_pickles.py |
| content_id | e6f2b08abed2d9a72c488ec56ddf79c6dd2ace12 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | bunanna/image-processing |
| snapshot_id | 9d1a3beaca30289b910113ec0151b816107e304d |
| revision_id | 083d1ee1a81743a40ea0641bc31c8ba44624fd4e |
| branch_name | refs/heads/master |
| visit_date | 2023-09-06T02:19:48.198345 |
| revision_date | 2021-11-05T19:56:32 |
| committer_date | 2021-11-05T19:56:32 |
| github_id | 284,755,533 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 9,942 |
| extension | py |
```python
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 20 11:47:03 2021

@author: User
"""
import pickle
import datetime
from opencage.geocoder import OpenCageGeocode

pkl_2020_08_01_2020_08_31 = pickle.load(open("2020-08-01_2020-08-31.pkl", "rb"))
pkl_2020_09_01_2020_09_30 = pickle.load(open("2020-09-01_2020-09-30.pkl", "rb"))
pkl_2020_10_01_2020_10_31 = pickle.load(open("2020-10-01_2020-10-31.pkl", "rb"))
pkl_2020_11_01_2020_11_30 = pickle.load(open("2020-11-01_2020-11-30.pkl", "rb"))
pkl_2020_12_01_2020_12_31 = pickle.load(open("2020-12-01_2020-12-31.pkl", "rb"))

ixps = list(pkl_2020_08_01_2020_08_31.keys())

key = 'bd6e1524012a4459bc142002b0ab8617'
geocoder = OpenCageGeocode(key)

collected_dict = {}


def stitcher(current_list_dates, current_list_vals, starting_list_dates, starting_list_vals):
    # Append only the portion of the incoming series that starts after the
    # last date already present in the accumulated series.
    crop_index = 0
    cropped_list_dates = []
    cropped_list_vals = []
    for current_date in current_list_dates:
        if current_date > starting_list_dates[len(starting_list_dates) - 1]:
            cropped_list_dates = current_list_dates[crop_index:]
            cropped_list_vals = current_list_vals[crop_index:]
            starting_list_dates += cropped_list_dates
            starting_list_vals += cropped_list_vals
            break
        else:
            crop_index += 1
    return starting_list_dates, starting_list_vals


for ixp in ixps:
    collected_dict[ixp] = {}
    collected_dict[ixp]['city'] = pkl_2020_08_01_2020_08_31[ixp]['city']
    collected_dict[ixp]['country'] = pkl_2020_08_01_2020_08_31[ixp]['country']
    collected_dict[ixp]['country_code'] = pkl_2020_08_01_2020_08_31[ixp]['country_code']
    collected_dict[ixp]['continent'] = pkl_2020_08_01_2020_08_31[ixp]['continent']

    keys_1 = list(pkl_2020_08_01_2020_08_31[ixp].keys())
    keys_2 = list(pkl_2020_09_01_2020_09_30[ixp].keys())
    keys_3 = list(pkl_2020_10_01_2020_10_31[ixp].keys())
    keys_4 = list(pkl_2020_11_01_2020_11_30[ixp].keys())
    keys_5 = list(pkl_2020_12_01_2020_12_31[ixp].keys())

    print(ixp)

    if 'week' in keys_1 and 'week' in keys_2 and 'week' in keys_3 and 'week' in keys_4 and 'week' in keys_5:
        week_2020_08_01_2020_08_31 = pkl_2020_08_01_2020_08_31[ixp]['week']
        base_list_dates = week_2020_08_01_2020_08_31['dates']
        base_list_vals = week_2020_08_01_2020_08_31['values']
        starting_list_dates = base_list_dates
        starting_list_vals = base_list_vals
        ####################################################################
        week_2020_09_01_2020_09_30 = pkl_2020_09_01_2020_09_30[ixp]['week']
        current_list_dates = week_2020_09_01_2020_09_30['dates']
        current_list_vals = week_2020_09_01_2020_09_30['values']
        result_to_2020_09_30 = stitcher(current_list_dates, current_list_vals, starting_list_dates, starting_list_vals)
        ####################################################################
        week_2020_10_01_2020_10_31 = pkl_2020_10_01_2020_10_31[ixp]['week']
        current_list_dates = week_2020_10_01_2020_10_31['dates']
        current_list_vals = week_2020_10_01_2020_10_31['values']
        result_to_2020_10_31 = stitcher(current_list_dates, current_list_vals, result_to_2020_09_30[0], result_to_2020_09_30[1])
        ####################################################################
        week_2020_11_01_2020_11_30 = pkl_2020_11_01_2020_11_30[ixp]['week']
        current_list_dates = week_2020_11_01_2020_11_30['dates']
        current_list_vals = week_2020_11_01_2020_11_30['values']
        result_to_2020_11_30 = stitcher(current_list_dates, current_list_vals, result_to_2020_10_31[0], result_to_2020_10_31[1])
        ####################################################################
        week_2020_12_01_2020_12_31 = pkl_2020_12_01_2020_12_31[ixp]['week']
        current_list_dates = week_2020_12_01_2020_12_31['dates']
        current_list_vals = week_2020_12_01_2020_12_31['values']
        result_to_2020_12_31 = stitcher(current_list_dates, current_list_vals, result_to_2020_11_30[0], result_to_2020_11_30[1])
        ####################################################################
        collected_dict[ixp]['week'] = {
            'dates': result_to_2020_12_31[0],
            'values': result_to_2020_12_31[1]
        }

    if 'month' in keys_1 and 'month' in keys_2 and 'month' in keys_3 and 'month' in keys_4 and 'month' in keys_5:
        month_2020_08_01_2020_08_31 = pkl_2020_08_01_2020_08_31[ixp]['month']
        base_list_dates = month_2020_08_01_2020_08_31['dates']
        base_list_vals = month_2020_08_01_2020_08_31['values']
        starting_list_dates = base_list_dates
        starting_list_vals = base_list_vals
        ####################################################################
        month_2020_09_01_2020_09_30 = pkl_2020_09_01_2020_09_30[ixp]['month']
        current_list_dates = month_2020_09_01_2020_09_30['dates']
        current_list_vals = month_2020_09_01_2020_09_30['values']
        result_to_2020_09_30 = stitcher(current_list_dates, current_list_vals, starting_list_dates, starting_list_vals)
        ####################################################################
        month_2020_10_01_2020_10_31 = pkl_2020_10_01_2020_10_31[ixp]['month']
        current_list_dates = month_2020_10_01_2020_10_31['dates']
        current_list_vals = month_2020_10_01_2020_10_31['values']
        result_to_2020_10_31 = stitcher(current_list_dates, current_list_vals, result_to_2020_09_30[0], result_to_2020_09_30[1])
        ####################################################################
        month_2020_11_01_2020_11_30 = pkl_2020_11_01_2020_11_30[ixp]['month']
        current_list_dates = month_2020_11_01_2020_11_30['dates']
        current_list_vals = month_2020_11_01_2020_11_30['values']
        result_to_2020_11_30 = stitcher(current_list_dates, current_list_vals, result_to_2020_10_31[0], result_to_2020_10_31[1])
        ####################################################################
        month_2020_12_01_2020_12_31 = pkl_2020_12_01_2020_12_31[ixp]['month']
        current_list_dates = month_2020_12_01_2020_12_31['dates']
        current_list_vals = month_2020_12_01_2020_12_31['values']
        result_to_2020_12_31 = stitcher(current_list_dates, current_list_vals, result_to_2020_11_30[0], result_to_2020_11_30[1])
        ####################################################################
        collected_dict[ixp]['month'] = {
            'dates': result_to_2020_12_31[0],
            'values': result_to_2020_12_31[1]
        }

    if 'year' in keys_1 and 'year' in keys_2 and 'year' in keys_3 and 'year' in keys_4 and 'year' in keys_5:
        year_2020_08_01_2020_08_31 = pkl_2020_08_01_2020_08_31[ixp]['year']
        base_list_dates = year_2020_08_01_2020_08_31['dates']
        base_list_vals = year_2020_08_01_2020_08_31['values']
        starting_list_dates = base_list_dates
        starting_list_vals = base_list_vals
        ####################################################################
        year_2020_09_01_2020_09_30 = pkl_2020_09_01_2020_09_30[ixp]['year']
        current_list_dates = year_2020_09_01_2020_09_30['dates']
        current_list_vals = year_2020_09_01_2020_09_30['values']
        result_to_2020_09_30 = stitcher(current_list_dates, current_list_vals, starting_list_dates, starting_list_vals)
        ####################################################################
        year_2020_10_01_2020_10_31 = pkl_2020_10_01_2020_10_31[ixp]['year']
        current_list_dates = year_2020_10_01_2020_10_31['dates']
        current_list_vals = year_2020_10_01_2020_10_31['values']
        result_to_2020_10_31 = stitcher(current_list_dates, current_list_vals, result_to_2020_09_30[0], result_to_2020_09_30[1])
        ####################################################################
        year_2020_11_01_2020_11_30 = pkl_2020_11_01_2020_11_30[ixp]['year']
        current_list_dates = year_2020_11_01_2020_11_30['dates']
        current_list_vals = year_2020_11_01_2020_11_30['values']
        result_to_2020_11_30 = stitcher(current_list_dates, current_list_vals, result_to_2020_10_31[0], result_to_2020_10_31[1])
        ####################################################################
        year_2020_12_01_2020_12_31 = pkl_2020_12_01_2020_12_31[ixp]['year']
        current_list_dates = year_2020_12_01_2020_12_31['dates']
        current_list_vals = year_2020_12_01_2020_12_31['values']
        result_to_2020_12_31 = stitcher(current_list_dates, current_list_vals, result_to_2020_11_30[0], result_to_2020_11_30[1])
        ####################################################################
        collected_dict[ixp]['year'] = {
            'dates': result_to_2020_12_31[0],
            'values': result_to_2020_12_31[1]
        }

del collected_dict['IX.br (PTT.br) Maceió']
del collected_dict['IXPN Lagos']
del collected_dict['IX.br (PTT.br) Rio de Janeiro']
del collected_dict['DE-CIX Frankfurt']
del collected_dict['MIX-IT']

ixps = list(collected_dict.keys())

for ixp in ixps:
    query = collected_dict[ixp]['city']
    results = geocoder.geocode(query)
    collected_dict[ixp]['latitude'] = results[0]['geometry']['lat']
    collected_dict[ixp]['longitude'] = results[0]['geometry']['lng']

start_year = 2020
start_month = 8
start_day = 1
end_year = 2021
end_month = 1
end_day = 1

file_string = str(datetime.date(start_year, start_month, start_day)) + '_' + str(datetime.date(end_year, end_month, end_day) - datetime.timedelta(days=1))

pickle.dump(collected_dict, open(file_string + ".pkl", "wb"))
```
authors: ["noreply@github.com"] · author_id: bunanna.noreply@github.com
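A toy check of `stitcher` from the record above, showing how it drops the overlapping prefix of the incoming series before appending; the `datetime.date` values are illustrative, and any comparable type works.

```python
import datetime

d = datetime.date
base_dates = [d(2020, 8, 29), d(2020, 8, 30), d(2020, 8, 31)]
base_vals = [1, 2, 3]
# The September series overlaps the last two days of August.
sep_dates = [d(2020, 8, 30), d(2020, 8, 31), d(2020, 9, 1)]
sep_vals = [2, 3, 4]

dates, vals = stitcher(sep_dates, sep_vals, base_dates, base_vals)
print(dates[-1], vals)  # 2020-09-01 [1, 2, 3, 4]
```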
| field | value |
|---|---|
| blob_id | 68118a13a4c819afa341c240ca2dcaf28e553e99 |
| directory_id | c3d826906b9a1be0e62c7767fc54ac22ad05a1f7 |
| path | /python/85_MaximalRectangle.py |
| content_id | 19389e33c44dac4792eed99670f00093ac43cda9 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | davidhuangdw/leetcode |
| snapshot_id | 177a9d48b3a5886928bea7edd053189bf839b50f |
| revision_id | 2af692b9f88f7f0f46e0baedd5884030e26a3d78 |
| branch_name | refs/heads/master |
| visit_date | 2020-04-14T05:46:10.515670 |
| revision_date | 2019-05-12T14:35:15 |
| committer_date | 2019-05-13T13:16:56 |
| github_id | 163,668,418 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 941 |
| extension | py |
```python
from unittest import TestCase

# https://leetcode.com/problems/maximal-rectangle


class MaximalRectangle(TestCase):
    def maximalRectangle(self, matrix: 'List[List[str]]') -> 'int':
        if not matrix: return 0
        n, m = len(matrix), len(matrix[0])
        # cur[j] = height of the run of consecutive '1's ending at the current row
        cur, res = [0] * m, 0
        for i in range(n):
            # largest rectangle in the histogram `cur`, via a monotonic stack
            st = []
            for j in range(m + 1):
                if j < m:
                    cur[j] = cur[j] + 1 if matrix[i][j] == '1' else 0
                while st and (j == m or cur[st[-1]] >= cur[j]):
                    h = cur[st.pop()]
                    res = max(res, h * (j - 1 - (st[-1] if st else -1)))
                st.append(j)
        return res

    def test1(self):
        matrix = [
            ["1", "0", "1", "0", "0"],
            ["1", "0", "1", "1", "1"],
            ["1", "1", "1", "1", "1"],
            ["1", "0", "0", "1", "0"]
        ]
        self.assertEqual(6, self.maximalRectangle(matrix))
```
authors: ["davidhuangdw@163.com"] · author_id: davidhuangdw@163.com
| field | value |
|---|---|
| blob_id | 2e93dd16f33679cf624508de7f8b7e21caf4b6e6 |
| directory_id | 05173075e7d1decb2e904b3970e52f61c4ab064c |
| path | /lists/urls.py |
| content_id | 8b35bb3c27d216c378e0b1b4dcf6dca0e08de38f |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | justinheart/superlists |
| snapshot_id | e9cb080663a7cbfd8de6b0262eecd6ec0c116eca |
| revision_id | 534e71a1e54be7d626d238c4f78cf7855c53af27 |
| branch_name | refs/heads/master |
| visit_date | 2021-04-05T04:27:16.423608 |
| revision_date | 2020-03-29T11:38:31 |
| committer_date | 2020-03-29T11:38:31 |
| github_id | 248,519,644 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 892 |
| extension | py |
"""superlists URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from lists import views
urlpatterns = [
url(r'^new$', views.new_list, name='new_list'),
url(r'^(\d+)/$', views.view_list, name='view_list'),
url(r'^(\d+)/add_item$', views.add_item, name='add_item'),
]
authors: ["justin2018777@gmail.com"] · author_id: justin2018777@gmail.com
| field | value |
|---|---|
| blob_id | 9e0905260d459f9814368e3e49fbd32a31ca72c6 |
| directory_id | 1957c31e65605b6f83cf938ad5724b57f98fcf9e |
| path | /main.py |
| content_id | 67a0c83d88c8e6bb2dfcb162212fc98bec97e687 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | TheTycoon/BinarySpacePartition |
| snapshot_id | 22cc16fce0e4072233c449dfc000983ca0b56ef3 |
| revision_id | 180e93cc9c7f23aa1695e61ce7c64341ada16bee |
| branch_name | refs/heads/master |
| visit_date | 2021-01-11T05:48:48.295225 |
| revision_date | 2016-11-19T02:16:29 |
| committer_date | 2016-11-19T02:16:29 |
| github_id | 71,738,399 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 3,689 |
| extension | py |
```python
import pygame
import settings
import node
from random import randint

# Set up a window
window = pygame.display.set_mode((settings.WIDTH, settings.HEIGHT))
pygame.display.set_caption(settings.TITLE)

# parent node starts off with all of the rows and columns in a map
# generations[0]
generations = []
parent_node = node.Node(settings.COLUMNS, settings.ROWS, (0, 0), 0)
generations.append([parent_node])

# 1st generation of kids
generations.append(parent_node.split_node())

# 2nd generation of kids
# (note: the loop variable shadows the `node` module from here on)
generations.append([])
for node in generations[1]:
    generations[2].extend(node.split_node())

# 3rd generation of kids
generations.append([])
for node in generations[2]:
    generations[3].extend(node.split_node())

# 4th generation of kids
generations.append([])
for node in generations[3]:
    generations[4].extend(node.split_node())

# make rooms
rooms = []
for node in generations[4]:
    room = node.make_room()
    rooms.append(room)

# make hallways / connect rooms
'''
This still needs a lot of work and clarification
'''
hallways = []
for i in range(0, len(rooms), 2):
    test_position = pygame.math.Vector2(rooms[i].position.x + int(rooms[i].columns / 3) * settings.TILESIZE, rooms[i].position.y + rooms[i].height)
    test_height = rooms[i + 1].position.y - (rooms[i].position.y + rooms[i].height)
    test_width = settings.TILESIZE
    test_rect = pygame.Rect(test_position, (test_width, test_height))
    hallways.append(test_rect)

hallways_two = []
for i in range(0, len(rooms), 4):
    if randint(0, 100) < 50:
        i += 1
    test_position = pygame.math.Vector2(rooms[i].position.x + rooms[i].width, rooms[i].position.y + int(rooms[i].rows / 3) * settings.TILESIZE)
    test_height = settings.TILESIZE
    test_width = rooms[i + 2].position.x - (rooms[i].position.x + rooms[i].width)
    test_rect = pygame.Rect(test_position, (test_width, test_height))
    hallways_two.append(test_rect)

hallways_three = []
for i in range(0, len(rooms), 8):
    if randint(0, 100) < 50:
        i += 2
    test_position = pygame.math.Vector2(rooms[i].position.x + int(rooms[i].columns / 3) * settings.TILESIZE, rooms[i].position.y + rooms[i].height)
    test_height = rooms[i + 4].position.y - (rooms[i].position.y + rooms[i].height)
    test_width = settings.TILESIZE
    test_rect = pygame.Rect(test_position, (test_width, test_height))
    hallways_three.append(test_rect)

hallways_four = []
for i in range(len(rooms)):
    if (i == 2 or i == 3 or i == 6 or i == 7) and randint(0, 100) < 50:
        test_position = pygame.math.Vector2(rooms[i].position.x + rooms[i].width, rooms[i].position.y + int(rooms[i].rows / 3) * settings.TILESIZE)
        test_height = settings.TILESIZE
        test_width = rooms[i + 6].position.x - (rooms[i].position.x + rooms[i].width)
        test_rect = pygame.Rect(test_position, (test_width, test_height))
        hallways_four.append(test_rect)

# MAIN LOOP
running = True
while running:
    # EVENT LOOP
    for event in pygame.event.get():
        # Close program on quit
        if event.type == pygame.QUIT:
            running = False

    # DRAW STUFF
    for i in range(len(rooms)):
        pygame.draw.rect(window, settings.WHITE, rooms[i].rect)
    for i in range(len(hallways)):
        pygame.draw.rect(window, settings.WHITE, hallways[i])
    for i in range(len(hallways_two)):
        pygame.draw.rect(window, settings.WHITE, hallways_two[i])
    for i in range(len(hallways_three)):
        pygame.draw.rect(window, settings.WHITE, hallways_three[i])
    for i in range(len(hallways_four)):
        pygame.draw.rect(window, settings.WHITE, hallways_four[i])

    # DISPLAY FRAME
    pygame.display.update()
```
authors: ["jsbalebo@gmail.com"] · author_id: jsbalebo@gmail.com
| field | value |
|---|---|
| blob_id | 9fd6aead869336e65940baa8ea0e2047c69c7a15 |
| directory_id | 0eb6cde4979fe6c5659b546eeb3b50840977e59c |
| path | /bifrost/migrations/3_create_table_tco.py |
| content_id | 3f11d61f1a90d692f0fb80221852327f64a9154f |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | devjyotipatra/bifrost_client |
| snapshot_id | 109d08fa1c1f7215bd1227753835c9b94bc70f17 |
| revision_id | ac63dd17a6ab83ba736b233740c859224e9bba2c |
| branch_name | refs/heads/master |
| visit_date | 2020-07-05T13:26:24.807561 |
| revision_date | 2019-08-16T10:44:58 |
| committer_date | 2019-08-16T10:44:58 |
| github_id | 202,659,945 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 1,956 |
| extension | py |

Note: this file is Python 2 source (bare `print context`), preserved as such below.
"""create_table_tco
Revision ID: 3
Create Date: 2019-08-16 07:57:23.828357
"""
# revision identifiers, used by Bifrost.
from data_app.migrations import Migrations
version = 3
import logging
from qds_sdk.commands import HiveCommand
from mako.template import Template
def upgrade(context):
logging.debug("Execute upgrade of `%d`" % version)
template = Template("""
CREATE TABLE IF NOT EXISTS qubole_bi_${env}_${account_id}.tco_table (
node_id BIGINT,
cluster_inst_id INT,
cluster_id INT,
instance_type STRING,
availability_zone STRING,
region STRING,
hour BIGINT,
price DOUBLE,
node_run_time INT,
approx_per_hour_price DOUBLE,
ondemand_price DOUBLE,
up_time STRING,
down_time STRING,
node_type STRING
)
PARTITIONED BY (account_id INT, event_date STRING)
STORED AS ORC
LOCATION '${defloc}/qubole_bi/tco_table/'
""")
context["revisions.upgraded"].append(version)
Migrations.upgrade(migration_number=version,
command=HiveCommand.run(query=template.render_unicode(env=context["env"],
account_id = context['account_id'],
defloc = context['defloc'])))
print context
def downgrade(context):
logging.debug("Execute downgrade of `%d`" % version)
template = Template("""
DROP TABLE IF EXISTS qubole_bi_${env}_${account_id}.tco_table;
""")
Migrations.downgrade(migration_number=version,
command=HiveCommand.run(query=template.render_unicode(env=context["env"],
account_id = context['account_id'])))
authors: ["djpatra@gmail.com"] · author_id: djpatra@gmail.com
| field | value |
|---|---|
| blob_id | 606d6d4094d75038349d8e7f7a51de637406baed |
| directory_id | 45df59aabd5637fc2d6da8a6a33d69e8041b556b |
| path | /action_handlers.py |
| content_id | 58b9772f2f5aaf33b9a7c0d478edc28e04e53950 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | tjbok/text_adventure_template |
| snapshot_id | 51508a49e0a2650d8e447c17bbcfc0090a77bd89 |
| revision_id | a9a3370a84e8590ef8d026377d14cb811ef0e1cc |
| branch_name | refs/heads/master |
| visit_date | 2021-01-01T05:20:19.304273 |
| revision_date | 2020-07-25T19:53:33 |
| committer_date | 2020-07-25T19:53:33 |
| github_id | 58,801,844 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 3,056 |
| extension | py |
```python
### THIS FILE CONTAINS ACTION HANDLERS FOR YOUR ACTIONS ###

# To add a new action handler, first create a function for your action
# and then "bind" the handler to your action in the bottom section of the file.


def Get(context, item):
    if item["key"] == "ALL":
        context.items.GetAll()
    elif item["key"] in context.player.inventory:
        context.PrintItemInString("You're already carrying @.", item)
    elif (not item.get("takeable?")):
        context.Print("You can't pick that up!")
    else:
        context.items.GetItem(item["key"])


def Drop(context, item):
    if item["key"] == "ALL":
        context.items.DropAll()
    elif not (item["key"] in context.player.inventory):
        context.PrintItemInString("You're not carrying @.", item)
    else:
        context.items.DropItem(item["key"])


def Examine(context, item):
    examine_string = item.get("examine_string")
    if (not examine_string == None) and (len(examine_string) > 0):
        context.Print(examine_string)
    else:
        context.PrintItemInString("You see nothing special about @.", item)


def Inventory(context):
    context.Print("You are carrying:")
    if len(context.player.inventory) == 0:
        context.Print(" Nothing")
    else:
        for item_key in context.player.inventory:
            context.Print(" a " + context.items.GetLongDescription(item_key))


def Help(context):
    context.Print("This is a text adventure game.")
    context.Print("Enter commands like \'GO NORTH\' or \'TAKE ROCK\' to tell the computer what you would like to do.")
    context.Print("Most commands are either one or two words.")
    context.Print("For a full list of commands, type \'ACTIONS\'.")


def Actions(context):
    print("Available actions:")
    for action_key in sorted(context.actions.actions_dictionary):
        if context.actions[action_key].get("suppress_in_actions_list?"):
            continue
        print_string = " "
        i = 0
        for word in context.actions.actions_dictionary[action_key]["words"]:
            if i > 0:
                print_string += " / "
            print_string += word
            i += 1
        context.Print(print_string)


def Quit(context):
    context.state.quit_pending = True
    context.Print("Are you sure you want to quit (Y/N)?")


def Yes(context):
    context.Print("You sound really positive!")


def No(context):
    context.Print("You sound awfully negative!")


def Wait(context):
    context.Print("Time passes...")


# Here is where you "bind" your action handler function to a specific action.
def Register(context):
    actions = context.actions
    actions.AddActionHandler("GET", Get)
    actions.AddActionHandler("DROP", Drop)
    actions.AddActionHandler("EXAMINE", Examine)
    actions.AddActionHandler("INVENTORY", Inventory)
    actions.AddActionHandler("HELP", Help)
    actions.AddActionHandler("ACTIONS", Actions)
    actions.AddActionHandler("QUIT", Quit)
    actions.AddActionHandler("YES", Yes)
    actions.AddActionHandler("NO", No)
    actions.AddActionHandler("WAIT", Wait)
```
authors: ["tomas.j.bok@gmail.com"] · author_id: tomas.j.bok@gmail.com
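Following the file's own recipe ("create a function for your action... then bind the handler"), a new action might look like the sketch below; the `SING` action key is hypothetical and would also need an entry in the game's actions dictionary.

```python
def Sing(context):
    # A hypothetical no-argument handler in the same style as Wait/Yes/No.
    context.Print("You hum a few bars. Nobody applauds.")


# Then, inside Register(context), bind it like the others:
#     actions.AddActionHandler("SING", Sing)
```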
| field | value |
|---|---|
| blob_id | 603a2c443b782534f90876987853f69994b38971 |
| directory_id | c9a92954b02d308162bd67fd0709432ff3d4797e |
| path | /notifications/backends/console.py |
| content_id | 8c36fcb26039f397e48da87ef4da6c06a9e65d3d |
| detected_licenses | ["MIT"] |
| license_type | permissive |
| repo_name | danidee10/django-notifs |
| snapshot_id | 2edb1d577c8a5e929983e96e23c7f90aee030bfc |
| revision_id | fceee6fd6ce362decd7374c36de716a682e9f9dc |
| branch_name | refs/heads/master |
| visit_date | 2023-05-25T02:18:32.740667 |
| revision_date | 2023-05-13T17:05:04 |
| committer_date | 2023-05-13T17:05:04 |
| github_id | 95,060,651 |
| star_events_count | 164 |
| fork_events_count | 26 |
| gha_license_id | MIT |
| gha_event_created_at | 2022-01-31T13:33:36 |
| gha_created_at | 2017-06-22T01:11:40 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 737 |
| extension | py |
"""Console backend"""
import logging
from unittest.mock import patch
from notifications.providers import BaseNotificationProvider
from .synchronous import SynchronousBackend
class ConsoleBackend(SynchronousBackend):
logger = logging.getLogger('django_notifs.backends.console')
def produce(self, provider, payload, context, countdown):
provider_class = BaseNotificationProvider.get_provider_class(provider)
patcher_send = patch(f'{provider_class}.send')
patcher_send_bulk = patch(f'{provider_class}.send_bulk')
patcher_send.start()
patcher_send_bulk.start()
super().produce(provider, payload, context, countdown)
patcher_send.stop()
patcher_send_bulk.stop()
authors: ["noreply@github.com"] · author_id: danidee10.noreply@github.com
| field | value |
|---|---|
| blob_id | 4714b19ef393107f335432fe7e72e94485932b98 |
| directory_id | ca8a64db90961130c2075edbe54c6e80d5f44cad |
| path | /src/densities.py |
| content_id | 7ebe522422cefebc13c37d605ebef469ed4c0d19 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | VishakhG/normalizing-flows |
| snapshot_id | e1a1ea6ea40b546a3b08d7219a972c46d37874cd |
| revision_id | ec2c6663dfd15826f6df24ecf5bb194da2045027 |
| branch_name | refs/heads/master |
| visit_date | 2023-04-04T13:09:36.845931 |
| revision_date | 2020-06-14T21:52:30 |
| committer_date | 2020-06-14T21:52:30 |
| github_id | 236,381,377 |
| star_events_count | 8 |
| fork_events_count | 3 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 1,324 |
| extension | py |
```python
import torch
import math

"""
Potential functions U(x) from Rezende et al. 2015
p(z) is then proportional to exp(-U(x)).
Since we log this value later in the optimized bound,
no need to actually exp().
"""


def w_1(z):
    return torch.sin((2 * math.pi * z[:, 0]) / 4)


def w_2(z):
    return 3 * torch.exp(-.5 * ((z[:, 0] - 1) / .6) ** 2)


def sigma(x):
    return 1 / (1 + torch.exp(-x))


def w_3(z):
    return 3 * sigma((z[:, 0] - 1) / .3)


def pot_1(z):
    z_1, z_2 = z[:, 0], z[:, 1]
    norm = torch.sqrt(z_1 ** 2 + z_2 ** 2)

    outer_term_1 = .5 * ((norm - 2) / .4) ** 2
    inner_term_1 = torch.exp((-.5 * ((z_1 - 2) / .6) ** 2))
    inner_term_2 = torch.exp((-.5 * ((z_1 + 2) / .6) ** 2))
    outer_term_2 = torch.log(inner_term_1 + inner_term_2 + 1e-7)
    u = outer_term_1 - outer_term_2
    return - u


def pot_2(z):
    u = .5 * ((z[:, 1] - w_1(z)) / .4) ** 2
    return - u


def pot_3(z):
    term_1 = torch.exp(-.5 * (
        (z[:, 1] - w_1(z)) / .35) ** 2)
    term_2 = torch.exp(-.5 * (
        (z[:, 1] - w_1(z) + w_2(z)) / .35) ** 2)
    u = - torch.log(term_1 + term_2 + 1e-7)
    return - u


def pot_4(z):
    term_1 = torch.exp(-.5 * ((z[:, 1] - w_1(z)) / .4) ** 2)
    term_2 = torch.exp(-.5 * ((z[:, 1] - w_1(z) + w_3(z)) / .35) ** 2)
    u = - torch.log(term_1 + term_2)
    return - u
```
authors: ["vishakhgopu@gmail.com"] · author_id: vishakhgopu@gmail.com
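One way to visualize these potentials, sketched under the record's own convention that each `pot_*` returns `-U(z)`, so `exp` of the returned value is the unnormalized density; the grid size and plot range here are arbitrary choices.

```python
import torch
import matplotlib.pyplot as plt

# Evaluate pot_1 on a 2-D grid and show exp(-U(z)) as a heatmap.
xs = torch.linspace(-4, 4, 200)
grid = torch.cartesian_prod(xs, xs)            # shape (200 * 200, 2)
density = torch.exp(pot_1(grid)).reshape(200, 200)
plt.imshow(density.T, origin='lower', extent=[-4, 4, -4, 4])
plt.show()
```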
| field | value |
|---|---|
| blob_id | c514109ec84609b9a01c7a0b602dfc0656270161 |
| directory_id | 8292f7c7a98a4e1f0b6a7a96a0d5d237cf7d97c3 |
| path | /rango/views.py |
| content_id | a3d82f3b25cd423a16aef63f350a81307509899c |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | gflouriz/tango_with_django_project |
| snapshot_id | ca12589be5ea0b8cb9a3edd830aa1f5ae77b5f17 |
| revision_id | 335b507c962f4a68e274769f86064daf2e7aee8b |
| branch_name | refs/heads/master |
| visit_date | 2020-12-20T20:15:40.463833 |
| revision_date | 2020-02-13T23:36:06 |
| committer_date | 2020-02-13T23:36:06 |
| github_id | 236,198,381 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 10,689 |
| extension | py |
from django.shortcuts import render, redirect
from django.http import HttpResponse
from rango.models import Category, Page
from rango.forms import CategoryForm, PageForm, UserForm, UserProfileForm
from django.urls import reverse
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from datetime import datetime
# A helper method
def get_server_side_cookie(request, cookie, default_val=None):
val = request.session.get(cookie)
if not val:
val = default_val
return val
def visitor_cookie_handler(request):
visits = int(get_server_side_cookie(request, 'visits', '1'))
last_visit_cookie = get_server_side_cookie(request,
'last_visit',
str(datetime.now()))
last_visit_time = datetime.strptime(last_visit_cookie[:-7],
'%Y-%m-%d %H:%M:%S')
# If it's been more than a day since the last visit...
if (datetime.now() - last_visit_time).days > 0:
visits = visits + 1
# Update the last visit cookie now that we have updated the count
request.session['last_visit'] = str(datetime.now())
else:
# Set the last visit cookie
request.session['last_visit'] = last_visit_cookie
# Update/set the visits cookie
request.session['visits'] = visits
def index(request):
# Query the database for a list of ALL categories currently stored.
# Order the categories by the number of likes in descending order.
# Retrieve the top 5 only -- or all if less than 5.
# Place the list in our context_dict dictionary (with our boldmessage!)
# that will be passed to the template engine.
category_list = Category.objects.order_by('-likes')[:5]
page_list = Page.objects.order_by('-views')[:5]
context_dict = {}
context_dict['boldmessage'] ='Crunchy, creamy, cookie, candy, cupcake!'
context_dict['categories'] = category_list
context_dict['pages'] = page_list
visitor_cookie_handler(request)
# Obtain our Response object early so we can add cookie information.
response = render(request, 'rango/index.html', context=context_dict)
# Return response back to the user, updating any cookies that need changed.
return response
def about(request):
context_dict = {}
visitor_cookie_handler(request)
context_dict['visits'] = request.session['visits']
return render(request, 'rango/about.html', context=context_dict)
def show_category(request, category_name_slug):
# Create a context dictionary which we can pass
# to the template rendering engine
context_dict = {}
try:
# Can we find a category name slug with the given name?
# If we can't, the .get() method raises a DoesNotExist exception.
# The .get() method returns one model instance or raises an exception.
category = Category.objects.get(slug=category_name_slug)
# Retrieve all of the associated pages.
# The filter() will return a list of page objects or an empty list.
pages = Page.objects.filter(category=category)
# Adds our results list to the template context under name pages.
context_dict['pages'] = pages
# We also add the category object from
# the database to the context dictionary.
# We'll use this in the template to verify that the category exists.
context_dict['category'] = category
except Category.DoesNotExist:
# We get here if we didn't find the specified category.
# Don't do anything -
# the template will display the "no category" message for us.
context_dict['category'] = None
context_dict['pages'] = None
# Go render the response and return it to the client.
return render(request, 'rango/category.html', context=context_dict)
@login_required
def add_category(request):
form = CategoryForm()
# A HTTP POST?
if request.method == 'POST':
form = CategoryForm(request.POST)
# Have we been provided with a valid form?
if form.is_valid():
# Save the new category to the database.
form.save(commit=True)
# Now that the category is saved, we could confirm this.
# For now, just redirect the user back to the index view.
return redirect('/rango/')
else:
# The supplied form contained errors just print them to the terminal.
print(form.errors)
# Will handle the bad form, new form, or no form supplied cases.
# Render the form with error messages (if any).
return render(request, 'rango/add_category.html', {'form': form})
@login_required
def add_page(request, category_name_slug):
try:
category = Category.objects.get(slug=category_name_slug)
except Category.DoesNotExist:
category = None
# You cannot add a page to a Category that does not exist...
if category is None:
return redirect('/rango/')
form = PageForm()
if request.method == 'POST':
form = PageForm(request.POST)
if form.is_valid():
if category:
page = form.save(commit=False)
page.category = category
page.views = 0
page.save()
return redirect(reverse('rango:show_category',
kwargs={'category_name_slug':
category_name_slug}))
else:
print(form.errors)
context_dict = {'form': form, 'category':category}
return render(request, 'rango/add_page.html', context=context_dict)
def register(request):
# A boolean value for telling the template
# whether the registration was successful.
# Set to False initially. Code changes value to
# True when registration succeeds.
registered = False
# If it's a HTTP POST, we're interested in processing form data
if request.method == 'POST':
# Attempt to grab information from the raw form information.
# Note that we make use of both UserForm and UserProfileForm.
user_form = UserForm(request.POST)
profile_form = UserProfileForm(request.POST)
# If the two forms are valid...
if user_form.is_valid() and profile_form.is_valid():
# Save the user's form data to the database.
user = user_form.save()
# Now we hash the password with the set_password method.
# Once hashed, we can update the user object.
user.set_password(user.password)
user.save()
# Now sort out the UserProfile instance.
# Since we need to set the user attribute ourselves,
# we set commit=False. This delays saving the model
# until we're ready to avoid integrity problems.
profile = profile_form.save(commit=False)
profile.user = user
# Did the user provide a profile picture?
# If so, we need to get it from the input form and
# put it in the UserProfile model.
if 'picture' in request.FILES:
profile.picture = request.FILES['picture']
# Now we save the UserProfile model instance.
profile.save()
# Update our variable to indicate that the template
# registration was successful.
registered = True
else:
# Invalid form or forms - mistakes or something else?
# Print problems to the terminal.
print(user_form.errors, profile_form.errors)
else:
# Not a HTTP POST, so we render our form using two ModelForm instances.
# These forms will be blank, ready for user input.
user_form = UserForm()
profile_form = UserProfileForm()
# Render the template depending on the context
return render(request, 'rango/register.html',
context = {'user_form':user_form,
'profile_form':profile_form,
'registered': registered})
def user_login(request):
# If the request is a HTTP POST, try to pull out the relevant information.
if request.method == 'POST':
# Gather the username and password provided by the user.
# This information is obtained from the login form.
# We use request.POST.get('<variable>') as opposed
# to request.POST['<variable>'], because the
# request.POST.get('<variable>') returns None if the
# value does not exist, while request.POST['<variable>']
# will raise a KeyError exception.
username = request.POST.get('username')
password = request.POST.get('password')
# Use Django's machinery to attempt to see if the username/password
# combination is valid - a User object is returned if it is.
user = authenticate(username=username, password=password)
# If we have a User object, the details are correct.
# If None (Python's way of representing the absence of a value), no user
# with matching credentials was found.
if user:
# Is the account active? It could have been disabled.
if user.is_active:
# If the account is valid and active, we can log the user in.
# We'll send the user back to the homepage.
login(request, user)
return redirect(reverse('rango:index'))
else:
# An inactive account was used - no logging in!
return HttpResponse("Your Rango account is disabled.")
else:
# Bad login details were provided. So we can't log the user in.
print(f"Invalid login details: {username}, {password}")
return HttpResponse("Invalid login details supplied.")
# The request is not a HTTP POST, so display the login form.
# This scenario would most likely be a HTTP GET.
else:
# No context variables to pass to the template system, hence the
# blank dictionary object...
return render(request, 'rango/login.html')
@login_required
def restricted(request):
return render(request, 'rango/restricted.html')
# Use the login_required() decorator to ensure only those logged in can
# access the view
@login_required
def user_logout(request):
# Since we know the user is logged in, we can now just log them out.
logout(request)
# Take the user back to the homepage.
return redirect(reverse('rango:index'))
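For reference, a minimal rango/urls.py sketch that is consistent with the reverse() names used above (rango:index, rango:show_category, and so on); the URL paths themselves are assumptions, not taken from this project, and views.index / views.show_category are assumed to be defined earlier in views.py:
from django.urls import path
from rango import views

app_name = 'rango'

urlpatterns = [
    path('', views.index, name='index'),
    path('category/<slug:category_name_slug>/', views.show_category, name='show_category'),
    path('category/<slug:category_name_slug>/add_page/', views.add_page, name='add_page'),
    path('add_category/', views.add_category, name='add_category'),
    path('register/', views.register, name='register'),
    path('login/', views.user_login, name='login'),
    path('restricted/', views.restricted, name='restricted'),
    path('logout/', views.user_logout, name='logout'),
]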
|
[
"2382904f@student.gla.ac.uk"
] |
2382904f@student.gla.ac.uk
|
fe326c7a1f655f416726ba7ab761365162799b48
|
07dbe29817df214ac7d23e3511cea991b56d4e84
|
/DiANa/InsSub/DeInsSub.py
|
a2c317746999909569cf509a4707819816bc964e
|
[] |
no_license
|
RemainRain/DiANa
|
813379513a142a2d559e25347abac59ed3e09960
|
2646f394f3b4c572f5c3a27360bba7501b786bc8
|
refs/heads/master
| 2023-05-13T11:59:13.818538
| 2020-03-14T23:24:01
| 2020-03-14T23:24:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,218
|
py
|
# -*- coding: utf-8 -*-
from barf import BARF
import sys
from DeInsSubUtil import *
import barf.arch.arm.translator
import os
# if __name__ == '__main__':
def De_IS(filename, start):
# sys.argv = ["test.py", "/Users/mark/Desktop/de_obfuscator/IS/New-test/total/inssub", "0x694"]
# if len(sys.argv) != 3:
# print 'Usage: python DeInsSub.py filename function_address(hex)'
# exit(0)
# filename = sys.argv[1]
    start = int(start, 16)
barf = BARF(filename)
base_addr = barf.binary.entry_point >> 12 << 12
cfg = barf.recover_cfg(start)
blocks = cfg.basic_blocks
print('The function has %d blocks. ' % len(blocks))
origin = open(filename, 'rb')
data = list(origin.read())
for block in blocks:
opposite = []
        # Find the MVN instructions used, then record pairs of registers holding opposite values
# for ins in block.instrs:
# if ins.mnemonic_full.startswith(u'mvn'):
# if ins.operands[0].name not in opposite:
# opposite[ins.operands[0].name] = ins.operands[1].name
block_size = len(block.instrs)
ADDHex, ADDnop = check_add(block, block_size)
data = fix_substitution(data, ADDHex, ADDnop, base_addr)
SUBHex, SUBnop = check_sub(block, block_size)
data = fix_substitution(data, SUBHex, SUBnop, base_addr)
XORHex, XORnop = check_xor(block, block_size)
data = fix_substitution(data, XORHex, XORnop, base_addr)
ANDHex, ANDnop = check_and(block, block_size)
data = fix_substitution(data, ANDHex, ANDnop, base_addr)
ORHex, ORnop = check_or(block, block_size)
data = fix_substitution(data, ORHex, ORnop, base_addr)
origin.close()
# recovery = open(filename + '_recovered', 'wb')
path = sys.argv[3]
if not os.path.exists(path + filename.split('/')[-2] + '/'):
os.mkdir(path + filename.split('/')[-2] + '/')
recovery = open(path + filename.split('/')[-2] + '/' + filename.split('/')[-1] + '_recovered', 'wb')
recovery.write(''.join(data))
recovery.close()
    print('Successful! The recovered file: %s' % (filename + '_recovered'))
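A hedged driver sketch for De_IS (the binary path, function address, and output directory below are made up; note this is Python 2-era code and the function reads its output directory from sys.argv[3]):
import sys
# De_IS expects the output directory in sys.argv[3], so fake an argv for it.
sys.argv = ['DeInsSub.py', 'unused', 'unused', '/tmp/recovered/']
De_IS('/path/to/samples/inssub/binary', '0x694')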
|
[
"kanzeliang945@gmail.com"
] |
kanzeliang945@gmail.com
|
f3c47f8eeb65b45224fa8eb02f50a2d0c89e375b
|
153fd363d1127d8e6dc809516eb9436dc81335fe
|
/venv/Scripts/pip3.8-script.py
|
499989ca5ae017f5c6b6f67ec41c77ef72ed84e7
|
[] |
no_license
|
MehrdadKamelzadeh/django_started
|
7ed950ea63471f62042129350de0d89f368dfeec
|
52ea2c564521a6ec5de173cf845f06180a943030
|
refs/heads/master
| 2021-05-25T20:42:42.887595
| 2020-04-07T21:08:50
| 2020-04-07T21:08:50
| 253,912,801
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
#!C:\Users\Mehrdad\PycharmProjects\django_started\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
|
[
"mehrdad.kamelzadeh@gmail.com"
] |
mehrdad.kamelzadeh@gmail.com
|
9346b461b869d42f8809bb42ec48f7438a393149
|
de8e4b8b43cbf1374dd65a028c3e85951a21a11f
|
/fast-exps/lib/models/new_prop_prototype.py
|
02024f940aa1dda7cd534e8ffcd8a261a8f533e6
|
[] |
no_license
|
tcwltcwl/URT
|
626a94d7ad94c712a25602ef30cefb61ff959229
|
edc551f286ac3b0726370db70db7d6b3b0359f36
|
refs/heads/master
| 2023-04-14T04:30:35.526937
| 2021-04-21T06:48:49
| 2021-04-21T06:48:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,673
|
py
|
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import random, math
# TODO: integrate the two functions into the following codes
def get_dotproduct_score(proto, cache, model):
proto_emb = model['linear_q'](proto)
s_cache_emb = model['linear_k'](cache)
raw_score = F.cosine_similarity(proto_emb.unsqueeze(1), s_cache_emb.unsqueeze(0), dim=-1)
return raw_score
def get_mlp_score(proto, cache, model):
n_proto, fea_dim = proto.shape
n_cache, fea_dim = cache.shape
raw_score = model['w']( model['nonlinear'](model['w1'](proto).view(n_proto, 1, fea_dim) + model['w2'](cache).view(1, n_cache, fea_dim) ) )
return raw_score.squeeze(-1)
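Both scorers take model as a plain dict of modules. A minimal sketch of the expected keys (dimensions are assumptions; w1 and w2 must map fea_dim back to fea_dim so the view() calls in get_mlp_score line up):
import torch
from torch import nn

fea_dim, hid_dim = 512, 128
model = {
    'linear_q': nn.Linear(fea_dim, hid_dim),  # dot-product scorer
    'linear_k': nn.Linear(fea_dim, hid_dim),
    'w1': nn.Linear(fea_dim, fea_dim),        # MLP scorer
    'w2': nn.Linear(fea_dim, fea_dim),
    'nonlinear': nn.ReLU(),
    'w': nn.Linear(fea_dim, 1),
}
proto, cache = torch.randn(5, fea_dim), torch.randn(20, fea_dim)
print(get_dotproduct_score(proto, cache, model).shape)  # torch.Size([5, 20])
print(get_mlp_score(proto, cache, model).shape)         # torch.Size([5, 20])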
# this model does not need query, only key and value
class MultiHeadURT_value(nn.Module):
def __init__(self, fea_dim, hid_dim, temp=1, n_head=1):
super(MultiHeadURT_value, self).__init__()
self.w1 = nn.Linear(fea_dim, hid_dim)
self.w2 = nn.Linear(hid_dim, n_head)
self.temp = temp
def forward(self, cat_proto):
# cat_proto n_class*8*512
n_class, n_extractors, fea_dim = cat_proto.shape
raw_score = self.w2(self.w1(cat_proto)) # n_class*8*n_head
score = F.softmax(self.temp * raw_score, dim=1)
return score
class URTPropagation(nn.Module):
def __init__(self, key_dim, query_dim, hid_dim, temp=1, att="cosine"):
super(URTPropagation, self).__init__()
self.linear_q = nn.Linear(query_dim, hid_dim, bias=True)
self.linear_k = nn.Linear(key_dim, hid_dim, bias=True)
#self.linear_v_w = nn.Parameter(torch.rand(8, key_dim, key_dim))
self.linear_v_w = nn.Parameter( torch.eye(key_dim).unsqueeze(0).repeat(8,1,1))
self.temp = temp
self.att = att
# how different the init is
for m in self.modules():
if isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.001)
def forward_transform(self, samples):
bs, n_extractors, fea_dim = samples.shape
'''
if self.training:
w_trans = torch.nn.functional.gumbel_softmax(self.linear_v_w, tau=10, hard=True)
else:
# y_soft = torch.softmax(self.linear_v_w, -1)
# index = y_soft.max(-1, keepdim=True)[1]
index = self.linear_v_w.max(-1, keepdim=True)[1]
y_hard = torch.zeros_like(y_soft, memory_format=torch.legacy_contiguous_format).scatter_(-1, index, 1.0)
w_trans = y_hard
# w_trans = y_hard - y_soft.detach() + y_soft
'''
w_trans = self.linear_v_w
# compute regularization
regularization = w_trans @ torch.transpose(w_trans, 1, 2)
samples = samples.view(bs, n_extractors, fea_dim, 1)
w_trans = w_trans.view(1, 8, fea_dim, fea_dim)
return torch.matmul(w_trans, samples).view(bs, n_extractors, fea_dim), (regularization**2).sum()
def forward(self, cat_proto):
# cat_proto n_class*8*512
# return: n_class*8
n_class, n_extractors, fea_dim = cat_proto.shape
q = cat_proto.view(n_class, -1) # n_class * 8_512
k = cat_proto # n_class * 8 * 512
q_emb = self.linear_q(q) # n_class * hid_dim
k_emb = self.linear_k(k) # n_class * 8 * hid_dim | 8 * hid_dim
if self.att == "cosine":
raw_score = F.cosine_similarity(q_emb.view(n_class, 1, -1), k_emb.view(n_class, n_extractors, -1), dim=-1)
elif self.att == "dotproduct":
raw_score = torch.sum( q_emb.view(n_class, 1, -1) * k_emb.view(n_class, n_extractors, -1), dim=-1 ) / (math.sqrt(fea_dim))
else:
raise ValueError('invalid att type : {:}'.format(self.att))
score = F.softmax(self.temp * raw_score, dim=1)
return score
class MultiHeadURT(nn.Module):
def __init__(self, key_dim, query_dim, hid_dim, temp=1, att="cosine", n_head=1):
super(MultiHeadURT, self).__init__()
layers = []
for _ in range(n_head):
layer = URTPropagation(key_dim, query_dim, hid_dim, temp, att)
layers.append(layer)
self.layers = nn.ModuleList(layers)
def forward(self, cat_proto):
score_lst = []
for i, layer in enumerate(self.layers):
score = layer(cat_proto)
score_lst.append(score)
# n_class * n_extractor * n_head
return torch.stack(score_lst, dim=-1)
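A quick smoke test of MultiHeadURT on random data (shapes follow the comments above; all values are dummies):
import torch

n_class, n_extractors, fea_dim = 10, 8, 512
urt = MultiHeadURT(key_dim=fea_dim, query_dim=n_extractors * fea_dim,
                   hid_dim=128, temp=1, att='cosine', n_head=2)
cat_proto = torch.randn(n_class, n_extractors, fea_dim)
score = urt(cat_proto)
print(score.shape)  # torch.Size([10, 8, 2]): n_class x n_extractors x n_head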
def get_lambda_urt_sample(context_features, context_labels, target_features, num_labels, model, normalize=True):
if normalize:
context_features = F.normalize(context_features, dim=-1)
target_features = F.normalize(target_features, dim=-1)
score_context, urt_context = model(context_features)
score_target, urt_target = model(target_features)
proto_list = []
for label in range(num_labels):
proto = urt_context[context_labels == label].mean(dim=0)
proto_list.append(proto)
urt_proto = torch.stack(proto_list)
# n_samples*8*512
return score_context, urt_proto, score_target, urt_target
def get_lambda_urt_avg(context_features, context_labels, num_labels, model, normalize=True):
if normalize:
context_features = F.normalize(context_features, dim=-1)
proto_list = []
for label in range(num_labels):
proto = context_features[context_labels == label].mean(dim=0)
proto_list.append(proto)
proto = torch.stack(proto_list)
# n_class*8*512
score_proto = model(proto)
# n_extractors * n_head
return torch.mean(score_proto, dim=0)
def apply_urt_avg_selection(context_features, selection_params, normalize, value="sum", transform=None):
selection_params = torch.transpose(selection_params, 0, 1) # n_head * 8
n_samples, n_extractors, fea_dim = context_features.shape
urt_fea_lst = []
if normalize:
context_features = F.normalize(context_features, dim=-1)
regularization_losses = []
for i, params in enumerate(selection_params):
# class-wise lambda
if transform:
trans_features, reg_loss = transform.module.layers[i].forward_transform(context_features)
regularization_losses.append(reg_loss)
else:
trans_features = context_features
if value == "sum":
urt_features = torch.sum(params.view(1,n_extractors,1) * trans_features, dim=1) # n_sample * 512
elif value == "cat":
urt_features = params.view(1,n_extractors,1) * trans_features # n_sample * 8 * 512
urt_fea_lst.append(urt_features)
if len(regularization_losses) == 0:
return torch.stack( urt_fea_lst, dim=1 ).view(n_samples, -1) # n_sample * (n_head * 512) or n_sample * (8 * 512)
else:
return torch.stack( urt_fea_lst, dim=1 ).view(n_samples, -1), sum(regularization_losses)
def apply_urt_selection(context_features, context_labels, selection_params, normalize):
# class-wise lambda
if normalize:
context_features = F.normalize(context_features, dim=-1)
lambda_lst = []
for lab in context_labels:
lambda_lst.append(selection_params[lab])
lambda_tensor = torch.stack(lambda_lst, dim=0)
n_sample, n_extractors = lambda_tensor.shape
urt_features = torch.sum(lambda_tensor.view(n_sample, n_extractors, 1) * context_features, dim=1)
return urt_features
class PropagationLayer(nn.Module):
def __init__(self, input_dim=512, hid_dim=128, temp=1, transform=False):
super(PropagationLayer, self).__init__()
self.linear_q = nn.Linear(input_dim, hid_dim, bias=False)
self.linear_k = nn.Linear(input_dim, hid_dim, bias=False)
self.temp = temp
if transform:
self.transform = nn.Linear(input_dim, input_dim)
for m in self.modules():
if isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.001)
def forward(self, proto, s_cache, data2nclss, use_topk):
        # nn.Module keeps submodules in _modules, not __dict__, so the original
        # "'transform' in self.__dict__" test always missed; hasattr() works.
        if hasattr(self, 'transform'):
proto = self.transform(proto)
s_cache = self.transform(s_cache)
proto_emb = self.linear_q(proto)
s_cache_emb = self.linear_k(s_cache)
raw_score = F.cosine_similarity(proto_emb.unsqueeze(1), s_cache_emb.unsqueeze(0), dim=-1)
score = F.softmax(self.temp * raw_score, dim=1)
prop_proto = torch.matmul( score, s_cache ) # n_class * n_cache @ n_cache * n_dim
if random.random() > 0.99:
print("top_1_idx: {} in {} cache".format(torch.topk(raw_score, 1)[1], len(s_cache)))
print("score: {}".format(score))
print("mean:{}, var:{}, min:{}, max:{}".format(torch.mean(score, dim=1).data, torch.var(score, dim=1).data, torch.min(score, dim=1)[0].data, torch.max(score, dim=1)[0].data))
return raw_score, prop_proto
class MultiHeadPropagationLayer(nn.Module):
def __init__(self, input_dim, hid_dim, temp, transform, n_head):
super(MultiHeadPropagationLayer, self).__init__()
layers = []
for _ in range(n_head):
layer = PropagationLayer(input_dim, hid_dim, temp, transform)
layers.append(layer)
self.layers = nn.ModuleList(layers)
def forward(self, proto, s_cache, data2nclss, use_topk):
raw_score_lst, prop_proto_lst = [], []
for i, layer in enumerate(self.layers):
raw_score, prop_proto = layer(proto, s_cache, data2nclss, use_topk)
if torch.isnan(raw_score).any() or torch.isnan(prop_proto).any(): import pdb; pdb.set_trace()
raw_score_lst.append(raw_score)
prop_proto_lst.append(prop_proto)
return torch.stack(raw_score_lst, dim=0).mean(0), torch.stack(prop_proto_lst, dim=0).mean(0)
def get_prototypes(features, labels, num_labels, model, cache):
proto_list = []
for label in range(num_labels):
proto = features[labels == label].mean(dim=0)
proto_list.append(proto)
proto = torch.stack(proto_list)
num_devices = torch.cuda.device_count()
num_slots, feature_dim = cache.shape
cache_for_parallel = cache.view(1, num_slots, feature_dim).expand(num_devices, num_slots, feature_dim)
raw_score, prop_proto = model(proto, cache_for_parallel)
return raw_score, proto, prop_proto
|
[
"lu.liu-10@student.uts.edu.au"
] |
lu.liu-10@student.uts.edu.au
|
11160e2ef9df518f000d2f3db8f33868ab9a9b4f
|
700e609d6fd2968e5a604434b9ba07676c5e3445
|
/dmsuMarket/celery_tasks/sms/tasks.py
|
e1dca9a812ed8c2e7156faf76bd218aba902f4de
|
[
"MIT"
] |
permissive
|
IzayoiRin/DmSuMarket
|
6c7f1d45df8bdf0c1a57a78bd36071627087f89c
|
ead8380ca7edca573c920fe78ca99ec59f9564ca
|
refs/heads/master
| 2020-06-13T06:52:57.563318
| 2019-07-06T07:45:06
| 2019-07-06T07:45:06
| 194,470,285
| 0
| 0
|
MIT
| 2019-06-30T07:16:27
| 2019-06-30T03:21:32
| null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
import random
import time
from ..main import celery_app
@celery_app.task(name='send_sms_code')
def send_sms_code(*args):
time.sleep(random.randint(0,2))
print('>>>>>>>>>>>>Clouds Communicating>>>>>>>>>>>>>>')
print('%s>>>>>>>>>>>>>>>>>>>>>>%s' % args)
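A hedged producer-side sketch (the broker configuration lives in celery_tasks.main and is assumed to be set up; the phone number and code are made up):
from celery_tasks.sms.tasks import send_sms_code
# Exactly two positional args, matching the '%s...%s' % args format above.
send_sms_code.delay('13012345678', '123456')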
|
[
"izayoi@rin.com"
] |
izayoi@rin.com
|
1055112127981d61be418bf85ba34050ac329364
|
525d7afceef6e15157829778cbaeb0ada24c9960
|
/website/test/django-polls/setup.py
|
51daf0dd0f3493bd8768a2961304505cf2d9a0fe
|
[] |
no_license
|
alcesarini/horizon
|
52b4a6334e22fed2505545a891b9388513d00c76
|
f45b5c50f855cbd9e998a66604d8c2ee287f2cc8
|
refs/heads/master
| 2016-08-12T06:50:08.974292
| 2016-01-09T17:00:48
| 2016-01-09T17:00:48
| 48,617,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,227
|
py
|
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-polls',
version='0.1',
packages=['polls'],
include_package_data=True,
license='BSD License', # example license
description='A simple Django app to conduct Web-based polls.',
long_description=README,
url='https://www.example.com/',
author='Your Name',
author_email='yourname@example.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
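Typical packaging usage for a setup.py like this one (a sketch; run from the directory that contains it): python setup.py sdist builds a source distribution under dist/, and pip install dist/django-polls-0.1.tar.gz installs the result.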
|
[
"alcesarini@gmail.com"
] |
alcesarini@gmail.com
|
56f4d9e049c2a753c8848b86ca9c533f33b182e9
|
c4ffd208162ac2bcd86c3ea4b7a6ab419d462291
|
/cgi-bin/load_post.py
|
8541eae749fc0bdc7fc2f064e47d336f28ead6ee
|
[] |
no_license
|
captainhcg/acfun
|
99e321ee41df6475af8339179fe00dd20dfc2778
|
1884356cdcb8196556d1b626b321e00b8b064421
|
refs/heads/master
| 2021-01-19T08:42:33.757576
| 2012-04-17T18:59:58
| 2012-04-17T18:59:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,538
|
py
|
# coding=utf-8
import ac_core
conn = ac_core.connect()
cursor = conn.cursor()
def draw_post(k, a=''):
    if k.strip() == '':
        return "at least one keyword must be assigned"
keywords = k.split(' ')
words_cond = "ac_article_name LIKE %s"
for index, word in enumerate(keywords):
keywords[index] = '%'+word+'%'
if index != 0:
words_cond += " AND ac_article_name LIKE %s"
author_cond = '1'
if len(a)>0:
author_cond = "ac_author_name LIKE %s"
keywords.append(a)
num_words = len(keywords)
sql = "SELECT a.ac_article_name, a.ac_article_link, a.ac_article_number, a.ac_author_name, a.ac_article_date "\
"FROM ac_article AS a "\
"WHERE ("""+words_cond+""") AND """+author_cond+" ORDER BY ac_article_date ASC"
n = cursor.execute(sql, keywords);
if n == 0:
return "no result"
if n > 100:
return "too many result, please use more accurate keywords"
articles = cursor.fetchall()
post = draw_sortable_ul(articles)
post += "<div class='wiki_nav_buttons'><input type='button' value='下一步' class='button wiki_button_next' onclick='process_posts()'></div>"
return post+load_js()
def draw_sortable_ul(articles):
ul = "<ul id='wiki_sortable' class='wiki_post_ul'>"
for article in articles:
ul += draw_sortable_li(article)
ul += "</ul>"
return ul
def draw_sortable_li(article):
li = "<li id='wiki_post_"+str(article[2])+"' number="+str(article[2])+" class='wiki_post_li ui-state-default'><span class='ui-icon ui-icon-arrowthick-2-n-s'></span>"
li +="<span class='wiki_post_title' title='"+article[0].encode('utf-8')+"'>"
if len(article[0])>24:
li += article[0][:23].encode('utf-8')+"...</span>"
else:
li += article[0].encode('utf-8')+"</span>"
li += "<span class='wiki_post_button ui-icon ui-icon-closethick' style='float:right' onclick=\"remove_post('"+str(article[2])+"')\" title='删除'></span>"
li += "<span class='wiki_post_button ui-icon ui-icon-play' style='float:right' onclick=\"open_post('"+str(article[2])+"')\" title='查看'></span>"
li += "<span class='wiki_post_date'>"+str(article[4])+"</span>"
li += "<span class='wiki_post_author'>"+article[3].encode('utf-8')+"</span>"
return li+'</li>'
def load_js():
js = """
<script>
$(function() {
$( "#wiki_sortable" ).sortable();
$( "#wiki_sortable" ).disableSelection();
});
</script>"""
return js
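A minimal driver sketch (assumes a reachable database with the ac_article table queried above; the keyword and author strings are made up):
html = draw_post('acfun wiki', a='someauthor')
print(html)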
|
[
"captainhcg@gmail.com"
] |
captainhcg@gmail.com
|
ee4c05679f2931e2aa4ede64a5b724ef7ce5e2eb
|
49086c6520cf6271205ae42eccf4c08e90211d5d
|
/Login/migrations/0003_auto_20201102_2002.py
|
cf0f48d235124b1bdcc9730e93efdf5993265f65
|
[] |
no_license
|
Prince294/Django-SignIn-SignUp-Page
|
f0310b9b7c37d7ed2bb1f6f9f093bf7cf7e4cd20
|
202af37b7fc096a3c46fca48c379e211f02b4eed
|
refs/heads/main
| 2023-01-03T14:01:53.721154
| 2020-11-07T11:36:10
| 2020-11-07T11:36:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
# Generated by Django 3.1.2 on 2020-11-02 14:32
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Login', '0002_auto_20201102_1806'),
]
operations = [
migrations.RenameField(
model_name='signup',
old_name='Username',
new_name='Uname',
),
]
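Applied as usual with python manage.py migrate Login; Django generated this RenameField after the signup model's Username field was renamed to Uname.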
|
[
"noreply@github.com"
] |
Prince294.noreply@github.com
|
2ab25f3de78fade911890ed86c4022a817459485
|
607c8e4ab9f3d319742ae2d495e3a3c1af696aee
|
/eval.py
|
9e2f04246eb227312aadc71fc2eef9a561785e99
|
[] |
no_license
|
zjf0822/TextCNN
|
36f3e8a3685e02f3fb1ee40849f0490883f576bd
|
be9028f287982bef2cd1823b4da57a8dc02e2d66
|
refs/heads/master
| 2020-03-16T02:20:06.837238
| 2018-05-07T13:14:33
| 2018-05-07T13:14:33
| 132,462,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,683
|
py
|
#! /usr/bin/env python
import csv
import os
import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn
import data_helpers
# Parameters
# ==================================================
# Data Parameters
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")
# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "", "Checkpoint directory from training run")
tf.flags.DEFINE_boolean("eval_train", False, "Evaluate on all training data")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
# CHANGE THIS: Load data. Load your own data here
if FLAGS.eval_train:
x_raw, y_test = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)
y_test = np.argmax(y_test, axis=1)
else:
x_raw = ["a masterpiece four years in the making", "everything is off."]
y_test = [1, 0]
# Map data into vocabulary
vocab_path = os.path.join(FLAGS.checkpoint_dir, "..", "vocab")
vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)
x_test = np.array(list(vocab_processor.transform(x_raw)))
print("\nEvaluating...\n")
# Evaluation
# ==================================================
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x = graph.get_operation_by_name("input_x").outputs[0]
# input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# Tensors we want to evaluate
predictions = graph.get_operation_by_name("output/predictions").outputs[0]
# Generate batches for one epoch
batches = data_helpers.batch_iter(list(x_test), FLAGS.batch_size, 1, shuffle=False)
# Collect the predictions here
all_predictions = []
for x_test_batch in batches:
batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})
all_predictions = np.concatenate([all_predictions, batch_predictions])
# Print accuracy if y_test is defined
if y_test is not None:
correct_predictions = float(sum(all_predictions == y_test))
print("Total number of test examples: {}".format(len(y_test)))
print("Accuracy: {:g}".format(correct_predictions/float(len(y_test))))
# Save the evaluation to a csv
predictions_human_readable = np.column_stack((np.array(x_raw), all_predictions))
out_path = os.path.join(FLAGS.checkpoint_dir, "..", "prediction.csv")
print("Saving evaluation to {0}".format(out_path))
with open(out_path, 'w') as f:
csv.writer(f).writerows(predictions_human_readable)
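A typical invocation (the run directory name is hypothetical): python eval.py --checkpoint_dir=./runs/1525698000/checkpoints/ --eval_train=True. Predictions are written to prediction.csv one level above the checkpoint directory, as the out_path join above shows.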
|
[
"noreply@github.com"
] |
zjf0822.noreply@github.com
|
2a80b7f2ba95c08ccd1719551a91e6628d95b91e
|
d156560d96e3860fc291484f368757debb78871b
|
/sample_prediction.py
|
ffcfb6d91db58f1622fc6b4902ffc494dfed872d
|
[] |
no_license
|
timcallow/euro20_stochastic
|
4c139a9c140e2d05afa0fd69886400c6eaaf88a2
|
7824cbc05657cfe343a82d13d28e176336c85f9d
|
refs/heads/main
| 2023-05-15T23:56:07.549277
| 2021-06-11T11:30:43
| 2021-06-11T11:30:43
| 376,001,938
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
"""
Example input file for prediction of euro 2020 match
"""
import stochasticmodel
team1 = stochasticmodel.Team("Germany")
team2 = stochasticmodel.Team("France")
print(
"Match prediction: "
+ " ".join([team1.name, str(team1.score), "-", str(team2.score), team2.name])
)
|
[
"t.callow@hzdr.de"
] |
t.callow@hzdr.de
|
c3d0de22c121cb4e7a40f9ee7dbdefe55148a230
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/sieve-big-8204.py
|
c8284994a9d91b15a9ac7250620e44db2de5eea1
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,757
|
py
|
# A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
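A quick plain-Python check of that growth rule (a sketch; limit mirrors doubling_limit):
cap, limit, trace = 1, 1000, []
for _ in range(14):
    # Mirrors increase_capacity: double while capacity <= limit // 2, else +1.
    cap = cap * 2 if cap <= limit // 2 else cap + 1
    trace.append(cap)
print(trace)  # [2, 4, 8, 16, 32, 64, 128, 256, 512, 513, 514, 515, 516, 517]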
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
5285bbeb6719efe2617117dfc281492901dd9fec
|
46c3b9a26561e9a98c5bdcee781b800e2f09e006
|
/image_gen_old.py
|
f6e15f38b7ae30b164d48eabd6fa77c90670d230
|
[] |
no_license
|
Ridgebeck/CarND-Advanced-Lane-Lines
|
4a8e626702d87e986cf3288f47b4ec90c813deb1
|
6affabe265aeea2c44cead7a0a48db4d67e01535
|
refs/heads/master
| 2020-03-29T05:27:55.486692
| 2017-06-24T22:47:35
| 2017-06-24T22:47:35
| 94,654,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,712
|
py
|
import cv2
import numpy as np
import glob
import pickle
from tracker import tracker
import matplotlib.pyplot as plt
# read in the saved objpoints and imgpoints
dist_pickle = pickle.load(open("./calibration_file/calibration_pickle.p", "rb"))
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0,255)):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
if orient == 'x':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0))
if orient == 'y':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1))
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
binary_output = np.zeros_like(scaled_sobel)
# apply threshold
binary_output[(scaled_sobel >= thresh[0])&(scaled_sobel <= thresh[1])] = 1
return binary_output
def mag_threshold(image, sobel_kernel=3, mag_thresh=(0,255)):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # use the argument, not the global img
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    gradmag = np.sqrt(sobelx**2 + sobely**2)  # magnitude is gx^2 + gy^2; a minus sign yields NaNs
    scale_factor = np.max(gradmag)/255  # rescale to 8 bit (dividing, not multiplying)
    gradmag = (gradmag/scale_factor).astype(np.uint8)
    binary_output = np.zeros_like(gradmag)
    # apply threshold
    binary_output[(gradmag >= mag_thresh[0])&(gradmag <= mag_thresh[1])] = 1
    return binary_output
def dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi/2)):
# calculate gradient direction
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # use the argument, not the global img
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
with np.errstate(divide='ignore', invalid='ignore'):
absgraddir = np.absolute(np.arctan(sobely/sobelx))
binary_output = np.zeros_like(absgraddir)
# apply threshold
binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
return binary_output
def color_threshold(image, sthresh=(0,255), vthresh=(0,255)):
hls = cv2.cvtColor(image, cv2.COLOR_BGR2HLS)
s_channel = hls[:,:,2]
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >=sthresh[0]) & (s_channel <= sthresh[1])] = 1
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
v_channel = hsv[:,:,2]
v_binary = np.zeros_like(v_channel)
# apply threshold
    v_binary[(v_channel >= vthresh[0]) & (v_channel <= vthresh[1])] = 1  # was sthresh by mistake
output = np.zeros_like(s_channel)
output[(s_binary == 1) & (v_binary == 1)] = 1
return output
def window_mask(width, height, img_ref, center, level):
output = np.zeros_like(img_ref)
output[int(img_ref.shape[0]-(level+1)*height):int(img_ref.shape[0]-level*height), max(0,int(center-width)):min(int(center+width), img_ref.shape[1])] = 1
return output
# make a list of test images
images = glob.glob('./test_images/test*.jpg')
for idx, fname in enumerate(images):
# read and undistort image
img = cv2.imread(fname)
img = cv2.undistort(img, mtx, dist, None, mtx)
# process image and generate binary pixels of interest
preprocessImage = np.zeros_like(img[:,:,0])
gradx = abs_sobel_thresh(img, orient='x', thresh=(25,255)) #12
grady = abs_sobel_thresh(img, orient='y', thresh=(35,255)) #25
c_binary = color_threshold(img, sthresh=(100,255), vthresh=(50,255))
preprocessImage[((gradx == 1) & (grady == 1) | (c_binary == 1))] = 255
#plt.imshow(preprocessImage, cmap = plt.get_cmap('gray'))
#plt.show()
# work on defining perspective transformation area
img_size = (img.shape[1], img.shape[0])
bot_width = .76 # percent of bottom trapezoid height 0.76
mid_width = .08 # percent of middle trapezoid height 0.08
height_pct = .62 # percent of trapezoid height 0.62
bottom_trim = .92 # percent from top to bottom to avoid car hood 0.935
src = np.float32([[img_size[0]*(.5-mid_width/2), img_size[1]*height_pct], [img_size[0]*(.5+mid_width/2), img_size[1]*height_pct], [img_size[0]*(.5+bot_width/2), img_size[1]*bottom_trim], [img_size[0]*(.5-bot_width/2), img_size[1]*bottom_trim]])
offset = img_size[0]*.25
dst = np.float32([[offset, 0], [img_size[0]-offset, 0], [img_size[0]-offset, img_size[1]], [offset, img_size[1]]])
#src = np.float32([[281,659], [598,446], [681,446], [1027,659]])
#dst = np.float32([[381,659], [381,0], [927,0], [927,659]])
# perform the transform
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
#warped = cv2.warpPerspective(preprocessImage, M, img_size, flags=cv2.INTER_LINEAR)
binary_warped = cv2.warpPerspective(preprocessImage, M, img_size, flags=cv2.INTER_LINEAR)
plt.imshow(binary_warped, cmap = plt.get_cmap('gray'))
plt.show()
#histogram = np.sum(warped[warped.shape[0]//2:,:], axis=0)
#plt.plot(histogram)
#plt.show()
# INSERTED HERE:
# Assuming you have created a warped binary image called "binary_warped"
# Take a histogram of the bottom half of the image
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]/2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Choose the number of sliding windows
nwindows = 9
# Set height of windows
    window_height = int(binary_warped.shape[0] // nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 100 #50
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
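    # Note: x is fitted as a function of y (x = A*y**2 + B*y + C) because lane
    # lines are near-vertical in image space, so x(y) remains single-valued.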
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
plt.imshow(out_img)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)
plt.show()
# Assume you now have a new warped binary image
# from the next frame of video (also called "binary_warped")
# It's now much easier to find line pixels!
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
margin = 100
left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] + margin)))
right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] + margin)))
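    # The band search keeps only pixels whose x lies within +/- margin of the
    # previous fit, i.e. |x - (A*y**2 + B*y + C)| < margin, avoiding another
    # full sliding-window pass.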
# Again, extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
"""
# Create an image to draw on and an image to show the selection window
out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
#cv2.polylines(window_img, example, isClosed=False, color=(0,255,0), thickness=5, lineType=8, shift=0)
warped_color = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
plt.imshow(warped_color)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)
plt.show()
"""
line_marker_width = 20
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-line_marker_width, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+line_marker_width, ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-line_marker_width, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+line_marker_width, ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
lane_poly_pts = np.hstack((left_line_window2, right_line_window1))
marker_image = np.zeros_like(np.dstack((binary_warped, binary_warped, binary_warped))*255)
cv2.fillPoly(marker_image, np.int_([left_line_pts]), (255,0, 0))
cv2.fillPoly(marker_image, np.int_([right_line_pts]), (0,0, 255))
cv2.fillPoly(marker_image, np.int_([lane_poly_pts]), (0,120, 0))
# Calculate line in the middle
bottom_center = left_fitx[-1] + (right_fitx[-1] - left_fitx[-1])/2
    lower_third = left_fitx[len(left_fitx)//3] + (right_fitx[len(right_fitx)//3] - left_fitx[len(left_fitx)//3])/2
    upper_third = left_fitx[2*len(left_fitx)//3] + (right_fitx[2*len(right_fitx)//3] - left_fitx[2*len(left_fitx)//3])/2
    top_center = left_fitx[0] + (right_fitx[0] - left_fitx[0])/2
    center_pts_x = [bottom_center, lower_third, upper_third, top_center]
    center_pts_y = [ploty[-1], ploty[len(ploty)//3], ploty[2*len(ploty)//3], ploty[0]]
middle_fit = np.polyfit(center_pts_y, center_pts_x, 2)
middle_fitx = middle_fit[0]*ploty**2 + middle_fit[1]*ploty + middle_fit[2]
middle_points = np.array([np.transpose(np.vstack([middle_fitx, ploty]))])
cv2.polylines(marker_image, np.int_([middle_points]), isClosed=False, color=(255,255,255), thickness=5, lineType=8, shift=0)
plt.imshow(marker_image)
plt.xlim(0, 1280)
plt.ylim(720, 0)
plt.show()
# Right or left turn
if middle_fitx[0] > middle_fitx[-1]:
direction = "right"
else:
direction = "left"
# Calculate curve radius
y_eval = np.max(ploty)-200
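    # Curvature derivation: for x(y) = A*y**2 + B*y + C, dx/dy = 2*A*y + B and
    # d2x/dy2 = 2*A, so the radius R = (1 + (dx/dy)**2)**1.5 / |d2x/dy2|.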
middle_curverad = ((1 + (2*middle_fit[0]*y_eval + middle_fit[1])**2)**1.5) / np.absolute(2*middle_fit[0])
    print(direction)
print(middle_curverad)
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/500 # meters per pixel in x dimension
# Fit new polynomials to x,y in world space
middle_fit_real_world = np.polyfit(ploty*ym_per_pix, middle_fitx*xm_per_pix, 2)
# Calculate the new radii of curvature
middle_curverad_m = ((1 + (2*middle_fit_real_world[0]*y_eval*ym_per_pix + middle_fit_real_world[1])**2)**1.5) / np.absolute(2*middle_fit_real_world[0])
print(middle_curverad_m, 'm')
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(marker_image, Minv, (img_size[0], img_size[1]))
# Combine the result with the original image
result = cv2.addWeighted(img, 1, newwarp, 0.8, 0)
cv2.putText(result, direction + ', radius = ' + str(round(middle_curverad_m, 1)) + '(m)', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
"""
# Fit new polynomials to x,y in world space
left_fit_cr = np.polyfit(ploty*ym_per_pix, leftx*xm_per_pix, 2)
right_fit_cr = np.polyfit(ploty*ym_per_pix, rightx*xm_per_pix, 2)
# Calculate the new radii of curvature
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
# Now our radius of curvature is in meters
print(left_curverad, 'm', right_curverad, 'm')
"""
"""
# set up the overall class to do tracking
window_width = 25
window_height = 80
curve_centers = tracker(Mywindow_width = window_width, Mywindow_height = window_height, Mymargin = 25, My_ym = 10/720, My_xm = 4/384, Mysmooth_factor = 15)
window_centroids = curve_centers.find_window_centroids(binary_warped)
# points used to draw all the left and right windows
l_points = np.zeros_like(binary_warped)
r_points = np.zeros_like(binary_warped)
# points used to find the left and right lanes
rightx = []
leftx = []
# go through each level and draw windows
for level in range(0, len(window_centroids)):
# window_mask is a function to draw window areas
l_mask = window_mask(window_width, window_height, binary_warped, window_centroids[level][0], level)
r_mask = window_mask(window_width, window_height, binary_warped, window_centroids[level][1], level)
# add center value found in frame to the list of lane points per left, right
leftx.append(window_centroids[level][0])
rightx.append(window_centroids[level][1])
# add graphic points from window mask here to total pixels found
l_points[(l_points == 255) | ((l_mask == 1))] = 255
r_points[(r_points == 255) | ((r_mask == 1))] = 255
# draw the results
template = np.array(r_points + l_points, np.uint8) # add both left and right window pixels together
zero_channel = np.zeros_like(template) # create a zero color channel
template = np.array(cv2.merge((zero_channel, template, zero_channel)), np.uint8) # make window pixels green
warpage = np.array(cv2.merge((binary_warped, binary_warped, binary_warped)), np.uint8) # making the original road pixels 3 color channels
result = cv2.addWeighted(warpage, 1, template, 0.5, 0.0) # overlay the original road image with window results
plt.imshow(result, cmap = plt.get_cmap('gray'))
plt.show()
# fit the lane boundaries to the left, right center position found
yvals = range(0, binary_warped.shape[0])
res_yvals = np.arange(binary_warped.shape[0] - (window_height/2), 0, -window_height)
left_fit = np.polyfit(res_yvals, leftx, 2)
left_fitx = left_fit[0] * yvals * yvals + left_fit[1] * yvals + left_fit[2]
left_fitx = np.array(left_fitx, np.int32)
right_fit = np.polyfit(res_yvals, rightx, 2)
right_fitx = right_fit[0] * yvals * yvals + right_fit[1] * yvals + right_fit[2]
right_fitx = np.array(right_fitx, np.int32)
left_lane = np.array(list(zip(np.concatenate((left_fitx - window_width/2, left_fitx[::-1] + window_width/2), axis=0), np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)
right_lane = np.array(list(zip(np.concatenate((right_fitx - window_width/2, right_fitx[::-1] + window_width/2), axis=0), np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)
middle_marker = np.array(list(zip(np.concatenate((right_fitx - window_width/2, right_fitx[::-1] + window_width/2), axis=0), np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)
road = np.zeros_like(img)
road_bkg = np.zeros_like(img)
cv2.fillPoly(road, [left_lane], color=[255, 0, 0])
cv2.fillPoly(road, [right_lane], color=[0, 0, 255])
cv2.fillPoly(road_bkg, [left_lane], color=[255, 255, 255])
cv2.fillPoly(road_bkg, [right_lane], color=[255, 255, 255])
road_warped = cv2.warpPerspective(road, Minv, img_size, flags=cv2.INTER_LINEAR)
road_warped_bkg = cv2.warpPerspective(road_bkg, Minv, img_size, flags=cv2.INTER_LINEAR)
#result = road
base = cv2.addWeighted(img, 1.0, road_warped_bkg, -1.0, 0.0)
result = cv2.addWeighted(base, 1.0, road_warped, 1.0, 0.0)
# Grab values from curve centers
xm_per_pix = curve_centers.xm_per_pix
ym_per_pix = curve_centers.ym_per_pix
curve_fit_cr = np.polyfit(np.array(res_yvals, np.float32) * ym_per_pix, np.array(leftx, np.float32) * xm_per_pix, 2)
curverad = ((1 + (2 * curve_fit_cr[0] * yvals[-1] * ym_per_pix + curve_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * curve_fit_cr[0])
# calculate the offset of the car on the road
camera_center = (left_fitx[-1] + right_fitx[-1])/2
center_diff = (camera_center - binary_warped.shape[1]/2) * xm_per_pix
side_pos = 'left'
if center_diff <= 0:
side_pos = 'right'
# draw the text showing curvature, offset, and speed
cv2.putText(result, 'Radius of Curvature = ' + str(round(curverad, 3)) + '(m)', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
cv2.putText(result, 'Vehicle is ' + str(abs(round(center_diff, 3))) + 'm ' + side_pos + ' of center', (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
"""
write_name = './output_images/tracked' + str(idx) + '.jpg'
cv2.imwrite(write_name, result)
|
[
"beck.niklas@gmail.com"
] |
beck.niklas@gmail.com
|
a7c2ff64776197033f6935ebd084216784ca1b4f
|
1521d32e3a2747054eea03df3195ca0fd52cfe71
|
/src/python/zensols/garmdown/__init__.py
|
1ed5dd6e2c05b14c7469f2b9a750d47327855ca1
|
[
"MIT"
] |
permissive
|
garmin-data/garmdown
|
533c525512914b97cbf42a919d670feb59c3269a
|
42509ddcc11bd7469e3a80d648fabd155657a074
|
refs/heads/master
| 2023-07-05T09:07:36.476348
| 2022-02-28T19:36:01
| 2022-02-28T19:36:01
| 191,933,069
| 15
| 6
|
MIT
| 2023-02-15T23:23:44
| 2019-06-14T11:37:45
|
Python
|
UTF-8
|
Python
| false
| false
| 212
|
py
|
from .domain import *
from .fetcher import *
from .persist import Persister
from .sheets import SheetUpdater
from .backup import *
from .reporter import *
from .mng import *
from .cli import *
from .app import *
|
[
"landes@mailc.net"
] |
landes@mailc.net
|
061203e19c2b6a03466c45a6e838e2ec9f703796
|
bfd75153048a243b763614cf01f29f5c43f7e8c9
|
/1906101064-王楠岚/day0219/作业1.py
|
05f083c6ae754833fa314cc0bdb8dce741c5bc51
|
[] |
no_license
|
gschen/sctu-ds-2020
|
d2c75c78f620c9246d35df262529aa4258ef5787
|
e1fd0226b856537ec653c468c0fbfc46f43980bf
|
refs/heads/master
| 2021-01-01T11:06:06.170475
| 2020-07-16T03:12:13
| 2020-07-16T03:12:13
| 239,245,834
| 17
| 10
| null | 2020-04-18T13:46:24
| 2020-02-09T04:22:05
|
Python
|
UTF-8
|
Python
| false
| false
| 605
|
py
|
# Problem 1
n=int(input())
m=[1,10,20,30,40,50]
def nian(n):
if n == 0:
return 1
else:
return n*nian(n-1)
if n in m:
print("请重新输入")
else:
print(nian(n))
# Problem 2
P = int(input())
R = int(input())
T = int(input())
S = (P*R*T)/100
print("输出%d"%(S))
# Problem 3
print(max([14,25,98,75,23,1,4,56,59]))
# Problem 4
list=[14,25,98,75,23,1,4,56,59]
n=int(input())
def nian(m):
return m*m
print(sum(map(nian,list[:n])))
# Problem 5
a,b=map(int,input().split())
nums=[14,25,98,75,23,1,4,56,59]
num_a,num_b=nums[a],nums[b]
nums[b],nums[a]=num_a,num_b
print(nums)
|
[
"1146964788@qq.com"
] |
1146964788@qq.com
|
7aea77e8b547222523f4bce0c652a23203641427
|
1ca2e4ec519e3808b68c8b0e4af4986fa92e659d
|
/controllers/UserController.py
|
8e44cee3d29178fa07fcb0307b723b1adacccf73
|
[] |
no_license
|
AymericPost/demo-flaskachemy
|
1719b7d9329ac0e3cacf4bfd0531a5cb4bfbc393
|
1892bcc9f75e0eca1818d90b504157b3f7449f25
|
refs/heads/master
| 2023-04-22T04:50:45.744343
| 2021-04-27T14:28:38
| 2021-04-27T14:28:38
| 352,602,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,546
|
py
|
from flask import Blueprint, jsonify, request
from models import db
from models.User import User
user_blueprint = Blueprint("users", __name__)
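# Registration sketch (app setup assumed to live elsewhere): the blueprint only
# takes effect once attached to the Flask app, e.g.
#   app.register_blueprint(user_blueprint, url_prefix="/users")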
@user_blueprint.route("", methods = ["POST"])
def create_user():
body = request.json
if("name" in body and "email" in body):
newUser = User()
newUser.name = body["name"]
newUser.email = body["email"]
if("address_id" in body):
newUser.address_id = body["address_id"]
try:
db.session.add(newUser)
db.session.commit()
return jsonify( newUser.toJson() )
except:
return jsonify({"error": "BAD_REQUEST", "message": "email already exists"}), 400
else:
return jsonify({"error": "BAD_REQUEST", "message": "name and/or email missing"}), 400
@user_blueprint.route("", methods = ["GET"])
def all_users():
resp = User.query.all()
for i in range( len(resp) ):
resp[i] = resp[i].toJson()
return jsonify(resp)
@user_blueprint.route("/<userId>", methods = ["GET"])
def get_user_by_id(userId):
resp = User.query.get(userId)
    if resp is None:
return jsonify({"error": "NOT_FOUND", "message": "No entity found"}), 404
else:
return jsonify( resp.toJson() )
@user_blueprint.route("/search", methods = ["GET"])
def get_user_by_name():
term = request.args["q"]
resp = User.query.filter_by(name = term).all()
for i in range( len(resp) ):
resp[i] = resp[i].toJson()
return jsonify(resp)
@user_blueprint.route("", methods = ["PUT"])
def update_user():
body = request.json
if("id" not in body):
return jsonify({"error": "BAD_REQUEST", "message": "no id in request"}), 400
updating = User.query.get(body["id"])
if("name" in body):
updating.name = body["name"]
if("email" in body):
updating.email = body["email"]
if("address_id" in body):
        updating.address_id = body["address_id"]
try:
db.session.commit()
except:
return jsonify({"error": "BAD_REQUEST", "message": "email already exists"}), 400
return jsonify( updating.toJson() )
@user_blueprint.route("/<userId>", methods = ["DELETE"])
def del_user(userId):
usr = User.query.get(userId)
    if usr is None:
return jsonify({"error": "NOT_FOUND", "message": "No entity found"}), 404
else:
db.session.delete(usr)
db.session.commit()
return jsonify({"status": "ACCEPTED", "message": "Entity deleted"}), 202
|
[
"aymeric.post@gmail.com"
] |
aymeric.post@gmail.com
|
069536def38021019ca99e507eb463eb7a4ccd5b
|
a0d429127686bc520d6154846d003dc5e3c7a17c
|
/first/migrations/0006_auto_20210731_1614.py
|
80b18cc645860df8b01d67862f59832686c2e4f6
|
[] |
no_license
|
sailorchen/tenclass_bg
|
742ad8749d0f1c5d1a6d9bd1a0d773c552cedc63
|
823c9ab0c5a4ba0faeb6b5106ee0bbfab12d7e90
|
refs/heads/master
| 2023-07-14T10:42:05.686795
| 2021-08-22T08:09:32
| 2021-08-22T08:09:32
| 387,149,211
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 651
|
py
|
# Generated by Django 3.1.4 on 2021-07-31 08:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('first', '0005_menu_submenu'),
]
operations = [
migrations.RenameField(
model_name='menu',
old_name='menu_name',
new_name='menuName',
),
migrations.AlterField(
model_name='submenu',
name='menu',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='submenu', to='first.menu', verbose_name='parent menu id'),
),
]
|
[
"857893632@qq.com"
] |
857893632@qq.com
|
d22f9e180410bcb47f4308eb442280a1d6958408
|
b3197b795911a2ebdd3308f39d0e7be4b4626a44
|
/homework.4/4.task1.py
|
a81fcfb717c8ca4018adb4ef5c82f3125501d029
|
[] |
no_license
|
VPetrashchu/python-course
|
9e2af9582f1600201c6f28681ead7426501a82b6
|
d188c3f42f7fd70aad1535e0141e7ff5fddd1d8f
|
refs/heads/master
| 2023-02-23T09:41:56.079047
| 2021-01-31T20:12:08
| 2021-01-31T20:12:08
| 317,589,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
number = int(input('Enter number: '))
fact = 1
for i in range(1, number + 1):
fact = fact * i
print('Factorial is: {}'.format(fact))
# while ( number > 0):
# fact = fact * number
# number = number - 1
# print('Factorial is: {}'.format(fact))
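# Cross-check sketch: the standard library computes the same value, e.g.
# math.factorial(5) == 120 (requires "import math").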
|
[
"="
] |
=
|
e3b3a30c72a6c0950086f5a17a603edcf26bcc29
|
a702761341f4210f1235642c0d07e2e3f0fc83bf
|
/setup.py
|
f40e4f953292c42f722d37d57905c7166566f809
|
[
"MIT"
] |
permissive
|
WHU-SoftwareSecurity/final
|
5c93d7436a5e528ed9c559bd7281e41e648b1adc
|
cdb703c274bc62ca7b091495d673e64c3d730f8a
|
refs/heads/main
| 2023-02-05T09:53:20.067946
| 2020-12-31T01:33:37
| 2020-12-31T01:33:37
| 324,720,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
from setuptools import setup, find_packages
setup(
name='src',
version='0.1',
packages=find_packages(),
include_package_data=True,
install_requires=[
'Click',
],
entry_points='''
[console_scripts]
cli=src.cli:cli
''',
)
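# Usage sketch (assumes this file sits at the project root): install with
# "pip install -e ." and the [console_scripts] entry exposes the "cli" command.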
|
[
"690364065@qq.com"
] |
690364065@qq.com
|
5e8efd9eb59f40d86c42c63a6d9310545e0a1134
|
51f2492a5c207e3664de8f6b2d54bb93e313ca63
|
/atcoder/abc102/b.py
|
4be8dec8f9d480d7b0af81ef662a21f1f1ef5c4f
|
[
"WTFPL",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
abeaumont/competitive-programming
|
23c5aabd587d7bb15a61efd3428838cb934233dd
|
a24c9b89941a59d344b51dc1010de66522b1a0dd
|
refs/heads/master
| 2023-09-01T09:50:58.267361
| 2023-07-31T18:00:10
| 2023-07-31T18:00:10
| 117,589,708
| 618
| 262
|
WTFPL
| 2023-07-12T17:36:20
| 2018-01-15T20:00:56
|
C++
|
UTF-8
|
Python
| false
| false
| 158
|
py
|
#!/usr/bin/env python3
# https://abc102.contest.atcoder.jp/tasks/abc102_b
n = int(input())
a = [int(x) for x in input().split()]
a.sort()
print(a[-1] - a[0])
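# Worked example: input "4" then "1 5 3 2" -> sorted [1, 2, 3, 5] -> prints 4.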
|
[
"alfredo.beaumont@gmail.com"
] |
alfredo.beaumont@gmail.com
|
215d0564daeceb18cdbfe7df3305df4cf9aaddc4
|
ddea930392ac5360b21e9043b620e703a9ccb31c
|
/tfx/components/example_gen/csv_example_gen/component.py
|
e98fab352364bc59a5a175075c9b90dce53af5c7
|
[
"Apache-2.0"
] |
permissive
|
Ark-kun/tfx
|
9c82b688776c80b2435bbb6154476526e8525ec8
|
f685f0387bd145316f43ceb484e64f893e749dcb
|
refs/heads/master
| 2021-07-25T05:58:15.168607
| 2020-05-22T01:07:44
| 2020-05-22T01:08:18
| 180,868,735
| 0
| 0
|
Apache-2.0
| 2019-04-11T20:01:57
| 2019-04-11T20:01:57
| null |
UTF-8
|
Python
| false
| false
| 3,690
|
py
|
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX CsvExampleGen component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from tfx import types
from tfx.components.base import executor_spec
from tfx.components.example_gen import component
from tfx.components.example_gen.csv_example_gen import executor
from tfx.proto import example_gen_pb2
class CsvExampleGen(component.FileBasedExampleGen): # pylint: disable=protected-access
"""Official TFX CsvExampleGen component.
The csv examplegen component takes csv data, and generates train
and eval examples for downsteam components.
"""
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(
self,
input: types.Channel = None, # pylint: disable=redefined-builtin
input_config: Optional[Union[example_gen_pb2.Input, Dict[Text,
Any]]] = None,
output_config: Optional[Union[example_gen_pb2.Output, Dict[Text,
Any]]] = None,
example_artifacts: Optional[types.Channel] = None,
input_base: Optional[types.Channel] = None,
instance_name: Optional[Text] = None,
enable_cache: Optional[bool] = None):
"""Construct a CsvExampleGen component.
Args:
input: A Channel of type `standard_artifacts.ExternalArtifact`, which
includes one artifact whose uri is an external directory containing csv
files (required).
input_config: An example_gen_pb2.Input instance, providing input
configuration. If unset, the files under input_base will be treated as a
single split. If any field is provided as a RuntimeParameter,
input_config should be constructed as a dict with the same field names
as Input proto message.
output_config: An example_gen_pb2.Output instance, providing output
configuration. If unset, default splits will be 'train' and 'eval' with
size 2:1. If any field is provided as a RuntimeParameter,
output_config should be constructed as a dict with the same field names
as Output proto message.
example_artifacts: Optional channel of 'ExamplesPath' for output train and
eval examples.
input_base: Backwards compatibility alias for the 'input' argument.
instance_name: Optional unique instance name. Necessary if multiple
CsvExampleGen components are declared in the same pipeline.
enable_cache: Optional boolean to indicate if cache is enabled for the
CsvExampleGen component. If not specified, defaults to the value
specified for pipeline's enable_cache parameter.
"""
super(CsvExampleGen, self).__init__(
input=input,
input_config=input_config,
output_config=output_config,
example_artifacts=example_artifacts,
input_base=input_base,
instance_name=instance_name,
enable_cache=enable_cache)
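# Usage sketch (hypothetical path; external_input lived in tfx.utils.dsl_utils
# in this TFX generation):
#   from tfx.utils.dsl_utils import external_input
#   example_gen = CsvExampleGen(input=external_input('/path/to/csv_dir'))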
|
[
"tensorflow-extended-team@google.com"
] |
tensorflow-extended-team@google.com
|
7788d6d2554c64b729e9701d0fe4596e17cccfe8
|
5f22ddbd3eeb99709e43e7b9a7958c9987c7efa4
|
/__competitions/2014/11_03_w12/01.py
|
d50b3c914cc79f47bca1e6cd9529281c8b5f817c
|
[] |
no_license
|
salvador-dali/algorithms_general
|
04950bd823fc354adc58a4f23b7d2f3d39664798
|
aeee3356e2488c6fab08741b1ac26e8bd5e4ac0d
|
refs/heads/master
| 2020-12-14T06:24:10.466601
| 2016-07-17T06:00:17
| 2016-07-17T06:00:17
| 47,397,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
# https://www.hackerrank.com/contests/w12/challenges/priyanka-and-toys
# sort numbers and use greedy solution to find which one overlap
def toBuy(arr):
arr.sort()
num, maxPrice = 0, -1
for i in arr:
if i > maxPrice:
num += 1
maxPrice = i + 4
return num
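# Worked example: arr = [1, 2, 3, 17] -> sorted [1, 2, 3, 17]; buying weight 1
# covers prices 1..5 (so 2 and 3 ride along), 17 needs a second purchase -> 2.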
input()
print toBuy(list(map(int, raw_input().split())))
|
[
"dmytro@knowlabs.com"
] |
dmytro@knowlabs.com
|
2b243345de7291445fa4bed95f95e76b7dfd2db2
|
41d9b307fdd476ec95677d3414c3ac63775847b7
|
/Ejercicios para practicar OOP/juego_taller WIP.py
|
cb6701b232466a249c5100f79cddde6b15f2d32c
|
[] |
no_license
|
NickATC/Taller_Python_OOP_2018
|
ccf4970f88e73cc5458c1c773469808f75e5cf8e
|
78707abfbcfd4e230bbd672f633fdcb609502633
|
refs/heads/master
| 2021-07-20T23:18:53.317986
| 2021-06-08T00:32:05
| 2021-06-08T00:32:05
| 120,635,969
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,039
|
py
|
# -*- coding: utf-8 -*-
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox as msg
import random
NOMBRES_MONSTRUOS = ["Mistich", "Mutt", "Guttalon", "Cinder", "Hagger", "Dusty"]
NOMBRES_HEROE = ["Blaster", "Tracer", "Tim"]
ARMAS = ["una espada", "un hacha", "una varita mágica"]
class SerViviente:
"""Clase para definir tanto héroes como mónstruos en nuestro juego"""
def __init__(self, tipo, nombre, fuerza, arma):
self.tipo = tipo
self.nombre = nombre
self.fuerza = fuerza
self.arma = arma
self.describir_ser_viviente()
def estado_ser_viviente(self):
print("El {} ahora tiene un nivel de fuerza de {}".format(self.tipo, self.fuerza))
if self.fuerza == 0:
print("El {} ha muerto!!!!".format(self.tipo))
def describir_ser_viviente(self):
print("Este es un {} llamado {}. Tiene fuerza nivel {} y posee {}.".format(self.tipo, self.nombre, self.fuerza, self.arma))
class Monster(SerViviente):
"""Sub clase para redefinir al monstruo"""
def atacar(self):
print("El mónstruo te ataca")
heroe1.fuerza = heroe1.fuerza -1
heroe1.estado_ser_viviente()
def defender(self):
print("El mónstruo se defiende de tu ataque")
chance = random.randint(1,3)
if chance is 2:
self.atacar()
def huir(self):
print("El mónstruo huye ante tu presencia!")
class Hero(SerViviente):
"""Sub clase para redefinir al heroe"""
def atacar(self):
print("Has decidido atacar al mónstruo!")
monster1.fuerza = monster1.fuerza -1
label_monster_fuerza.config(text = f"Fuerza {monster1.fuerza}")
monster1.estado_ser_viviente()
def defender(self):
print("Te defiendes del ataque del monstruo")
def huir(self):
print("Decides huir... COBARDE!!!")
monster1 = Monster("mónstruo", random.choice(NOMBRES_MONSTRUOS), random.randint(1, 10), random.choice(ARMAS))
heroe1 = Hero("héroe", random.choice(NOMBRES_HEROE), random.randint(1, 10), random.choice(ARMAS))
#####################################
###### GUI design starts here
#####################################
window = tk.Tk()
window.title("Monsters vs Heroes")
window.geometry("500x400")
window.resizable(False, False)
#Label-Frame for the monster specs:
label_frame1 = ttk.LabelFrame(window, text = "Your monster specs:")
label_frame1.grid(column = 0, row = 1, padx = 20, pady = 5)
label_monster_name = ttk.Label(label_frame1, text = f"Nombre: {monster1.nombre}")
label_monster_name.grid(column = 0, row = 0, pady = 20)
label_monster_fuerza = ttk.Label(label_frame1, text = f"Fuerza {monster1.fuerza}")
label_monster_fuerza.grid(column = 0, row = 1, pady = 20)
label_monster_arma = ttk.Label(label_frame1, text = f"Arma: {monster1.arma.capitalize()}")
label_monster_arma.grid(column = 0, row = 2, pady = 20)
#Label-Frame for the heroe specs:
label_frame2 = ttk.LabelFrame(window, text = "Your heroe specs:")
label_frame2.grid(column = 1, row = 1, padx = 20, pady = 5)
label_heroe_name = ttk.Label(label_frame2, text = f"Nombre: {heroe1.nombre}")
label_heroe_name.grid(column = 0, row = 0, pady = 20)
label_heroe_fuerza = ttk.Label(label_frame2, text = f"Fuerza: {heroe1.fuerza}")
label_heroe_fuerza.grid(column = 0, row = 1, pady = 20)
label_heroe_arma = ttk.Label(label_frame2, text = f"Arma: {heroe1.arma}")
label_heroe_arma.grid(column = 0, row = 2, pady = 20)
# Action buttons:
attack_button = ttk.Button(window, text = "Atacar!!", command = heroe1.atacar)
attack_button.grid(column = 0, row = 10)
defender_button = ttk.Button(window, text = "Defenderse!!")
defender_button.grid(column = 1, row = 10)
huir_button = ttk.Button(window, text = "Huir, cobarde!!")
huir_button.grid(column = 2, row = 10)
window.mainloop()
|
[
"noreply@github.com"
] |
NickATC.noreply@github.com
|
4e6032878370ac367ce8229d77078e44cd9327d3
|
97c93aa87a54686daddc988066a8ad4bb0b17e70
|
/class97Program3.py
|
98603d6ecc7883c2a8555d9fb87592430770b2ba
|
[] |
no_license
|
hitarth-pixel/class97
|
92fa23cb3ac189c7b0f3f13b9e09457cf4d43148
|
8886bd84df5ff28c990c0d9972794ccd98fdaaeb
|
refs/heads/main
| 2023-08-20T10:16:38.876034
| 2021-10-25T11:27:17
| 2021-10-25T11:27:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
no=int(input("enter your no."))
if(no%2==0):
print("the no. is even")
else:
print("the no. is odd")
|
[
"noreply@github.com"
] |
hitarth-pixel.noreply@github.com
|
6d75f464bfc5dab8974a58bc1fb72ee468f050c7
|
551b75f52d28c0b5c8944d808a361470e2602654
|
/huaweicloud-sdk-cts/huaweicloudsdkcts/v3/model/update_tracker_request_body.py
|
86a0e288fb2da06e6d914be15bba419009029213
|
[
"Apache-2.0"
] |
permissive
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
9d6597ce8ab666a9a297b3d936aeb85c55cf5877
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
refs/heads/master
| 2023-05-08T21:32:31.920300
| 2021-05-26T08:54:18
| 2021-05-26T08:54:18
| 370,898,764
| 0
| 0
|
NOASSERTION
| 2021-05-26T03:50:07
| 2021-05-26T03:50:07
| null |
UTF-8
|
Python
| false
| false
| 11,674
|
py
|
# coding: utf-8
import pprint
import re
import six
class UpdateTrackerRequestBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'tracker_type': 'str',
'tracker_name': 'str',
'status': 'str',
'is_lts_enabled': 'bool',
'obs_info': 'TrackerObsInfo',
'is_support_trace_files_encryption': 'bool',
'kms_id': 'str',
'is_support_validate': 'bool',
'data_bucket': 'DataBucket'
}
attribute_map = {
'tracker_type': 'tracker_type',
'tracker_name': 'tracker_name',
'status': 'status',
'is_lts_enabled': 'is_lts_enabled',
'obs_info': 'obs_info',
'is_support_trace_files_encryption': 'is_support_trace_files_encryption',
'kms_id': 'kms_id',
'is_support_validate': 'is_support_validate',
'data_bucket': 'data_bucket'
}
def __init__(self, tracker_type=None, tracker_name=None, status=None, is_lts_enabled=None, obs_info=None, is_support_trace_files_encryption=None, kms_id=None, is_support_validate=None, data_bucket=None):
"""UpdateTrackerRequestBody - a model defined in huaweicloud sdk"""
self._tracker_type = None
self._tracker_name = None
self._status = None
self._is_lts_enabled = None
self._obs_info = None
self._is_support_trace_files_encryption = None
self._kms_id = None
self._is_support_validate = None
self._data_bucket = None
self.discriminator = None
self.tracker_type = tracker_type
self.tracker_name = tracker_name
if status is not None:
self.status = status
if is_lts_enabled is not None:
self.is_lts_enabled = is_lts_enabled
if obs_info is not None:
self.obs_info = obs_info
if is_support_trace_files_encryption is not None:
self.is_support_trace_files_encryption = is_support_trace_files_encryption
if kms_id is not None:
self.kms_id = kms_id
if is_support_validate is not None:
self.is_support_validate = is_support_validate
if data_bucket is not None:
self.data_bucket = data_bucket
@property
def tracker_type(self):
"""Gets the tracker_type of this UpdateTrackerRequestBody.
        Identifies the tracker type. Supported tracker types are the management tracker (system) and the data tracker (data). Parameters shared by both kinds: is_lts_enabled, obs_info; management-tracker parameters: is_support_trace_files_encryption, kms_id, is_support_validate; data-tracker parameters: tracker_name, data_bucket.
:return: The tracker_type of this UpdateTrackerRequestBody.
:rtype: str
"""
return self._tracker_type
@tracker_type.setter
def tracker_type(self, tracker_type):
"""Sets the tracker_type of this UpdateTrackerRequestBody.
        Identifies the tracker type. Supported tracker types are the management tracker (system) and the data tracker (data). Parameters shared by both kinds: is_lts_enabled, obs_info; management-tracker parameters: is_support_trace_files_encryption, kms_id, is_support_validate; data-tracker parameters: tracker_name, data_bucket.
:param tracker_type: The tracker_type of this UpdateTrackerRequestBody.
:type: str
"""
self._tracker_type = tracker_type
@property
def tracker_name(self):
"""Gets the tracker_name of this UpdateTrackerRequestBody.
        Identifies the tracker name. When "tracker_type" is "system" this parameter defaults to "system". When "tracker_type" is "data" a tracker name must be specified.
:return: The tracker_name of this UpdateTrackerRequestBody.
:rtype: str
"""
return self._tracker_name
@tracker_name.setter
def tracker_name(self, tracker_name):
"""Sets the tracker_name of this UpdateTrackerRequestBody.
        Identifies the tracker name. When "tracker_type" is "system" this parameter defaults to "system". When "tracker_type" is "data" a tracker name must be specified.
:param tracker_name: The tracker_name of this UpdateTrackerRequestBody.
:type: str
"""
self._tracker_name = tracker_name
@property
def status(self):
"""Gets the status of this UpdateTrackerRequestBody.
        Identifies the tracker status; through this interface it can be set to enabled or disabled. If set to disabled, the tracker stops recording events once the change succeeds.
:return: The status of this UpdateTrackerRequestBody.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this UpdateTrackerRequestBody.
        Identifies the tracker status; through this interface it can be set to enabled or disabled. If set to disabled, the tracker stops recording events once the change succeeds.
:param status: The status of this UpdateTrackerRequestBody.
:type: str
"""
self._status = status
@property
def is_lts_enabled(self):
"""Gets the is_lts_enabled of this UpdateTrackerRequestBody.
        Whether trace analysis is enabled.
:return: The is_lts_enabled of this UpdateTrackerRequestBody.
:rtype: bool
"""
return self._is_lts_enabled
@is_lts_enabled.setter
def is_lts_enabled(self, is_lts_enabled):
"""Sets the is_lts_enabled of this UpdateTrackerRequestBody.
        Whether trace analysis is enabled.
:param is_lts_enabled: The is_lts_enabled of this UpdateTrackerRequestBody.
:type: bool
"""
self._is_lts_enabled = is_lts_enabled
@property
def obs_info(self):
"""Gets the obs_info of this UpdateTrackerRequestBody.
:return: The obs_info of this UpdateTrackerRequestBody.
:rtype: TrackerObsInfo
"""
return self._obs_info
@obs_info.setter
def obs_info(self, obs_info):
"""Sets the obs_info of this UpdateTrackerRequestBody.
:param obs_info: The obs_info of this UpdateTrackerRequestBody.
:type: TrackerObsInfo
"""
self._obs_info = obs_info
@property
def is_support_trace_files_encryption(self):
"""Gets the is_support_trace_files_encryption of this UpdateTrackerRequestBody.
        Switch for encrypting transferred trace files. Valid only when "tracker_type" is "system". Must be used together with the kms_id parameter.
:return: The is_support_trace_files_encryption of this UpdateTrackerRequestBody.
:rtype: bool
"""
return self._is_support_trace_files_encryption
@is_support_trace_files_encryption.setter
def is_support_trace_files_encryption(self, is_support_trace_files_encryption):
"""Sets the is_support_trace_files_encryption of this UpdateTrackerRequestBody.
        Switch for encrypting transferred trace files. Valid only when "tracker_type" is "system". Must be used together with the kms_id parameter.
:param is_support_trace_files_encryption: The is_support_trace_files_encryption of this UpdateTrackerRequestBody.
:type: bool
"""
self._is_support_trace_files_encryption = is_support_trace_files_encryption
@property
def kms_id(self):
"""Gets the kms_id of this UpdateTrackerRequestBody.
        ID of the key (obtained from KMS) used to encrypt transferred trace files. Valid only when "tracker_type" is "system". Required when "is_support_trace_files_encryption" is true.
:return: The kms_id of this UpdateTrackerRequestBody.
:rtype: str
"""
return self._kms_id
@kms_id.setter
def kms_id(self, kms_id):
"""Sets the kms_id of this UpdateTrackerRequestBody.
        ID of the key (obtained from KMS) used to encrypt transferred trace files. Valid only when "tracker_type" is "system". Required when "is_support_trace_files_encryption" is true.
:param kms_id: The kms_id of this UpdateTrackerRequestBody.
:type: str
"""
self._kms_id = kms_id
@property
def is_support_validate(self):
"""Gets the is_support_validate of this UpdateTrackerRequestBody.
        Whether trace-file integrity verification is enabled when transferring trace files. Valid only when "tracker_type" is "system".
:return: The is_support_validate of this UpdateTrackerRequestBody.
:rtype: bool
"""
return self._is_support_validate
@is_support_validate.setter
def is_support_validate(self, is_support_validate):
"""Sets the is_support_validate of this UpdateTrackerRequestBody.
        Whether trace-file integrity verification is enabled when transferring trace files. Valid only when "tracker_type" is "system".
:param is_support_validate: The is_support_validate of this UpdateTrackerRequestBody.
:type: bool
"""
self._is_support_validate = is_support_validate
@property
def data_bucket(self):
"""Gets the data_bucket of this UpdateTrackerRequestBody.
:return: The data_bucket of this UpdateTrackerRequestBody.
:rtype: DataBucket
"""
return self._data_bucket
@data_bucket.setter
def data_bucket(self, data_bucket):
"""Sets the data_bucket of this UpdateTrackerRequestBody.
:param data_bucket: The data_bucket of this UpdateTrackerRequestBody.
:type: DataBucket
"""
self._data_bucket = data_bucket
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateTrackerRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
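# Usage sketch (illustrative values): build a request body and serialize it,
# e.g. UpdateTrackerRequestBody(tracker_type='system', tracker_name='system',
# status='enabled').to_dict() yields a plain dict ready for JSON encoding.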
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
f5fcdcfd0b66da3bd565862fc21ebb29e7b65e06
|
474ca3fbc2b3513d92ed9531a9a99a2248ec7f63
|
/ThirdParty/boost_1_63_0/tools/build/test/BoostBuild.py
|
9ff7818a20a18c81d89c5ea1516951c1e7991621
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSL-1.0"
] |
permissive
|
LazyPlanet/MX-Architecture
|
17b7b2e6c730409b22b7f38633e7b1f16359d250
|
732a867a5db3ba0c716752bffaeb675ebdc13a60
|
refs/heads/master
| 2020-12-30T15:41:18.664826
| 2018-03-02T00:59:12
| 2018-03-02T00:59:12
| 91,156,170
| 4
| 0
| null | 2018-02-04T03:29:46
| 2017-05-13T07:05:52
|
C++
|
UTF-8
|
Python
| false
| false
| 53,282
|
py
|
# Copyright 2002-2005 Vladimir Prus.
# Copyright 2002-2003 Dave Abrahams.
# Copyright 2006 Rene Rivera.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import TestCmd
import copy
import fnmatch
import glob
import math
import os
import os.path
import re
import shutil
import StringIO
import subprocess
import sys
import tempfile
import time
import traceback
import tree
import types
from xml.sax.saxutils import escape
class TestEnvironmentError(Exception):
pass
annotations = []
def print_annotation(name, value, xml):
"""Writes some named bits of information about the current test run."""
if xml:
print escape(name) + " {{{"
print escape(value)
print "}}}"
else:
print name + " {{{"
print value
print "}}}"
def flush_annotations(xml=0):
global annotations
for ann in annotations:
print_annotation(ann[0], ann[1], xml)
annotations = []
def clear_annotations():
global annotations
annotations = []
defer_annotations = 0
def set_defer_annotations(n):
global defer_annotations
defer_annotations = n
def annotate_stack_trace(tb=None):
if tb:
trace = TestCmd.caller(traceback.extract_tb(tb), 0)
else:
trace = TestCmd.caller(traceback.extract_stack(), 1)
annotation("stacktrace", trace)
def annotation(name, value):
"""Records an annotation about the test run."""
annotations.append((name, value))
if not defer_annotations:
flush_annotations()
def get_toolset():
toolset = None
for arg in sys.argv[1:]:
if not arg.startswith("-"):
toolset = arg
return toolset or "gcc"
# Detect the host OS.
cygwin = hasattr(os, "uname") and os.uname()[0].lower().startswith("cygwin")
windows = cygwin or os.environ.get("OS", "").lower().startswith("windows")
def prepare_prefixes_and_suffixes(toolset):
prepare_suffix_map(toolset)
prepare_library_prefix(toolset)
def prepare_suffix_map(toolset):
"""
Set up suffix translation performed by the Boost Build testing framework
to accomodate different toolsets generating targets of the same type using
different filename extensions (suffixes).
"""
global suffixes
suffixes = {}
if windows:
if toolset == "gcc":
suffixes[".lib"] = ".a" # mingw static libs use suffix ".a".
suffixes[".obj"] = ".o"
if cygwin:
suffixes[".implib"] = ".lib.a"
else:
suffixes[".implib"] = ".lib"
else:
suffixes[".exe"] = ""
suffixes[".dll"] = ".so"
suffixes[".lib"] = ".a"
suffixes[".obj"] = ".o"
suffixes[".implib"] = ".no_implib_files_on_this_platform"
if hasattr(os, "uname") and os.uname()[0] == "Darwin":
suffixes[".dll"] = ".dylib"
def prepare_library_prefix(toolset):
"""
Setup whether Boost Build is expected to automatically prepend prefixes
to its built library targets.
"""
global lib_prefix
lib_prefix = "lib"
global dll_prefix
if cygwin:
dll_prefix = "cyg"
elif windows and toolset != "gcc":
dll_prefix = None
else:
dll_prefix = "lib"
def re_remove(sequence, regex):
me = re.compile(regex)
result = filter(lambda x: me.match(x), sequence)
if not result:
raise ValueError()
for r in result:
sequence.remove(r)
def glob_remove(sequence, pattern):
result = fnmatch.filter(sequence, pattern)
if not result:
raise ValueError()
for r in result:
sequence.remove(r)
class Tester(TestCmd.TestCmd):
"""Main tester class for Boost Build.
Optional arguments:
`arguments` - Arguments passed to the run executable.
`executable` - Name of the executable to invoke.
    `match`                      - Function to use for comparing actual and
expected file contents.
`boost_build_path` - Boost build path to be passed to the run
executable.
    `translate_suffixes`         - Whether to update suffixes on the file
names passed from the test script so they
match those actually created by the current
toolset. For example, static library files
are specified by using the .lib suffix but
when the "gcc" toolset is used it actually
creates them using the .a suffix.
`pass_toolset` - Whether the test system should pass the
specified toolset to the run executable.
`use_test_config` - Whether the test system should tell the run
executable to read in the test_config.jam
configuration file.
`ignore_toolset_requirements` - Whether the test system should tell the run
executable to ignore toolset requirements.
`workdir` - Absolute directory where the test will be
run from.
`pass_d0` - If set, when tests are not explicitly run
in verbose mode, they are run as silent
(-d0 & --quiet Boost Jam options).
Optional arguments inherited from the base class:
`description` - Test description string displayed in case
of a failed test.
`subdir` - List of subdirectories to automatically
create under the working directory. Each
subdirectory needs to be specified
separately, parent coming before its child.
`verbose` - Flag that may be used to enable more
verbose test system output. Note that it
does not also enable more verbose build
system output like the --verbose command
line option does.
"""
def __init__(self, arguments=None, executable="bjam",
match=TestCmd.match_exact, boost_build_path=None,
translate_suffixes=True, pass_toolset=True, use_test_config=True,
ignore_toolset_requirements=True, workdir="", pass_d0=True,
**keywords):
assert arguments.__class__ is not str
self.original_workdir = os.getcwd()
        if workdir and not os.path.isabs(workdir):
            raise TestEnvironmentError(
                "Parameter workdir <%s> must point to an absolute "
                "directory." % workdir)
self.last_build_timestamp = 0
self.translate_suffixes = translate_suffixes
self.use_test_config = use_test_config
self.toolset = get_toolset()
self.pass_toolset = pass_toolset
self.ignore_toolset_requirements = ignore_toolset_requirements
prepare_prefixes_and_suffixes(pass_toolset and self.toolset or "gcc")
use_default_bjam = "--default-bjam" in sys.argv
if not use_default_bjam:
jam_build_dir = ""
if os.name == "nt":
jam_build_dir = "bin.ntx86"
elif (os.name == "posix") and os.__dict__.has_key("uname"):
if os.uname()[0].lower().startswith("cygwin"):
jam_build_dir = "bin.cygwinx86"
if ("TMP" in os.environ and
os.environ["TMP"].find("~") != -1):
print("Setting $TMP to /tmp to get around problem "
"with short path names")
os.environ["TMP"] = "/tmp"
elif os.uname()[0] == "Linux":
cpu = os.uname()[4]
if re.match("i.86", cpu):
jam_build_dir = "bin.linuxx86"
else:
jam_build_dir = "bin.linux" + os.uname()[4]
elif os.uname()[0] == "SunOS":
jam_build_dir = "bin.solaris"
elif os.uname()[0] == "Darwin":
if os.uname()[4] == "i386":
jam_build_dir = "bin.macosxx86"
elif os.uname()[4] == "x86_64":
jam_build_dir = "bin.macosxx86_64"
else:
jam_build_dir = "bin.macosxppc"
elif os.uname()[0] == "AIX":
jam_build_dir = "bin.aix"
elif os.uname()[0] == "IRIX64":
jam_build_dir = "bin.irix"
elif os.uname()[0] == "FreeBSD":
jam_build_dir = "bin.freebsd"
elif os.uname()[0] == "OSF1":
jam_build_dir = "bin.osf"
                else:
                    raise TestEnvironmentError(
                        "Do not know directory where Jam is built for this "
                        "system: %s/%s" % (os.name, os.uname()[0]))
            else:
                raise TestEnvironmentError(
                    "Do not know directory where Jam is built for this "
                    "system: %s" % os.name)
# Find where jam_src is located. Try for the debug version if it is
# lying around.
dirs = [os.path.join("..", "src", "engine", jam_build_dir + ".debug"),
os.path.join("..", "src", "engine", jam_build_dir)]
for d in dirs:
if os.path.exists(d):
jam_build_dir = d
break
else:
print("Cannot find built Boost.Jam")
sys.exit(1)
verbosity = ["-d0", "--quiet"]
if not pass_d0:
verbosity = []
if "--verbose" in sys.argv:
keywords["verbose"] = True
verbosity = ["-d+2"]
if boost_build_path is None:
boost_build_path = self.original_workdir + "/.."
program_list = []
if use_default_bjam:
program_list.append(executable)
else:
program_list.append(os.path.join(jam_build_dir, executable))
program_list.append('-sBOOST_BUILD_PATH="' + boost_build_path + '"')
if verbosity:
program_list += verbosity
if arguments:
program_list += arguments
TestCmd.TestCmd.__init__(self, program=program_list, match=match,
workdir=workdir, inpath=use_default_bjam, **keywords)
os.chdir(self.workdir)
def cleanup(self):
try:
TestCmd.TestCmd.cleanup(self)
os.chdir(self.original_workdir)
except AttributeError:
# When this is called during TestCmd.TestCmd.__del__ we can have
# both 'TestCmd' and 'os' unavailable in our scope. Do nothing in
# this case.
pass
#
# Methods that change the working directory's content.
#
def set_tree(self, tree_location):
# It is not possible to remove the current directory.
d = os.getcwd()
os.chdir(os.path.dirname(self.workdir))
shutil.rmtree(self.workdir, ignore_errors=False)
if not os.path.isabs(tree_location):
tree_location = os.path.join(self.original_workdir, tree_location)
shutil.copytree(tree_location, self.workdir)
os.chdir(d)
def make_writable(unused, dir, entries):
for e in entries:
name = os.path.join(dir, e)
os.chmod(name, os.stat(name).st_mode | 0222)
os.path.walk(".", make_writable, None)
def write(self, file, content, wait=True):
nfile = self.native_file_name(file)
self.__makedirs(os.path.dirname(nfile), wait)
f = open(nfile, "wb")
try:
f.write(content)
finally:
f.close()
self.__ensure_newer_than_last_build(nfile)
def copy(self, src, dst):
try:
self.write(dst, self.read(src, 1))
except:
self.fail_test(1)
def copy_preserving_timestamp(self, src, dst):
src_name = self.native_file_name(src)
dst_name = self.native_file_name(dst)
stats = os.stat(src_name)
self.write(dst, self.read(src, 1))
os.utime(dst_name, (stats.st_atime, stats.st_mtime))
def touch(self, names, wait=True):
if names.__class__ is str:
names = [names]
for name in names:
path = self.native_file_name(name)
if wait:
self.__ensure_newer_than_last_build(path)
else:
os.utime(path, None)
def rm(self, names):
if not type(names) == types.ListType:
names = [names]
if names == ["."]:
# If we are deleting the entire workspace, there is no need to wait
# for a clock tick.
self.last_build_timestamp = 0
# Avoid attempts to remove the current directory.
os.chdir(self.original_workdir)
for name in names:
n = glob.glob(self.native_file_name(name))
if n: n = n[0]
if not n:
n = self.glob_file(name.replace("$toolset", self.toolset + "*")
)
if n:
if os.path.isdir(n):
shutil.rmtree(n, ignore_errors=False)
else:
os.unlink(n)
# Create working dir root again in case we removed it.
if not os.path.exists(self.workdir):
os.mkdir(self.workdir)
os.chdir(self.workdir)
def expand_toolset(self, name):
"""
Expands $toolset placeholder in the given file to the name of the
toolset currently being tested.
"""
self.write(name, self.read(name).replace("$toolset", self.toolset))
def dump_stdio(self):
annotation("STDOUT", self.stdout())
annotation("STDERR", self.stderr())
def run_build_system(self, extra_args=None, subdir="", stdout=None,
stderr="", status=0, match=None, pass_toolset=None,
use_test_config=None, ignore_toolset_requirements=None,
expected_duration=None, **kw):
assert extra_args.__class__ is not str
if os.path.isabs(subdir):
print("You must pass a relative directory to subdir <%s>." % subdir
)
return
self.previous_tree, dummy = tree.build_tree(self.workdir)
if match is None:
match = self.match
if pass_toolset is None:
pass_toolset = self.pass_toolset
if use_test_config is None:
use_test_config = self.use_test_config
if ignore_toolset_requirements is None:
ignore_toolset_requirements = self.ignore_toolset_requirements
try:
kw["program"] = []
kw["program"] += self.program
if extra_args:
kw["program"] += extra_args
if pass_toolset:
kw["program"].append("toolset=" + self.toolset)
if use_test_config:
kw["program"].append('--test-config="%s"' % os.path.join(
self.original_workdir, "test-config.jam"))
if ignore_toolset_requirements:
kw["program"].append("--ignore-toolset-requirements")
if "--python" in sys.argv:
# -z disables Python optimization mode.
# this enables type checking (all assert
# and if __debug__ statements).
kw["program"].extend(["--python", "-z"])
if "--stacktrace" in sys.argv:
kw["program"].append("--stacktrace")
kw["chdir"] = subdir
self.last_program_invocation = kw["program"]
build_time_start = time.time()
apply(TestCmd.TestCmd.run, [self], kw)
build_time_finish = time.time()
except:
self.dump_stdio()
raise
old_last_build_timestamp = self.last_build_timestamp
self.tree, self.last_build_timestamp = tree.build_tree(self.workdir)
self.difference = tree.tree_difference(self.previous_tree, self.tree)
if self.difference.empty():
# If nothing has been changed by this build and sufficient time has
# passed since the last build that actually changed something,
# there is no need to wait for touched or newly created files to
# start getting newer timestamps than the currently existing ones.
self.last_build_timestamp = old_last_build_timestamp
self.difference.ignore_directories()
self.unexpected_difference = copy.deepcopy(self.difference)
if (status and self.status) is not None and self.status != status:
expect = ""
if status != 0:
expect = " (expected %d)" % status
annotation("failure", '"%s" returned %d%s' % (kw["program"],
self.status, expect))
annotation("reason", "unexpected status returned by bjam")
self.fail_test(1)
if stdout is not None and not match(self.stdout(), stdout):
annotation("failure", "Unexpected stdout")
annotation("Expected STDOUT", stdout)
annotation("Actual STDOUT", self.stdout())
stderr = self.stderr()
if stderr:
annotation("STDERR", stderr)
self.maybe_do_diff(self.stdout(), stdout)
self.fail_test(1, dump_stdio=False)
# Intel tends to produce some messages to stderr which make tests fail.
intel_workaround = re.compile("^xi(link|lib): executing.*\n", re.M)
actual_stderr = re.sub(intel_workaround, "", self.stderr())
if stderr is not None and not match(actual_stderr, stderr):
annotation("failure", "Unexpected stderr")
annotation("Expected STDERR", stderr)
annotation("Actual STDERR", self.stderr())
annotation("STDOUT", self.stdout())
self.maybe_do_diff(actual_stderr, stderr)
self.fail_test(1, dump_stdio=False)
if expected_duration is not None:
actual_duration = build_time_finish - build_time_start
if actual_duration > expected_duration:
print("Test run lasted %f seconds while it was expected to "
"finish in under %f seconds." % (actual_duration,
expected_duration))
self.fail_test(1, dump_stdio=False)
def glob_file(self, name):
result = None
if hasattr(self, "difference"):
for f in (self.difference.added_files +
self.difference.modified_files +
self.difference.touched_files):
if fnmatch.fnmatch(f, name):
result = self.native_file_name(f)
break
if not result:
result = glob.glob(self.native_file_name(name))
if result:
result = result[0]
return result
def read(self, name, binary=False):
try:
if self.toolset:
name = name.replace("$toolset", self.toolset + "*")
name = self.glob_file(name)
openMode = "r"
if binary:
openMode += "b"
else:
openMode += "U"
f = open(name, openMode)
result = f.read()
f.close()
return result
except:
annotation("failure", "Could not open '%s'" % name)
self.fail_test(1)
return ""
def read_and_strip(self, name):
if not self.glob_file(name):
return ""
f = open(self.glob_file(name), "rb")
lines = f.readlines()
f.close()
result = "\n".join(x.rstrip() for x in lines)
if lines and lines[-1][-1] != "\n":
return result + "\n"
return result
def fail_test(self, condition, dump_difference=True, dump_stdio=True,
dump_stack=True):
if not condition:
return
if dump_difference and hasattr(self, "difference"):
f = StringIO.StringIO()
self.difference.pprint(f)
annotation("changes caused by the last build command",
f.getvalue())
if dump_stdio:
self.dump_stdio()
if "--preserve" in sys.argv:
print
print "*** Copying the state of working dir into 'failed_test' ***"
print
path = os.path.join(self.original_workdir, "failed_test")
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=False)
elif os.path.exists(path):
raise "Path " + path + " already exists and is not a directory"
shutil.copytree(self.workdir, path)
print "The failed command was:"
print " ".join(self.last_program_invocation)
if dump_stack:
annotate_stack_trace()
sys.exit(1)
# A number of methods below check expectations with actual difference
# between directory trees before and after a build. All the 'expect*'
# methods require exact names to be passed. All the 'ignore*' methods allow
# wildcards.
# All names can be either a string or a list of strings.
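    #
    # A minimal usage sketch (the test-script variable 't' is hypothetical,
    # not part of this framework):
    #
    #   t.run_build_system()
    #   t.expect_addition("bin/$toolset/debug/hello.obj")
    #   t.ignore_addition("bin/*.pdb")
    #   t.expect_nothing_more()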
def expect_addition(self, names):
for name in self.adjust_names(names):
try:
glob_remove(self.unexpected_difference.added_files, name)
except:
annotation("failure", "File %s not added as expected" % name)
self.fail_test(1)
def ignore_addition(self, wildcard):
self.__ignore_elements(self.unexpected_difference.added_files,
wildcard)
def expect_removal(self, names):
for name in self.adjust_names(names):
try:
glob_remove(self.unexpected_difference.removed_files, name)
except:
annotation("failure", "File %s not removed as expected" % name)
self.fail_test(1)
def ignore_removal(self, wildcard):
self.__ignore_elements(self.unexpected_difference.removed_files,
wildcard)
def expect_modification(self, names):
for name in self.adjust_names(names):
try:
glob_remove(self.unexpected_difference.modified_files, name)
except:
annotation("failure", "File %s not modified as expected" %
name)
self.fail_test(1)
def ignore_modification(self, wildcard):
self.__ignore_elements(self.unexpected_difference.modified_files,
wildcard)
def expect_touch(self, names):
d = self.unexpected_difference
for name in self.adjust_names(names):
# We need to check both touched and modified files. The reason is
# that:
# (1) Windows binaries such as obj, exe or dll files have slight
# differences even with identical inputs due to Windows PE
# format headers containing an internal timestamp.
# (2) Intel's compiler for Linux has the same behaviour.
filesets = [d.modified_files, d.touched_files]
while filesets:
try:
glob_remove(filesets[-1], name)
break
except ValueError:
filesets.pop()
if not filesets:
annotation("failure", "File %s not touched as expected" % name)
self.fail_test(1)
def ignore_touch(self, wildcard):
self.__ignore_elements(self.unexpected_difference.touched_files,
wildcard)
def ignore(self, wildcard):
self.ignore_addition(wildcard)
self.ignore_removal(wildcard)
self.ignore_modification(wildcard)
self.ignore_touch(wildcard)
def expect_nothing(self, names):
for name in self.adjust_names(names):
if name in self.difference.added_files:
annotation("failure",
"File %s added, but no action was expected" % name)
self.fail_test(1)
if name in self.difference.removed_files:
annotation("failure",
"File %s removed, but no action was expected" % name)
self.fail_test(1)
if name in self.difference.modified_files:
annotation("failure",
"File %s modified, but no action was expected" % name)
self.fail_test(1)
if name in self.difference.touched_files:
annotation("failure",
"File %s touched, but no action was expected" % name)
self.fail_test(1)
def expect_nothing_more(self):
# Not totally sure about this change, but I do not see a good
# alternative.
if windows:
self.ignore("*.ilk") # MSVC incremental linking files.
self.ignore("*.pdb") # MSVC program database files.
self.ignore("*.rsp") # Response files.
self.ignore("*.tds") # Borland debug symbols.
self.ignore("*.manifest") # MSVC DLL manifests.
# Debug builds of bjam built with gcc produce this profiling data.
self.ignore("gmon.out")
self.ignore("*/gmon.out")
# Boost Build's 'configure' functionality (unfinished at the time)
# produces this file.
self.ignore("bin/config.log")
self.ignore("bin/project-cache.jam")
# Compiled Python files created when running Python based Boost Build.
self.ignore("*.pyc")
if not self.unexpected_difference.empty():
annotation("failure", "Unexpected changes found")
output = StringIO.StringIO()
self.unexpected_difference.pprint(output)
annotation("unexpected changes", output.getvalue())
self.fail_test(1)
def expect_output_lines(self, lines, expected=True):
self.__expect_lines(self.stdout(), lines, expected)
def expect_content_lines(self, filename, line, expected=True):
self.__expect_lines(self.__read_file(filename), line, expected)
def expect_content(self, name, content, exact=False):
actual = self.__read_file(name, exact)
content = content.replace("$toolset", self.toolset + "*")
matched = False
if exact:
matched = fnmatch.fnmatch(actual, content)
else:
def sorted_(x):
x.sort()
return x
actual_ = map(lambda x: sorted_(x.split()), actual.splitlines())
content_ = map(lambda x: sorted_(x.split()), content.splitlines())
if len(actual_) == len(content_):
matched = map(
lambda x, y: map(lambda n, p: fnmatch.fnmatch(n, p), x, y),
actual_, content_)
matched = reduce(
lambda x, y: x and reduce(
lambda a, b: a and b,
y),
matched)
if not matched:
print "Expected:\n"
print content
print "Got:\n"
print actual
self.fail_test(1)
def maybe_do_diff(self, actual, expected):
if os.environ.get("DO_DIFF"):
e = tempfile.mktemp("expected")
a = tempfile.mktemp("actual")
f = open(e, "w")
f.write(expected)
f.close()
f = open(a, "w")
f.write(actual)
f.close()
print("DIFFERENCE")
# Current diff should return 1 to indicate 'different input files'
# but some older diff versions may return 0 and depending on the
# exact Python/OS platform version, os.system() call may gobble up
# the external process's return code and return 0 itself.
if os.system('diff -u "%s" "%s"' % (e, a)) not in [0, 1]:
print('Unable to compute difference: diff -u "%s" "%s"' % (e, a
))
os.unlink(e)
os.unlink(a)
else:
print("Set environmental variable 'DO_DIFF' to examine the "
"difference.")
# Internal methods.
def adjust_lib_name(self, name):
global lib_prefix
global dll_prefix
result = name
pos = name.rfind(".")
if pos != -1:
suffix = name[pos:]
if suffix == ".lib":
(head, tail) = os.path.split(name)
if lib_prefix:
tail = lib_prefix + tail
result = os.path.join(head, tail)
elif suffix == ".dll":
(head, tail) = os.path.split(name)
if dll_prefix:
tail = dll_prefix + tail
result = os.path.join(head, tail)
# If we want to use this name in a Jamfile, we better convert \ to /,
# as otherwise we would have to quote \.
result = result.replace("\\", "/")
return result
def adjust_suffix(self, name):
if not self.translate_suffixes:
return name
pos = name.rfind(".")
if pos == -1:
return name
suffix = name[pos:]
return name[:pos] + suffixes.get(suffix, suffix)
    # Accepts either a string or a list of strings and returns a list of
# strings. Adjusts suffixes on all names.
def adjust_names(self, names):
if names.__class__ is str:
names = [names]
r = map(self.adjust_lib_name, names)
r = map(self.adjust_suffix, r)
r = map(lambda x, t=self.toolset: x.replace("$toolset", t + "*"), r)
return r
def native_file_name(self, name):
name = self.adjust_names(name)[0]
return os.path.normpath(os.path.join(self.workdir, *name.split("/")))
def wait_for_time_change(self, path, touch):
"""
Wait for newly assigned file system modification timestamps for the
given path to become large enough for the timestamp difference to be
correctly recognized by both this Python based testing framework and
the Boost Jam executable being tested. May optionally touch the given
path to set its modification timestamp to the new value.
"""
self.__wait_for_time_change(path, touch, last_build_time=False)
def __build_timestamp_resolution(self):
"""
Returns the minimum path modification timestamp resolution supported
by the used Boost Jam executable.
"""
dir = tempfile.mkdtemp("bjam_version_info")
try:
jam_script = "timestamp_resolution.jam"
f = open(os.path.join(dir, jam_script), "w")
try:
f.write("EXIT $(JAM_TIMESTAMP_RESOLUTION) : 0 ;")
finally:
f.close()
p = subprocess.Popen([self.program[0], "-d0", "-f%s" % jam_script],
stdout=subprocess.PIPE, cwd=dir, universal_newlines=True)
out, err = p.communicate()
finally:
shutil.rmtree(dir, ignore_errors=False)
if p.returncode != 0:
raise TestEnvironmentError("Unexpected return code (%s) when "
"detecting Boost Jam's minimum supported path modification "
"timestamp resolution version information." % p.returncode)
if err:
raise TestEnvironmentError("Unexpected error output (%s) when "
"detecting Boost Jam's minimum supported path modification "
"timestamp resolution version information." % err)
r = re.match("([0-9]{2}):([0-9]{2}):([0-9]{2}\\.[0-9]{9})$", out)
if not r:
# Older Boost Jam versions did not report their minimum supported
# path modification timestamp resolution and did not actually
# support path modification timestamp resolutions finer than 1
# second.
# TODO: Phase this support out to avoid such fallback code from
# possibly covering up other problems.
return 1
if r.group(1) != "00" or r.group(2) != "00": # hours, minutes
raise TestEnvironmentError("Boost Jam with too coarse minimum "
"supported path modification timestamp resolution (%s:%s:%s)."
% (r.group(1), r.group(2), r.group(3)))
return float(r.group(3)) # seconds.nanoseconds
def __ensure_newer_than_last_build(self, path):
"""
Updates the given path's modification timestamp after waiting for the
newly assigned file system modification timestamp to become large
enough for the timestamp difference between it and the last build
timestamp to be correctly recognized by both this Python based testing
framework and the Boost Jam executable being tested. Does nothing if
there is no 'last build' information available.
"""
if self.last_build_timestamp:
self.__wait_for_time_change(path, touch=True, last_build_time=True)
def __expect_lines(self, data, lines, expected):
"""
Checks whether the given data contains the given lines.
Data may be specified as a single string containing text lines
separated by newline characters.
Lines may be specified in any of the following forms:
* Single string containing text lines separated by newlines - the
given lines are searched for in the given data without any extra
data lines between them.
* Container of strings containing text lines separated by newlines
- the given lines are searched for in the given data with extra
data lines allowed between lines belonging to different strings.
* Container of strings containing text lines separated by newlines
and containers containing strings - the same as above with the
internal containers containing strings being interpreted as if
all their content was joined together into a single string
separated by newlines.
A newline at the end of any multi-line lines string is interpreted as
        an expected extra trailing empty line.
"""
# str.splitlines() trims at most one trailing newline while we want the
# trailing newline to indicate that there should be an extra empty line
# at the end.
splitlines = lambda x : (x + "\n").splitlines()
if data is None:
data = []
elif data.__class__ is str:
data = splitlines(data)
if lines.__class__ is str:
lines = [splitlines(lines)]
else:
expanded = []
for x in lines:
if x.__class__ is str:
x = splitlines(x)
expanded.append(x)
lines = expanded
if _contains_lines(data, lines) != bool(expected):
output = []
if expected:
output = ["Did not find expected lines:"]
else:
output = ["Found unexpected lines:"]
first = True
for line_sequence in lines:
if line_sequence:
if first:
first = False
else:
output.append("...")
output.extend(" > " + line for line in line_sequence)
output.append("in output:")
output.extend(" > " + line for line in data)
annotation("failure", "\n".join(output))
self.fail_test(1)
def __ignore_elements(self, list, wildcard):
"""Removes in-place 'list' elements matching the given 'wildcard'."""
list[:] = filter(lambda x, w=wildcard: not fnmatch.fnmatch(x, w), list)
def __makedirs(self, path, wait):
"""
Creates a folder with the given path, together with any missing
parent folders. If WAIT is set, makes sure any newly created folders
have modification timestamps newer than the ones left behind by the
last build run.
"""
try:
if wait:
stack = []
while path and path not in stack and not os.path.isdir(path):
stack.append(path)
path = os.path.dirname(path)
while stack:
path = stack.pop()
os.mkdir(path)
self.__ensure_newer_than_last_build(path)
else:
os.makedirs(path)
except Exception:
pass
def __python_timestamp_resolution(self, path, minimum_resolution):
"""
Returns the modification timestamp resolution for the given path
supported by the used Python interpreter/OS/filesystem combination.
Will not check for resolutions less than the given minimum value. Will
change the path's modification timestamp in the process.
Return values:
0 - nanosecond resolution supported
positive decimal - timestamp resolution in seconds
"""
# Note on Python's floating point timestamp support:
# Python interpreter versions prior to Python 2.3 did not support
# floating point timestamps. Versions 2.3 through 3.3 may or may not
# support it depending on the configuration (may be toggled by calling
# os.stat_float_times(True/False) at program startup, disabled by
# default prior to Python 2.5 and enabled by default since). Python 3.3
# deprecated this configuration and 3.4 removed support for it after
# which floating point timestamps are always supported.
ver = sys.version_info[0:2]
python_nanosecond_support = ver >= (3, 4) or (ver >= (2, 3) and
os.stat_float_times())
# Minimal expected floating point difference used to account for
# possible imprecise floating point number representations. We want
# this number to be small (at least smaller than 0.0001) but still
# large enough that we can be sure that increasing a floating point
# value by 2 * eta guarantees the value read back will be increased by
# at least eta.
eta = 0.00005
stats_orig = os.stat(path)
def test_time(diff):
"""Returns whether a timestamp difference is detectable."""
os.utime(path, (stats_orig.st_atime, stats_orig.st_mtime + diff))
return os.stat(path).st_mtime > stats_orig.st_mtime + eta
# Test for nanosecond timestamp resolution support.
if not minimum_resolution and python_nanosecond_support:
if test_time(2 * eta):
return 0
# Detect the filesystem timestamp resolution. Note that there is no
# need to make this code 'as fast as possible' as, this function gets
# called before having to sleep until the next detectable modification
# timestamp value and that, since we already know nanosecond resolution
# is not supported, will surely take longer than whatever we do here to
# detect this minimal detectable modification timestamp resolution.
step = 0.1
if not python_nanosecond_support:
# If Python does not support nanosecond timestamp resolution we
# know the minimum possible supported timestamp resolution is 1
# second.
minimum_resolution = max(1, minimum_resolution)
index = max(1, int(minimum_resolution / step))
while step * index < minimum_resolution:
# Floating point number representation errors may cause our
# initially calculated start index to be too small if calculated
# directly.
index += 1
while True:
# Do not simply add up the steps to avoid cumulative floating point
# number representation errors.
next = step * index
if next > 10:
raise TestEnvironmentError("File systems with too coarse "
"modification timestamp resolutions not supported.")
if test_time(next):
return next
index += 1
def __read_file(self, name, exact=False):
name = self.adjust_names(name)[0]
result = ""
try:
if exact:
result = self.read(name)
else:
result = self.read_and_strip(name).replace("\\", "/")
except (IOError, IndexError):
print "Note: could not open file", name
self.fail_test(1)
return result
def __wait_for_time_change(self, path, touch, last_build_time):
"""
Wait until a newly assigned file system modification timestamp for
the given path is large enough for the timestamp difference between it
and the last build timestamp or the path's original file system
modification timestamp (depending on the last_build_time flag) to be
correctly recognized by both this Python based testing framework and
the Boost Jam executable being tested. May optionally touch the given
path to set its modification timestamp to the new value.
"""
assert self.last_build_timestamp or not last_build_time
stats_orig = os.stat(path)
if last_build_time:
start_time = self.last_build_timestamp
else:
start_time = stats_orig.st_mtime
build_resolution = self.__build_timestamp_resolution()
assert build_resolution >= 0
# Check whether the current timestamp is already new enough.
if stats_orig.st_mtime > start_time and (not build_resolution or
stats_orig.st_mtime >= start_time + build_resolution):
return
resolution = self.__python_timestamp_resolution(path, build_resolution)
assert resolution >= build_resolution
# Implementation notes:
# * Theoretically time.sleep() API might get interrupted too soon
# (never actually encountered).
# * We encountered cases where we sleep just long enough for the
        #   filesystem's modification timestamp to change to the desired value,
# but after waking up, the read timestamp is still just a tiny bit
# too small (encountered on Windows). This is most likely caused by
# imprecise floating point timestamp & sleep interval representation
# used by Python. Note though that we never encountered a case where
# more than one additional tiny sleep() call was needed to remedy
# the situation.
# * We try to wait long enough for the timestamp to change, but do not
# want to waste processing time by waiting too long. The main
# problem is that when we have a coarse resolution, the actual times
# get rounded and we do not know the exact sleep time needed for the
# difference between two such times to pass. E.g. if we have a 1
# second resolution and the original and the current file timestamps
# are both 10 seconds then it could be that the current time is
# 10.99 seconds and that we can wait for just one hundredth of a
# second for the current file timestamp to reach its next value, and
# using a longer sleep interval than that would just be wasting
# time.
while True:
os.utime(path, None)
c = os.stat(path).st_mtime
if resolution:
if c > start_time and (not build_resolution or c >= start_time
+ build_resolution):
break
if c <= start_time - resolution:
# Move close to the desired timestamp in one sleep, but not
# close enough for timestamp rounding to potentially cause
# us to wait too long.
if start_time - c > 5:
if last_build_time:
error_message = ("Last build time recorded as "
"being a future event, causing a too long "
"wait period. Something must have played "
"around with the system clock.")
else:
error_message = ("Original path modification "
"timestamp set to far into the future or "
"something must have played around with the "
"system clock, causing a too long wait "
"period.\nPath: '%s'" % path)
                        raise TestEnvironmentError(error_message)
_sleep(start_time - c)
else:
# We are close to the desired timestamp so take baby sleeps
# to avoid sleeping too long.
_sleep(max(0.01, resolution / 10))
else:
if c > start_time:
break
_sleep(max(0.01, start_time - c))
if not touch:
os.utime(path, (stats_orig.st_atime, stats_orig.st_mtime))
class List:
def __init__(self, s=""):
elements = []
if s.__class__ is str:
# Have to handle escaped spaces correctly.
elements = s.replace("\ ", "\001").split()
else:
elements = s
self.l = [e.replace("\001", " ") for e in elements]
def __len__(self):
return len(self.l)
def __getitem__(self, key):
return self.l[key]
def __setitem__(self, key, value):
self.l[key] = value
def __delitem__(self, key):
del self.l[key]
def __str__(self):
return str(self.l)
def __repr__(self):
return "%s.List(%r)" % (self.__module__, " ".join(self.l))
def __mul__(self, other):
result = List()
if not isinstance(other, List):
other = List(other)
for f in self:
for s in other:
result.l.append(f + s)
return result
def __rmul__(self, other):
if not isinstance(other, List):
other = List(other)
return List.__mul__(other, self)
def __add__(self, other):
result = List()
result.l = self.l[:] + other.l[:]
return result
def _contains_lines(data, lines):
data_line_count = len(data)
expected_line_count = reduce(lambda x, y: x + len(y), lines, 0)
index = 0
for expected in lines:
if expected_line_count > data_line_count - index:
return False
expected_line_count -= len(expected)
index = _match_line_sequence(data, index, data_line_count -
expected_line_count, expected)
if index < 0:
return False
return True
def _match_line_sequence(data, start, end, lines):
if not lines:
return start
for index in xrange(start, end - len(lines) + 1):
data_index = index
for expected in lines:
if not fnmatch.fnmatch(data[data_index], expected):
                break
data_index += 1
else:
return data_index
return -1
def _sleep(delay):
if delay > 5:
raise TestEnvironmentError("Test environment error: sleep period of "
"more than 5 seconds requested. Most likely caused by a file with "
"its modification timestamp set to sometime in the future.")
time.sleep(delay)
###############################################################################
#
# Initialization.
#
###############################################################################
# Make os.stat() return file modification times as floats instead of integers
# to get the best possible file timestamp resolution available. The exact
# resolution depends on the underlying file system and the Python os.stat()
# implementation. The better the resolution we achieve, the shorter we need to
# wait for files we create to start getting new timestamps.
#
# Additional notes:
# * os.stat_float_times() function first introduced in Python 2.3. and
# suggested for deprecation in Python 3.3.
# * On Python versions 2.5+ we do not need to do this as there os.stat()
# returns floating point file modification times by default.
# * Windows CPython implementations prior to version 2.5 do not support file
# modification timestamp resolutions of less than 1 second no matter whether
# these timestamps are returned as integer or floating point values.
# * Python documentation states that this should be set in a program's
# __main__ module to avoid affecting other libraries that might not be ready
# to support floating point timestamps. Since we use no such external
# libraries, we ignore this warning to make it easier to enable this feature
# in both our single & multiple-test scripts.
if (2, 3) <= sys.version_info < (2, 5) and not os.stat_float_times():
os.stat_float_times(True)
# Quickie tests. Should use doctest instead.
if __name__ == "__main__":
assert str(List("foo bar") * "/baz") == "['foo/baz', 'bar/baz']"
assert repr("foo/" * List("bar baz")) == "__main__.List('foo/bar foo/baz')"
assert _contains_lines([], [])
assert _contains_lines([], [[]])
assert _contains_lines([], [[], []])
assert _contains_lines([], [[], [], []])
assert not _contains_lines([], [[""]])
assert not _contains_lines([], [["a"]])
assert _contains_lines([""], [])
assert _contains_lines(["a"], [])
assert _contains_lines(["a", "b"], [])
assert _contains_lines(["a", "b"], [[], [], []])
assert _contains_lines([""], [[""]])
assert not _contains_lines([""], [["a"]])
assert not _contains_lines(["a"], [[""]])
assert _contains_lines(["a", "", "b", ""], [["a"]])
assert _contains_lines(["a", "", "b", ""], [[""]])
assert _contains_lines(["a", "", "b"], [["b"]])
assert not _contains_lines(["a", "b"], [[""]])
assert not _contains_lines(["a", "", "b", ""], [["c"]])
assert _contains_lines(["a", "", "b", "x"], [["x"]])
data = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
assert _contains_lines(data, [["1", "2"]])
assert not _contains_lines(data, [["2", "1"]])
assert not _contains_lines(data, [["1", "3"]])
assert not _contains_lines(data, [["1", "3"]])
assert _contains_lines(data, [["1"], ["2"]])
assert _contains_lines(data, [["1"], [], [], [], ["2"]])
assert _contains_lines(data, [["1"], ["3"]])
assert not _contains_lines(data, [["3"], ["1"]])
assert _contains_lines(data, [["3"], ["7"], ["8"]])
assert not _contains_lines(data, [["1"], ["3", "5"]])
assert not _contains_lines(data, [["1"], [""], ["5"]])
assert not _contains_lines(data, [["1"], ["5"], ["3"]])
assert not _contains_lines(data, [["1"], ["5", "3"]])
assert not _contains_lines(data, [[" 3"]])
assert not _contains_lines(data, [["3 "]])
assert not _contains_lines(data, [["3", ""]])
assert not _contains_lines(data, [["", "3"]])
print("tests passed")
|
[
"1211618464@qq.com"
] |
1211618464@qq.com
|
97d915750244d1397fea6975d202218d1ad853f4
|
29f4de72b9aadaba277b4adb5e5cee5d8dd71f1e
|
/projection_data/make_ai.py
|
49ce8d7363987cb88c00c23533048229bdb00207
|
[] |
no_license
|
fgassert/aqueduct_atlas
|
87be4e1fbe9686cf06ff9c65257deabc617344e9
|
d00cd78ef3122aeda6eb563d0913baf73a9bb80e
|
refs/heads/master
| 2021-01-21T21:48:26.821562
| 2016-04-21T22:02:58
| 2016-04-21T22:02:58
| 15,684,645
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,658
|
py
|
import arcpy as ap

AI_RES = 600
# NOTE: WORKSPACE is referenced below but was never defined in the original
# script; the geodatabase path here is an assumed placeholder.
WORKSPACE = "scratch.gdb"
def render_fields(prefix, polygons, mxd_template, layer_template, map_fields, map_layer_labels, map_values, ai_res=AI_RES):
mxd = ap.mapping.MapDocument(mxd_template)
df = ap.mapping.ListDataFrames(mxd)[0]
if map_layer_labels is None:
map_layer_labels = map_fields
if len(map_fields)!=len(map_layer_labels):
print "ERROR: Labels != fields"
if mxd.relativePaths == False:
print "ERROR: RelativePaths == False"
if type(map_fields) == str:
map_fields = [map_fields]
grpLyr = ap.mapping.ListLayers(mxd,"indicators")[0]
for i in range(len(map_fields)):
print "dissolving %s" % map_fields[i]
dissolved = "dis_%s_%s" % (map_layer_labels[i])
if not ap.Exists(dissolved):
ap.Dissolve_management(polygons,dissolved,map_fields[i])
else:
print "%s exists, skipping" % dissolved
lyr = ap.mapping.Layer(layer_template)
lyr.name = map_layer_labels[i]
lyr.replaceDataSource(WORKSPACE, "FILEGDB_WORKSPACE", dissolved, True)
lyr.symbology.valueField = map_fields[i]
lyr.symbology.classValues = map_values[map_fields[i]]
if grpLyr.isGroupLayer:
ap.mapping.AddLayerToGroup(df,grpLyr,lyr)
else:
ap.mapping.AddLayer(df, lyr)
outfile = "bin/%s%s_%s.ai"%(prefix, map_layer_labels[i])
print "exporting %s" % outfile
ap.mapping.ExportToAI(mxd, outfile,resolution=ai_res)
ap.mapping.RemoveLayer(df, ap.mapping.ListLayers(mxd,lyr.name)[0])
|
[
"cowbox314@gmail.com"
] |
cowbox314@gmail.com
|
2fa2387ea7542b00d01c04bff3aee0d13d23e027
|
84ef137d6c112f9a5f6fc2ef56acfc9ce3bb8948
|
/新建文件夹/活动推广.py
|
c0b32363418d65d12d02e964ef132ac3846c556f
|
[] |
no_license
|
liuxiyin/Good-boys
|
253f68aea4a632c10eb5080a25d83bf63c3b1b46
|
207b3e6c113d4f9ec97eef17493f4f3d1537ece7
|
refs/heads/master
| 2022-03-03T22:24:20.152040
| 2022-02-27T11:06:56
| 2022-02-27T11:06:56
| 82,903,301
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,175
|
py
|
"""
抖店-活动推广机器人
"""
import time
from playwright.sync_api import sync_playwright
from loguru import logger
import pandas as pd
import utility
import uuid
import os
import re
import json
import datetime
setting ={
"login_url":"https://fxg.jinritemai.com/ffa/mshop/homepage/index",
"start_time": "2021-05-30",
"end_time" : "2021-05-31"
}
class PromoteRobot:
    """
    Promotion bot.
    """
    def __init__(self):
        self.order_id_list = []
        self.content_list = []
        self.data = []
        self.columns = ["Date", "Order ID", "Marketing message", "Send result"]
self.excel_file = os.getcwd() + "\\data\\" + "{}.xlsx".format(str(uuid.uuid4()))
def login(self, playwright):
self.context = playwright.firefox.launch_persistent_context(headless=False, user_data_dir='UserData_firefox',
# executable_path='../ms-playwright/firefox-1234/firefox/firefox.exe',
)
self.login_page = self.context.pages[0]
self.login_page.goto(setting['login_url'])
for t in range(300, 0, -1):
time.sleep(1)
if self.login_page.is_visible('//h1[contains(text(),"抖店")]'):
                logger.info(' - Account is logged in.')
return True
    def recoding(self, id, msg="sent successfully"):
        """
        Append a record.
        :param id: order number
        :param msg: send result
        :return:
        """
today = datetime.datetime.today().date()
date_items = [str(today), str(id), setting["content"], msg]
self.data.append(date_items)
def wait(self, page, seconds=1):
page.wait_for_load_state('networkidle')
time.sleep(seconds)
def remove_excess(self, page):
self.wait(page)
        # Dismiss the notification dialog
if page.is_visible('//span[@class="ant-modal-close-x"]'):
page.click('//span[@class="ant-modal-close-x"]')
self.wait(page)
        # Dismiss the advertisement
if page.is_visible('//div[@class="ant-modal-body"]'):
page.click('//span[@aria-label="close-circle"]')
self.wait(page)
        # Dismiss the new-feature tour
if page.is_visible('text="新功能引导"'):
page.click('//button[contains(.,"知道了")]')
self.wait(page)
page.click('//button[contains(.,"知道了")]')
def choose_date(self):
"""
        Select the date range.
:return:
"""
self.wait(self.page)
start_time = setting["start_time"] + " 00:00:00"
end_time = setting["end_time"] + " 00:00:00"
self.page.query_selector('//input[@id="compact_time"]').click()
self.page.query_selector('//input[@id="compact_time"]').fill(start_time)
self.page.click('//input[@placeholder="结束日期"]')
self.page.fill('//input[@placeholder="结束日期"]', end_time)
self.page.press('//input[@placeholder="结束日期"]', 'Enter')
def get_order_id(self, data_list):
"""
        Collect order IDs from a page of search results.
        :param data_list:
:return:
"""
for data in data_list:
shop_order_id = data["shop_order_id"]
self.order_id_list.append(shop_order_id)
def query_content_id(self):
"""
        Find orders that have reviews.
:return:
"""
        logger.info('Checking which orders have reviews -------')
for index, id in enumerate(self.order_id_list):
self.page1.fill('//input[@placeholder="商品编号/订单编号/商品名称"]', str(id))
with self.page1.expect_response(f'**/product/tcomment/commentList?**id={id}**') as response_info:
self.page1.click('//span[contains(text(),"查询")]')
response = response_info.value.text()
resp_json = json.loads(response)
data_list = resp_json.get("data")
if len(data_list) == 0:
                logger.info(f'{id} has no review content')
                logger.info(f"{len(self.order_id_list)} orders total, checked #{index + 1}")
continue
data_info = data_list[0]
content = data_info["content"]
self.content_list.append(id)
def send_files(self):
"""
        Send the promotion messages.
:return:
"""
        logger.info('---- Sending promotion messages ----')
for index, id in enumerate(self.content_list):
            logger.info(f'{len(self.content_list)} orders to message, sending #{index + 1}')
self.page.fill('//input[@placeholder="搜用户/180天订单(Ctrl+F)"]', id)
self.page2.click('//div[contains(@class,"TdhgxWD_")]')
if self.page2.is_visible('//div[contains(text(),"会话超过7天不可回复")]'):
err_msg = "会话超过7天不可回复"
self.recoding(id, msg=err_msg)
continue
self.page2.fill('//textarea', setting["msg"])
self.page2.click('//div[contains(text(),"发送")]')
            # Send the image
self.page2.set_input_files('//textarea/preceding-sibling::div[1]/div/label/input', setting["image_path"])
div_list = self.page2.query_selector('//div[@id="root"]/following-sibling::div[4]/div/div[3]')
for div in div_list:
text = div.text_content()
if text == "发送":
div.click()
            # Send the video, if one is configured (the original guard used
            # `or` with a direct key access and uploaded an empty path; fixed)
            if setting.get("video_path"):
                self.page2.set_input_files('//textarea/preceding-sibling::div[1]/div/label[2]', setting["video_path"])
div_list = self.page2.query_selector('//div[@id="root"]/following-sibling::div[4]/div/div[3]')
for div in div_list:
text = div.text_content()
if text == "发送":
div.click()
"""
关闭会话
"""
self.recoding(id)
def process(self):
"""
        Main processing flow.
:return:
"""
self.page.query_selector('//span[@title="10 条/页"]').click()
self.page.query_selector('//div[contains(text(),"50 条/页")]').click()
self.wait(self.page, seconds=2)
self.page.query_selector('//div[@data-kora="展开"]').click()
self.page.click('//span[@title="下单时间"]')
self.page.click('//div[contains(text(),"完成时间")]')
        self.choose_date()  # enter the date range
with self.page.expect_response('**/api/order/searchlist**&page=0**') as response_info:
self.page.click('text=\"查询\"')
self.wait(self.page)
all_num1 = self.page.query_selector('//li[@title="上一页"]/preceding-sibling::li[1]').text_content()
count_page1 = int(re.findall('\d+', all_num1)[0]) // 50 + 1
print("count_page1", count_page1)
response = response_info.value.text()
resp_json = json.loads(response)
data_list = resp_json.get("data")
self.get_order_id(data_list)
for i in range(1, count_page1):
with self.page.expect_response(f'**/api/order/searchlist**&page={i}**') as response_info:
self.page.click('//li[@title="下一页"]')
response = response_info.value.text()
resp_json = json.loads(response)
data_list = resp_json.get("data")
self.get_order_id(data_list)
time.sleep(1)
def write_excel(self):
"""
生成表格
:return:
"""
df = pd.DataFrame(data=self.data, columns=self.columns)
df.to_excel(self.excel_file, index=False)
def main(self, playwright):
self.login(playwright)
self.page = self.context.new_page()
        self.page.goto('https://fxg.jinritemai.com/ffa/morder/order/list')  # open the order management page
self.login_page.close()
        self.remove_excess(self.page)  # dismiss popups and ads
self.process()
self.page1 = self.context.new_page()
        self.page1.goto('https://fxg.jinritemai.com/ffa/g/comment')  # open the review management page
self.page.close()
self.remove_excess(self.page1)
self.query_content_id()
self.page1.close()
df = pd.DataFrame(data=self.content_list, columns=["订单号"])
df.to_excel(self.excel_file, index=False)
self.page2 = self.context.new_page()
        self.page2.goto('https://im.jinritemai.com/pc_seller/main/chat')  # open the Feige (飞鸽) IM system
self.remove_excess(self.page2)
self.send_files()
if __name__ == '__main__':
pr = PromoteRobot()
with sync_playwright() as playwright:
pr.main(playwright)
|
[
"noreply@github.com"
] |
liuxiyin.noreply@github.com
|
01aeefdd8ab7ea79a46a7dffa84f833c991a6fc0
|
0cbf5f3149e391b7bceff2cd8fe6dc61a75d6ac4
|
/Robolib.py
|
3ae4a1546844c064891e0e7a062470cb7a5cd240
|
[] |
no_license
|
madhavkhoslaa/Differential-Drive
|
afb8f921b35fda4900d7bd153fbe28952d68e7a8
|
b3b24dbf0cccc5d4cb94ce0b081b13e6a4f84409
|
refs/heads/master
| 2021-05-11T04:53:41.890469
| 2018-01-20T17:56:28
| 2018-01-20T17:56:28
| 117,950,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,636
|
py
|
try:
    import time
    import RPi.GPIO as GPIO
    GPIO.cleanup()
    GPIO.setmode(GPIO.BOARD)  # or GPIO.setmode(GPIO.BCM) according to preference
except ImportError:
    print('Install the dependencies')


class Robo:
    def __init__(self, Left_1, Left_2, Right_1, Right_2, Enl, Enr):  # for the current build PWM is not there
        self.Left_1 = Left_1
        self.Left_2 = Left_2
        self.Right_1 = Right_1
        self.Right_2 = Right_2
        self.Enl = Enl
        self.Enr = Enr
        for pin in (Left_1, Left_2, Right_1, Right_2, Enl, Enr):
            GPIO.setup(pin, GPIO.OUT)

    def left_toggle(self, togg, duration):
        # Drive the left motor forward (togg == 1) or backward (togg == 0)
        # for `duration` seconds. The parameter was renamed from `time`,
        # which shadowed the time module and broke time.time().
        curr = time.time()
        step = curr + duration
        if togg == 1:
            while curr < step:
                curr = time.time()
                GPIO.output(self.Left_1, True)
                GPIO.output(self.Left_2, False)
        if togg == 0:
            while curr < step:
                curr = time.time()
                GPIO.output(self.Left_1, False)
                GPIO.output(self.Left_2, True)

    def right_toggle(self, togg, duration):
        curr = time.time()
        step = curr + duration
        if togg == 1:
            while curr < step:
                curr = time.time()
                GPIO.output(self.Right_1, True)
                GPIO.output(self.Right_2, False)
        if togg == 0:
            while curr < step:
                curr = time.time()
                GPIO.output(self.Right_1, False)  # reversed polarity (the original repeated the forward case)
                GPIO.output(self.Right_2, True)

    def forward(self, duration):
        curr = time.time()
        step = curr + duration
        while curr < step:
            curr = time.time()
            GPIO.output(self.Left_1, True)
            GPIO.output(self.Left_2, False)
            GPIO.output(self.Right_1, True)
            GPIO.output(self.Right_2, False)

    def backward(self, duration):
        curr = time.time()
        step = curr + duration  # was `step = curr`, so the loop never ran
        while curr < step:
            curr = time.time()
            GPIO.output(self.Left_1, False)
            GPIO.output(self.Left_2, True)
            GPIO.output(self.Right_1, False)  # was writing to self.right_toggle (a method reference)
            GPIO.output(self.Right_2, True)

    def servo(self):
        pass
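
# A minimal usage sketch (the pin numbers below are placeholders, not values
# from the original module):
#   bot = Robo(Left_1=11, Left_2=13, Right_1=15, Right_2=16, Enl=18, Enr=22)
#   bot.forward(2)           # drive forward for two seconds
#   bot.left_toggle(1, 0.5)  # pulse the left motor forward for half a second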
|
[
"noreply@github.com"
] |
madhavkhoslaa.noreply@github.com
|
063fa18d33d789a16c7f61a0cc60e227ef6aa085
|
6ac8c704903f49d5e3fb3591047c1c3eba59f423
|
/env/lib/python2.7/site-packages/wheel/tool/__init__.py
|
de1c70ca2cd108d9ff1be387d18884a917c29d38
|
[] |
no_license
|
wuilfred/test
|
91dd2f47259f4599783c3f9a6abd334d40e8e7a7
|
e0b8678bbd46a662379db8a6a5a1680b3ff33d38
|
refs/heads/master
| 2020-04-02T05:52:26.476969
| 2016-06-02T22:57:30
| 2016-06-02T22:57:30
| 60,301,972
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,312
|
py
|
"""
Wheel command-line utility.
"""
import os
import hashlib
import sys
import json
import wheel.paths
from glob import iglob
from .. import signatures
from ..util import (urlsafe_b64decode, urlsafe_b64encode, native, binary,
matches_requirement)
from ..install import WheelFile
def require_pkgresources(name):
try:
import pkg_resources
except ImportError:
raise RuntimeError("'{0}' needs pkg_resources (part of setuptools).".format(name))
import argparse
class WheelError(Exception): pass
# For testability
def get_keyring():
try:
from ..signatures import keys
import keyring
except ImportError:
raise WheelError("Install wheel[signatures] (requires keyring, dirspec) for signatures.")
return keys.WheelKeys, keyring
def keygen(get_keyring=get_keyring):
"""Generate a public/private key pair."""
WheelKeys, keyring = get_keyring()
ed25519ll = signatures.get_ed25519ll()
wk = WheelKeys().load()
keypair = ed25519ll.crypto_sign_keypair()
vk = native(urlsafe_b64encode(keypair.vk))
sk = native(urlsafe_b64encode(keypair.sk))
kr = keyring.get_keyring()
kr.set_password("wheel", vk, sk)
sys.stdout.write("Created Ed25519 keypair with vk={0}\n".format(vk))
if isinstance(kr, keyring.backends.file.BaseKeyring):
sys.stdout.write("in {0}\n".format(kr.file_path))
else:
sys.stdout.write("in %r\n" % kr.__class__)
sk2 = kr.get_password('wheel', vk)
if sk2 != sk:
raise WheelError("Keyring is broken. Could not retrieve secret key.")
sys.stdout.write("Trusting {0} to sign and verify all packages.\n".format(vk))
wk.add_signer('+', vk)
wk.trust('+', vk)
wk.save()
def sign(wheelfile, replace=False, get_keyring=get_keyring):
"""Sign a wheel"""
WheelKeys, keyring = get_keyring()
ed25519ll = signatures.get_ed25519ll()
wf = WheelFile(wheelfile, append=True)
wk = WheelKeys().load()
name = wf.parsed_filename.group('name')
sign_with = wk.signers(name)[0]
sys.stdout.write("Signing {0} with {1}\n".format(name, sign_with[1]))
vk = sign_with[1]
kr = keyring.get_keyring()
sk = kr.get_password('wheel', vk)
keypair = ed25519ll.Keypair(urlsafe_b64decode(binary(vk)),
urlsafe_b64decode(binary(sk)))
record_name = wf.distinfo_name + '/RECORD'
sig_name = wf.distinfo_name + '/RECORD.jws'
if sig_name in wf.zipfile.namelist():
raise WheelError("Wheel is already signed.")
record_data = wf.zipfile.read(record_name)
payload = {"hash":"sha256=" + native(urlsafe_b64encode(hashlib.sha256(record_data).digest()))}
sig = signatures.sign(payload, keypair)
wf.zipfile.writestr(sig_name, json.dumps(sig, sort_keys=True))
wf.zipfile.close()
def unsign(wheelfile):
"""
Remove RECORD.jws from a wheel by truncating the zip file.
RECORD.jws must be at the end of the archive. The zip file must be an
ordinary archive, with the compressed files and the directory in the same
order, and without any non-zip content after the truncation point.
"""
import wheel.install
vzf = wheel.install.VerifyingZipFile(wheelfile, "a")
info = vzf.infolist()
if not (len(info) and info[-1].filename.endswith('/RECORD.jws')):
raise WheelError("RECORD.jws not found at end of archive.")
vzf.pop()
vzf.close()
def verify(wheelfile):
"""Verify a wheel.
The signature will be verified for internal consistency ONLY and printed.
Wheel's own unpack/install commands verify the manifest against the
signature and file contents.
"""
wf = WheelFile(wheelfile)
sig_name = wf.distinfo_name + '/RECORD.jws'
sig = json.loads(native(wf.zipfile.open(sig_name).read()))
verified = signatures.verify(sig)
sys.stderr.write("Signatures are internally consistent.\n")
sys.stdout.write(json.dumps(verified, indent=2))
sys.stdout.write('\n')
def unpack(wheelfile, dest='.'):
"""Unpack a wheel.
Wheel content will be unpacked to {dest}/{name}-{ver}, where {name}
is the package name and {ver} its version.
:param wheelfile: The path to the wheel.
:param dest: Destination directory (default to current directory).
"""
wf = WheelFile(wheelfile)
namever = wf.parsed_filename.group('namever')
destination = os.path.join(dest, namever)
sys.stderr.write("Unpacking to: %s\n" % (destination))
wf.zipfile.extractall(destination)
wf.zipfile.close()
def install(requirements, requirements_file=None,
wheel_dirs=None, force=False, list_files=False,
dry_run=False):
"""Install wheels.
:param requirements: A list of requirements or wheel files to install.
:param requirements_file: A file containing requirements to install.
:param wheel_dirs: A list of directories to search for wheels.
:param force: Install a wheel file even if it is not compatible.
:param list_files: Only list the files to install, don't install them.
:param dry_run: Do everything but the actual install.
"""
# If no wheel directories specified, use the WHEELPATH environment
# variable, or the current directory if that is not set.
if not wheel_dirs:
wheelpath = os.getenv("WHEELPATH")
if wheelpath:
wheel_dirs = wheelpath.split(os.pathsep)
else:
wheel_dirs = [ os.path.curdir ]
# Get a list of all valid wheels in wheel_dirs
all_wheels = []
for d in wheel_dirs:
for w in os.listdir(d):
if w.endswith('.whl'):
wf = WheelFile(os.path.join(d, w))
if wf.compatible:
all_wheels.append(wf)
# If there is a requirements file, add it to the list of requirements
if requirements_file:
# If the file doesn't exist, search for it in wheel_dirs
# This allows standard requirements files to be stored with the
# wheels.
if not os.path.exists(requirements_file):
for d in wheel_dirs:
name = os.path.join(d, requirements_file)
if os.path.exists(name):
requirements_file = name
break
with open(requirements_file) as fd:
requirements.extend(fd)
to_install = []
for req in requirements:
if req.endswith('.whl'):
# Explicitly specified wheel filename
if os.path.exists(req):
wf = WheelFile(req)
if wf.compatible or force:
to_install.append(wf)
else:
msg = ("{0} is not compatible with this Python. "
"--force to install anyway.".format(req))
raise WheelError(msg)
else:
# We could search on wheel_dirs, but it's probably OK to
# assume the user has made an error.
raise WheelError("No such wheel file: {}".format(req))
continue
# We have a requirement spec
# If we don't have pkg_resources, this will raise an exception
matches = matches_requirement(req, all_wheels)
if not matches:
raise WheelError("No match for requirement {}".format(req))
to_install.append(max(matches))
# We now have a list of wheels to install
if list_files:
sys.stdout.write("Installing:\n")
if dry_run:
return
for wf in to_install:
if list_files:
sys.stdout.write(" {0}\n".format(wf.filename))
continue
wf.install(force=force)
wf.zipfile.close()
def install_scripts(distributions):
"""
Regenerate the entry_points console_scripts for the named distribution.
"""
try:
from setuptools.command import easy_install
import pkg_resources
except ImportError:
raise RuntimeError("'wheel install_scripts' needs setuptools.")
for dist in distributions:
pkg_resources_dist = pkg_resources.get_distribution(dist)
install = wheel.paths.get_install_command(dist)
command = easy_install.easy_install(install.distribution)
command.args = ['wheel'] # dummy argument
command.finalize_options()
command.install_egg_scripts(pkg_resources_dist)
def convert(installers, dest_dir, verbose):
require_pkgresources('wheel convert')
# Only support wheel convert if pkg_resources is present
from ..wininst2wheel import bdist_wininst2wheel
from ..egg2wheel import egg2wheel
for pat in installers:
for installer in iglob(pat):
if os.path.splitext(installer)[1] == '.egg':
conv = egg2wheel
else:
conv = bdist_wininst2wheel
if verbose:
sys.stdout.write("{0}... ".format(installer))
sys.stdout.flush()
conv(installer, dest_dir)
if verbose:
sys.stdout.write("OK\n")
def parser():
p = argparse.ArgumentParser()
s = p.add_subparsers(help="commands")
def keygen_f(args):
keygen()
keygen_parser = s.add_parser('keygen', help='Generate signing key')
keygen_parser.set_defaults(func=keygen_f)
def sign_f(args):
sign(args.wheelfile)
sign_parser = s.add_parser('sign', help='Sign wheel')
sign_parser.add_argument('wheelfile', help='Wheel file')
sign_parser.set_defaults(func=sign_f)
def unsign_f(args):
unsign(args.wheelfile)
unsign_parser = s.add_parser('unsign', help=unsign.__doc__)
unsign_parser.add_argument('wheelfile', help='Wheel file')
unsign_parser.set_defaults(func=unsign_f)
def verify_f(args):
verify(args.wheelfile)
verify_parser = s.add_parser('verify', help=verify.__doc__)
verify_parser.add_argument('wheelfile', help='Wheel file')
verify_parser.set_defaults(func=verify_f)
def unpack_f(args):
unpack(args.wheelfile, args.dest)
unpack_parser = s.add_parser('unpack', help='Unpack wheel')
unpack_parser.add_argument('--dest', '-d', help='Destination directory',
default='.')
unpack_parser.add_argument('wheelfile', help='Wheel file')
unpack_parser.set_defaults(func=unpack_f)
def install_f(args):
install(args.requirements, args.requirements_file,
args.wheel_dirs, args.force, args.list_files)
install_parser = s.add_parser('install', help='Install wheels')
install_parser.add_argument('requirements', nargs='*',
help='Requirements to install.')
install_parser.add_argument('--force', default=False,
action='store_true',
help='Install incompatible wheel files.')
install_parser.add_argument('--wheel-dir', '-d', action='append',
dest='wheel_dirs',
help='Directories containing wheels.')
install_parser.add_argument('--requirements-file', '-r',
help="A file containing requirements to "
"install.")
install_parser.add_argument('--list', '-l', default=False,
dest='list_files',
action='store_true',
help="List wheels which would be installed, "
"but don't actually install anything.")
install_parser.set_defaults(func=install_f)
def install_scripts_f(args):
install_scripts(args.distributions)
install_scripts_parser = s.add_parser('install-scripts', help='Install console_scripts')
install_scripts_parser.add_argument('distributions', nargs='*',
help='Regenerate console_scripts for these distributions')
install_scripts_parser.set_defaults(func=install_scripts_f)
def convert_f(args):
convert(args.installers, args.dest_dir, args.verbose)
convert_parser = s.add_parser('convert', help='Convert egg or wininst to wheel')
convert_parser.add_argument('installers', nargs='*', help='Installers to convert')
convert_parser.add_argument('--dest-dir', '-d', default=os.path.curdir,
help="Directory to store wheels (default %(default)s)")
convert_parser.add_argument('--verbose', '-v', action='store_true')
convert_parser.set_defaults(func=convert_f)
def version_f(args):
from .. import __version__
sys.stdout.write("wheel %s\n" % __version__)
version_parser = s.add_parser('version', help='Print version and exit')
version_parser.set_defaults(func=version_f)
def help_f(args):
p.print_help()
help_parser = s.add_parser('help', help='Show this help')
help_parser.set_defaults(func=help_f)
return p
def main():
p = parser()
args = p.parse_args()
if not hasattr(args, 'func'):
p.print_help()
else:
# XXX on Python 3.3 we get 'args has no func' rather than short help.
try:
args.func(args)
return 0
except WheelError as e:
sys.stderr.write(e.message + "\n")
return 1
|
[
"wuilfred@gmail.com"
] |
wuilfred@gmail.com
|
8176f29c210a52c8544016e57564ace030a86875
|
155a25eb18213664da9978030e6743b04d570141
|
/manage.py
|
825bd789c5cf66c550320c139be766171af95606
|
[] |
no_license
|
powellc/timberwyck
|
0d6fd6e46c2899f32dda37faa8030a8c7080bc97
|
583cbc2ee33cb56187db13c94d5d4af74f51c9bd
|
refs/heads/master
| 2020-05-18T13:59:20.394609
| 2014-05-03T05:09:18
| 2014-05-03T05:09:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "timberwyck.settings")
os.environ.setdefault("DJANGO_CONFIGURATION", "Dev")
from configurations.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"colin.powell@gmail.com"
] |
colin.powell@gmail.com
|
4442d3be186c0780c78d903f7110f0e29096dfb6
|
8cbf335c5a39f2bbf1912b937ea4c3a31ab76f53
|
/kakuro.py
|
3869cab2399a04e10f9778aee78dd0fa41a9b26b
|
[] |
no_license
|
louisabraham/kakuro.py
|
e72e5a0dd4d1fc8b43bb8b1004ce7b46e5bf88bf
|
28ab8e5b066773a0f27f9eff6629391d21b167fc
|
refs/heads/master
| 2023-08-13T12:28:18.538669
| 2021-10-14T21:28:19
| 2021-10-14T21:28:19
| 417,281,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,969
|
py
|
from functools import lru_cache, partial
from collections import defaultdict
from set_cover import solve as solve_set_cover
def encode(pattern, cols, lines):
grid = [[c == "0" for c in line] for line in pattern.split()]
n = len(grid)
constraints = []
# build constraints on lines
vars = []
cur = 0
for i in range(n):
for j in range(n):
if grid[i][j]:
vars.append((i, j))
if (j == n - 1 or not grid[i][j]) and vars:
constraints.append((lines[cur], vars))
cur += 1
vars = []
# build constraints on columns
vars = []
cur = 0
for j in range(n):
for i in range(n):
if grid[i][j]:
vars.append((i, j))
if (i == n - 1 or not grid[i][j]) and vars:
constraints.append((cols[cur], vars))
cur += 1
vars = []
# map variables to constraints
var_to_cons = defaultdict(list)
for c, (_, vars) in enumerate(constraints):
for var in vars:
var_to_cons[var].append(c)
Y = {}
for i in range(n):
for j in range(n):
if not grid[i][j]:
continue
for x in range(1, 10):
# each cell has exactly one value
Y[i, j, x] = [("pos", i, j)]
for c in var_to_cons[i, j]:
# each value can be used at most once
Y[i, j, x].append(("con", c, x))
# add the "complement" values
for c, (tot, vars) in enumerate(constraints):
for t in decomp(45 - tot, 9 - len(vars)):
Y[c, t] = [("con", c)]
for x in t:
Y[c, t].append(("con", c, x))
# build X from Y
X = defaultdict(set)
for y, l in Y.items():
for x in l:
X[x].add(y)
return n, X, Y
@lru_cache(None)
def decomp(n, k, mini=1):
if n < mini:
return []
if k == 1:
return [(n,)] if n < 10 else []
ans = []
for x in range(mini, 10):
for t in decomp(n - x, k - 1, mini=x + 1):
ans.append((x,) + t)
return ans
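
# For illustration: decomp(10, 2) enumerates the strictly increasing digit
# pairs summing to 10 -> [(1, 9), (2, 8), (3, 7), (4, 6)].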
def pp_sol(n, sol):
grid = [[0 for _ in range(n)] for _ in range(n)]
for x in sol:
if len(x) == 3:
i, j, x = x
grid[i][j] = x
return "\n".join("".join(str(x) for x in line) for line in grid)
def solve(pattern, cols, lines):
n, X, Y = encode(pattern, cols, lines)
yield from map(partial(pp_sol, n), solve_set_cover(X, Y))
if __name__ == "__main__":
pattern = """
0000X000
00000000
00000000
X0000000
0000000X
00000000
00000000
000X0000
"""
cols = [10, 13, 38, 39, 31, 28, 36, 39, 12, 10]
lines = [14, 8, 38, 36, 35, 35, 37, 36, 6, 11]
print(next(solve(pattern, cols, lines)))
|
[
"louis.abraham@yahoo.fr"
] |
louis.abraham@yahoo.fr
|
c36ac2bc001fcaa57d1844413a53409c0fcacc1e
|
e21499aa817af1eef534c1423fe50b2802e0b40e
|
/vidly/urls.py
|
ce6005336d34c15d9de75a3e9c511a97d26ef8d2
|
[] |
no_license
|
Adnan0o7/vidly
|
cb03cd29b06dd713b1897b81ea2d8527a5659077
|
cb46cf716771d42b58f49259d22613c2becd4a86
|
refs/heads/master
| 2023-05-27T04:13:57.396743
| 2021-06-13T15:38:55
| 2021-06-13T15:38:55
| 323,496,799
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 979
|
py
|
"""vidly URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from api.models import MovieResource
from . import views
movie_resource = MovieResource()
urlpatterns = [
path('', views.home, name='home'),
path('admin/', admin.site.urls),
path('movies/', include('movies.urls')),
path('api/', include(movie_resource.urls)),
]
|
[
"adnanalam326@gmail.com"
] |
adnanalam326@gmail.com
|
13990e2b0cab9680fab03083d32eed4dee03defb
|
3ed768683750339de40e99ef38fa5bd3dd892870
|
/RayTracer/Constants.py
|
137c953a48bbe6f2462f905d743ef7e9d7cf3551
|
[] |
no_license
|
bgazdz/RayTracer
|
e602e88311e852cc0df7c5b413b70c097fd8c583
|
a6a57fac82298284c4b0e2d04c8195b30e793f87
|
refs/heads/master
| 2021-01-20T22:31:35.000835
| 2016-06-27T17:26:26
| 2016-06-27T17:26:26
| 62,074,727
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
#kEpsilon Value for object intersection error
kEpsilon = 0.0000001
#Max times a ray can be reflected
RAYDEPTH = 5
#Max objects per OcTree node, unless MAX_DEPTH is reached
MAX_OBJ = 10
#Maximum depth of the OcTree, overrides the MAX_OBJ at the max depth
MAX_DEPTH = 10
#Big positive value for time of intersections
BIGTIME = 1e6
|
[
"gazdzia2@illinois.edu"
] |
gazdzia2@illinois.edu
|
173312dba36b9bc8f0e3bfeb09c8c2caa6c7d7b2
|
3449e7f1d380caff4a2defca51fc81f280fc3df0
|
/usage.py
|
7d082ee15f65a7522331f6023f661b5dd9964b9c
|
[] |
no_license
|
jlfsjunior/dash_dthree_hooks
|
649a1d85717507ee38afb9ba79cee6fa228efa08
|
89cfa556c02a3c5b08b53e6e33c7ccc053c82473
|
refs/heads/master
| 2023-05-13T04:29:45.819866
| 2021-02-26T18:38:34
| 2021-02-26T18:38:34
| 340,701,342
| 14
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,436
|
py
|
import dash_dthree_hooks
import dash
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_bootstrap_components as dbc
import random
import json
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
app.layout = html.Div(
[
dash_dthree_hooks.Bubble(
id='bubble-chart',
),
html.Div(
[
dbc.Button(id="update-data", children="Update Data", color="success", className="mr-1"),
html.P(id='clicked-output')
]
)
],
style={
"padding": "25px 50px"
}
)
@app.callback(
Output('bubble-chart', 'data'),
[Input("update-data", 'n_clicks')]
)
def change_data(n_clicks):
colors = ['red', 'green', 'blue', 'orange', 'yellow', 'purple', 'gray']
n_points = random.randint(1, 10)
data = [
{
'id': id,
'x': random.randint(0, 100),
'y': random.randint(0, 100),
'r': random.randint(0, 100),
'color': random.choice(colors)
} for id in range(n_points) ]
return data
@app.callback(
Output('clicked-output', 'children'),
[Input("bubble-chart", 'clicked')]
)
def click_point(datum):
if datum is None:
return "Click on something!"
else:
datum_str = json.dumps(datum)
return datum_str
if __name__ == '__main__':
app.run_server(debug=True)
|
[
"jlfsjunior@gmail.com"
] |
jlfsjunior@gmail.com
|
491161ac62fe5c86902e78fd6934719505e4db0a
|
5f1518696c4ed270138581c9ab48f3f7ab128b5f
|
/utils/losses_pytorch/lovasz_loss.py
|
4cde88354c0d320753a914c511694f3acbc167cd
|
[] |
no_license
|
rabbitlamp/MyImageSegmentation
|
19c7197953d1b8edaa930c4dbf2e312595bdad84
|
1647730df4f194afe682596908ffe5363f27b733
|
refs/heads/master
| 2023-01-31T19:41:14.719754
| 2020-12-17T09:10:20
| 2020-12-17T09:10:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,458
|
py
|
import torch
import torch.nn as nn
# from torch.autograd import Function
# copy from: https://github.com/Hsuxu/Loss_ToolBox-PyTorch/blob/master/LovaszSoftmax/lovasz_loss.py
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1. - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
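
# For illustration (values worked out by hand): lovasz_grad on the sorted
# ground truth [1., 1., 0.] yields the Jaccard deltas [0.5, 0.5, 0.0].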
class LovaszSoftmax(nn.Module):
def __init__(self, reduction='mean'):
super(LovaszSoftmax, self).__init__()
self.reduction = reduction
def prob_flatten(self, input, target):
assert input.dim() in [4, 5]
num_class = input.size(1)
if input.dim() == 4:
input = input.permute(0, 2, 3, 1).contiguous()
input_flatten = input.view(-1, num_class)
elif input.dim() == 5:
input = input.permute(0, 2, 3, 4, 1).contiguous()
input_flatten = input.view(-1, num_class)
target_flatten = target.view(-1)
return input_flatten, target_flatten
def lovasz_softmax_flat(self, inputs, targets):
num_classes = inputs.size(1)
losses = []
for c in range(num_classes):
target_c = (targets == c).float()
if num_classes == 1:
input_c = inputs[:, 0]
else:
input_c = inputs[:, c]
loss_c = (torch.autograd.Variable(target_c) - input_c).abs()
loss_c_sorted, loss_index = torch.sort(loss_c, 0, descending=True)
target_c_sorted = target_c[loss_index]
losses.append(torch.dot(loss_c_sorted, torch.autograd.Variable(lovasz_grad(target_c_sorted))))
losses = torch.stack(losses)
if self.reduction == 'none':
loss = losses
elif self.reduction == 'sum':
loss = losses.sum()
else:
loss = losses.mean()
return loss
def forward(self, inputs, targets):
# print(inputs.shape, targets.shape) # (batch size, class_num, x,y,z), (batch size, 1, x,y,z)
inputs, targets = self.prob_flatten(inputs, targets)
# print(inputs.shape, targets.shape)
losses = self.lovasz_softmax_flat(inputs, targets)
return losses
|
[
"475596527@qq.com"
] |
475596527@qq.com
|
74baf6e30202f26b7f2cf0d7e12c508046f426dc
|
a45fe3f29a7011e8b20c0860486c84e903bed4cf
|
/api-service/api/upload_category_refs.py
|
8e8cd894899625af0b6e2ef90d841d6ed818a210
|
[] |
no_license
|
Jmg-21/aws-ms-react-flask-postgress-docker-haproxy
|
90bea8012e8a46a30afd34310b418578ee617271
|
b59edb376b3647c9c19d0ff0f769eca91ae0dced
|
refs/heads/master
| 2023-06-22T15:58:10.421544
| 2021-07-22T03:53:22
| 2021-07-22T03:53:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,264
|
py
|
import os
from utils import server_generated_id
import psycopg2
import xlrd
from utils import UPLOAD_FOLDER
from flask_restful import Resource,request
from database import Database
class ApiUploadCategoryRefs(Resource):
def post(self):
conn = Database()
template = request.files['file']
return UploadCategoryRefs(conn,template)
def UploadCategoryRefs(conn,template):
data = []
    result = {'status': 'success', 'message': 'success'}
if template.filename != '':
filename = server_generated_id('catrefs',2)+'.'+ template.filename.split(".")[-1]
file_path = os.path.join(UPLOAD_FOLDER+'/templates', filename)
template.save(file_path)
book = xlrd.open_workbook(file_path)
sheet = book.sheet_by_index(0)
for r in range(1, sheet.nrows):
refid = str(sheet.cell(r, 0).value).replace('.0', '')
catid = str(sheet.cell(r, 1).value).replace('.0', '')
cat_name = str(sheet.cell(r, 2).value).replace('.0', '')
segment = str(sheet.cell(r, 3).value).replace('.0', '')
brand = str(sheet.cell(r, 5).value).replace('.0', '')
percent_share = str(sheet.cell(r, 6).value).replace('.0', '')
facing_count = str(sheet.cell(r, 7).value).replace('.0', '')
pulloutday = str(sheet.cell(r, 8).value).replace('.0', '')
facing_segment = str(sheet.cell(r, 9).value).replace('.0', '')
facing_brand = str(sheet.cell(r, 10).value).replace('.0', '')
data.append((refid,catid,cat_name,segment,brand,percent_share,facing_count,pulloutday,facing_segment,facing_brand))
# print('template',data)
query = None
if len(data) != 0:
args_str = ','.join(['%s'] * len(data))
try:
query = conn.mogrify("""
insert into category_refs (refsid,catid,cat_name,segment,brand,percent_share,facing_count,pulloutday,facing_segment,facing_brand) values {}
ON CONFLICT (catid,refsid) DO UPDATE
SET (segment,brand,percent_share,facing_count,pulloutday,facing_segment,facing_brand,date_updated) =
(
EXCLUDED.segment,
EXCLUDED.brand,
EXCLUDED.percent_share,
EXCLUDED.facing_count,
EXCLUDED.pulloutday,
EXCLUDED.facing_segment,
EXCLUDED.facing_brand,
now()
);
""".format(args_str) , data , commit=True)
except psycopg2.OperationalError as err:
# print('err',err)
result = {
'status': 'error',
'message':'Please check your network '+str(err)
}
except psycopg2.errors.SyntaxError as err:
print('err',err)
result = {
'status': 'error',
                'message':'Transaction Query '+str(err)
}
except psycopg2.errors.DuplicateColumn as err:
print('err',err)
result = {
'status': 'error',
'message':'Duplicated '+str(err)
}
print('result',query)
return result
|
[
"jakegarbo21@gmail.com"
] |
jakegarbo21@gmail.com
|
f3c359439c26eb8a7a4587fca00e7a7dba67e738
|
92cc9ab5547f47a73cc8c855a559ddedfdedb80a
|
/Exercicios_Resolvidos/Parte 2/Semana 5/exercicio adicional/insertion_sort.py
|
d6355d21b887091e44d3c150be2520f271c844d9
|
[
"MIT"
] |
permissive
|
EmersonAires/Introducao_a_ciencia_da_computacao_com_Python
|
f592c9cddb8a7a9ad7bb5ae51646ff2c88fa9194
|
152f95410a638d84adf97367be17ec67fdb8bf3c
|
refs/heads/main
| 2023-08-05T11:50:58.970836
| 2021-09-19T12:55:53
| 2021-09-19T12:55:53
| 331,036,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
def insertion_sort(lista):
for i in range(1, len(lista)):
        element = lista[i]  # store the value of the element being examined
        j = i
        while j > 0 and lista[j-1] > element:
            lista[j] = lista[j-1]  # shift the number one slot to the right
            j -= 1
        lista[j] = element  # insert the element at its proper position
return lista
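
# Added sanity check (not part of the original exercise file):
if __name__ == '__main__':
    print(insertion_sort([5, 2, 4, 6, 1, 3]))  # expected: [1, 2, 3, 4, 5, 6]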
|
[
"emersoneduardo.airesnunes@gmail.com"
] |
emersoneduardo.airesnunes@gmail.com
|
c5001ecfa2716239bb437211c0ca5878f4942947
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_underscoring.py
|
b4a890d3243fc3207ae8047c40277eb6f93f3f90
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
from xai.brain.wordbase.verbs._underscore import _UNDERSCORE
# class header
class _UNDERSCORING(_UNDERSCORE, ):
def __init__(self,):
_UNDERSCORE.__init__(self)
self.name = "UNDERSCORING"
self.specie = 'verbs'
self.basic = "underscore"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
1f31d3541fcf1f3d31a4e933baf995fe6f33ac91
|
6cd1ec241ea9d66c03c94403d3a2fa2417503bc1
|
/src/saxophone/rulechain.py
|
1826e2db960e0410c5d50e0bef4d811d5af522fd
|
[] |
no_license
|
NicholasDAllen/saxophone
|
65f286c5078f9aafd0368739c37f15628b3a0651
|
55bb297685528b662c3c9479bb8c5b25eb613a4a
|
refs/heads/master
| 2021-07-19T05:02:03.302112
| 2021-02-21T17:05:00
| 2021-02-21T17:05:00
| 48,620,708
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,575
|
py
|
"""
A series of rules to match against
"""
class SaveRule(object):
pass
class RuleChain(object):
def __init__(self):
self._rule_queue = []
self._rule_pointer = 0
# Stack that represents the dom. As we pop the
        # stack we will decrement the select pointer as
# we leave elements we previously selected
self._select_dec_stack = []
self.results = []
def _intermediate_result(self, tag):
if len(self._rule_queue) == self._rule_pointer + 1:
self.results.append(tag)
self._rule_pointer += 1
self._select_dec_stack.append(1)
elif isinstance(self._rule_queue[self._rule_pointer + 1], SaveRule):
self.results.append(tag)
self._rule_pointer += 2
self._select_dec_stack.append(2)
else:
self._rule_pointer += 1
self._select_dec_stack.append(1)
def add(self, rule):
"""
Add a new rule to this RuleChain
"""
self._rule_queue.append(rule)
def current_rule(self):
"""
Gets the current rule we are trying to match,
based on the rule pointer.
"""
if self._rule_pointer == len(self._rule_queue):
return None
return self._rule_queue[self._rule_pointer]
def pop(self):
"""
Move us back down the rule queue as we exit
tags in the dom. _select_dec_stack effectively
tracks our dom location.
"""
self._rule_pointer -= self._select_dec_stack.pop()
|
[
"nick@cartlogic.com"
] |
nick@cartlogic.com
|
c17989e789ee286344975b054f845f1f8df6c037
|
ea198339d0a17b223108306b66c916331e3f4824
|
/Source Code/sentimentAnalysis.py
|
63a64d0084929dd496ef27f73a87c3ea1efda100
|
[] |
no_license
|
rajdas2016/Master_Thesis_Rajib-Das
|
f1c707cd9cd12040b6341683aac59dff7dadb6c4
|
bab0524d8f4e13a089fda06e6543e3d6f4ddea69
|
refs/heads/master
| 2020-05-26T12:37:07.238502
| 2017-03-28T14:13:51
| 2017-03-28T14:13:51
| 84,999,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,302
|
py
|
"""
Class to score sentiment of text.
Use domain-independent method of dictionary lookup of sentiment words,
handling negations and multiword expressions. Based on SentiWordNet 3.0.
"""
import nltk
import re
class SentimentAnalysis(object):
"""Class to get sentiment score based on analyzer."""
def __init__(self, filename='SentiWordNet.txt', weighting='geometric'):
"""Initialize with filename and choice of weighting."""
if weighting not in ('geometric', 'harmonic', 'average'):
raise ValueError(
'Allowed weighting options are geometric, harmonic, average')
# parse file and build sentiwordnet dicts
self.swn_pos = {'a': {}, 'v': {}, 'r': {}, 'n': {}}
self.swn_all = {}
self.build_swn(filename, weighting)
def average(self, score_list):
"""Get arithmetic average of scores."""
if(score_list):
return sum(score_list) / float(len(score_list))
else:
return 0
def geometric_weighted(self, score_list):
""""Get geometric weighted sum of scores."""
weighted_sum = 0
num = 1
for el in score_list:
weighted_sum += (el * (1 / float(2**num)))
num += 1
return weighted_sum
# another possible weighting instead of average
def harmonic_weighted(self, score_list):
"""Get harmonic weighted sum of scores."""
weighted_sum = 0
num = 2
for el in score_list:
weighted_sum += (el * (1 / float(num)))
num += 1
return weighted_sum
def build_swn(self, filename, weighting):
"""Build class's lookup based on SentiWordNet 3.0."""
        with open(filename) as f:
            records = [line.split('\t') for line in f]
for rec in records:
# has many words in 1 entry
words = rec[4].split()
pos = rec[0]
for word_num in words:
if len(word_num) != 0:
word = word_num.split('#')[0]
sense_num = int(word_num.split('#')[1])
# build a dictionary key'ed by sense number
if word not in self.swn_pos[pos]:
self.swn_pos[pos][word] = {}
self.swn_pos[pos][word][sense_num] = float(
rec[2]) - float(rec[3])
if word not in self.swn_all:
self.swn_all[word] = {}
self.swn_all[word][sense_num] = float(rec[2]) - float(rec[3])
# convert innermost dicts to ordered lists of scores
for pos in self.swn_pos.keys():
for word in self.swn_pos[pos].keys():
newlist = [self.swn_pos[pos][word][k] for k in sorted(
self.swn_pos[pos][word].keys())]
if weighting == 'average':
self.swn_pos[pos][word] = self.average(newlist)
if weighting == 'geometric':
self.swn_pos[pos][word] = self.geometric_weighted(newlist)
if weighting == 'harmonic':
self.swn_pos[pos][word] = self.harmonic_weighted(newlist)
for word in self.swn_all.keys():
newlist = [self.swn_all[word][k] for k in sorted(
self.swn_all[word].keys())]
if weighting == 'average':
self.swn_all[word] = self.average(newlist)
if weighting == 'geometric':
self.swn_all[word] = self.geometric_weighted(newlist)
if weighting == 'harmonic':
self.swn_all[word] = self.harmonic_weighted(newlist)
def pos_short(self, pos):
"""Convert NLTK POS tags to SWN's POS tags."""
if pos in set(['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']):
return 'v'
elif pos in set(['JJ', 'JJR', 'JJS']):
return 'a'
elif pos in set(['RB', 'RBR', 'RBS']):
return 'r'
elif pos in set(['NNS', 'NN', 'NNP', 'NNPS']):
return 'n'
else:
return 'a'
def score_word(self, word, pos):
"""Get sentiment score of word based on SWN and part of speech."""
try:
return self.swn_pos[pos][word]
except KeyError:
try:
return self.swn_all[word]
except KeyError:
return 0
def score(self, sentence):
"""Sentiment score a sentence."""
# init sentiwordnet lookup/scoring tools
impt = set(['NNS', 'NN', 'NNP', 'NNPS', 'JJ', 'JJR', 'JJS',
'RB', 'RBR', 'RBS', 'VB', 'VBD', 'VBG', 'VBN',
'VBP', 'VBZ', 'unknown'])
non_base = set(['VBD', 'VBG', 'VBN', 'VBP', 'VBZ', 'NNS', 'NNPS'])
negations = set(['not', 'n\'t', 'less', 'no', 'never',
'nothing', 'nowhere', 'hardly', 'barely',
'scarcely', 'nobody', 'none'])
stopwords = nltk.corpus.stopwords.words('english')
wnl = nltk.WordNetLemmatizer()
scores = []
tokens = nltk.tokenize.word_tokenize(sentence)
tagged = nltk.pos_tag(tokens)
index = 0
for el in tagged:
pos = el[1]
try:
                word = re.match(r'(\w+)', el[0]).group(0).lower()
start = index - 5
if start < 0:
start = 0
neighborhood = tokens[start:index]
# look for trailing multiword expressions
word_minus_one = tokens[index-1:index+1]
word_minus_two = tokens[index-2:index+1]
# if multiword expression, fold to one expression
if(self.is_multiword(word_minus_two)):
if len(scores) > 1:
scores.pop()
scores.pop()
if len(neighborhood) > 1:
neighborhood.pop()
neighborhood.pop()
word = '_'.join(word_minus_two)
pos = 'unknown'
elif(self.is_multiword(word_minus_one)):
if len(scores) > 0:
scores.pop()
if len(neighborhood) > 0:
neighborhood.pop()
word = '_'.join(word_minus_one)
pos = 'unknown'
# perform lookup
if (pos in impt) and (word not in stopwords):
if pos in non_base:
word = wnl.lemmatize(word, self.pos_short(pos))
score = self.score_word(word, self.pos_short(pos))
if len(negations.intersection(set(neighborhood))) > 0:
score = -score
scores.append(score)
except AttributeError:
pass
index += 1
if len(scores) > 0:
return sum(scores) / float(len(scores))
else:
return 0
def is_multiword(self, words):
"""Test if a group of words is a multiword expression."""
joined = '_'.join(words)
return joined in self.swn_all
def get_score(noun):
s = SentimentAnalysis(filename='senti.txt',weighting='geometric')
score = s.score(noun)
return score
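
# Added usage sketch (assumptions: 'senti.txt' is a SentiWordNet 3.0 dump as
# build_swn expects, and the NLTK 'punkt', 'averaged_perceptron_tagger',
# 'stopwords' and 'wordnet' data are installed):
#
#   print(get_score('not a bad movie'))  # expected > 0: negation flips 'bad'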
|
[
"noreply@github.com"
] |
rajdas2016.noreply@github.com
|
e73d557ca3d976b0bb8a9578c0bc11fae9ae58bc
|
f40329de9ea63341e0db4a60347ea45ad446719e
|
/main_wx.py
|
7a4a30cdb0513dd1e02046b9c8ecb5535f611cc7
|
[] |
no_license
|
django000/bitcoin-vitoz
|
e7b95ab29c3a6602702b0f300f3fca9b35ddd8a8
|
7f0bb2ba65d609c35f74a4bbe44c66d4003f0752
|
refs/heads/master
| 2021-04-12T06:05:29.651324
| 2018-12-14T03:55:28
| 2018-12-14T03:55:28
| 94,501,452
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,120
|
py
|
# -*- coding: utf-8 -*-
import wx
import time
import binascii
import base_wx
from file2key import main
from ecdsa import SigningKey, NIST256p
class MainFrame(base_wx.BaseFrame):
"""docstring for MainFrame"""
def __init__(self, parent):
base_wx.BaseFrame.__init__(self, parent)
def exchangeSignature(self, event):
skA = SigningKey.generate(curve=NIST256p)
skB = SigningKey.generate(curve=NIST256p)
# vk = sk.get_verifying_key()
skAM = self.m_textCtrl13.GetValue()
skBM = self.m_textCtrl23.GetValue()
sigAM = str(binascii.hexlify(skA.sign(skAM.encode("utf-8"))), "utf-8")
sigBM = str(binascii.hexlify(skB.sign(skBM.encode("utf-8"))), "utf-8")
print("The signature of A: %s" % sigAM)
print("The signature of B: %s" % sigBM)
self.m_textCtrl31.SetValue(sigAM)
self.m_textCtrl32.SetValue(sigBM)
result = False
while not result:
with open("mining/mine_result.txt", "r") as f:
result = f.readline().strip() == "success"
time.sleep(2)
with open("mining/mine_result.txt", "w") as f:
f.write("")
print("A receive B's signature: %s" % sigBM)
print("B receive A's signature: %s" % sigAM)
self.m_textCtrl33.SetValue(sigBM)
self.m_textCtrl34.SetValue(sigAM)
def generateKeypairOne(self, event):
con = self.m_textCtrl11.GetValue()
if con == "":
sk = "null"
pk = "null"
else:
h, sk, pk = main("str", con, 2048)
self.m_textCtrl12.SetValue(sk)
self.m_textCtrl13.SetValue(pk)
event.Skip()
def generateKeypairTwo(self, event):
con = self.m_textCtrl21.GetValue()
if con == "":
sk = "null"
pk = "null"
else:
h, sk, pk = main("str", con, 2048)
self.m_textCtrl22.SetValue(sk)
self.m_textCtrl23.SetValue(pk)
event.Skip()
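# Added note (sketch, not in the original file): exchangeSignature() signs but
# never verifies. With the python-ecdsa API used above, the receiving side
# could verify a hex-encoded signature like this (names illustrative):
#
#   vkA = skA.get_verifying_key()
#   vkA.verify(binascii.unhexlify(sigAM), skAM.encode("utf-8"))
#   # raises ecdsa.BadSignatureError if the signature does not match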
if __name__ == '__main__':
app = wx.App()
mainFrame = MainFrame(None)
mainFrame.Show(True)
app.MainLoop()
|
[
"zhangwentao0601@163.com"
] |
zhangwentao0601@163.com
|
fd5e4fe6024116245528a3f8cab92a2573a6bd40
|
1ad7267fcd745a5609259cc14b6df87d66f5eed4
|
/deviceCamera.py
|
a7b48aa3dceedc5b7a3d913e2350ed2687406e01
|
[] |
no_license
|
stargazer2377/facerobo
|
7da4304fc83ab7b362142354bfea6f810d77f115
|
cf124234fac3b113d839dcc264e2287c768e3729
|
refs/heads/master
| 2020-05-29T20:03:06.540202
| 2015-05-17T23:29:57
| 2015-05-17T23:29:57
| 35,785,297
| 0
| 1
| null | 2015-05-17T22:25:22
| 2015-05-17T22:19:30
| null |
UTF-8
|
Python
| false
| false
| 27
|
py
|
# camera related functions
|
[
"stargazer2377@gmail.com"
] |
stargazer2377@gmail.com
|
a97f5a634c05e848cc04f021971ab996c3b468bd
|
c89e1bedd57bf8ca0ffde49370b9ba68ba827717
|
/company_search_results_page.py
|
7ef518371ca7d7193d45c350e55dbbaaa9d6c61b
|
[] |
no_license
|
lawrenceabaeo/scraper-for-interview-stories-pytest
|
69bbde947b5e00efb2d35f8e3a49da50fa82c307
|
540dd1ad38cd6196bee62c844019d636ed30d501
|
refs/heads/master
| 2021-08-28T00:55:15.998717
| 2017-12-10T23:59:09
| 2017-12-10T23:59:09
| 112,061,587
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,304
|
py
|
import logging
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
import popup
class CompanySearchResultsPageObject():
def __init__(self, driver):
self.driver = driver
def on_company_search_results_page(self):
logging.info(" checking if on company search results page")
expected_title = "Reviews "
try:
WebDriverWait(self.driver, 10) \
.until(expected_conditions.title_contains(expected_title))
logging.info(" title of current page: " + self.driver.title)
logging.info(" on search results page (containing '"
+ expected_title + "')")
return True
except TimeoutException:
logging.info(" NOT on search results page (containing '"
+ expected_title + "')")
logging.info(" title of current page: " + self.driver.title)
return False
def click_first_company_link(self):
logging.info(" clicking link for company")
company_link_css = "a.h2.tightAll" # this has changed in the past
self.driver.find_element_by_css_selector(company_link_css).click()
# ^handle popup
# sometimes a popup appears after clicking company
popup_object = popup.PopupObject(self.driver)
popup_object.check_and_close_any_popups()
def no_matching_companies(self):
# Not waiting, assuming page rendered already
logging.info(" checking if no matches were returned")
suggestions_text_css = "div#SearchSuggestions" # This css changed before
if (len(self.driver.find_elements_by_css_selector(suggestions_text_css)) > 0):
logging.info(" suggestions css found")
# NOTE: The search suggestions text used to be 'Search Suggestions',
# now it is 'Adjust your search'. Will NOT verify the text anymore,
# and instead rely on if the SearchSuggestions div exists
return True
else:
logging.info(" no search suggestions css found...")
logging.info(" ... assuming companies were found")
return False
|
[
"labaeo@gmail.com"
] |
labaeo@gmail.com
|
97f79e16e6f24dd301ced1daad53506b413b2bcf
|
0a6aa864c8c767607d828bce63279cd6dd9b5950
|
/Django-Web/FastCampus/day3_detail/day3_review/photos/views.py
|
beb15189d48b6f6fbfb6ab3e0e36c4f7dbfe751f
|
[] |
no_license
|
kim1124/Projects
|
70314a4ae399cafe5ea7d26d412c575114eb3b07
|
96592d0bd26d2622dead3359e060e0a4fe83664e
|
refs/heads/master
| 2021-01-22T07:32:10.796774
| 2017-11-24T03:50:16
| 2017-11-24T03:50:16
| 81,641,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
from django.shortcuts import render
from photos.models import Tag
from photos.models import Post
from photos.models import Comment
# Create your views here.
def mainpage(req):
arr_posts = []
qus_posts = Post.objects.all()
qus_first = qus_posts.first()
if qus_first is not None:
for post in qus_posts:
arr_posts.append(post)
ctx = {}
ctx['arr_posts'] = arr_posts
return render(req, 'mainpage.html', ctx)
|
[
"kim112mgxld@gmail.com"
] |
kim112mgxld@gmail.com
|
ce29362ce5b4fd605a165c0605c5996a2e47b978
|
63b649ceed6cc4d6b7861002f44d993536d0d7bd
|
/set/set9.py
|
ecfa8ca9913551a6d1ae3b2229e95d2b2c07db08
|
[] |
no_license
|
Maxim-Deriuha/decor
|
d1ad89c278cdf7319be9e7b4d4c064c64484b805
|
b2a96226073a7c87ff32f7b91904c1c58985a39e
|
refs/heads/master
| 2023-07-18T15:59:47.009933
| 2021-09-07T13:58:18
| 2021-09-07T13:58:18
| 403,590,251
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 66
|
py
|
myset = set('python')
item = myset.pop()
print(item, len(myset))
|
[
"maksym.deriuha@ideus.biz"
] |
maksym.deriuha@ideus.biz
|
729477fad0e79e344faad4099045ccca07134ddf
|
cc10bfc3d258eb7ef776639fab2e1cc0795591c0
|
/FastMVS/fastmvsnet/train1.py
|
54d32f6738e6ad2c2884cf8b772cee6a6620a984
|
[] |
no_license
|
keqpan/mvs_w_depth_sk
|
4a0ba58453802c8d774b8eba8737cec83cc1129f
|
6471b47842d720ab3fd7225ccc7dc6921a5cf473
|
refs/heads/master
| 2022-10-27T23:25:58.545587
| 2020-06-06T21:16:51
| 2020-06-06T21:16:51
| 269,454,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,013
|
py
|
#!/usr/bin/env python
import argparse
import os.path as osp
import logging
import time
import sys
sys.path.insert(0, osp.dirname(__file__) + '/..')
import torch
import torch.nn as nn
from fastmvsnet.config import load_cfg_from_file
from fastmvsnet.utils.io import mkdir
from fastmvsnet.utils.logger import setup_logger
from fastmvsnet.utils.torch_utils import set_random_seed
from fastmvsnet.model1 import build_pointmvsnet as build_model
from fastmvsnet.solver import build_optimizer, build_scheduler
from fastmvsnet.utils.checkpoint import Checkpointer
from fastmvsnet.dataset1 import build_data_loader
from fastmvsnet.utils.tensorboard_logger import TensorboardLogger
from fastmvsnet.utils.metric_logger import MetricLogger
from fastmvsnet.utils.file_logger import file_logger
def parse_args():
parser = argparse.ArgumentParser(description="PyTorch Fast-MVSNet Training")
parser.add_argument(
"--cfg",
dest="config_file",
default="",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
return args
def train_model(model,
loss_fn,
metric_fn,
image_scales,
inter_scales,
isFlow,
data_loader,
optimizer,
curr_epoch,
tensorboard_logger,
log_period=1,
output_dir="",
):
logger = logging.getLogger("fastmvsnet.train")
meters = MetricLogger(delimiter=" ")
model.train()
end = time.time()
total_iteration = data_loader.__len__()
path_list = []
for iteration, data_batch in enumerate(data_loader):
data_time = time.time() - end
curr_ref_img_path = data_batch["ref_img_path"]
path_list.extend(curr_ref_img_path)
data_batch = {k: v.cuda(non_blocking=True) for k, v in data_batch.items() if isinstance(v, torch.Tensor)}
preds = model(data_batch, image_scales, inter_scales, isFlow)
optimizer.zero_grad()
loss_dict = loss_fn(preds, data_batch, isFlow)
metric_dict = metric_fn(preds, data_batch, isFlow)
losses = sum(loss_dict.values())
#print("LOSS DICT", loss_dict['coarse_loss'])
#print("LOSSES", loss_dict.values())
meters.update(loss=losses, **loss_dict, **metric_dict)
losses.backward()
optimizer.step()
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time, data=data_time)
if iteration % log_period == 0:
logger.info(
meters.delimiter.join(
[
"EPOCH: {epoch:2d}",
"iter: {iter:4d}",
"{meters}",
"lr: {lr:.2e}",
"max mem: {memory:.0f}",
]
).format(
epoch=curr_epoch,
iter=iteration,
meters=str(meters),
lr=optimizer.param_groups[0]["lr"],
memory=torch.cuda.max_memory_allocated() / (1024.0 ** 2),
)
)
tensorboard_logger.add_scalars(loss_dict, curr_epoch * total_iteration + iteration, prefix="train")
tensorboard_logger.add_scalars(metric_dict, curr_epoch * total_iteration + iteration, prefix="train")
if iteration % (100 * log_period) == 0:
file_logger(data_batch, preds, curr_epoch * total_iteration + iteration, output_dir, prefix="train")
return meters
def validate_model(model,
loss_fn,
metric_fn,
image_scales,
inter_scales,
isFlow,
data_loader,
curr_epoch,
tensorboard_logger,
log_period=1,
output_dir="",
):
logger = logging.getLogger("fastmvsnet.validate")
meters = MetricLogger(delimiter=" ")
model.train()
end = time.time()
total_iteration = data_loader.__len__()
with torch.no_grad():
for iteration, data_batch in enumerate(data_loader):
data_time = time.time() - end
curr_ref_img_path = data_batch["ref_img_path"]
data_batch = {k: v.cuda(non_blocking=True) for k, v in data_batch.items() if isinstance(v, torch.Tensor)}
preds = model(data_batch, image_scales, inter_scales, isFlow)
loss_dict = loss_fn(preds, data_batch, isFlow)
metric_dict = metric_fn(preds, data_batch, isFlow)
losses = sum(loss_dict.values())
meters.update(loss=losses, **loss_dict, **metric_dict)
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time, data=data_time)
if iteration % log_period == 0:
logger.info(
meters.delimiter.join(
[
"EPOCH: {epoch:2d}",
"iter: {iter:4d}",
"{meters}",
]
).format(
epoch=curr_epoch,
iter=iteration,
meters=str(meters),
)
)
tensorboard_logger.add_scalars(meters.meters, curr_epoch * total_iteration + iteration, prefix="valid")
if iteration % (100 * log_period) == 0:
file_logger(data_batch, preds, curr_epoch * total_iteration + iteration, output_dir, prefix="valid")
return meters
def train(cfg, output_dir=""):
logger = logging.getLogger("fastmvsnet.trainer")
# build model
set_random_seed(cfg.RNG_SEED)
model, loss_fn, metric_fn = build_model(cfg)
logger.info("Build model:\n{}".format(str(model)))
model = nn.DataParallel(model).cuda()
# build optimizer
optimizer = build_optimizer(cfg, model)
# build lr scheduler
scheduler = build_scheduler(cfg, optimizer)
# build checkpointer
checkpointer = Checkpointer(model,
optimizer=optimizer,
scheduler=scheduler,
save_dir=output_dir,
logger=logger)
checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT, resume=cfg.AUTO_RESUME)
ckpt_period = cfg.TRAIN.CHECKPOINT_PERIOD
# build data loader
train_data_loader = build_data_loader(cfg, mode="train")
val_period = cfg.TRAIN.VAL_PERIOD
val_data_loader = build_data_loader(cfg, mode="val") if val_period > 0 else None
    # build tensorboard logger (can be disabled by commenting this out)
tensorboard_logger = TensorboardLogger(output_dir)
# train
max_epoch = cfg.SCHEDULER.MAX_EPOCH
start_epoch = checkpoint_data.get("epoch", 0)
best_metric_name = "best_{}".format(cfg.TRAIN.VAL_METRIC)
best_metric = checkpoint_data.get(best_metric_name, None)
logger.info("Start training from epoch {}".format(start_epoch))
for epoch in range(start_epoch, max_epoch):
cur_epoch = epoch + 1
scheduler.step()
start_time = time.time()
train_meters = train_model(model,
loss_fn,
metric_fn,
image_scales=cfg.MODEL.TRAIN.IMG_SCALES,
inter_scales=cfg.MODEL.TRAIN.INTER_SCALES,
isFlow=(cur_epoch > cfg.SCHEDULER.INIT_EPOCH),
data_loader=train_data_loader,
optimizer=optimizer,
curr_epoch=epoch,
tensorboard_logger=tensorboard_logger,
log_period=cfg.TRAIN.LOG_PERIOD,
output_dir=output_dir,
)
epoch_time = time.time() - start_time
logger.info("Epoch[{}]-Train {} total_time: {:.2f}s".format(
cur_epoch, train_meters.summary_str, epoch_time))
# checkpoint
if cur_epoch % ckpt_period == 0 or cur_epoch == max_epoch:
checkpoint_data["epoch"] = cur_epoch
checkpoint_data[best_metric_name] = best_metric
checkpointer.save("model_{:03d}".format(cur_epoch), **checkpoint_data)
# validate
if val_period < 1:
continue
if cur_epoch % val_period == 0 or cur_epoch == max_epoch:
val_meters = validate_model(model,
loss_fn,
metric_fn,
image_scales=cfg.MODEL.VAL.IMG_SCALES,
inter_scales=cfg.MODEL.VAL.INTER_SCALES,
isFlow=(cur_epoch > cfg.SCHEDULER.INIT_EPOCH),
data_loader=val_data_loader,
curr_epoch=epoch,
tensorboard_logger=tensorboard_logger,
log_period=cfg.TEST.LOG_PERIOD,
output_dir=output_dir,
)
logger.info("Epoch[{}]-Val {}".format(cur_epoch, val_meters.summary_str))
# best validation
cur_metric = val_meters.meters[cfg.TRAIN.VAL_METRIC].global_avg
if best_metric is None or cur_metric > best_metric:
best_metric = cur_metric
checkpoint_data["epoch"] = cur_epoch
checkpoint_data[best_metric_name] = best_metric
checkpointer.save("model_best", **checkpoint_data)
logger.info("Best val-{} = {}".format(cfg.TRAIN.VAL_METRIC, best_metric))
return model
def main():
args = parse_args()
num_gpus = torch.cuda.device_count()
cfg = load_cfg_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
output_dir = cfg.OUTPUT_DIR
if output_dir:
config_path = osp.splitext(args.config_file)[0]
config_path = config_path.replace("configs", "outputs1")
output_dir = output_dir.replace('@', config_path)
mkdir(output_dir)
logger = setup_logger("fastmvsnet", output_dir, prefix="train")
logger.info("Using {} GPUs".format(num_gpus))
logger.info(args)
logger.info("Loaded configuration file {}".format(args.config_file))
logger.info("Running with config:\n{}".format(cfg))
train(cfg, output_dir)
if __name__ == "__main__":
main()
|
[
"guzovskiy.eyu@phystech.edu"
] |
guzovskiy.eyu@phystech.edu
|
242a32c2b815b2212ff22207ee853b99dd0f4f41
|
34f9de521e5489b11eebc21ebafb938bc639d267
|
/reservation/landing.py
|
0e83ab27c72cf8de14bf526668d683f74504c1e3
|
[] |
no_license
|
QingqingXiao/ost_15fall
|
95f4c88a13ae13b5e3d0e141822363712878fa58
|
82d3b315f0e943cbb88b8697fee50a5cd77e539b
|
refs/heads/master
| 2021-01-10T16:08:11.988577
| 2015-12-19T18:21:54
| 2015-12-19T18:21:54
| 46,835,048
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 488
|
py
|
import webapp2
import jinja2
import os
import model
import re
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class Landing(webapp2.RequestHandler):
def get(self):
template = JINJA_ENVIRONMENT.get_template('templates/index.html')
self.response.write(template.render())
application = webapp2.WSGIApplication([
('/', Landing)], debug=True)
|
[
"qx307@nyu.edu"
] |
qx307@nyu.edu
|
9d7d3d6808abbef6400da3ff9d8b746caaa25258
|
db0d15b989134e42c42de461692959a08dd83719
|
/gradebook/migrations/0010_auto__chg_field_semester_final_gpa__chg_field_semester_gpa_points__chg.py
|
3e0924edbdc1e78cec2a0644959293e855783da2
|
[] |
no_license
|
AndCook/mygrades
|
0d2c024664cdaacbc83715faa6498afbf8100140
|
68fb000ab272305b2a92b0e84e2ad69886a756ba
|
refs/heads/master
| 2021-01-19T14:57:15.694186
| 2014-06-01T03:49:15
| 2014-06-01T03:49:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,818
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Semester.final_gpa'
db.alter_column(u'gradebook_semester', 'final_gpa', self.gf('django.db.models.fields.FloatField')())
# Changing field 'Semester.grade_points'
db.alter_column(u'gradebook_semester', 'grade_points', self.gf('django.db.models.fields.FloatField')())
# Changing field 'Course.grade_points'
db.alter_column(u'gradebook_course', 'grade_points', self.gf('django.db.models.fields.FloatField')())
def backwards(self, orm):
# Changing field 'Semester.final_gpa'
db.alter_column(u'gradebook_semester', 'final_gpa', self.gf('django.db.models.fields.DecimalField')(max_digits=4, decimal_places=3))
# Changing field 'Semester.grade_points'
db.alter_column(u'gradebook_semester', 'grade_points', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=2))
# Changing field 'Course.grade_points'
db.alter_column(u'gradebook_course', 'grade_points', self.gf('django.db.models.fields.DecimalField')(max_digits=4, decimal_places=2))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'gradebook.assignment': {
'Meta': {'object_name': 'Assignment'},
'awardedPoints': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gradebook.Category']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'percentage': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'possiblePoints': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'})
},
u'gradebook.category': {
'Meta': {'object_name': 'Category'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gradebook.Course']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'worth': ('django.db.models.fields.FloatField', [], {})
},
u'gradebook.course': {
'Meta': {'object_name': 'Course'},
'final_grade': ('django.db.models.fields.CharField', [], {'default': "'#'", 'max_length': '2'}),
'grade_points': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'hours': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructor': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'semester': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gradebook.Semester']"})
},
u'gradebook.semester': {
'Meta': {'object_name': 'Semester'},
'end_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
'final_gpa': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'gpa_hours': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'grade_points': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'hours_passed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'hours_planned': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_finished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_future': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'start_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['gradebook']
|
[
"newalth@cs.utexas.edu"
] |
newalth@cs.utexas.edu
|
6c902414d01f93bdf5dc08f48d8f55edec85dd88
|
8e94794d00ae2f6770f2b1c4c13269ee8f2a5de3
|
/app/controllers/BackendController/Polygon/ForexController.py
|
eefbdab2a8593bc74769ae6fffb52509591ee27c
|
[] |
no_license
|
webclinic017/Trading-app-1
|
debd99edf075d54aec7655a5b99fb36f91859315
|
70308541c9c44c858953e18097ee5f6440b40b34
|
refs/heads/main
| 2023-08-14T23:43:43.579705
| 2021-10-14T09:54:59
| 2021-10-14T09:54:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,926
|
py
|
from app.models.sqa_models import *
from app.db.tasks import database_session
from app.helpers.database_helper import bulk_insert
from sqlalchemy.future import select
from fastapi_pagination.ext.sqlalchemy import paginate
from sqlalchemy.orm import lazyload
from datetime import date,datetime
class ForexController:
def __init__(self):
self.session, self.sync_session = database_session()
    async def query_data(self, r_obj, relation, parameter_val=None, start=None, end=None, limit=50000, parameter="ticker"):
        session = self.sync_session()
        q_res = session.query(Forex).filter(getattr(Forex, parameter) == parameter_val).one()
        # filter to the requested date range when both endpoints are given
        if start and end:
            my_time = datetime.min.time()
            start = datetime.combine(start, my_time)
            end = datetime.combine(end, my_time)
            data = getattr(q_res, relation).filter(r_obj.datetime.between(start, end)).limit(limit).all()
        else:
            data = getattr(q_res, relation).limit(limit).all()
        data = [dict(d) for d in data]
        return data
async def get_forex_price_daily_adj(self,ticker=None,start=None,end=None):
prices=await self.query_data(ForexPricesDailyAdj,"price_daily_adjusted",ticker,start,end)
return prices
async def get_forex_price_daily_unadj(self,ticker=None,start=None,end=None):
prices=await self.query_data(ForexPricesDailyUnadj,"price_daily_unadjusted",ticker,start,end)
return prices
async def get_forex_price_minute_adj(self,ticker=None,start=None,end=None):
prices=await self.query_data(ForexPricesMinAdj,"prices_min_adjusted",ticker,start,end)
return prices
async def get_forex_price_minute_unadj(self,ticker=None,start=None,end=None):
prices=await self.query_data(ForexPricesMinUnadj,"price_min_unadjusted",ticker,start,end)
return prices
    async def get_forex_price_hourly_unadj(self,ticker=None,start=None,end=None):
        prices=await self.query_data(ForexPricesHourlyUnadj,"prices_hourly_unadjusted",ticker,start,end)
        return prices
    async def get_forex_price_hourly_adj(self,ticker=None,start=None,end=None):
        prices=await self.query_data(ForexPricesHourlyAdj,"prices_hourly_adjusted",ticker,start,end)
        return prices
|
[
"faisal@Faisals-MacBook-Pro.local"
] |
faisal@Faisals-MacBook-Pro.local
|
0b3dc3a59db53806888bdd7e67e5c3eedde4374c
|
ad36278ad436573626763f9b82c145bce9ae4a26
|
/app/core/migrations/0003_ingredient.py
|
cd7ced8f85b1cd41f2b85cf2ee3999ae4194cc7e
|
[
"MIT"
] |
permissive
|
dusantrtica/recipe-app-api
|
8b8e88d2f334deed09ec3602ee73db4fea40d583
|
154e87b1cb0596fb3ab86c69ec131307ea9a5e9a
|
refs/heads/master
| 2022-12-12T16:03:29.621569
| 2020-09-01T12:14:12
| 2020-09-01T12:14:12
| 275,015,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
# Generated by Django 2.1.15 on 2020-08-01 11:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_tag'),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"dusan.trtica@hotmail.com"
] |
dusan.trtica@hotmail.com
|
065a7ba25278105449e7b3d0bc7e9d75e0141fe2
|
b564a7d17f224e9512fec36bab4333353381e22c
|
/lib/exaproxy/html/humans.py
|
f15fdaa44f84ee1894da006b05dcba9b027d9279
|
[
"BSD-2-Clause"
] |
permissive
|
Exa-Networks/exaproxy
|
464f9c72449b12d4f3960e9829a0f93fec8db0da
|
8b7291b79c1cd6542213a5e7d8dda3cf5a676166
|
refs/heads/master
| 2023-09-03T16:10:56.656549
| 2022-06-28T16:52:48
| 2022-06-28T16:52:48
| 13,495,150
| 127
| 25
|
NOASSERTION
| 2022-06-28T16:52:49
| 2013-10-11T09:36:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,384
|
py
|
# encoding: utf-8
"""
humans.py
Created by Thomas Mangin on 2012-02-25.
Copyright (c) 2011-2013 Exa Networks. All rights reserved.
"""
from .images import thomas,david
class humans:
txt = """\
/* TEAM */
Slave Driver / Grand Visionary: Thomas Mangin
Google+: https://plus.google.com/104241996506596749840
Github: https://github.com/thomas-mangin
Engineer Extraordinaire: David Farrar
Google+: https://plus.google.com/108845019528954357090
Github: https://github.com/david-farrar
/* Other contributors */
Marek Obuchowicz (kqueue reactor)
Github: https://github.com/marek-obuchowicz
"""
html = """\
<div style="padding: 20px 20px 20px 20px;">
<b>/* TEAM */</b><br/>
<br/>
<div style="margin-left:20px;margin-right:10px;">
<img width="100px" src="data:image/png;base64,%s"/>
</div>
<br/>
Slave Driver / Grand Visionary<br/>
<a href="https://plus.google.com/104241996506596749840">Thomas Mangin</a><br/>
<br/>
<div style="margin-left:20px;margin-right:10px;">
<img width="100px" src="data:image/png;base64,%s"/>
</div>
<br/>
Engineer Extraordinaire<br/>
<a href="https://plus.google.com/108845019528954357090">David Farrar</a><br/>
</div>
<div style="padding: 20px 20px 20px 20px;">
<b>/* Other contributors */</b>
<br/>
<a href="https://github.com/marek-obuchowicz">Marek Obuchowicz</a> (kqueue reactor)
<br/>
</div>
""" % (thomas,david)
|
[
"thomas.mangin@exa-networks.co.uk"
] |
thomas.mangin@exa-networks.co.uk
|
9b1f8bdeaee01d6e5a6d11f6555fb7500d924f31
|
284a6efb9ad99297204ab3b269214d7244406b08
|
/user.py
|
ab79f98969ae724faf5a0b48a6a232ccc17b9612
|
[] |
no_license
|
sidmadethis/learning_lists
|
1d1b22094e85e4016ef2da243ea71f9d543310a8
|
1a5088b42cbe429197aff7b53d55bed81258ef6a
|
refs/heads/master
| 2020-12-31T07:41:51.491725
| 2017-05-08T15:23:43
| 2017-05-08T15:23:43
| 86,543,739
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,307
|
py
|
user_0= {
'username':'efermi',
'first':'enrico',
'last':'fermi',
}
for key,value in user_0.items():
print("\nKey: " + key)
print("Value: " + value)
# 'key' and 'value' in the loop above are just variable names; k, v would work too
favorite_languages = {
'jen': 'python',
'sarah':'c',
'edward':'ruby',
'phil':'python',
}
print("\n")
for name,language in favorite_languages.items():
print(name.title()+ "'s favorite language is " + language.title() + ".")
print("\n")
for name in favorite_languages.keys():
print(name.title())
# this is the same as for name in favorite_languages:
# use sort to get dictionary in order
# note this does alphabetical sorting
print("\n")
for name in sorted(favorite_languages.keys()):
print(name.title() + ", thank you for taking our poll!")
print("\n")
print("the following langauges have been mentioned")
for language in favorite_languages.values():
print(language.title())
# use set to not have duplicates
print("\nNo duplicates")
for language in set(favorite_languages.values()):
print(language)
# nesting is storing dictionaries inside of a dictionary
alien_a = {'color':'green', 'points':5}
alien_b = {'color':'blue', 'points':52}
alien_c = {'color':'white', 'points':15}
print("\n")
aliens = [alien_a, alien_b, alien_c]
print(aliens)
|
[
"sidmadethis@gmail.com"
] |
sidmadethis@gmail.com
|
dede0e6fe67177d3d44f2c90885fd4fddc0d7502
|
a410f7c6ba7d9de999627f6a6a1803f056f660eb
|
/compositional/conv_4/train.py
|
f7a52169b250fdfdd211bf8acd4f66ac381a04aa
|
[] |
no_license
|
codesubmissionforpaper/entropy_regularised_capsule
|
6ebfde7f15df7e9402b30da2ec787b42a486db90
|
cad41c94c73d8addce87d1542bf6213a233f2f73
|
refs/heads/master
| 2022-12-24T10:08:58.179752
| 2020-10-05T09:51:25
| 2020-10-05T09:51:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,527
|
py
|
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import CrossEntropyLoss
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
from torch.utils.data import DataLoader
from model import *
from constants import *
from data_loaders import *
from utils import *
def train(epoch,trainloader,trial):
print('\nEpoch: %d' % epoch)
model.train()
train_loss = 0.0
correct = 0.0
total = 0.0
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(DEVICE), targets.to(DEVICE)
optimizer.zero_grad()
outputs = model(inputs)
loss = loss_criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
scheduler.step()
with torch.no_grad():
#save checkpoint (not for restarting training. Only for analysis.
state = {
'model': model.state_dict(),
'loss': train_loss/(batch_idx+1),
'acc': correct/total,
'epoch': epoch
}
torch.save(state,'./checkpoints/epoch_'+str(epoch)+'_trial_'+str(trial)+'.pth')
def test(epoch,testloader,trial):
global best_accuracy
model.eval()
test_loss = 0.0
correct = 0.0
total = 0.0
model.eval()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(DEVICE), targets.to(DEVICE)
outputs = model(inputs)
loss = loss_criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
# Save checkpoint.
acc = 100.*float(correct)/total
print('test accuracy: ',acc)
if acc >= best_accuracy:
print('Saving..')
state = {
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict(),
'loss': test_loss/(batch_idx+1),
'acc': acc,
'epoch': epoch,
}
if not os.path.isdir('checkpoints'):
os.mkdir('checkpoints')
torch.save(state, './checkpoints/trial_'+str(trial)+'_best_accuracy.pth')
best_accuracy = acc
def get_mean_variance(batch_size, entropies, old_mean_entropies, old_var_entropies):
    mean_entropies = []
    var_entropies = []
    new_batch_size = entropies[1].size(0)
    for entropy, old_mean_entropy, old_var_entropy in zip(entropies, old_mean_entropies, old_var_entropies):
        new_mean_entropy = torch.mean(entropy, dim=0)
        mean_entropy = (batch_size*old_mean_entropy + new_batch_size*new_mean_entropy)/(batch_size + new_batch_size)
        new_var_entropy = torch.var(entropy, dim=0, unbiased=False)
        var_entropy = (batch_size*old_var_entropy + new_batch_size*new_var_entropy)/(batch_size + new_batch_size)
        var_entropy += (batch_size*new_batch_size)*((old_mean_entropy - new_mean_entropy)/(batch_size + new_batch_size))**2
        mean_entropies.append(mean_entropy)
        var_entropies.append(var_entropy)
    return mean_entropies, var_entropies
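
# Note (added): the update above is the standard pooled mean/variance merge
# (Chan et al.) for combining two batches with counts n_a, n_b, means m_a, m_b
# and biased variances v_a, v_b:
#   m = (n_a*m_a + n_b*m_b) / (n_a + n_b)
#   v = (n_a*v_a + n_b*v_b) / (n_a + n_b) + n_a*n_b*((m_a - m_b)/(n_a + n_b))**2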
def analysis(path,loader,trial):
model = nn.DataParallel(ResnetCnnsovnetDynamicRouting(analysis=True)).to(DEVICE)
    model.load_state_dict(torch.load(path)['model'])  # checkpoints store the weights under the 'model' key
total = 0.0
model.eval()
with torch.no_grad():
for batch_idx, (data,label) in enumerate(loader):
data, label = data.to(DEVICE), label.to(DEVICE)
_, entropies = model(data)
if batch_idx == 0:
mean_entropies = []
var_entropies = []
for entropy in entropies:
mean_entropies.append(torch.mean(entropy,dim=0))
var_entropies.append(torch.var(entropy,dim=0,unbiased=False))
else:
mean_entropies, var_entropies = get_mean_variance(total,entropies,mean_entropies,var_entropies)
total += label.size(0)
return mean_entropies, var_entropies
def loss_criterion(outputs,targets):
targets = one_hot(targets)
lamda = 0.5
m_plus = 0.9
m_minus = 0.1
tmp1 = F.relu(m_plus-outputs).view(outputs.size(0),-1)**2
tmp2 = F.relu(outputs-m_minus).view(outputs.size(0),-1)**2
    loss = targets*tmp1 + lamda*(1-targets)*tmp2  # margin loss: tmp2 penalizes activity for absent classes
loss = loss.sum(dim=1).mean()
return loss
for trial in range(NUMBER_OF_TRIALS):
trainloader, testloader = get_data_loaders()
best_accuracy = 0.0
num_epochs = 50
#loss_criterion = CrossEntropyLoss()
model = nn.DataParallel(Conv4()).to(DEVICE)
optimizer = optim.Adam(model.parameters(),lr=0.001)
#lr_lambda = lambda epoch: max(1e-3,0.95**(epoch))
#scheduler = optim.lr_scheduler.LambdaLR(optimizer,lr_lambda)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100, 200], gamma=0.1)
for epoch in range(num_epochs):
train(epoch,trainloader,trial)
test(epoch,testloader,trial)
|
[
"vsairaam@sssihl.edu.in"
] |
vsairaam@sssihl.edu.in
|
3943bc5e512aec87ced61f515f6f3bb5a430b673
|
40ed9522978662ed38c0432ec7f29469d9704ed6
|
/seq.py
|
7e3f7f8d22564cbc012b028fb256d181af3b5bbd
|
[] |
no_license
|
ValentinKarnaukhov/Python
|
31b4b446bfc7b526b345045b14e39845cee7caf6
|
ce0e1801e5bb83999ceb6d0c59a7bb28d691adbb
|
refs/heads/master
| 2021-01-02T08:23:45.099550
| 2017-08-06T09:32:11
| 2017-08-06T09:32:11
| 99,001,741
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
n=int(input())
s=0
for i in range(n+1):
for j in range(i):
s=s+1
if s>n:
break
print(i,end=' ')
|
[
"wippi2010@rambler.ru"
] |
wippi2010@rambler.ru
|
d480892f43b011d55204454153502b18cd950061
|
7cac4fc7e2a1ef45044fbde037c66fe9e76fc2fc
|
/pyemd/__about__.py
|
2da8b2f639bdddc3e2ad5e106bf9d042dec09eb1
|
[
"MIT"
] |
permissive
|
b5510546671/wmayner_pyemd
|
5e8f7f43be339d3b56cab17aa04007969b7749de
|
ef08133be117a5592b10e88c09c26bf977ce229e
|
refs/heads/master
| 2021-01-19T18:39:48.111194
| 2017-07-19T05:15:21
| 2017-07-19T05:15:21
| 101,150,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# __about__.py
"""PyEMD metadata"""
__title__ = 'pyemd'
__version__ = '0.4.4'
__description__ = ("A Python wrapper for Ofir Pele and Michael Werman's "
"implementation of the Earth Mover's Distance.")
__author__ = 'Will Mayner'
__author_email__ = 'wmayner@gmail.com'
__author_website__ = 'http://willmayner.com'
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2014-2017 Will Mayner'
__url__ = 'http://github.com/wmayner/pyemd'
__all__ = ['__title__', '__version__', '__description__', '__author__',
'__author_email__', '__author_website__', '__license__',
'__copyright__', '__url__']
|
[
"wmayner@gmail.com"
] |
wmayner@gmail.com
|
0dbfafd296306eb0da4d50abfbbd0bd416929656
|
6f103e8ceaa6ebd8e5c4c34fd96da0bf102719d0
|
/blog_porject/wsgi.py
|
15cf2109242c607df6aeedb608ae857ea0c00faa
|
[] |
no_license
|
RobbyTheFish/BlogApp
|
7e596e34427728e493b2f7877614217e8f9c1e24
|
1a1899105eded0876d463b8c686be5250fb4a19c
|
refs/heads/master
| 2023-06-25T06:41:42.538539
| 2021-07-31T08:40:30
| 2021-07-31T08:40:30
| 388,342,467
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
WSGI config for blog_porject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blog_porject.settings')
application = get_wsgi_application()
|
[
"oyaroslav2004@gmail.com"
] |
oyaroslav2004@gmail.com
|
6469709fcf868289b689c5a64db4c625a21116ff
|
904b4b7cd6b1709e9aded92737766a3b5a978838
|
/bissextile.py
|
d90e2359ddb92bf8a0938ca97e262464bbf19394
|
[] |
no_license
|
NicolasLagaillardie/Python
|
3ec7aa6eb21ffa86fad33060bb53e42cb7957dc9
|
a30037d688d8f11a195d7fa611347528c313d71b
|
refs/heads/master
| 2020-03-30T13:48:27.038592
| 2018-10-02T16:54:42
| 2018-10-02T16:54:42
| 151,288,608
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
Python
| false
| false
| 382
|
py
|
# -*- coding: cp1252 -*-
def bissextile(annee):
    if annee%4!=0:
        print annee,' is not a leap year'
    else:
        if annee%100==0:
            if annee%400==0:
                print annee,' is a leap year'
            else:
                print annee,' is not a leap year'
        else:
            print annee,' is a leap year'
|
[
"lagaillardie.nicolas@live.fr"
] |
lagaillardie.nicolas@live.fr
|
71263917d37ee283555a09f5a3a26e272e03b3e6
|
7968e4bcd76b68d2632725ecf5cb27e389ea083e
|
/src/search.py
|
3af34847132e8709dae9f95acc63caad1f8a1ac6
|
[] |
no_license
|
kseniazhagorina/genotree
|
3e9a07a82e611f34595262dbc386a762d583454e
|
95602ee80766dfda274b35d1f5adb34b3917c4eb
|
refs/heads/master
| 2023-02-02T10:59:54.232945
| 2023-01-08T19:05:44
| 2023-01-08T19:05:44
| 69,810,670
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,196
|
py
|
#!usr/bin/env
# -*- coding: utf-8 -*-
from collections import defaultdict
import re
class Term:
    words_regex = re.compile(r'\w+')
END = None
@staticmethod
def split(text):
return Term.words_regex.findall(text.lower().replace('ё', 'е'))
class Trie:
class Node(dict):
'''{char -> Node}'''
def __init__(self):
self.docs = set()
def get_or_create(self, c):
if c not in self:
self[c] = Trie.Node()
return self[c]
def __init__(self):
self.__root = Trie.Node()
    def add(self, doc_id, word):
        current = self.__root
        for c in word:
            current = current.get_or_create(c)
            current.docs.add(doc_id)  # index every prefix node so prefix search can match
        current = current.get_or_create(Term.END)
        current.docs.add(doc_id)
def find(self, word, prefix=True):
current = self.__root
for c in word:
current = current.get(c, None)
if current is None:
return set()
if not prefix:
current = current.get(Term.END, None)
if current is None:
return set()
return current.docs
class SearchEngine:
def __init__(self, person_snippets):
self.trie = Trie()
for person_uid, person in person_snippets.items():
words = set(Term.split(str(person.name)))
for word in words:
self.trie.add(person_uid, word)
def search_strict(self, text):
words = set(Term.split(text))
matched = None
for word in words:
found = self.trie.find(word, prefix=False)
matched = matched & found if matched is not None else found
return list(matched)
def search(self, text):
words = set(Term.split(text))
hits = defaultdict(int)
for word in words:
docs = self.trie.find(word, prefix=True)
for d in docs:
hits[d] += 1
return [x[0] for x in sorted(hits.items(), key=lambda x: x[1], reverse=True)]
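
# Added usage sketch (illustrative doc ids; SearchEngine expects objects with
# a readable .name, so the Trie is exercised directly here):
#
#   trie = Trie()
#   trie.add(1, 'anna')
#   trie.find('an', prefix=True)    # -> {1}
#   trie.find('an', prefix=False)   # -> set(): 'an' is not a whole indexed word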
|
[
"ksenia.zhagorina@yandex.ru"
] |
ksenia.zhagorina@yandex.ru
|
0dd6eaca4dec38170e19e45f8effca7e6acf2ed5
|
1c5a0a3669eceec7ec28820f6435bcaeafba517d
|
/Exercises/class tests.py
|
f8eacdf1432d6039fd79e0c37968d82b7e5df65c
|
[] |
no_license
|
gagnongr/Gregory-Gagnon
|
0a48220cf1bf7a5a90ce441da0a03c131b6ed5da
|
2b660de5323e57634fc7f69f4379554d8fcc421f
|
refs/heads/master
| 2020-05-20T14:05:45.987126
| 2013-11-25T00:55:13
| 2013-11-25T00:55:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
class WeatherForecast(object):
def set_skies(self, param):
self.skies = param
def get_skies(self):
return self.skies
|
[
"gregorygagnon@Gregorys-MacBook-Pro.local"
] |
gregorygagnon@Gregorys-MacBook-Pro.local
|
6272350f4f6c626b86830dc3776e3ecf46a0cd08
|
e96e69b46e23a248d114e266fb72b7e4242eab2f
|
/djangostart/wsgi.py
|
c43842a9457371d047cee1979a80826603818a92
|
[] |
no_license
|
afrunk/DjangoStart
|
b7c90af37eec6befef51b92f994cf5957052a555
|
9952d12d465e05201be30a00c6beafc3183f61af
|
refs/heads/master
| 2020-06-26T10:23:47.820928
| 2019-07-30T14:19:36
| 2019-07-30T14:19:36
| 199,607,208
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for djangostart project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangostart.settings')
application = get_wsgi_application()
|
[
"afrunk7@gmail.com"
] |
afrunk7@gmail.com
|
2e34a9f1639c08b36edaff2f6ab1483d312ef2e2
|
dde21858017283702d5962c3ade969beea4c2831
|
/notifier.py
|
5763b9e03c4f46edad37a0719479131134227db1
|
[] |
no_license
|
lancewellspring/toornamentNotifier
|
2aef6eccee81692c594fcedb9e908cbeb6c522f6
|
674fe8f9e8ef65e5f608cfbd8506c1648cde8c4e
|
refs/heads/master
| 2020-07-28T15:53:31.647400
| 2016-11-07T14:11:28
| 2016-11-07T14:11:28
| 67,875,657
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,083
|
py
|
import smtplib
import urllib
import urllib2
import datetime
import ast
import traceback
#All 7 of the following values need to be set, see toornament website for details
SENDER = ''
SMTP_USERNAME=''
SMTP_PASSWORD=''
TOORNAMENT_ID = ''
API_KEY = ''
CLIENT_ID = ''
CLIENT_SECRET = ''
#this value starts out empty and is set by the authenticate function
ACCESS_TOKEN = ''
#The matches parameter should be a list of objects(dictionaries) containing data about the matches.
#Each object should have a name and email list for both involved teams, as well as the time of the match.
#This function formats an email based upon which matches are occurring today, and sends the email to all involved players.
def sendEmails(matches):
recipients=['']
msg = ''
for match in matches:
recipients.extend(match['team1emails'])
recipients.extend(match['team2emails'])
if len(recipients) > 0:
recipients = [x for x in recipients if x is not None]
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(SMTP_USERNAME,SMTP_PASSWORD)
body = '\r\n'.join((
"From: %s" % SENDER,
"BCC: %s" % ', '.join(recipients),
"Subject: Todays Matches",
"",
msg
))
server.sendmail(SENDER, recipients, body)
server.quit()
else:
print 'no recipients'
#The web service isn't returning JSON or XML. It appears to return the data formatted as a JavaScript object literal, so I decided to just convert it to Python syntax and evaluate the string literally into an object.
def evaluateRaw(raw):
raw=raw.replace('false', 'False')
raw=raw.replace('true', 'True')
raw=raw.replace('null', 'None')
return ast.literal_eval(raw)
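#Example: evaluateRaw('{"ok": true, "data": null}') -> {'ok': True, 'data': None}
#(ast.literal_eval only accepts Python literals, hence the keyword substitutions.)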
#We have to do the OAuth v2 authentication in order to get participants' emails. The main goal is to get an access_token to be used in the headers of future web service calls.
def authenticate():
url='https://api.toornament.com/oauth/v2/token'
values={'grant_type':'client_credentials', 'client_id':CLIENT_ID, 'client_secret':CLIENT_SECRET}
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
raw = response.read()
d = evaluateRaw(raw)
global ACCESS_TOKEN
ACCESS_TOKEN = d['access_token']
def pullMatches():
url = 'https://api.toornament.com/v1/tournaments/' + TOORNAMENT_ID + '/matches'
hdr = {'User-Agent': 'Mozilla/5.0', 'X-Api-Key':API_KEY}
req = urllib2.Request(url, headers=hdr)
response = urllib2.urlopen(req)
raw = response.read()
return evaluateRaw(raw)
#Basically just parses out the data we care about, and returns a nicely formatted dictionary
def parseMatches(matches):
matchData=[]
today = datetime.datetime.today()
for match in matches:
date = match['date']
if date is not None:
            #this line assumes US Eastern daylight time; ideally it should be changed to work for any time zone.
d=datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S-0400')
            #we only care about matches happening today
if d.year == today.year and d.month == today.month and d.day == today.day:
teams = match['opponents']
team1id = teams[0]['participant']['id']
team2id = teams[1]['participant']['id']
team1name = teams[0]['participant']['name']
team2name = teams[1]['participant']['name']
time = d.strftime('%H:%M')
data = {'team1id':team1id, 'team2id':team2id, 'team1name':team1name, 'team2name':team2name, 'time':time}
matchData.append(data)
return matchData
#returns a list of emails associated with the given team.
def getTeamEmails(teamid):
url = 'https://api.toornament.com/v1/tournaments/' + TOORNAMENT_ID + '/participants/' + teamid
hdr = {'User-Agent': 'Mozilla/5.0', 'Host':'api.toornament.com', 'X-Api-Key':API_KEY, 'Authorization':'Bearer ' + ACCESS_TOKEN}
req = urllib2.Request(url, headers=hdr)
response = urllib2.urlopen(req)
raw = response.read()
data = evaluateRaw(raw)
emails = []
emails.append(data['email'])
for l in data['lineup']:
if 'email' in l:
emails.append(l['email'])
return emails
#The matches endpoint only gives us participant IDs, but we can use those to call the participant endpoint to get their email addresses.
def findUserEmails(matchData):
for match in matchData:
match['team1emails'] = getTeamEmails(match['team1id'])
match['team2emails'] = getTeamEmails(match['team2id'])
if __name__ == '__main__':
try:
matches = pullMatches()
matchData = parseMatches(matches)
authenticate()
findUserEmails(matchData)
sendEmails(matchData)
except Exception as e:
recipients=[]
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(SMTP_USERNAME,SMTP_PASSWORD)
server.sendmail(SENDER, recipients, 'The notifier process exited with error: ' + str(e) + '\r\n\r\n' + traceback.format_exc())
server.quit()
|
[
"noreply@github.com"
] |
lancewellspring.noreply@github.com
|
9ad8eb5f1a0b2e7c71ae412beca8614b26286697
|
1462381335dc8e030d19d0bdbad85c963c060eeb
|
/swe241p/Exercise3/main.py
|
2b452ac792e8056df0dfe11c39e0d8f2304c166e
|
[] |
no_license
|
samaritanhu/swe_fall
|
a71f88915a129cb04216e07a2592d2baa465281b
|
653397129355784da490627f25d3d9b227351f4f
|
refs/heads/main
| 2023-02-11T10:43:35.763862
| 2021-01-11T22:13:59
| 2021-01-11T22:13:59
| 328,803,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,301
|
py
|
#!/usr/bin/env python
# coding: utf-8
# University of California, Irvine
# Master of Software Engineering, Donald Bren School of Information and Computer Science
# Created by: Xinyi Hu
# Date : 2020/10/11
# Contact : xinyih20@uci.edu
# Target :
# Note: This is the main file to test all of the sub-files
#
# a. Convert from an adjacency matrix to adjacency lists
#
# b. Convert from an adjacency list to an incidence matrix.
# An incidence matrix M has a row for each vertex and a column for each edge,
# such that M[i, j] = 1 if vertex i is part of edge j, otherwise M[i, j] = 0.
#
# c. Convert from an incidence matrix to adjacency lists.
#
# Reference : The algorithm design manual
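# Worked example of the incidence matrix definition above: for a path graph
# with vertices {0, 1, 2} and edges e0 = (0, 1), e1 = (1, 2):
#        e0  e1
#   0  [  1,  0 ]
#   1  [  1,  1 ]
#   2  [  0,  1 ]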
from GraphDataStructure import *
def run():
adjaceny_matrix = [[0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0]] # adjacency matrix
adjaceny_list = adjaceny_matrix_to_list(adjaceny_matrix)
print('Adjacency List:')
for eachll in adjaceny_list:
eachll.print()
incidence_matrix = adjaceny_list_to_incidence_matrix(adjaceny_list)
print('Incidence Matrix:\n', incidence_matrix)
al = incidence_matrix_to_adjaceny_list(incidence_matrix)
print('Adjacency List:')
for eachll in al:
eachll.print()
if __name__ == '__main__':
run()
|
[
"samaritanhu@gmail.com"
] |
samaritanhu@gmail.com
|
42bf84d33888ad2322d89211ecb8dd1fb590aa34
|
afeef1859384eebc97d8d67ff69c9fc22080322b
|
/systemInfo.py
|
2e5ad47211b5e0787f44e03615b62c114848556f
|
[] |
no_license
|
nijin39/systemInfo
|
4f8c2b7b5bc85cb37d2a98dfd810134cadd84eb0
|
103c4e6e68336bbc803a9bbb3b41d6a1cab8f50b
|
refs/heads/master
| 2020-04-02T20:41:07.214838
| 2016-06-01T07:22:36
| 2016-06-01T07:22:36
| 60,068,939
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,099
|
py
|
# -*- coding:utf-8 -*-
import commands
import subprocess
from subprocess import Popen,PIPE
def getHostname():
'''get System Hostname'''
return commands.getstatusoutput('cat /etc/hostname')[1]
def getKernel():
'''get System Kernel Version'''
return commands.getstatusoutput('uname -r')[1]
def getOSVersion():
CMDOUTPUTCOLUMN = 1
    # Tuple returned by the command: (0, 'No LSB modules are available.\nDistributor ID:\tLinuxMint\nDescription:\tLinux Mint 17.3 Rosa\nRelease:\t17.3\nCodename:\trosa')
    # 01. Take the second element (the command output)
    # 02. Split the string on newline characters
cmdResult = commands.getstatusoutput("lsb_release -a")[CMDOUTPUTCOLUMN].split('\n')
for cmdResultLine in cmdResult:
        # 03. Find the line that contains the Description field
if cmdResultLine.find("Description") != -1:
            #04. Split on tabs to isolate the column containing the OS info, and return the second column.
# Description:\tLinux Mint 17.3 Rosa\n
return cmdResultLine.split('\t')[1]
def getLastBoot():
    ''' Return the last boot time formatted as year-month-day hour:minute. '''
popen = subprocess.Popen('last boot', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(stdoutdata, stderrdata) = popen.communicate()
    # Split the output on whitespace and store the pieces.
    # For the month, substitute the numeric string that matches the month name.
newStr = stdoutdata.split()
year = newStr[-1]
    # Use a dictionary to convert English month names to numbers.
monthDic = {'Jan':'01', 'Feb':'02', 'Mar':'03', 'Apr':'04', 'May':'05', 'Jun':'06',
'Jul':'07', 'Aug':'08', 'Sep':'09', 'Oct':'10', 'Nov':'11', 'Dec':'12'}
month = monthDic[newStr[-4]]
day = newStr[-3]
    # Split the time on ':' and store the hour and minute.
times = newStr[-2]
times = times.split(':')
hour = times[0]
minute = times[1]
result = '{:>4}-{:>2}-{:>2} {:>2}:{:>2}'.format(year,month,day,hour,minute)
return result
def getCurrentUser():
'''System Information part
Get current user count and id '''
command = 'who'
popen = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(stdoutdata, stderrdata) = popen.communicate()
results = stdoutdata
# Parsing
userList = results.split('\n')
# initialization
userCount = 0
userNames = []
# Current User Counting
for user in userList:
if len(user) > 0:
userCount += 1
userDetail = user.split(' ')
userNames.append(userDetail[0])
# Return
dic = {'userCount':userCount,'userNames':userNames}
return dic
def getNetwork():
# Execute commands and parsing
text = commands.getstatusoutput('netstat -ni')
text = text[1]
text = text.split("\n")
#put the interface name in the list
index = 0
interfaces = []
for line in text :
# delete title row
if index < 2 :
index += 1
continue
interfaces.append(line.split()[0])
#create commands with the interface name from the list
returnVal = []
for interface in interfaces :
result = commands.getstatusoutput('/sbin/ifconfig ' + interface)[1]
result2 = result.split("\n")[1].split(":")[1].split(" ")[0]
result = result.split("\n")[-2]
result = result.split("(")
dic = {'interface' : interface, 'ip' : result2, 'receive':result[1].split(")")[0], 'transmit':result[2].split(")")[0]}
returnVal.append(dic)
return returnVal
def doFree():
popen = Popen("free",stdout=PIPE,stderr=PIPE,shell=True)
out,err = popen.communicate()
return out.split()
def doFreeH():
popen = Popen("free -h",stdout=PIPE,stderr=PIPE,shell=True)
out,err = popen.communicate()
return out.split()
def getMemInfo():
#popen = object for process open
#out, err = result of process
#dic = summary of memory info
#7: total of mem, 8: used of mem, 9: free of mem, 10:shared of mem, 11: buffer of mem, 12:cached of mem
out = doFree()
used = float(out[8])
total = float(out[7])
if total == 0.0:
total = 1.0
out = doFreeH()
dic = {'total':out[7],'used':out[8],'free':out[9],'use':str(int(used/total*100)) + '%'}
return dic
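# Index sketch for the `free` output parsed above: with the classic procps
# layout, tokens 0-5 are the header row and token 6 is 'Mem:', so 7/8/9 are
# the Mem total/used/free columns (and 18/19/20 the Swap ones below). Newer
# `free` versions add an 'available' column, which shifts these indices --
# an assumption to verify on the target system.
# Example result: {'total': '7.7G', 'used': '3.1G', 'free': '1.2G', 'use': '40%'}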
def getSwapInfo():
# popen = object for process open
# out, err = result of process
# dic = summary of swap info
    #18: total of swap, 19: used of swap, 20: free of swap
out = doFree()
used = float(out[19])
total = float(out[18])
if total == 0.0:
total = 1.0
out = doFreeH()
dic = {'total':out[18],'used':out[19],'free':out[20],'use':str(int(used/total*100)) + '%'}
return dic
def getLastLogin():
text = commands.getoutput('lastlog')
    #convert to string, split on whitespace, and store in result
temp = str(text).split("\n")
list = []
dic={}
del temp[0]
for line in temp:
word = str(line).split()
if not word[1].startswith('**'):
dic={'hostId': word[0], 'Date':word[3]+' '+word[4]+ ' '+word[5]+' '+word[6]}
list.append(dic)
return list
def getCPULoad():
# Execute commands and parsing
text = commands.getoutput('uptime').split()
# return values: [1m, 5m, 10m]
return text[7][:-1],text[8][:-1],text[9]
def getUptime():
    # 02. Split the result on ',' and return the first field (the uptime)
sqlResult = commands.getstatusoutput('uptime')
upTimeResult = sqlResult[1].split(',')
return upTimeResult[0]
def getDiskInformation():
'''get System Disk'''
    # Read the disk usage output as a string
tmpinfo = commands.getoutput('df -Pkh')
    # Split the string into a list of lines, skipping the header row
information = tmpinfo.split('\n')[1:]
diskInformation = [[0 for col in range(7)] for row in range(len(information))]
    # Split each line into columns and store them
for count in range(0,len(information)):
diskInformation[count] = information[count].split()
return diskInformation
def getPing():
pingList = ['www.google.com','www.facebook.com','www.yahoo.com','www.samsung.com']
result = {}
for item in pingList:
        #01. Keep only the ping output line containing the round-trip times
pingOutput = commands.getoutput('ping -c 1 ' + item + ' | grep rtt')
        #02. Split on ' = ', then on '/', to extract just the response times
listOfSplitByEq = pingOutput.split(' = ')
if listOfSplitByEq[0] == '':
result[item] = '---'
elif len(listOfSplitByEq) == 1 :
result[item] = '---'
else:
listOfSplitBySlash = listOfSplitByEq[1].strip().split('/')
            #03. Times are stored in min, avg, max order
pingData = listOfSplitBySlash[0:3]
result[item] = pingData[1]
return result
def getCpuTime():
# get Cpu date & time : format "day, month, day, hour, minute, seconds, time slot, year"
return commands.getstatusoutput('date "+%a %b %d %H:%M:%S %Z %Y"')[1]
def getCpuInfo():
    #01. Get the output of /proc/cpuinfo
text = str(commands.getstatusoutput('cat /proc/cpuinfo')[1])
    #02. Split the string on newline characters
text = text.split("\n")
    #03. Check whether each line starts with one of the needed fields
    #04. Store the needed values in variables
for line in text:
if line.rstrip('\n').startswith('model name'):
model_name = line.rstrip('\n').split(':')[1].strip()
if line.rstrip('\n').startswith('cpu cores'):
cores = line.rstrip('\n').split(':')[1].strip()
if line.rstrip('\n').startswith('cpu MHz'):
speed = line.rstrip('\n').split(':')[1].strip()
if line.rstrip('\n').startswith('cache size'):
cache = line.rstrip('\n').split(':')[1].strip()
if line.rstrip('\n').startswith('bogomips'):
bogomips = line.rstrip('\n').split(':')[1].strip()
    #05. Store the values in a dictionary and return it
results = {"Model": model_name,
"Cores": cores,
"Speed": speed + " Mhz",
"Cache": cache,
"Bogomips": bogomips}
return results
# remove duplicate values in array
def removeDup(li):
my_set = set()
res = []
for e in li:
if e not in my_set:
res.append(e)
my_set.add(e)
return res
def getListenPort():
# read string from netstat command and split to lines
cmdResult = commands.getoutput('netstat -tupln')
line = cmdResult.split('\n')
remDupList = []
# find suitable port numbers
for word in line:
if word.find('0.0.0.0:*') > 0:
index = word.find(':')
port = (word[index+1 : index+6]).strip()
if int(port, 10) < 5000:
remDupList = remDupList +[port]
# remove duplicate port numbers
portList = list(removeDup(remDupList))
return portList
def getDate():
#get date : form == weekday[0], month[1] day[2] year[3] hh:mm:ss[4] UTC[5]
Monparse = {'01':'Jan','02':'Feb','03':'Mar','04':'Apr','05':'May','06':'Jun','07':'Jul','08':'Aug','09':'Sep','10':'Oct','11':'Nov','12':'Dec'}
Month = commands.getoutput("date '+%m'")
ToDay = Monparse[Month] + commands.getoutput("date '+ %_d'")
return ToDay
def getErrLogPrev():
#get syslog contains 'error' keyword at today
#form == month day hh:mm:ss hostname Message ...
LogData = commands.getoutput("cat /var/log/syslog | grep '^" + getDate() + "' | grep -ie 'error'")
return LogData.split('\n')
def getWarnLogPrev():
#get syslog contains 'warn' keyword at today
#form == month day hh:mm:ss hostname Message ...
LogData = commands.getoutput("cat /var/log/syslog | grep '^" + getDate() + "' | grep -ie 'warning'")
return LogData.split('\n')
def getErrLog():
#get syslog error message split by hostname
#form == [date , message]
#and return result dictionary{message : count}
LogDataSplit = getErrLogPrev()
result = {}
if LogDataSplit[0] != '':
for row in LogDataSplit:
LogDataMessage = row.split(" " + commands.getoutput("hostname") + " ")
if LogDataMessage[1] in result:
result[LogDataMessage[1]] = result[LogDataMessage[1]] + 1
else:
result[LogDataMessage[1]] = 1
return result
def getWarnLog():
#get syslog warning message split by hostname
#form == [date , message]
#and return result dictionary{message : count}
LogDataSplit = getWarnLogPrev()
result = {}
if LogDataSplit[0] != '':
for row in LogDataSplit:
LogDataMessage = row.split(" " + commands.getoutput("hostname") + " ")
if LogDataMessage[1] in result:
result[LogDataMessage[1]] = result[LogDataMessage[1]] + 1
else:
result[LogDataMessage[1]] = 1
return result
|
[
"nijin39@gmail.com"
] |
nijin39@gmail.com
|
00190312dd9d51d779ed3bd1367432d0b083fbdf
|
31a9049aaea49f01bf49c6bf18e782cd35e7dd5d
|
/part1/chapter3-classification/utils.py
|
b43331b0bd7981a6920ac0a70e60262428706d68
|
[] |
no_license
|
Kimmirikwa/hands-on-machinelearning
|
e45018b994e91f0afdc1c6c6c98c8ea2781a9a68
|
57df7e92216354b1c8eae213cdaaaf05c0b19e96
|
refs/heads/master
| 2020-04-28T09:16:34.922860
| 2019-10-18T07:28:56
| 2019-10-18T07:28:56
| 175,161,832
| 1
| 0
| null | 2019-10-18T07:28:59
| 2019-03-12T07:57:04
|
Python
|
UTF-8
|
Python
| false
| false
| 3,649
|
py
|
import re
import urlextract
import nltk # natural language toolkit
from collections import Counter
from sklearn.base import BaseEstimator, TransformerMixin
from html import unescape
import numpy as np
from scipy.sparse import csr_matrix
def html_to_plain_text(html):
# will remove some tags and replace others
text = re.sub('<head.*?>.*?</head>', '', html, flags=re.M | re.S | re.I)
text = re.sub('<a\s.*?>', ' HYPERLINK ', text, flags=re.M | re.S | re.I)
text = re.sub('<.*?>', '', text, flags=re.M | re.S)
text = re.sub(r'(\s*\n)+', '\n', text, flags=re.M | re.S)
return unescape(text)
def email_to_text(email):
html = None
for part in email.walk():
content_type = part.get_content_type()
if not content_type in ("text/plain", "text/html"):
continue
try:
content = part.get_content()
except:
content = str(part.get_payload())
if content_type == "text/plain":
return content
if content:
return html_to_plain_text(content)
# a transformer that converts the words in the emails to count of words
class EmailToWordCounterTransformer(BaseEstimator, TransformerMixin):
def __init__(self, strip_headers=True, lower_case=True, remove_punctuation=True, replace_urls=True,
replace_numbers=True, stemming=True): # will set the hyperparameters for this transformer
self.strip_headers = strip_headers
self.lower_case = lower_case
self.remove_punctuation = remove_punctuation
self.replace_urls = replace_urls
self.replace_numbers = replace_numbers
self.stemming = stemming
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X_transformed = [] # will contain the counts of words in emails
for email in X:
text = email_to_text(email) or ""
if self.lower_case:
text = text.lower()
if self.replace_urls:
url_extractor = urlextract.URLExtract()
urls = list(set(url_extractor.find_urls(text)))
urls.sort(key=lambda url: len(url), reverse=True)
for url in urls:
text = text.replace(url, " URL ")
if self.replace_numbers:
text = re.sub(r'\d+(?:\.\d*(?:[eE]\d+))?', 'NUMBER', text)
if self.remove_punctuation:
text = re.sub(r'\W+', ' ', text, flags=re.M)
word_counts = Counter(text.split())
if self.stemming:
stemmer = nltk.PorterStemmer()
stemmed_word_counts = Counter()
for word, count in word_counts.items():
stemmed_word = stemmer.stem(word)
stemmed_word_counts[stemmed_word] += count
word_counts = stemmed_word_counts
X_transformed.append(word_counts)
return np.array(X_transformed)
class WordCounterToVectorTransformer(BaseEstimator, TransformerMixin):
def __init__(self, vocabulary_size=1000):
self.vocabulary_size = vocabulary_size
def fit(self, X, y=None):
# we get the words that are most common to be used as the vocabulary
total_count = Counter()
for word_count in X:
for word, count in word_count.items():
total_count[word] += min(count, 10)
most_common = total_count.most_common()[:self.vocabulary_size] # the top 'vocabulary_size' words
self.most_common_ = most_common
self.vocabulary_ = {word: index + 1 for index, (word, count) in enumerate(most_common)}
return self
def transform(self, X, y=None):
        # we transform the word counts into a sparse matrix with the most common
        # words as columns and the instances as rows
rows = []
cols = []
data = []
for row, word_count in enumerate(X):
for word, count in word_count.items():
rows.append(row)
cols.append(self.vocabulary_.get(word, 0))
data.append(count)
return csr_matrix((data, (rows, cols)), shape=(len(X), self.vocabulary_size + 1))
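# Usage sketch: the two transformers chain naturally in a sklearn Pipeline
# (`emails` below is a hypothetical list of parsed email.message.Message objects):
#
#   from sklearn.pipeline import Pipeline
#   preprocess = Pipeline([
#       ("email_to_wordcount", EmailToWordCounterTransformer()),
#       ("wordcount_to_vector", WordCounterToVectorTransformer(vocabulary_size=1000)),
#   ])
#   X = preprocess.fit_transform(emails)  # csr_matrix of shape (n_emails, 1001)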
|
[
"kimrodrikwa@gmail.com"
] |
kimrodrikwa@gmail.com
|
71b0414059115dc1f0094760dbb55ec1206a4e6a
|
5d62feb365f68d7d9f0c07e5960ea7f41f7a56e3
|
/conditionals/lists/starter/q1.py
|
f8be6de2b1356b9958ef314a2a09524d7f34233f
|
[] |
no_license
|
runda87/she_codes_python
|
04bdc4595de72686c1d8c777ccfaa28dc95a0304
|
d609f391ca92750f82a0c3167b46e825d6bc9dae
|
refs/heads/main
| 2023-06-15T09:08:57.685236
| 2021-07-12T12:27:02
| 2021-07-12T12:27:02
| 374,647,687
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
foods = [
"orange",
"apple",
"banana",
"strawberry",
"grape",
"blueberry",
["carrot", "cauliflower", "pumpkin"],
"passionfruit",
"mango",
"kiwifruit"
]
print(foods[0])
print(foods[2])
print(foods[-1])
print(foods[0:3])
print(foods[7:10])
print(foods[6][2])
#output
# orange
# banana
# kiwifruit
# ['orange', 'apple', 'banana']
# ['passionfruit', 'mango', 'kiwifruit']
# pumpkin
|
[
"84775206+runda-git@users.noreply.github.com"
] |
84775206+runda-git@users.noreply.github.com
|
ef619675f10ada750d8b8e42b9b0ba8316ffee32
|
6e7ab33047525eb0a1d01ad499bfb17f1b8403a7
|
/models/Budget.py
|
6d13ead971b7b228b49bf27544f1c4de92915154
|
[] |
no_license
|
Freewin/Bananabudget
|
9fbd69e6b540a0f4233ea72ba1c247226bb66473
|
46988e76efdfa278595004f6279a1ff2dee97dd9
|
refs/heads/master
| 2020-03-26T01:17:00.002424
| 2018-09-12T17:07:40
| 2018-09-12T17:07:40
| 144,360,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
import requests
class Budget:
def get_price(self, number_of_days, start_date):
url = "https://bananabudget.azurewebsites.net/"
querystring = {"startDate": start_date, "numberOfDays": number_of_days}
headers = {
'Cache-Control': "no-cache",
}
response = requests.request("GET", url, headers=headers, params=querystring)
return response
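# Usage sketch (the date format below is an assumption about the API's expectations):
#   budget = Budget()
#   response = budget.get_price(number_of_days=7, start_date="2018-09-01")
#   print(response.text)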
|
[
"gary.rojas@gmail.com"
] |
gary.rojas@gmail.com
|
a61e686a2a19b194f74d057cf3102cd5df782b64
|
ff8bd1967aeb66ffec17c3ae78102c168414761a
|
/PythonIntroduction/datatypes/Strings/Stringemployee.py
|
b4b8a6638010e9657f3ff95b34f617e7bc0a2ee0
|
[] |
no_license
|
charan2108/Pythondirectories
|
d5cbec41db0685bbfc41a3135edc3e41fd223474
|
30b4a9f9171fe2295efbf12cbf9cbcf88bdf29b3
|
refs/heads/master
| 2023-03-28T03:29:25.039491
| 2021-03-29T02:45:19
| 2021-03-29T02:45:19
| 352,484,535
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 62
|
py
|
car_name = "Ferrari"
car_model= "F360"
car_manufactured = 2020
|
[
"sumacharan.adabala@gmail.com"
] |
sumacharan.adabala@gmail.com
|
9e3cdd59f59678166169807a7ba838469c1b9fc3
|
72276b20fca1578eed9cda9ed8a2ae2196e65b2e
|
/task_scheduler/apps.py
|
39548ce7bdb7662c08d6e7e861120c3620d7f778
|
[
"MIT"
] |
permissive
|
mypebble/django-task-scheduler
|
82d60d8ff3258a1806f622ffb49d5ddb57b9864e
|
439c16cbf6388a7bc98a6d0d09ca012845ef8bb2
|
refs/heads/master
| 2021-01-11T06:22:11.631210
| 2016-10-04T10:59:16
| 2016-10-04T10:59:16
| 69,955,435
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 102
|
py
|
from django.apps import AppConfig
class TaskSchedulerConfig(AppConfig):
name = 'task_scheduler'
|
[
"scott.walton@mypebble.co.uk"
] |
scott.walton@mypebble.co.uk
|
fc9131f9ccde84e2d38716326e9ff70aa33bac2a
|
f306d169cf3b48061a7b29d297612b025f3825f7
|
/yamtbx/util/__init__.py
|
bda055a257197c3133ec50c6e8855fe242592bab
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
robertbuecker/yamtbx
|
b2b43c09ec27a6aa08c1b9330f731df2f95d82c6
|
9b90e03d27600fd9e550252dcb65c1109f04c44f
|
refs/heads/master
| 2020-06-20T07:03:17.679343
| 2019-07-15T00:17:06
| 2019-07-15T00:17:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,267
|
py
|
"""
(c) RIKEN 2015. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
import os
import sys
import re
import shutil
import subprocess
import commands
import glob
import tempfile
from libtbx.utils import null_out
import libtbx.load_env
def call(cmd, arg="",
stdin=None, stdout=subprocess.PIPE,
wdir=None,
expects_in=[], expects_out=[]):
##
# call the external program using subprocess.
#
# @param expects_in expected files before running
# @param expects_out expected files after running
#
# expected_in/out must be written as relative path from wdir or absolute path.
#
def check_exist(files):
is_exist = [os.path.isfile(f) for f in files]
if sum(is_exist) != len(is_exist):
not_founds = [ f for f, e in zip(files, is_exist) if not e ]
raise Exception("Expected file(s) not found: " + " ".join(not_founds))
# check_exist()
if wdir is None:
wdir = os.getcwd()
# Go to working directory
cwd = os.getcwd()
os.chdir(wdir)
# check before run
check_exist(expects_in)
# call the program
p = subprocess.Popen("%s %s" % (cmd, arg),
shell=True,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stdout
)
if stdin is not None:
p.stdin.write(stdin)
if stdout == subprocess.PIPE:
out, err = p.communicate()
else:
out, err = None, None
p.stdin.close()
p.wait()
if p.returncode < 0:
print >>sys.stderr, cmd, ": returncode is", p.returncode
# check after run
check_exist(expects_out)
# go back to the previous working directory
os.chdir(cwd)
return p.returncode, out, err
# call()
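# Usage sketch for call() (a trivial command; expects_in/expects_out take
# paths relative to wdir, or absolute paths):
#   rc, out, err = call("ls", arg="-l", wdir="/tmp")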
def rotate_file(filename, copy=False):
"""
Rotate file like logrotate.
If given filename already exists, rename it to "filename".n, n=1...
Filename with larger n is older one.
"""
# If not exist,
if not os.path.isfile(filename):
return
# make list [ [filename, number], ... ]
old_list = []
dot_files = glob.glob(filename + ".*")
for f in dot_files:
suffix = f.replace(filename+".", "")
try:
i = int(suffix)
if str(i) == suffix: # ignore if suffix was such as 003...
old_list.append([f, i])
except ValueError, e:
continue
old_list.sort(lambda x,y: x[1]-y[1])
# rotate files
for f, i in reversed(old_list):
os.rename(f, "%s.%d" % (f[:f.rfind(".")], i+1))
if copy:
shutil.copyfile(filename, filename + ".1")
else:
os.rename(filename, filename + ".1")
return filename + ".1"
# rotate_file()
def safe_copy(src, dst, move=False):
"""
Don't reveal file before copy completed.
"""
src_name = os.path.basename(src)
if os.path.isdir(dst): dst = os.path.join(dst, src_name)
tmpfd, tmp = tempfile.mkstemp(prefix="."+src_name, dir=os.path.dirname(dst))
os.close(tmpfd)
shutil.copy2(src, tmp)
os.rename(tmp, dst)
if move and os.path.isfile(dst) and not os.path.islink(dst) and os.path.getsize(src)==os.path.getsize(dst):
os.remove(src)
# safe_copy()
def commonalize(Is):
new_Is = []
Is0 = Is[0]
for I in Is[1:]:
Is0, I = Is0.common_sets(I, assert_is_similar_symmetry=False)
new_Is.append(I)
Is = []
for I in new_Is:
I = I.common_set(Is0, assert_is_similar_symmetry=False)
assert len(Is0.data()) == len(I.data())
Is.append(I)
return [Is0,] + Is
# commonalize()
def get_number_of_processors(default=4):
nproc = default
if os.path.isfile("/proc/cpuinfo"):
nproc = len(filter(lambda x:x.startswith("processor"), open("/proc/cpuinfo")))
else:
try:
nproc = int(commands.getoutput("sysctl -n hw.ncpu"))
except:
pass
return nproc
# get_number_of_processors()
def safe_float(v):
try:
return float(v)
except ValueError:
return float("nan")
# safe_float()
def num_th_str(v):
s = str(v)
if s[-1] == "1": return s+"st"
if s[-1] == "2": return s+"nd"
if s[-1] == "3": return s+"rd"
return s+"th"
# num_th_str()
def directory_included(path, topdir=None, include_dir=[], exclude_dir=[]):
if topdir is None:
for d in include_dir:
if directory_included(path, d): return True
return False
l1 = filter(lambda x: x, path.split(os.sep))
l2 = filter(lambda x: x, topdir.split(os.sep))
lc = os.path.commonprefix([l1,l2])
if len(lc) != len(l2): return False
if include_dir == exclude_dir == []:
return True
if include_dir != []:
for d in include_dir:
if directory_included(path, d): return True
return False
if exclude_dir != []:
for d in exclude_dir:
if directory_included(path, d): return False
return True
# directory_included()
def read_path_list(lstin, comment_strs=["#"], only_exists=False, as_abspath=False, err_out=null_out()):
ret = []
for l in open(lstin):
for c in comment_strs:
if c in l: l = l[:l.index(c)]
l = l.strip()
if not l: continue
if only_exists and not os.path.exists(l):
err_out.write("Error: file not found: %s\n"%l)
continue
ret.append(os.path.abspath(l) if as_abspath else l)
return ret
# read_path_list()
def return_first_found_file(files, wd=None):
for f in files:
if wd is not None: f = os.path.join(wd, f)
if os.path.isfile(f): return f
# return_first_found_file()
def expand_wildcard_in_list(fdlst, err_out=null_out()):
ret = []
for d in fdlst:
gd = glob.glob(d)
if len(gd) == 0:
print >>err_out, "Error: No match!!: %s" % d
continue
ret.extend(gd)
return ret
# expand_wildcard_in_list()
def check_disk_free_bytes(d):
try:
x = os.statvfs(d)
return x.f_frsize * x.f_bavail
except:
return -1
# check_disk_free_bytes()
def get_temp_local_dir(prefix, min_bytes=None, min_kb=None, min_mb=None, min_gb=None, additional_tmpd=None):
assert (min_bytes, min_kb, min_mb, min_gb).count(None) >= 2
min_free_bytes = 0
if min_bytes is not None: min_free_bytes = min_bytes
if min_kb is not None: min_free_bytes = min_kb * 1024
if min_mb is not None: min_free_bytes = min_mb * 1024**2
if min_gb is not None: min_free_bytes = min_gb * 1024**3
ramdisk = "/dev/shm"
if os.path.isdir(ramdisk): tmpdirs = [ramdisk, tempfile.gettempdir()]
else: tmpdirs = [tempfile.gettempdir()]
if type(additional_tmpd) is str:
tmpdirs.append(additional_tmpd)
elif type(additional_tmpd) in (list, tuple):
tmpdirs.extend(additional_tmpd)
for tmpdir in tmpdirs:
if check_disk_free_bytes(tmpdir) >= min_free_bytes:
return tempfile.mkdtemp(prefix=prefix, dir=tmpdir)
return None
# get_temp_local_dir()
def get_temp_filename(prefix="tmp", suffix="", wdir=None):
tmpfd, tmp = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=wdir)
os.close(tmpfd)
return tmp
# get_temp_filename()
def replace_forbidden_chars(filename, repl="-"):
return re.sub(r"[/><\*\\\?%:]", repl, filename)
# replace_forbidden_chars()
def human_readable_bytes(bytes):
if bytes < 1024:
return bytes, "B"
elif bytes < 1024**2:
return bytes/1024., "KB"
elif bytes < 1024**3:
return bytes/1024.**2, "MB"
elif bytes < 1024**4:
return bytes/1024.**3, "GB"
elif bytes < 1024**5:
return bytes/1024.**4, "TB"
else:# if bytes < 1024**6:
return bytes/1024.**5, "PB"
# human_readable_bytes()
def yamtbx_module_root():
"""
Possible locations: modules/yamtbx or modules/yamtbx/yamtbx
"""
tmp = libtbx.env.find_in_repositories("yamtbx/yamtbx")
if tmp: return tmp
tmp = libtbx.env.find_in_repositories("yamtbx")
if tmp: return tmp
# yamtbx_module_root()
|
[
"keitaroyam@users.noreply.github.com"
] |
keitaroyam@users.noreply.github.com
|
c35247face031fdcf18da283072975cf5773b968
|
64a80df5e23b195eaba7b15ce207743e2018b16c
|
/Downloads/adafruit-circuitpython-bundle-py-20201107/lib/adafruit_pybadger/pybadge.py
|
6c341d8678773b63b175827e4b779cc10fcfcc22
|
[] |
no_license
|
aferlazzo/messageBoard
|
8fb69aad3cd7816d4ed80da92eac8aa2e25572f5
|
f9dd4dcc8663c9c658ec76b2060780e0da87533d
|
refs/heads/main
| 2023-01-27T20:02:52.628508
| 2020-12-07T00:37:17
| 2020-12-07T00:37:17
| 318,548,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,899
|
py
|
# The MIT License (MIT)
#
# Copyright (c) 2020 Kattni Rembor for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_pybadger.pybadge`
================================================================================
Badge-focused CircuitPython helper library for PyBadge, PyBadge LC and EdgeBadge.
All three boards are included in this module as there is no difference in the
CircuitPython builds at this time, and therefore no way to differentiate
the boards from within CircuitPython.
* Author(s): Kattni Rembor
Implementation Notes
--------------------
**Hardware:**
* `Adafruit PyBadge <https://www.adafruit.com/product/4200>`_
* `Adafruit PyBadge LC <https://www.adafruit.com/product/3939>`_
* `Adafruit EdgeBadge <https://www.adafruit.com/product/4400>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
from collections import namedtuple
import board
import digitalio
import analogio
import audioio
from gamepadshift import GamePadShift
import adafruit_lis3dh
import neopixel
from adafruit_pybadger.pybadger_base import PyBadgerBase
__version__ = "3.1.1"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_PyBadger.git"
Buttons = namedtuple("Buttons", "b a start select right down up left")
class PyBadge(PyBadgerBase):
"""Class that represents a single PyBadge, PyBadge LC, or EdgeBadge."""
_audio_out = audioio.AudioOut
_neopixel_count = 5
def __init__(self):
super().__init__()
i2c = None
if i2c is None:
try:
i2c = board.I2C()
except RuntimeError:
self._accelerometer = None
if i2c is not None:
int1 = digitalio.DigitalInOut(board.ACCELEROMETER_INTERRUPT)
try:
self._accelerometer = adafruit_lis3dh.LIS3DH_I2C(
i2c, address=0x19, int1=int1
)
except ValueError:
self._accelerometer = adafruit_lis3dh.LIS3DH_I2C(i2c, int1=int1)
# NeoPixels
self._neopixels = neopixel.NeoPixel(
board.NEOPIXEL, self._neopixel_count, brightness=1, pixel_order=neopixel.GRB
)
self._buttons = GamePadShift(
digitalio.DigitalInOut(board.BUTTON_CLOCK),
digitalio.DigitalInOut(board.BUTTON_OUT),
digitalio.DigitalInOut(board.BUTTON_LATCH),
)
self._light_sensor = analogio.AnalogIn(board.A7)
@property
def button(self):
"""The buttons on the board.
Example use:
.. code-block:: python
from adafruit_pybadger import pybadger
while True:
if pybadger.button.a:
print("Button A")
elif pybadger.button.b:
print("Button B")
elif pybadger.button.start:
print("Button start")
elif pybadger.button.select:
print("Button select")
"""
button_values = self._buttons.get_pressed()
return Buttons(
*[
button_values & button
for button in (
PyBadgerBase.BUTTON_B,
PyBadgerBase.BUTTON_A,
PyBadgerBase.BUTTON_START,
PyBadgerBase.BUTTON_SELECT,
PyBadgerBase.BUTTON_RIGHT,
PyBadgerBase.BUTTON_DOWN,
PyBadgerBase.BUTTON_UP,
PyBadgerBase.BUTTON_LEFT,
)
]
)
pybadge = PyBadge() # pylint: disable=invalid-name
"""Object that is automatically created on import."""
|
[
"aferlazzo@gmail.com"
] |
aferlazzo@gmail.com
|
b5a54ab413b42bbe5aaf2947e43b9c17f26aca05
|
c1198c82a511c9398367296e61ed3e7226ecd0a4
|
/gisapp/app/toolkit/__init__.py
|
b006a777cad622a63095caab3c84cf8dd5a7f5ce
|
[] |
no_license
|
maheshpaliwal/JeevanDaan
|
a22b2dc59bdeb7005f05569b0331fee01d98ead4
|
9b0b44075b0a7a3937e08155c12112ca306f4b33
|
refs/heads/master
| 2020-03-31T02:53:09.777605
| 2018-10-27T05:45:49
| 2018-10-27T05:45:49
| 151,843,342
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
from .buttons import *
from .layers import *
from .map import *
from .popups import *
from .ribbon import *
from .statusbar import *
from .toolbars import *
from . import theme
from . import dispatch
|
[
"39883849+maheshpaliwal@users.noreply.github.com"
] |
39883849+maheshpaliwal@users.noreply.github.com
|
46e1f87fb53aa118f81a3a35c9fbf474f72723a8
|
15e50d612929cc60149f311cb7807e13cebbd383
|
/djangodebatable/users/admin.py
|
0a16f115a914c5c357c395530ca2c312cb942597
|
[] |
no_license
|
DebatableChat/debatable
|
4c254e560be71974b867a5de8ebea81ff10c19ac
|
4f7f60952b63b0bd32f34628d5ab9e34fe2ceca7
|
refs/heads/main
| 2023-08-28T01:57:34.740003
| 2021-10-24T20:49:29
| 2021-10-24T20:49:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
from django.contrib import admin
from .models import Profile, Report, Follow
# Register your models here.
admin.site.register(Profile)
admin.site.register(Report)
admin.site.register(Follow)
|
[
"laz013laz@gmail.com"
] |
laz013laz@gmail.com
|
a9eb757a2b0a176611cde9701778712b3dd565df
|
bec8abb5c3146377f1b3bc2f2b4eaa4d02502211
|
/mediascraper/the_procrastinators/youtube_scraper.py
|
7383cc710c70c57551b36229ef8259fb99726bbb
|
[
"MIT"
] |
permissive
|
Kokitis/MediaScraper
|
578f3d96f1ef731906e03e56db77e141823f8681
|
8bd7294942945d90838357f14e10558a0512e316
|
refs/heads/master
| 2020-03-26T11:51:34.460233
| 2018-08-16T00:45:37
| 2018-08-16T00:45:37
| 144,863,333
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,286
|
py
|
from pathlib import Path
import requests
import json
import yaml
from pprint import pprint
from typing import List, Tuple
import re
import datetime
from dataclasses import dataclass
from pytools.timetools import Duration
shownotes_regex = ""
@dataclass
class Shownote:
timestamp: Duration
title: str
link: str
def extract_description(text:str)->str:
description, *junk = text.split('PCP Episode')
description = description.strip()
return description
def extract_shownotes(lines:List[str])->List[Shownote]:
""" Extracts the timestamps, titles, and links of each shownote."""
    regex = re.compile(r"[\d]+:[\d]+(?:[:][\d]+)?")
shownotes = list()
for current_line, next_line in zip(lines[:-1], lines[1:]):
if regex.match(current_line):
_time, *_title = current_line.split(' ')
timestamp = Duration.from_string(_time)
title = " ".join(_title)
link = next_line
shownote = Shownote(timestamp, title, link)
shownotes.append(shownote)
return shownotes
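# Usage sketch (illustrative description lines; Duration.from_string is assumed
# to parse "M:SS" / "H:MM:SS" strings, per pytools.timetools):
#   lines = ["0:42 Cold open", "https://example.com/a",
#            "1:03:10 Main topic", "https://example.com/b"]
#   extract_shownotes(lines)
#   # -> [Shownote(timestamp=..., title='Cold open', link='https://example.com/a'), ...]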
if __name__ == "__main__":
sample = Path(__file__).parent / "Tourist Trap Stockholm Syndrome - The Pro Crastinators Podcast, Episode 119-toHfm6RyLYo.info.json"
data = json.loads(sample.read_text())
description = data['description']
#print(description)
pprint(extract_shownotes(description.split('\n')))
|
[
"cld100@pitt.edu"
] |
cld100@pitt.edu
|
9bd1cb685d27df3d42c491d7eae73d9b825184a9
|
5ea82bec4980c41c5d9526f2394d3e931978a717
|
/REG_TEST/Data_loader.py
|
20b715ac77b5a431c82fd3a844457e0c6812b04e
|
[] |
no_license
|
pbatwal/TF_TEST
|
74c4761125607024546c2af9bd592808f1d5bc4d
|
3bae278fcc842114e59319d7ade86d24d050f5ef
|
refs/heads/master
| 2022-10-26T15:06:04.657248
| 2018-05-20T19:13:34
| 2018-05-20T19:13:34
| 132,985,175
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,344
|
py
|
# This code loads data into data arrays (NumPy or pandas)
# and defines TensorFlow-specific input functions for training and evaluation
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
import tensorflow as tf
#import xlrd
# Paths and Data Description here
DATA_FILE_TRAIN = "F:\PiyushWS\data\data1_train.csv"
DATA_FILE_TEST = "F:\PiyushWS\data\data1_test.csv"
CSV_COLUMN_NAMES = ['x', 'y', 'A']
#OUTPUT_LABELS = ['A']
# this code reads data into an NDArray from the .xls file
"""
DATA_FILE = "F:\PiyushWS\TF_TEST\LR1\data\data.xlsx"
book = xlrd.open_workbook(DATA_FILE, encoding_override="utf-8")
sheet = book.sheet_by_index(0)
data = np.asarray([sheet.row_values(i) for i in range(1, sheet.nrows)])
n_samples = sheet.nrows - 1
print(data)
"""
# this code reads data into PD DF
def load_data(y_name) :
train_path = DATA_FILE_TRAIN
test_path = DATA_FILE_TEST
train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)
train_x, train_y = train, train.pop(y_name)
test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)
test_x, test_y = test, test.pop(y_name)
return (train_x, train_y), (test_x, test_y)
# This code converts features and labels into a Dataset
def train_input_fn(features, labels, batch_size):
"""An input function for training"""
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
# Shuffle, repeat, and batch the examples.
dataset = dataset.shuffle(1000).repeat().batch(batch_size)
# Return the dataset.
return dataset
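# Usage sketch with a tf.estimator model (`classifier` is a hypothetical,
# already-constructed estimator):
#   classifier.train(
#       input_fn=lambda: train_input_fn(train_x, train_y, batch_size=32),
#       steps=1000)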
# This code converts features and labels into a Dataset
def eval_input_fn(features, labels, batch_size):
"""An input function for evaluation or prediction"""
features=dict(features)
if labels is None:
# No labels, use only features.
inputs = features
else:
inputs = (features, labels)
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices(inputs)
# Batch the examples
assert batch_size is not None, "batch_size must not be None"
dataset = dataset.batch(batch_size)
# Return the dataset.
return dataset
"""
if __name__ == "__main__":
# run the code here
print(load_data("A"))
"""
|
[
"piyush.batwal@gmail.com"
] |
piyush.batwal@gmail.com
|
423f278c58c975114abab5bad2b8e37d7430b909
|
00c9da6996b7afcf4a6a522dad84690473febb14
|
/mysql/update.py
|
4da779cfb829cb24b79ca611d5a1535464a98614
|
[] |
no_license
|
laodearissaputra/sql
|
0efd081354c8b4f6309808ccee7f7ebb824119cf
|
2e807ac0b92e6e1091cb2c217b3eb24c82cda498
|
refs/heads/master
| 2023-03-20T05:15:33.908677
| 2021-03-16T19:49:24
| 2021-03-16T19:49:24
| 348,304,906
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
import mysql.connector
db = mysql.connector.connect(host='localhost',database='sql-test',user='bithealth',password='Rahasia.2021')
cursor = db.cursor()
sql = "UPDATE employee SET age = 28 WHERE sex = 'M'"
try:
    cursor.execute(sql)
    print('data updated')
    db.commit()
except mysql.connector.Error:
    db.rollback()
db.close()
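# A safer variant would use a parameterized query (placeholder style used by
# mysql-connector), e.g.:
#   sql = "UPDATE employee SET age = %s WHERE sex = %s"
#   cursor.execute(sql, (28, 'M'))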
|
[
"aris.saputra@bithealth.co.id"
] |
aris.saputra@bithealth.co.id
|
58fa00901612d8e03424b86bb769e4c16caed12d
|
f080beb2225995e96d2d192f3561e87ffb9e05cf
|
/detect.py
|
a72851498a91cfd25339da9a966eaf75d564f96d
|
[] |
no_license
|
egeaydin/CarDetector
|
6532420e38cdfcf06c4a2b8bbc07016c02c0d940
|
f7bca723c0bce5dd545f589e5a32a3c858a843e3
|
refs/heads/master
| 2020-04-02T08:04:26.747694
| 2018-10-25T22:17:41
| 2018-10-25T22:17:41
| 154,227,942
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 803
|
py
|
from car_detector import CarDetector
import sys
if len(sys.argv) < 2:
raise Exception("Please specify the absolute path of the video file. Ex: python detect.py C:\\Users\\user\\movie.mov")
sub_clip = None
# means only the start second specified
if len(sys.argv) == 3:
raise Exception("Please specify the end second of the sub_clip. Ex: python detect.py C:\\Users\\user\\movie.mov 10 40")
# means we have enough parameters to specify a sub clip
if len(sys.argv) == 4:
try:
sub_clip = (int(sys.argv[2]), int(sys.argv[3]))
except ValueError:
raise Exception("Start and End second parameters has to be valid integers!")
detector = CarDetector(sys.argv[1], sub_clip)
# Detect the cars in the video
detector.detect()
# Save the video to a new video file
detector.save()
|
[
"egeaydin@gmail.com"
] |
egeaydin@gmail.com
|
766af69b4f7415293518c240d54f5b839b179605
|
9c6eb14eb45bbc554aba32afc825e2eddde14cd6
|
/3、基本数据结构/linklist2stack.py
|
898920495efebbfdaa60c2d99171103cdb325b7e
|
[] |
no_license
|
csuzll/python_DS_Algorithm
|
c4a466191e08fd18ff614f4537a23710f8ff9f5b
|
06b5ebd4ff776d87407310b67e23e3337048e002
|
refs/heads/master
| 2020-06-21T03:53:43.093060
| 2019-08-21T10:46:13
| 2019-08-21T10:46:13
| 197,337,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
# Stack implemented with a linked list
from unorderlist2 import UnorderedList
class Stack2():
def __init__(self):
self.items = UnorderedList()
def isEmpty(self):
return self.items.size() == 0
def size(self):
return self.items.size()
def push(self, item):
self.items.append(item)
    def pop(self):
        # return the item removed from the top of the stack
        return self.items.pop(self.items.size() - 1)
    def peek(self):
        # return the top item without removing it
        return self.items[-1]
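# Usage sketch (assumes UnorderedList from unorderlist2 provides
# append/pop/size and indexing, as used above):
#   s = Stack2()
#   s.push(1)
#   s.push(2)
#   print(s.peek())  # -> 2
#   print(s.pop())   # -> 2
#   print(s.size())  # -> 1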
|
[
"1575258778@qq.com"
] |
1575258778@qq.com
|
cc265e6daac130bd2e718e88da4c30d7bfa496a6
|
23da5ebe6f71bab7e63a799c673a81a87565da76
|
/tests/test_data_reader.py
|
abc3526d38dedd777225144c7b8fdc26c2bde9b1
|
[] |
no_license
|
AruniRC/colorizer-fcn
|
6af5c1da80f0200deb370e2841c51a7fd8eb3650
|
33ab423151a2c6045f69bcf0bb493965e106010a
|
refs/heads/master
| 2021-09-05T14:22:42.550611
| 2018-01-28T20:08:12
| 2018-01-28T20:08:12
| 115,282,079
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,267
|
py
|
import argparse
import os
import os.path as osp
import numpy as np
import PIL.Image
import skimage.io
import skimage.color as color
import torch
from torch.autograd import Variable
import sys
sys.path.append('/vis/home/arunirc/data1/Research/colorize-fcn/colorizer-fcn')
import models
import train
import utils
import data_loader
root = '/vis/home/arunirc/data1/datasets/ImageNet/images/'
def test_single_read():
print 'Entering: test_single_read'
dataset = data_loader.ColorizeImageNet(root, split='train', set='small')
img, lbl = dataset.__getitem__(0)
assert len(lbl)==2
assert np.min(lbl[0].numpy())==0
assert np.max(lbl[0].numpy())==30
print 'Test passed: test_single_read'
def test_single_read_dimcheck():
print 'Entering: test_single_read_dimcheck'
dataset = data_loader.ColorizeImageNet(root, split='train', set='small')
img, lbl = dataset.__getitem__(0)
assert len(lbl)==2
im_hue = lbl[0].numpy()
im_chroma = lbl[1].numpy()
assert im_chroma.shape==im_hue.shape, \
'Labels (Hue and Chroma maps) should have same dimensions.'
print 'Test passed: test_single_read_dimcheck'
def test_train_loader():
print 'Entering: test_train_loader'
train_loader = torch.utils.data.DataLoader(
data_loader.ColorizeImageNet(root, split='train', set='small'),
batch_size=1, shuffle=False)
dataiter = iter(train_loader)
img, label = dataiter.next()
assert len(label)==2, \
'Network should predict a 2-tuple: hue-map and chroma-map.'
im_hue = label[0].numpy()
im_chroma = label[1].numpy()
assert im_chroma.shape==im_hue.shape, \
'Labels (Hue and Chroma maps) should have same dimensions.'
print 'Test passed: test_train_loader'
def test_dataset_read():
'''
Read through the entire dataset.
'''
dataset = data_loader.ColorizeImageNet(\
root, split='train', set='small')
for i in xrange(len(dataset)):
# if i > 44890: # HACK: skipping over some stuff
img_file = dataset.files['train'][i]
img, lbl = dataset.__getitem__(i)
assert type(lbl) == torch.FloatTensor
assert type(img) == torch.FloatTensor
print 'iter: %d,\t file: %s,\t imsize: %s' % (i, img_file, img.size())
def test_cmyk_read():
'''
Handle CMYK images -- skip to previous image.
'''
print 'Entering: test_cmyk_read'
dataset = data_loader.ColorizeImageNet(\
root, split='train', set='small')
idx = 44896
img_file = dataset.files['train'][idx]
im1 = PIL.Image.open(img_file)
im1 = np.asarray(im1, dtype=np.uint8)
assert im1.shape[2]==4, 'Check that selected image is indeed CMYK.'
img, lbl = dataset.__getitem__(idx)
print 'Test passed: test_cmyk_read'
def test_grayscale_read():
'''
Handle single-channel images -- skip to previous image.
'''
print 'Entering: test_grayscale_read'
dataset = data_loader.ColorizeImageNet(root, split='train', set='small')
idx = 4606
img_file = dataset.files['train'][idx]
im1 = PIL.Image.open(img_file)
im1 = np.asarray(im1, dtype=np.uint8)
assert len(im1.shape)==2, 'Check that selected image is indeed grayscale.'
img, lbl = dataset.__getitem__(idx)
print 'Test passed: test_grayscale_read'
def test_rgb_hsv():
# DEFER
dataset = data_loader.ColorizeImageNet(\
root, split='train', set='small')
img_file = dataset.files['train'][100]
img = PIL.Image.open(img_file)
img = np.array(img, dtype=np.uint8)
assert np.max(img.shape) == 400
def test_soft_bins():
dataset = \
data_loader.ColorizeImageNet(root, split='train', set='small',
bins='soft')
img, lbl = dataset.__getitem__(0)
assert type(lbl) == torch.FloatTensor
assert type(img) == torch.FloatTensor
print 'Test passed: test_soft_bins'
def test_lowpass_image():
dataset = \
data_loader.ColorizeImageNet(root, split='train', set='small',
bins='soft', img_lowpass=8)
img, lbl = dataset.__getitem__(0)
assert type(lbl) == torch.FloatTensor
assert type(img) == torch.FloatTensor
print 'Test passed: test_soft_bins'
def test_init_gmm():
# Pass paths to cached GMM and mean Lightness
GMM_PATH = '/srv/data1/arunirc/Research/colorize-fcn/colorizer-fcn/logs/MODEL-fcn32s_color_CFG-014_VCS-db517d6_TIME-20171230-212406/gmm.pkl'
MEAN_L_PATH = '/srv/data1/arunirc/Research/colorize-fcn/colorizer-fcn/logs/MODEL-fcn32s_color_CFG-014_VCS-db517d6_TIME-20171230-212406/mean_l.npy'
dataset = \
data_loader.ColorizeImageNet(
root, split='train', set='tiny', bins='soft',
gmm_path=GMM_PATH, mean_l_path=MEAN_L_PATH)
print 'Test passed: test_init_gmm'
def main():
test_single_read()
test_single_read_dimcheck()
test_train_loader()
test_cmyk_read()
test_grayscale_read()
test_soft_bins()
test_lowpass_image()
test_init_gmm()
#
# dataset.get_color_samples()
# test_dataset_read()
# TODO - test_labels
# TODO - test colorspace conversions
if __name__ == '__main__':
main()
|
[
"arunirc@erdos.cs.umass.edu"
] |
arunirc@erdos.cs.umass.edu
|
507b5e4a2cf5d1be59559b0252c23e4d162aace9
|
7762ca6feb98c8b1c95da09758801a6bc38922ff
|
/NinjaGold/settings.py
|
00af97c3a4b37d82f68939050baa3b893c96e2ba
|
[] |
no_license
|
SDBranka/NinjaGold
|
211bd6ade5e9c6a216ffef89a0c791a8a2d15ad5
|
db881812842f2188df1da20edc81469fcb56a50a
|
refs/heads/main
| 2023-04-29T01:02:07.427340
| 2021-05-22T19:05:34
| 2021-05-22T19:05:34
| 369,070,343
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,109
|
py
|
"""
Django settings for NinjaGold project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '48s9*0q%(s79!70c9!^vujzz0iy))40u)ikr66k=9x7y^d*pcs'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'NG_app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'NinjaGold.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'NinjaGold.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"sdbranka@me.com"
] |
sdbranka@me.com
|
5e078a01031a1ae3716f3d785c8ac7140806777e
|
c39b10669f4bbac5dbf53d5e9437b6161e9a1923
|
/words_maze/jogo/jogo.py
|
b87a749097bbbe4ed87e35609d73709d9177a591
|
[] |
no_license
|
manuzika/PI3
|
68b558453907bc703b37393395c0ef0ee808869f
|
1eff419c63bea1b036a200032933a8b4216fb950
|
refs/heads/master
| 2023-04-07T06:02:05.527430
| 2021-04-16T23:52:27
| 2021-04-16T23:52:27
| 358,738,786
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,980
|
py
|
import os
import pygame
class Personagem(object):
def __init__(self):
self.rect = pygame.Rect(32, 32, 32, 32)
self.letra1_colisao = False
self.letra2_colisao = False
self.letra3_colisao = False
self.letra4_colisao = False
def move(self, pos_x, pos_y):
if pos_x != 0:
self.move_eixo(pos_x, 0)
if pos_y != 0:
self.move_eixo(0, pos_y)
def move_eixo(self, pos_x, pos_y):
self.rect.x += pos_x
self.rect.y += pos_y
        #Collision handling, based on the movement speed
for i in walls:
if self.rect.colliderect(i.rect):
if pos_x > 0:
self.personagem = pygame.image.load("imagens/lado_esquerdo.png")
self.rect.right = i.rect.left
if pos_x < 0:
self.personagem = pygame.image.load("imagens/lado_direito.png")
self.rect.left = i.rect.right
if pos_y > 0:
self.personagem = pygame.image.load("imagens/trás.png")
self.rect.bottom = i.rect.top
if pos_y < 0:
self.personagem = pygame.image.load("imagens/frente.png")
self.rect.top = i.rect.bottom
if self.rect.colliderect(letras.rect1):
self.letra1_colisao = True
elif self.rect.colliderect(letras.rect2):
self.letra2_colisao = True
elif self.rect.colliderect(letras.rect3):
self.letra3_colisao = True
elif self.rect.colliderect(letras.rect4):
self.letra4_colisao = True
class Labirinto (object):
def __init__(self, pos):
self.parede = pygame.image.load("imagens/muro.png").convert_alpha()
self.rect = parede.get_rect()
screen.blit(parede, self.rect)
walls.append(self)
self.rect = pygame.Rect(pos[0], pos[1], 32, 32)
class Letra(object):
def __init__(self):
super().__init__()
self.letra1 = pygame.image.load("imagens/i.png").convert_alpha()
self.rect = letra1.get_rect()
self.rect1 = pygame.Rect(736, 32, 32, 32)
screen.blit(letra1, self.rect)
self.letra2 = pygame.image.load("imagens/n.png").convert_alpha()
self.rect = letra2.get_rect()
self.rect2 = pygame.Rect(32, 544, 32, 32)
screen.blit(letra2, self.rect)
self.letra3 = pygame.image.load("imagens/f.png").convert_alpha()
self.rect = letra3.get_rect()
self.rect3 = pygame.Rect(352, 256, 32, 32)
screen.blit(letra3, self.rect)
self.letra4 = pygame.image.load("imagens/o.png").convert_alpha()
self.rect = letra4.get_rect()
self.rect4 = pygame.Rect(736, 512, 32, 32)
screen.blit(letra4, self.rect)
fundo = pygame.image.load("imagens/fundo.png")
parede = pygame.image.load("imagens/muro.png")
letra1 = pygame.image.load("imagens/i.png")
letra2 = pygame.image.load("imagens/n.png")
letra3 = pygame.image.load("imagens/f.png")
letra4 = pygame.image.load("imagens/o.png")
screen = pygame.display.set_mode((800, 608))
letras = Letra()
walls = []
moldura = [
["i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i"],
["i", "g", "i", "g", "g", "g", "g", "i", "i", "i", "i", "g", "g", "g", "g", "g", "g", "i", "g", "g", "g", "g", "g", "g", "i"],
["i", "g", "i", "g", "g", "i", "g", "g", "g", "g", "i", "g", "i", "i", "i", "i", "g", "i", "g", "g", "i", "i", "g", "i", "i"],
["i", "g", "i", "i", "g", "i", "g", "i", "i", "i", "i", "g", "i", "g", "g", "i", "g", "i", "i", "g", "i", "g", "g", "g", "i"],
["i", "g", "g", "g", "g", "i", "g", "i", "g", "g", "g", "g", "i", "g", "i", "i", "g", "g", "g", "g", "i", "g", "i", "g", "i"],
["i", "g", "i", "i", "g", "i", "g", "g", "g", "i", "i", "i", "i", "g", "i", "g", "g", "i", "i", "g", "i", "g", "i", "g", "i"],
["i", "g", "g", "i", "g", "g", "g", "i", "g", "g", "g", "g", "g", "g", "i", "g", "i", "i", "g", "g", "g", "g", "i", "g", "i"],
["i", "g", "i", "i", "i", "i", "i", "i", "i", "i", "i", "g", "i", "i", "i", "g", "i", "g", "g", "g", "i", "g", "i", "g", "i"],
["i", "g", "g", "g", "g", "g", "g", "g", "g", "i", "g", "g", "i", "g", "g", "g", "i", "i", "i", "i", "i", "i", "i", "g", "i"],
["i", "i", "i", "g", "i", "i", "i", "i", "g", "i", "g", "i", "i", "g", "i", "i", "i", "g", "g", "g", "g", "g", "i", "g", "i"],
["i", "g", "g", "g", "g", "g", "g", "i", "g", "i", "g", "i", "g", "g", "g", "g", "g", "g", "i", "g", "g", "g", "i", "g", "i"],
["i", "g", "i", "i", "i", "i", "g", "i", "g", "i", "g", "i", "g", "i", "i", "g", "g", "i", "i", "g", "g", "g", "i", "g", "i"],
["i", "g", "i", "g", "g", "i", "g", "i", "g", "g", "g", "i", "g", "i", "g", "g", "i", "g", "g", "g", "f", "g", "i", "g", "i"],
["i", "g", "g", "g", "g", "i", "g", "i", "i", "i", "i", "i", "g", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "g", "i"],
["i", "g", "i", "g", "g", "i", "g", "g", "g", "g", "g", "i", "g", "g", "g", "g", "g", "g", "g", "g", "g", "g", "i", "g", "i"],
["i", "i", "i", "g", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "g", "i", "i", "i"],
["i", "i", "g", "g", "g", "g", "g", "g", "g", "g", "i", "g", "g", "g", "i", "g", "g", "g", "i", "g", "g", "g", "i", "g", "i"],
["i", "g", "g", "i", "i", "i", "i", "i", "i", "g", "i", "g", "i", "g", "g", "g", "i", "g", "g", "g", "i", "g", "g", "g", "i"],
["i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i"]
]
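# Build the maze from the grid: "i" cells become wall tiles, every other
# marker ("g", "f") is left as open floor by this loop.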
x = y = 0
for row in moldura:
for col in row:
if col == "i":
Labirinto((x, y))
x += 32
y += 32
x = 0
|
[
"roepckemanoela@gmail.com"
] |
roepckemanoela@gmail.com
|
892c6ea0089b84d37e35d19760bbe949a8fd271a
|
9c6522db2db8c4c075e23f2953776384973891a8
|
/cn.ao/py.ori.fmt/c0310.bin.py
|
edfcd15820f2b96d91aea65d3e7db6e48edd12f7
|
[] |
no_license
|
ZhenjianYang/ZeroAoVoiceScripts
|
b388c28b498049d7d4277b8344cdc098488fd258
|
7e0b696d743912739b855acb4306f1dcf564d6c0
|
refs/heads/master
| 2021-05-18T08:58:59.482674
| 2021-02-19T08:22:19
| 2021-02-19T08:22:19
| 94,624,272
| 12
| 5
| null | 2021-02-19T08:22:20
| 2017-06-17T13:06:15
|
Python
|
UTF-8
|
Python
| false
| false
| 70,427
|
py
|
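# Decompiled scenario script for map c0310; judging by the dialogue, this is
# the MacDowell residence in Crossbell (Elie's home). Regenerated source from
# the ZeroAoVoiceScripts project.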
from ScenarioHelper import *
def main():
CreateScenaFile(
"c0310.bin", # FileName
"c0310", # MapName
"c0310", # Location
0x002B, # MapIndex
"ed7150",
0x00002000, # Flags
("", "", "", "", "", ""), # include
0x00, # PlaceNameNumber
0x00, # PreInitFunctionIndex
b'\x00\xff\xff', # Unknown_51
# Information
[0, 0, -1000, 0, 0, 0, 24000, 500, 30, 45, 0, 360, 0, 0, 0, 0, 0, 1, 43, 0, 4, 0, 5],
)
BuildStringList((
"c0310", # 0
"海尔玛", # 1
"乔安娜", # 2
))
AddCharChip((
"chr/ch25800.itc", # 00
"chr/ch25700.itc", # 01
))
DeclNpc(0, 4059, 7760, 180, 257, 0x0, 0, 0, 0, 0, 2, 0, 6, 255, 0)
DeclNpc(-45349, 59, 3900, 360, 257, 0x0, 0, 1, 0, 0, 0, 0, 8, 255, 0)
DeclActor(-40820, 0, 40910, 1500, -40820, 1500, 40910, 0x007C, 0, 9, 0x0000)
ChipFrameInfo(296, 0) # 0
ScpFunction((
"Function_0_128", # 00, 0
"Function_1_1E0", # 01, 1
"Function_2_20B", # 02, 2
"Function_3_236", # 03, 3
"Function_4_261", # 04, 4
"Function_5_392", # 05, 5
"Function_6_43B", # 06, 6
"Function_7_14A9", # 07, 7
"Function_8_16A7", # 08, 8
"Function_9_2859", # 09, 9
"Function_10_33A5", # 0A, 10
"Function_11_3990", # 0B, 11
))
def Function_0_128(): pass
label("Function_0_128")
RunExpression(0x2, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Switch(
(scpexpr(EXPR_GET_RESULT, 0x2), scpexpr(EXPR_END)),
(0, "loc_168"),
(1, "loc_174"),
(2, "loc_180"),
(3, "loc_18C"),
(4, "loc_198"),
(5, "loc_1A4"),
(6, "loc_1B0"),
(SWITCH_DEFAULT, "loc_1BC"),
)
label("loc_168")
OP_A0(0xFE, 1450, 0x0, 0xFB)
Jump("loc_1C8")
label("loc_174")
OP_A0(0xFE, 1550, 0x0, 0xFB)
Jump("loc_1C8")
label("loc_180")
OP_A0(0xFE, 1600, 0x0, 0xFB)
Jump("loc_1C8")
label("loc_18C")
OP_A0(0xFE, 1400, 0x0, 0xFB)
Jump("loc_1C8")
label("loc_198")
OP_A0(0xFE, 1650, 0x0, 0xFB)
Jump("loc_1C8")
label("loc_1A4")
OP_A0(0xFE, 1350, 0x0, 0xFB)
Jump("loc_1C8")
label("loc_1B0")
OP_A0(0xFE, 1500, 0x0, 0xFB)
Jump("loc_1C8")
label("loc_1BC")
OP_A0(0xFE, 1500, 0x0, 0xFB)
Jump("loc_1C8")
label("loc_1C8")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_1DF")
OP_A0(0xFE, 1500, 0x0, 0xFB)
Jump("loc_1C8")
label("loc_1DF")
Return()
# Function_0_128 end
def Function_1_1E0(): pass
label("Function_1_1E0")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_20A")
OP_94(0xFE, 0xFFFFF63C, 0x0, 0x9C4, 0x73A, 0x3E8)
Sleep(300)
Jump("Function_1_1E0")
label("loc_20A")
Return()
# Function_1_1E0 end
def Function_2_20B(): pass
label("Function_2_20B")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_235")
OP_94(0xFE, 0xFFFFF8B2, 0x1A36, 0x744, 0x26DE, 0x3E8)
Sleep(300)
Jump("Function_2_20B")
label("loc_235")
Return()
# Function_2_20B end
def Function_3_236(): pass
label("Function_3_236")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_260")
OP_94(0xFE, 0xA00A, 0xA05A, 0xB31A, 0xB220, 0x3E8)
Sleep(300)
Jump("Function_3_236")
label("loc_260")
Return()
# Function_3_236 end
def Function_4_261(): pass
label("Function_4_261")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A7, 1)), scpexpr(EXPR_END)), "loc_26F")
Jump("loc_391")
label("loc_26F")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A5, 3)), scpexpr(EXPR_END)), "loc_2A5")
SetChrPos(0x8, 810, 0, 500, 270)
BeginChrThread(0x8, 0, 0, 0)
SetChrPos(0x9, -810, 0, 500, 90)
Jump("loc_391")
label("loc_2A5")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x182, 1)), scpexpr(EXPR_END)), "loc_2B3")
Jump("loc_391")
label("loc_2B3")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x180, 2)), scpexpr(EXPR_END)), "loc_2D2")
Jc((scpexpr(EXPR_EXEC_OP, "OP_2A(0x8F, 0x0, 0x10)"), scpexpr(EXPR_END)), "loc_2CD")
SetChrFlags(0x9, 0x80)
label("loc_2CD")
Jump("loc_391")
label("loc_2D2")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x165, 5)), scpexpr(EXPR_END)), "loc_2E0")
Jump("loc_391")
label("loc_2E0")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x164, 0)), scpexpr(EXPR_END)), "loc_2EE")
Jump("loc_391")
label("loc_2EE")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x162, 5)), scpexpr(EXPR_END)), "loc_301")
SetChrFlags(0x9, 0x80)
Jump("loc_391")
label("loc_301")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x162, 0)), scpexpr(EXPR_END)), "loc_30F")
Jump("loc_391")
label("loc_30F")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x160, 0)), scpexpr(EXPR_END)), "loc_31D")
Jump("loc_391")
label("loc_31D")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x141, 5)), scpexpr(EXPR_END)), "loc_32B")
Jump("loc_391")
label("loc_32B")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x141, 0)), scpexpr(EXPR_END)), "loc_339")
Jump("loc_391")
label("loc_339")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x140, 4)), scpexpr(EXPR_END)), "loc_347")
Jump("loc_391")
label("loc_347")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x140, 0)), scpexpr(EXPR_END)), "loc_355")
Jump("loc_391")
label("loc_355")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x128, 1)), scpexpr(EXPR_END)), "loc_379")
SetChrPos(0x9, -42190, 0, 48970, 0)
SetChrFlags(0x9, 0x10)
Jump("loc_391")
label("loc_379")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x126, 1)), scpexpr(EXPR_END)), "loc_391")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x134, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_391")
SetChrFlags(0x8, 0x10)
label("loc_391")
Return()
# Function_4_261 end
def Function_5_392(): pass
label("Function_5_392")
OP_65(0x0, 0x1)
Jc((scpexpr(EXPR_EXEC_OP, "OP_2A(0x87, 0x0, 0x2)"), scpexpr(EXPR_EXEC_OP, "OP_2A(0x87, 0x0, 0x10)"), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_EXEC_OP, "OP_2A(0x87, 0x0, 0x40)"), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x177, 3)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_3BA")
OP_66(0x0, 0x1)
label("loc_3BA")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x164, 0)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x165, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x128, 1)), scpexpr(EXPR_PUSH_VALUE_INDEX, 0x4), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_EQU), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_OR), scpexpr(EXPR_END)), "loc_3FF")
SetMapObjFrame(0xFF, "light01", 0x0, 0x1)
SetMapObjFrame(0xFF, "model05_light", 0x0, 0x1)
Sound(128, 1, 50, 0)
label("loc_3FF")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A7, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A5, 2)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_43A")
OP_7D(0xD2, 0xD2, 0xE6, 0x0, 0x0)
SetMapObjFrame(0xFF, "light01", 0x0, 0x1)
SetMapObjFrame(0xFF, "model05_light", 0x0, 0x1)
label("loc_43A")
Return()
# Function_5_392 end
def Function_6_43B(): pass
label("Function_6_43B")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A7, 1)), scpexpr(EXPR_END)), "loc_57F")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_4FF")
#C0001
ChrTalk(
0xFE,
(
"老爷好像正在兰花塔内\x01",
"指挥今后的行动。\x02",
)
)
CloseMessageWindow()
#C0002
ChrTalk(
0xFE,
(
"总统已经被拘捕,\x01",
"如今能引领克洛斯贝尔的\x01",
"只有老爷一个人了。\x02",
)
)
CloseMessageWindow()
#C0003
ChrTalk(
0xFE,
(
"虽然这肯定是很重的负担……\x01",
"但还是希望老爷能好好加油。\x02",
)
)
CloseMessageWindow()
SetScenarioFlags(0x0, 0)
Jump("loc_57A")
label("loc_4FF")
#C0004
ChrTalk(
0xFE,
(
"总统已经被拘捕,\x01",
"如今能引领克洛斯贝尔的\x01",
"只有老爷一个人了。\x02",
)
)
CloseMessageWindow()
#C0005
ChrTalk(
0xFE,
(
"虽然这肯定是很重的负担……\x01",
"但还是希望老爷能好好加油。\x02",
)
)
CloseMessageWindow()
label("loc_57A")
Jump("loc_14A5")
label("loc_57F")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A5, 3)), scpexpr(EXPR_END)), "loc_610")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1CC, 7)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_59A")
Call(0, 7)
Jump("loc_60B")
label("loc_59A")
#C0006
ChrTalk(
0xFE,
(
"自从老爷和大小姐\x01",
"被软禁在米修拉姆之后,\x01",
"我就一直担心得坐立不安……\x02",
)
)
CloseMessageWindow()
#C0007
ChrTalk(
0xFE,
(
"能再次见到您,\x01",
"我也总算可以安心了。\x02",
)
)
CloseMessageWindow()
label("loc_60B")
Jump("loc_14A5")
label("loc_610")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x182, 1)), scpexpr(EXPR_END)), "loc_788")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x18C, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_721")
TurnDirection(0xFE, 0x102, 0)
#C0008
ChrTalk(
0xFE,
(
"……大小姐……\x01",
"老爷有和您联络\x01",
"过吗?\x02",
)
)
CloseMessageWindow()
#C0009
ChrTalk(
0x102,
"#00103F不,我这边也完全没消息……\x02",
)
CloseMessageWindow()
#C0010
ChrTalk(
0xFE,
"是吗……\x02",
)
CloseMessageWindow()
#C0011
ChrTalk(
0xFE,
(
"唔,总之……\x01",
"如果我了解到什么情况,\x01",
"一定会和您联络的。\x02",
)
)
CloseMessageWindow()
#C0012
ChrTalk(
0xFE,
(
"大小姐与各位就\x01",
"专心处理\x01",
"自己的工作吧。\x02",
)
)
CloseMessageWindow()
#C0013
ChrTalk(
0x102,
(
"#00100F嗯,拜托你了,\x01",
"海尔玛先生。\x02",
)
)
CloseMessageWindow()
SetScenarioFlags(0x18C, 2)
Jump("loc_783")
label("loc_721")
#C0014
ChrTalk(
0xFE,
(
"如果我了解到\x01",
"有关老爷的情况,\x01",
"一定会和您联络的。\x02",
)
)
CloseMessageWindow()
#C0015
ChrTalk(
0xFE,
(
"大小姐与各位就\x01",
"专心处理\x01",
"自己的工作吧。\x02",
)
)
CloseMessageWindow()
label("loc_783")
Jump("loc_14A5")
label("loc_788")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x180, 2)), scpexpr(EXPR_END)), "loc_8D2")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_864")
#C0016
ChrTalk(
0xFE,
(
"……之前的那起袭击事件\x01",
"真是一场惨痛的经历。\x02",
)
)
CloseMessageWindow()
#C0017
ChrTalk(
0xFE,
(
"虽然重建工作\x01",
"总算是取得了一些进展……\x02",
)
)
CloseMessageWindow()
#C0018
ChrTalk(
0xFE,
(
"但现在还是有不少人\x01",
"无法从恐惧中解脱。\x02",
)
)
CloseMessageWindow()
#C0019
ChrTalk(
0xFE,
(
"为了防止那种事件再次发生,\x01",
"希望老爷和市长\x01",
"都要加油。\x02",
)
)
CloseMessageWindow()
SetScenarioFlags(0x0, 0)
Jump("loc_8CD")
label("loc_864")
#C0020
ChrTalk(
0xFE,
(
"不久前的那起袭击事件\x01",
"真是一场惨痛的经历。\x02",
)
)
CloseMessageWindow()
#C0021
ChrTalk(
0xFE,
(
"为了防止那种事件再次发生,\x01",
"希望老爷和市长\x01",
"都要加油。\x02",
)
)
CloseMessageWindow()
label("loc_8CD")
Jump("loc_14A5")
label("loc_8D2")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x165, 5)), scpexpr(EXPR_END)), "loc_A0B")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_9A4")
#C0022
ChrTalk(
0xFE,
(
"老爷正在和市长\x01",
"一起研究玛因兹地区\x01",
"遭到占领事件的对策。\x02",
)
)
CloseMessageWindow()
#C0023
ChrTalk(
0xFE,
(
"据说,在武装集团面前,\x01",
"连警备队都束手无策,\x01",
"而且这样的状况还在持续……\x02",
)
)
CloseMessageWindow()
#C0024
ChrTalk(
0xFE,
(
"……真让人担心啊。\x01",
"但愿能尽早将事件解决。\x02",
)
)
CloseMessageWindow()
SetScenarioFlags(0x0, 0)
Jump("loc_A06")
label("loc_9A4")
#C0025
ChrTalk(
0xFE,
(
"据说武装集团十分强悍,\x01",
"连警备队都束手无策……\x02",
)
)
CloseMessageWindow()
#C0026
ChrTalk(
0xFE,
(
"……真让人担心啊。\x01",
"但愿能尽早将事件解决。\x02",
)
)
CloseMessageWindow()
label("loc_A06")
Jump("loc_14A5")
label("loc_A0B")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x164, 0)), scpexpr(EXPR_END)), "loc_B23")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_ABE")
#C0027
ChrTalk(
0xFE,
(
"昨天那起脱轨事故……\x01",
"呼,真是让人震惊。\x02",
)
)
CloseMessageWindow()
#C0028
ChrTalk(
0xFE,
(
"大家现在都议论纷纷,\x01",
"说事故发生的原因是落石或\x01",
"巨大怪物的袭击……\x02",
)
)
CloseMessageWindow()
#C0029
ChrTalk(
0xFE,
(
"唔……真正的原因\x01",
"究竟是什么呢?\x02",
)
)
CloseMessageWindow()
SetScenarioFlags(0x0, 0)
Jump("loc_B1E")
label("loc_ABE")
#C0030
ChrTalk(
0xFE,
"昨天那起脱轨事故真是让人震惊。\x02",
)
CloseMessageWindow()
#C0031
ChrTalk(
0xFE,
(
"市内流传着各种各样的传言……\x01",
"真正的原因\x01",
"究竟是什么呢?\x02",
)
)
CloseMessageWindow()
label("loc_B1E")
Jump("loc_14A5")
label("loc_B23")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x162, 5)), scpexpr(EXPR_END)), "loc_B6E")
#C0032
ChrTalk(
0xFE,
(
"好像有警笛声\x01",
"从西街那边传来……\x02",
)
)
CloseMessageWindow()
#C0033
ChrTalk(
0xFE,
"大概是我听错了吧。\x02",
)
CloseMessageWindow()
Jump("loc_14A5")
label("loc_B6E")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x162, 0)), scpexpr(EXPR_END)), "loc_C0E")
#C0034
ChrTalk(
0xFE,
(
"独立的提案给社会各界\x01",
"都造成了一定影响,\x01",
"老爷正在努力制订应对措施。\x02",
)
)
CloseMessageWindow()
#C0035
ChrTalk(
0xFE,
(
"他今天也要和迪塔市长\x01",
"一起在兰花塔开会……\x01",
"希望他能注意自己的身体啊。\x02",
)
)
CloseMessageWindow()
Jump("loc_14A5")
label("loc_C0E")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x160, 0)), scpexpr(EXPR_END)), "loc_D5A")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_CDE")
#C0036
ChrTalk(
0xFE,
(
"调查独立意向的居民投票活动\x01",
"已经渐渐临近了。\x02",
)
)
CloseMessageWindow()
#C0037
ChrTalk(
0xFE,
(
"关于这个问题,\x01",
"老爷认为应该采取\x01",
"慎重的态度来对待……\x02",
)
)
CloseMessageWindow()
#C0038
ChrTalk(
0xFE,
(
"而克洛斯贝尔的居民们\x01",
"最终又会做出怎样的选择呢……\x01",
"我对此也很有兴趣。\x02",
)
)
CloseMessageWindow()
SetScenarioFlags(0x0, 0)
Jump("loc_D55")
label("loc_CDE")
#C0039
ChrTalk(
0xFE,
(
"调查独立意向的居民投票活动\x01",
"已经渐渐临近了。\x02",
)
)
CloseMessageWindow()
#C0040
ChrTalk(
0xFE,
(
"克洛斯贝尔的居民们\x01",
"最终会做出怎样的选择呢……\x01",
"我对此也很有兴趣。\x02",
)
)
CloseMessageWindow()
label("loc_D55")
Jump("loc_14A5")
label("loc_D5A")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x141, 5)), scpexpr(EXPR_END)), "loc_DB7")
#C0041
ChrTalk(
0xFE,
(
"老爷今天也\x01",
"一大早就前往\x01",
"兰花塔了。\x02",
)
)
CloseMessageWindow()
#C0042
ChrTalk(
0xFE,
(
"希望今天的正式会议\x01",
"能顺利结束……\x02",
)
)
CloseMessageWindow()
Jump("loc_14A5")
label("loc_DB7")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x141, 0)), scpexpr(EXPR_END)), "loc_ED4")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_E6E")
#C0043
ChrTalk(
0xFE,
(
"老爷最近很忙,\x01",
"经常连家都不回,\x01",
"在市政厅过夜休息……\x02",
)
)
CloseMessageWindow()
#C0044
ChrTalk(
0xFE,
(
"刚才接到了联络,\x01",
"老爷今天总算\x01",
"要回家了。\x02",
)
)
CloseMessageWindow()
#C0045
ChrTalk(
0xFE,
(
"希望老爷养精蓄锐,\x01",
"为明天的正式会议\x01",
"做好准备。\x02",
)
)
CloseMessageWindow()
SetScenarioFlags(0x0, 0)
Jump("loc_ECF")
label("loc_E6E")
#C0046
ChrTalk(
0xFE,
(
"今晚为老爷\x01",
"准备了营养价值\x01",
"很高的料理。\x02",
)
)
CloseMessageWindow()
#C0047
ChrTalk(
0xFE,
(
"希望老爷能养足精神,\x01",
"为明天的正式会议做好准备。\x02",
)
)
CloseMessageWindow()
label("loc_ECF")
Jump("loc_14A5")
label("loc_ED4")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x140, 4)), scpexpr(EXPR_END)), "loc_102A")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_FA8")
#C0048
ChrTalk(
0xFE,
(
"在哈尔特曼担任议长的时期,\x01",
"老爷光是为了制衡帝国派与共和国派\x01",
"的议员,就耗尽了心神。\x02",
)
)
CloseMessageWindow()
#C0049
ChrTalk(
0xFE,
(
"但最近和新市长联手协力,\x01",
"总算能够与他们形成\x01",
"势均力敌之势……\x02",
)
)
CloseMessageWindow()
#C0050
ChrTalk(
0xFE,
(
"为此,我也感到\x01",
"十分高兴。\x02",
)
)
CloseMessageWindow()
SetScenarioFlags(0x0, 0)
Jump("loc_1025")
label("loc_FA8")
#C0051
ChrTalk(
0xFE,
(
"迪塔先生能成为市长,\x01",
"也让我感到十分欣喜。\x02",
)
)
CloseMessageWindow()
#C0052
ChrTalk(
0xFE,
(
"在从明天开始的正式会议中……\x01",
"希望老爷和市长都能\x01",
"充分展现自己的政治手腕。\x02",
)
)
CloseMessageWindow()
label("loc_1025")
Jump("loc_14A5")
label("loc_102A")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x140, 0)), scpexpr(EXPR_END)), "loc_10AE")
#C0053
ChrTalk(
0xFE,
(
"为了明日开始的通商会议,\x01",
"老爷和迪塔市长\x01",
"都在精心进行准备工作。\x02",
)
)
CloseMessageWindow()
#C0054
ChrTalk(
0xFE,
(
"老爷最近几乎\x01",
"都不回家了……\x01",
"真是让人担心啊。\x02",
)
)
CloseMessageWindow()
Jump("loc_14A5")
label("loc_10AE")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x128, 1)), scpexpr(EXPR_END)), "loc_1242")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_11AF")
#C0055
ChrTalk(
0xFE,
(
"不久前,有几个人搬到\x01",
"隔壁的房子居住了……\x01",
"他们很快就造成了很多问题。\x02",
)
)
CloseMessageWindow()
#C0056
ChrTalk(
0xFE,
(
"比如开着导力车在市内横冲直撞,\x01",
"半夜播放音量超大的音乐……\x01",
"这种行为实在是让人看不下去。\x02",
)
)
CloseMessageWindow()
#C0057
ChrTalk(
0xFE,
(
"但就算提出抗议,\x01",
"他们也充耳不闻……\x01",
"到底该怎么办才好呢……\x02",
)
)
CloseMessageWindow()
SetScenarioFlags(0x0, 0)
Jump("loc_123D")
label("loc_11AF")
#C0058
ChrTalk(
0xFE,
(
"不久前,有几个人搬到\x01",
"隔壁的房子居住了……\x01",
"他们的行为实在是让人难以容忍。\x02",
)
)
CloseMessageWindow()
#C0059
ChrTalk(
0xFE,
(
"但就算提出抗议,\x01",
"他们也充耳不闻……\x01",
"到底该怎么办才好呢……\x02",
)
)
CloseMessageWindow()
label("loc_123D")
Jump("loc_14A5")
label("loc_1242")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x126, 1)), scpexpr(EXPR_END)), "loc_14A5")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x134, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1440")
TurnDirection(0xFE, 0x102, 0)
#C0060
ChrTalk(
0xFE,
(
"这不是艾莉大小姐吗!\x01",
"欢迎您回来。\x02",
)
)
CloseMessageWindow()
#C0061
ChrTalk(
0x102,
"#00100F我回来了,海尔玛先生。\x02",
)
CloseMessageWindow()
#C0062
ChrTalk(
0x105,
(
"#10300F原来如此,\x01",
"这里就是艾莉的家啊。\x02",
)
)
CloseMessageWindow()
#C0063
ChrTalk(
0x109,
"#10102F真是一座大房子呢。\x02",
)
CloseMessageWindow()
#C0064
ChrTalk(
0x101,
(
"#00004F嗯……\x01",
"毕竟是麦克道尔议长\x01",
"的宅邸嘛。\x02",
)
)
CloseMessageWindow()
#C0065
ChrTalk(
0x102,
"#00109F呵呵,请大家不要拘束。\x02",
)
CloseMessageWindow()
#C0066
ChrTalk(
0xFE,
(
"是啊,各位都是\x01",
"大小姐的同事,\x01",
"请放松些,不必拘谨。\x02",
)
)
CloseMessageWindow()
#C0067
ChrTalk(
0xFE,
(
"特别任务支援科\x01",
"总算恢复工作了,\x01",
"接下来大概会非常繁忙……\x02",
)
)
CloseMessageWindow()
#C0068
ChrTalk(
0xFE,
(
"今后也请各位\x01",
"继续关照艾莉大小姐。\x02",
)
)
CloseMessageWindow()
#C0069
ChrTalk(
0x101,
"#00000F嗯,放心吧。\x02",
)
CloseMessageWindow()
#C0070
ChrTalk(
0x102,
(
"#00102F呵呵,谢谢了,海尔玛先生。\x01",
"我会继续努力的。\x02",
)
)
CloseMessageWindow()
ClearChrFlags(0xFE, 0x10)
SetScenarioFlags(0x134, 1)
Jump("loc_14A5")
label("loc_1440")
#C0071
ChrTalk(
0xFE,
(
"特别任务支援科\x01",
"总算恢复工作了,\x01",
"接下来大概会非常繁忙……\x02",
)
)
CloseMessageWindow()
#C0072
ChrTalk(
0xFE,
(
"今后也请各位\x01",
"继续关照艾莉大小姐。\x02",
)
)
CloseMessageWindow()
label("loc_14A5")
TalkEnd(0xFE)
Return()
# Function_6_43B end
def Function_7_14A9(): pass
label("Function_7_14A9")
OP_4B(0x8, 0xFF)
OP_4B(0x9, 0xFF)
TurnDirection(0x8, 0x0, 0)
TurnDirection(0x9, 0x0, 0)
#C0073
ChrTalk(
0x8,
"哦哦,各位……!\x02",
)
CloseMessageWindow()
#C0074
ChrTalk(
0x9,
(
"大、大小姐…………\x01",
"……艾莉大小姐……!!\x02",
)
)
CloseMessageWindow()
#C0075
ChrTalk(
0x102,
(
"#00100F我回来啦,海尔玛先生,乔安娜。\x01",
"……让你们担心了呢。\x02",
)
)
CloseMessageWindow()
#C0076
ChrTalk(
0x9,
"…………(哽咽)\x02",
)
CloseMessageWindow()
#C0077
ChrTalk(
0x8,
(
"自从老爷发表独立无效宣言之后,\x01",
"我一直都非常担心,\x01",
"不知你们是否平安无事……\x02",
)
)
CloseMessageWindow()
#C0078
ChrTalk(
0x8,
(
"能再次见到您,\x01",
"我也总算可以安心了。\x02",
)
)
CloseMessageWindow()
#C0079
ChrTalk(
0x102,
(
"#00100F呵呵,谢谢。\x02\x03",
"#00103F……不过,我们现在无论如何\x01",
"也必须要去某个地方。\x02\x03",
"#00101F请二位暂时留在这里等我们,\x01",
"可以吗?\x02",
)
)
CloseMessageWindow()
#C0080
ChrTalk(
0x8,
"嗯,当然。\x02",
)
CloseMessageWindow()
#C0081
ChrTalk(
0x9,
(
"艾莉大小姐,各位……\x01",
"请你们一定要小心。\x02",
)
)
CloseMessageWindow()
OP_4C(0x8, 0xFF)
OP_4C(0x9, 0xFF)
OP_93(0x8, 0x10E, 0x0)
OP_93(0x9, 0x5A, 0x0)
SetScenarioFlags(0x1CC, 7)
Return()
# Function_7_14A9 end
def Function_8_16A7(): pass
label("Function_8_16A7")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A7, 1)), scpexpr(EXPR_END)), "loc_17FD")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_17B4")
#C0082
ChrTalk(
0xFE,
(
"艾莉大小姐,\x01",
"你们要前往那棵\x01",
"诡异的大树吧……\x02",
)
)
CloseMessageWindow()
#C0083
ChrTalk(
0xFE,
"…………………………\x02",
)
CloseMessageWindow()
#C0084
ChrTalk(
0x102,
(
"#00104F别担心,乔安娜,\x01",
"我们一定会平安归来的。\x02",
)
)
CloseMessageWindow()
#C0085
ChrTalk(
0xFE,
(
"……嗯。\x01",
"至今为止,大小姐每次\x01",
"都平安回到了这里……\x02",
)
)
CloseMessageWindow()
#C0086
ChrTalk(
0xFE,
(
"所以我相信您这次也不会有事的。\x01",
"……请一定要小心。\x02",
)
)
CloseMessageWindow()
SetScenarioFlags(0x0, 1)
Jump("loc_17F8")
label("loc_17B4")
#C0087
ChrTalk(
0xFE,
(
"我相信艾莉大小姐和各位\x01",
"一定能平安归来。\x01",
"……请大家一定要小心。\x02",
)
)
CloseMessageWindow()
label("loc_17F8")
Jump("loc_2855")
label("loc_17FD")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1A5, 3)), scpexpr(EXPR_END)), "loc_1864")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1CC, 7)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1818")
Call(0, 7)
Jump("loc_185F")
label("loc_1818")
#C0088
ChrTalk(
0xFE,
"外面好像非常危险……\x02",
)
CloseMessageWindow()
#C0089
ChrTalk(
0xFE,
(
"艾莉大小姐,各位……\x01",
"请你们一定要小心。\x02",
)
)
CloseMessageWindow()
label("loc_185F")
Jump("loc_2855")
label("loc_1864")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x182, 1)), scpexpr(EXPR_END)), "loc_1A20")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x18C, 3)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_19C2")
#C0090
ChrTalk(
0xFE,
(
"老爷他……\x01",
"到底到什么地方\x01",
"去了呢……?\x02",
)
)
CloseMessageWindow()
#C0091
ChrTalk(
0xFE,
"我好担心……\x02",
)
CloseMessageWindow()
#C0092
ChrTalk(
0x101,
(
"#00003F在刚才的演说现场直播里\x01",
"也没有见到议长呢……\x02",
)
)
CloseMessageWindow()
#C0093
ChrTalk(
0x102,
"#00108F外公……到底在什么地方……\x02",
)
CloseMessageWindow()
TurnDirection(0xFE, 0x102, 500)
#C0094
ChrTalk(
0xFE,
(
"……啊啊,对不起!\x01",
"我竟然口无遮拦,\x01",
"害得大小姐也开始不安了……\x02",
)
)
CloseMessageWindow()
#C0095
ChrTalk(
0x102,
(
"#00103F……哪里,我不要紧。\x02\x03",
"#00100F乔安娜,你也不要\x01",
"太过担心哦。\x02",
)
)
CloseMessageWindow()
#C0096
ChrTalk(
0xFE,
"明、明白了……\x02",
)
CloseMessageWindow()
SetScenarioFlags(0x18C, 3)
Jump("loc_1A1B")
label("loc_19C2")
#C0097
ChrTalk(
0xFE,
(
"……我竟然口无遮拦,\x01",
"害得大小姐\x01",
"也开始不安了……\x02",
)
)
CloseMessageWindow()
#C0098
ChrTalk(
0xFE,
(
"老爷……\x01",
"一定会平安无事的……\x02",
)
)
CloseMessageWindow()
label("loc_1A1B")
Jump("loc_2855")
label("loc_1A20")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x180, 2)), scpexpr(EXPR_END)), "loc_1B27")
Jc((scpexpr(EXPR_EXEC_OP, "OP_2A(0x8F, 0x0, 0x2)"), scpexpr(EXPR_EXEC_OP, "OP_2A(0x8F, 0x0, 0x10)"), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_EXEC_OP, "OP_2A(0x8F, 0x0, 0x40)"), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x198, 4)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x199, 6)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_1A52")
Call(0, 10)
Return()
label("loc_1A52")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x199, 6)), scpexpr(EXPR_END)), "loc_1AC9")
#C0099
ChrTalk(
0xFE,
(
"……我还是…………\x01",
"决定参加\x01",
"职业女性选秀活动。\x02",
)
)
CloseMessageWindow()
#C0100
ChrTalk(
0xFE,
(
"……活动开始前请通知我吧,\x01",
"我会立刻赶过去的……\x02",
)
)
CloseMessageWindow()
Jump("loc_1B22")
label("loc_1AC9")
#C0101
ChrTalk(
0xFE,
(
"听说今天要在\x01",
"行政区举办一场\x01",
"慈善宴会……\x02",
)
)
CloseMessageWindow()
#C0102
ChrTalk(
0xFE,
(
"……有没有什么\x01",
"我能帮上忙的事情呢……\x02",
)
)
CloseMessageWindow()
label("loc_1B22")
Jump("loc_2855")
label("loc_1B27")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x165, 5)), scpexpr(EXPR_END)), "loc_1B89")
#C0103
ChrTalk(
0xFE,
"矿山镇竟然被占领了……\x02",
)
CloseMessageWindow()
#C0104
ChrTalk(
0xFE,
(
"……我好害怕……\x01",
"总觉得接下来\x01",
"还会发生什么事情……\x02",
)
)
CloseMessageWindow()
Jump("loc_2855")
label("loc_1B89")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x164, 0)), scpexpr(EXPR_END)), "loc_1BD4")
#C0105
ChrTalk(
0xFE,
"最近经常下雨啊……\x02",
)
CloseMessageWindow()
#C0106
ChrTalk(
0xFE,
(
"……洗好的衣物都晒不干。\x01",
"呼……\x02",
)
)
CloseMessageWindow()
Jump("loc_2855")
label("loc_1BD4")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x162, 5)), scpexpr(EXPR_END)), "loc_1BE2")
Jump("loc_2855")
label("loc_1BE2")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x162, 0)), scpexpr(EXPR_END)), "loc_1D6E")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1D10")
#C0107
ChrTalk(
0xFE,
(
"老爷最近特别繁忙,\x01",
"得用心为他准备些有营养的食物……\x02",
)
)
CloseMessageWindow()
#C0108
ChrTalk(
0xFE,
"……做什么料理才好呢?\x02",
)
CloseMessageWindow()
#C0109
ChrTalk(
0x102,
(
"#00100F这个嘛……\x01",
"羔羊肉如何呢?\x02\x03",
"#00104F高蛋白,低热量,\x01",
"而且应该很合外公的口味。\x02",
)
)
CloseMessageWindow()
TurnDirection(0xFE, 0x102, 500)
#C0110
ChrTalk(
0xFE,
(
"……似乎不错呢……\x01",
"真不愧是艾莉大小姐。\x02",
)
)
CloseMessageWindow()
#C0111
ChrTalk(
0x102,
(
"#00109F啊、啊哈哈,\x01",
"这点小事不值得夸奖啦……\x02",
)
)
CloseMessageWindow()
SetScenarioFlags(0x0, 1)
Jump("loc_1D69")
label("loc_1D10")
#C0112
ChrTalk(
0xFE,
(
"我的饭量很小,\x01",
"一般不吃肉,\x01",
"不过做给老爷应该不错呢。\x02",
)
)
CloseMessageWindow()
#C0113
ChrTalk(
0xFE,
(
"一会得去百货店\x01",
"买食材……\x02",
)
)
CloseMessageWindow()
label("loc_1D69")
Jump("loc_2855")
label("loc_1D6E")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x160, 0)), scpexpr(EXPR_END)), "loc_1EC0")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1E94")
#C0114
ChrTalk(
0xFE,
(
"听说在前不久召开的通商会议中,\x01",
"有恐怖分子发动了袭击……\x01",
"听到这消息时,我的心脏都快停止跳动了。\x02",
)
)
CloseMessageWindow()
#C0115
ChrTalk(
0xFE,
(
"自那之后,我一直非常担心\x01",
"艾莉大小姐和老爷……\x02",
)
)
CloseMessageWindow()
#C0116
ChrTalk(
0x102,
(
"#00103F乔安娜……\x01",
"你不用那么担心的。\x02\x03",
"#00100F有支援科的同伴陪着我……\x01",
"一定不会出什么事的。\x02",
)
)
CloseMessageWindow()
#C0117
ChrTalk(
0xFE,
"嗯……是啊……\x02",
)
CloseMessageWindow()
SetScenarioFlags(0x0, 1)
Jump("loc_1EBB")
label("loc_1E94")
#C0118
ChrTalk(
0xFE,
(
"各位……\x01",
"艾莉大小姐就拜托你们了。\x02",
)
)
CloseMessageWindow()
label("loc_1EBB")
Jump("loc_2855")
label("loc_1EC0")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x141, 5)), scpexpr(EXPR_END)), "loc_1F17")
#C0119
ChrTalk(
0xFE,
(
"艾莉大小姐……\x01",
"市里今天好像也维持着戒严状态。\x02",
)
)
CloseMessageWindow()
#C0120
ChrTalk(
0xFE,
"请您一定小心……\x02",
)
CloseMessageWindow()
Jump("loc_2855")
label("loc_1F17")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x141, 0)), scpexpr(EXPR_END)), "loc_208D")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2002")
#C0121
ChrTalk(
0xFE,
(
"今天的晚餐\x01",
"是老爷最喜欢的\x01",
"苦西红柿料理。\x02",
)
)
CloseMessageWindow()
#C0122
ChrTalk(
0xFE,
(
"苦西红柿沙拉、\x01",
"苦西红柿酱的薏面,\x01",
"还有100%浓度的苦西红柿汁……\x02",
)
)
CloseMessageWindow()
#C0123
ChrTalk(
0x101,
(
"#00005F(哇……\x01",
" 好极端的菜单啊。)\x02",
)
)
CloseMessageWindow()
#C0124
ChrTalk(
0x102,
(
"#00106F(外公竟然那么喜欢\x01",
" 苦西红柿……)\x02",
)
)
CloseMessageWindow()
SetScenarioFlags(0x0, 1)
Jump("loc_2088")
label("loc_2002")
#C0125
ChrTalk(
0xFE,
(
"我今天做了老爷最喜欢的\x01",
"苦西红柿料理。\x02",
)
)
CloseMessageWindow()
#C0126
ChrTalk(
0xFE,
(
"听说吃苦西红柿有利于健康,\x01",
"我也准备忍耐着\x01",
"试试……\x02",
)
)
CloseMessageWindow()
#C0127
ChrTalk(
0x102,
"#00105F可、可不要太勉强哦……\x02",
)
CloseMessageWindow()
label("loc_2088")
Jump("loc_2855")
label("loc_208D")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x140, 4)), scpexpr(EXPR_END)), "loc_21D3")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2151")
#C0128
ChrTalk(
0xFE,
(
"今天早上,我正好在门口\x01",
"遇到了住在隔壁的那几个人……\x02",
)
)
CloseMessageWindow()
#C0129
ChrTalk(
0xFE,
(
"他们突然就对我发出邀请,\x01",
"说『一起去兜风吧』。\x02",
)
)
CloseMessageWindow()
#C0130
ChrTalk(
0xFE,
(
"我自然是礼貌地回绝了,\x01",
"总觉得那些人实在是欠缺教养啊……\x02",
)
)
CloseMessageWindow()
SetScenarioFlags(0x0, 1)
Jump("loc_21CE")
label("loc_2151")
#C0131
ChrTalk(
0xFE,
(
"今天早上,住在隔壁的\x01",
"那几个人突然邀请我\x01",
"和他们一起去兜风。\x02",
)
)
CloseMessageWindow()
#C0132
ChrTalk(
0xFE,
(
"我自然是礼貌地回绝了,\x01",
"总觉得那些人实在是欠缺教养啊……\x02",
)
)
CloseMessageWindow()
label("loc_21CE")
Jump("loc_2855")
label("loc_21D3")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x140, 0)), scpexpr(EXPR_END)), "loc_224E")
#C0133
ChrTalk(
0xFE,
(
"新市政厅大楼\x01",
"明天就要正式揭幕了……\x02",
)
)
CloseMessageWindow()
#C0134
ChrTalk(
0xFE,
(
"就算隔着帷幕,\x01",
"都能充分感受到它的魄力,\x01",
"简直让人头昏目眩呢……\x02",
)
)
CloseMessageWindow()
Jump("loc_2855")
label("loc_224E")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x128, 1)), scpexpr(EXPR_END)), "loc_238B")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2337")
#C0135
ChrTalk(
0xFE,
"(吱吱……吱吱……)\x02",
)
CloseMessageWindow()
#C0136
ChrTalk(
0x102,
(
"#00105F哎,乔安娜,\x01",
"你在窗户上画东西吗?\x02",
)
)
CloseMessageWindow()
OP_63(0x9, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
Sound(28, 0, 100, 0)
Sleep(1000)
TurnDirection(0x9, 0x102, 1000)
Sleep(1000)
#C0137
ChrTalk(
0xFE,
(
"……窗、窗户上蒙了一层白雾,\x01",
"所以我不由自主就……\x02",
)
)
CloseMessageWindow()
#C0138
ChrTalk(
0xFE,
(
"失、失礼了,\x01",
"我这就继续做扫除。\x02",
)
)
CloseMessageWindow()
SetScenarioFlags(0x0, 1)
ClearChrFlags(0x9, 0x10)
Jump("loc_2386")
label("loc_2337")
#C0139
ChrTalk(
0xFE,
(
"不知为何,只要听到雨声,\x01",
"我就会分心走神……\x02",
)
)
CloseMessageWindow()
#C0140
ChrTalk(
0xFE,
"唉,真是不喜欢下雨天啊。\x02",
)
CloseMessageWindow()
label("loc_2386")
Jump("loc_2855")
label("loc_238B")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x126, 1)), scpexpr(EXPR_END)), "loc_2855")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x134, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_27DF")
OP_63(0xFE, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
TurnDirection(0x9, 0x102, 0)
Sleep(1000)
#C0141
ChrTalk(
0xFE,
"啊……\x02",
)
CloseMessageWindow()
#C0142
ChrTalk(
0xFE,
"欢迎回来,艾莉大小姐。\x02",
)
CloseMessageWindow()
#C0143
ChrTalk(
0x102,
(
"#00100F我回来了,乔安娜,\x01",
"今天没有什么异常情况吧?\x02",
)
)
CloseMessageWindow()
#C0144
ChrTalk(
0xFE,
"是的……托您的福。\x02",
)
CloseMessageWindow()
#C0145
ChrTalk(
0xFE,
(
"大小姐外出旅行归来,\x01",
"我也总算可以安心了。\x02",
)
)
CloseMessageWindow()
#C0146
ChrTalk(
0x102,
(
"#00102F呵呵,乔安娜,你可真是的……\x01",
"根本不用那么担心啊。\x02",
)
)
CloseMessageWindow()
#C0147
ChrTalk(
0xFE,
(
"不,对我来说,大小姐\x01",
"和老爷就是一切……\x02",
)
)
CloseMessageWindow()
#C0148
ChrTalk(
0x105,
"#10309F呵呵,真是一位关怀主人的女仆小姐啊。\x02",
)
CloseMessageWindow()
OP_63(0xFE, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)
Sleep(1000)
OP_64(0xFE)
#C0149
ChrTalk(
0xFE,
(
"……那个,艾莉大小姐,\x01",
"前几天收到了先生和小姐\x01",
"寄来的信……\x02",
)
)
CloseMessageWindow()
OP_63(0x0, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
OP_63(0x1, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
OP_63(0x2, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
OP_63(0x3, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
Sleep(1000)
#C0150
ChrTalk(
0x101,
"#00005F那是……\x02",
)
CloseMessageWindow()
#C0151
ChrTalk(
0x102,
(
"#00103F……嗯,是我分别居住在共和国\x01",
"和帝国的父母。\x02\x03",
"#00100F他们以前也时常来信,\x01",
"自从教团那起事件结束之后,\x01",
"似乎寄得比以前更加频繁了。\x02\x03",
"#00104F信中内容主要都是表达\x01",
"对我和外公的关心,\x01",
"最近都成为我的心灵支柱之一了呢。\x02",
)
)
CloseMessageWindow()
#C0152
ChrTalk(
0x109,
"#10105F这样啊……\x02",
)
CloseMessageWindow()
#C0153
ChrTalk(
0x105,
"#10303F(……亲人……吗……)\x02",
)
CloseMessageWindow()
#C0154
ChrTalk(
0x102,
(
"#00100F乔安娜,我稍后会去看的,\x01",
"请你帮我仔细保管好哦。\x02",
)
)
CloseMessageWindow()
#C0155
ChrTalk(
0xFE,
"是……谨遵吩咐。\x02",
)
CloseMessageWindow()
#C0156
ChrTalk(
0xFE,
(
"那个,听说特别任务支援科\x01",
"已经恢复工作了……\x01",
"请您一定要注意身体。\x02",
)
)
CloseMessageWindow()
#C0157
ChrTalk(
0xFE,
(
"老爷、先生以及小姐……\x01",
"大家全都在挂念着大小姐。\x01",
"我自然也是一样。\x02",
)
)
CloseMessageWindow()
#C0158
ChrTalk(
0x102,
(
"#00109F呵呵,我明白的。\x01",
"谢谢你,乔安娜。\x02",
)
)
CloseMessageWindow()
SetScenarioFlags(0x134, 2)
Jump("loc_2855")
label("loc_27DF")
TurnDirection(0x9, 0x102, 0)
#C0159
ChrTalk(
0xFE,
(
"艾莉大小姐,\x01",
"请一定要注意保重身体。\x02",
)
)
CloseMessageWindow()
#C0160
ChrTalk(
0xFE,
(
"老爷、先生以及小姐……\x01",
"大家全都在挂念着大小姐。\x01",
"我自然也是一样。\x02",
)
)
CloseMessageWindow()
label("loc_2855")
TalkEnd(0xFE)
Return()
# Function_8_16A7 end
def Function_9_2859(): pass
label("Function_9_2859")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x177, 5)), scpexpr(EXPR_END)), "loc_28F6")
TalkBegin(0xFF)
FadeToDark(300, 0, 100)
OP_0D()
SetMessageWindowPos(-1, 30, -1, -1)
SetChrName("")
#A0161
AnonymousTalk(
0xFF,
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
scpstr(0x6),
scpstr(0x18),
"#1K要阅读昆西公司的宣传手册吗?\x07\x00\x02",
)
)
Menu(
0,
-1,
-1,
1,
(
"阅读\x01", # 0
"不阅读\x01", # 1
)
)
MenuEnd(0x0)
OP_60(0x0)
OP_57(0x0)
SetMessageWindowPos(14, 280, 60, 3)
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_28E5")
Call(0, 11)
TalkEnd(0xFF)
Jump("loc_28F1")
label("loc_28E5")
FadeToBright(300, 0)
TalkEnd(0xFF)
label("loc_28F1")
Jump("loc_33A4")
label("loc_28F6")
EventBegin(0x0)
FadeToDark(1000, 0, -1)
OP_0D()
OP_68(-41830, 1500, 40450, 0)
MoveCamera(52, 27, 0, 0)
OP_6E(400, 0)
SetCameraDistance(18820, 0)
SetChrPos(0x101, -43440, 60, 40740, 90)
SetChrPos(0x102, -41310, 0, 40520, 90)
SetChrPos(0x103, -42890, 0, 39580, 45)
SetChrPos(0x104, -42860, 60, 41860, 135)
SetChrPos(0x109, -42020, 0, 38900, 0)
SetChrPos(0x105, -41650, 0, 42460, 180)
ClearChrFlags(0x4, 0x80)
ClearChrBattleFlags(0x4, 0x8000)
ClearChrFlags(0x5, 0x80)
ClearChrBattleFlags(0x5, 0x8000)
FadeToBright(1000, 0)
OP_0D()
#C0162
ChrTalk(
0x102,
"#00105F唔……找到了。\x02",
)
CloseMessageWindow()
Sound(802, 0, 100, 0)
Sleep(400)
OP_93(0x102, 0x10E, 0x1F4)
#C0163
ChrTalk(
0x102,
(
"#00100F这就是昆西公司\x01",
"的宣传手册。\x02",
)
)
CloseMessageWindow()
#C0164
ChrTalk(
0x104,
(
"#00305F嘿……\x01",
"装订得很精美啊。\x02\x03",
"#00300F看起来并不像是\x01",
"普通的资料呢。\x02",
)
)
CloseMessageWindow()
#C0165
ChrTalk(
0x103,
(
"#00200F只有大企业才会\x01",
"在这种细节方面如此讲究。\x02\x03",
"看来这本手册中的内容\x01",
"有很高的可信度。\x02",
)
)
CloseMessageWindow()
#C0166
ChrTalk(
0x102,
"#00109F呵呵,那就好。\x02",
)
CloseMessageWindow()
#C0167
ChrTalk(
0x101,
(
"#00001F好……\x01",
"我们先来粗略浏览一下吧。\x02",
)
)
CloseMessageWindow()
Call(0, 11)
OP_63(0x101, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)
OP_63(0x102, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)
OP_63(0x103, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)
OP_63(0x104, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)
OP_63(0x109, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)
OP_63(0x105, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)
Sleep(2000)
OP_64(0x101)
OP_64(0x102)
OP_64(0x103)
OP_64(0x104)
OP_64(0x109)
OP_64(0x105)
#C0168
ChrTalk(
0x105,
(
"#10303F嗯,已经大致看了一遍……\x01",
"但并没找到什么重要资料呢。\x02",
)
)
CloseMessageWindow()
#C0169
ChrTalk(
0x109,
(
"#10105F有没有发现什么与敏涅斯的话\x01",
"有矛盾的内容呢……?\x02",
)
)
CloseMessageWindow()
#C0170
ChrTalk(
0x102,
(
"#00106F唔~这个……\x01",
"在这种资料中果然还是\x01",
"不会有什么收获……\x02",
)
)
CloseMessageWindow()
#C0171
ChrTalk(
0x101,
"#00003F不……我发现矛盾了。\x02",
)
CloseMessageWindow()
OP_63(0x102, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
Sleep(50)
OP_63(0x103, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
Sleep(50)
OP_63(0x104, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
Sleep(50)
OP_63(0x109, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
Sleep(50)
OP_63(0x105, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
Sleep(1000)
def lambda_2CC4():
TurnDirection(0xFE, 0x101, 500)
ExitThread()
QueueWorkItem(0x103, 1, lambda_2CC4)
Sleep(50)
def lambda_2CD4():
TurnDirection(0xFE, 0x101, 500)
ExitThread()
QueueWorkItem(0x104, 1, lambda_2CD4)
Sleep(50)
def lambda_2CE4():
TurnDirection(0xFE, 0x101, 500)
ExitThread()
QueueWorkItem(0x109, 1, lambda_2CE4)
Sleep(50)
def lambda_2CF4():
TurnDirection(0xFE, 0x101, 500)
ExitThread()
QueueWorkItem(0x105, 1, lambda_2CF4)
#C0172
ChrTalk(
0x103,
"#00205F……真的吗?\x02",
)
CloseMessageWindow()
#C0173
ChrTalk(
0x104,
(
"#00309F哈哈,你还是\x01",
"这么靠得住啊。\x02\x03",
"#00300F那就说说吧,到底是什么矛盾?\x02",
)
)
CloseMessageWindow()
#C0174
ChrTalk(
0x101,
(
"#00004F只要仔细回想一下我们昨天\x01",
"在酒店中与敏涅斯的对话,\x01",
"也就不难得出答案了。\x02\x03",
"#00000F敏涅斯随口说出的一句牢骚话……\x01",
"与手册中的内容存在着明显矛盾。\x02\x03",
"那正是敏涅斯\x01",
"并非『昆西公司董事』\x01",
"的证据……\x02",
)
)
CloseMessageWindow()
#C0175
ChrTalk(
0x109,
(
"#10105F这、这本手册中\x01",
"竟然有那么重要的线索……?\x02",
)
)
CloseMessageWindow()
#C0176
ChrTalk(
0x101,
"#00000F嗯,那句话就是——\x02",
)
CloseMessageWindow()
#C0177
ChrTalk(
0x105,
(
"#10304F等一下,\x01",
"暂时还是不要说出来了。\x02",
)
)
CloseMessageWindow()
def lambda_2EAF():
TurnDirection(0xFE, 0x105, 500)
ExitThread()
QueueWorkItem(0x101, 1, lambda_2EAF)
OP_63(0x101, 0x0, 2000, 0x0, 0x1, 0xFA, 0x2)
Sound(29, 0, 100, 0)
Sleep(1000)
#C0178
ChrTalk(
0x101,
"#00005F哎……为什么呢?\x02",
)
CloseMessageWindow()
#C0179
ChrTalk(
0x105,
(
"#10300F呵呵,只有你一个人想到答案,\x01",
"未免让人不甘心。\x02\x03",
"#10309F所以,在揭穿敏涅斯\x01",
"之前暂时保密,就当作\x01",
"留给大家的作业如何?\x02",
)
)
CloseMessageWindow()
#C0180
ChrTalk(
0x101,
(
"#00006F那、那个……\x01",
"这又不是在做游戏……\x02",
)
)
CloseMessageWindow()
#C0181
ChrTalk(
0x103,
(
"#00203F不,我认为瓦吉先生\x01",
"说的很有道理。\x02",
)
)
CloseMessageWindow()
def lambda_2FCF():
TurnDirection(0xFE, 0x103, 500)
ExitThread()
QueueWorkItem(0x101, 1, lambda_2FCF)
Sleep(100)
#C0182
ChrTalk(
0x103,
(
"#00203F罗伊德前辈的想法\x01",
"也存在错误的可能性,\x01",
"如果现在就统一意见,多少有些危险。\x02\x03",
"#00211F而且,每次都\x01",
"被罗伊德前辈比下去,\x01",
"实在是让人有些不爽。\x02",
)
)
CloseMessageWindow()
OP_63(0x101, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1)
Sound(23, 0, 100, 0)
Sleep(1000)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x20, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_314C")
RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
FadeToDark(300, 0, 100)
OP_0D()
SetMessageWindowPos(-1, 30, -1, -1)
SetChrName("")
#A0183
AnonymousTalk(
0xFF,
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
scpstr(0x6),
scpstr(0x18),
"◆IBC事件(测试用)\x07\x00\x02",
)
)
Menu(
0,
-1,
-1,
0,
(
"【不做变更】\x01", # 0
"【已调查】\x01", # 1
"【未调查】\x01", # 2
)
)
MenuEnd(0x0)
OP_60(0x0)
OP_57(0x0)
SetMessageWindowPos(14, 280, 60, 3)
FadeToBright(300, 0)
OP_0D()
Switch(
(scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_END)),
(0, "loc_3137"),
(1, "loc_313C"),
(2, "loc_3144"),
(SWITCH_DEFAULT, "loc_314C"),
)
label("loc_3137")
Jump("loc_314C")
label("loc_313C")
SetScenarioFlags(0x177, 4)
Jump("loc_314C")
label("loc_3144")
ClearScenarioFlags(0x177, 4)
Jump("loc_314C")
label("loc_314C")
OP_29(0x87, 0x1, 0x3)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x177, 4)), scpexpr(EXPR_END)), "loc_327F")
#C0184
ChrTalk(
0x101,
(
"#00006F(后面那些才是她的真心话吧……)\x02\x03",
"#00001F我、我明白了。\x01",
"既然如此,这个问题\x01",
"就留给大家继续思考……\x02\x03",
"至于这本资料中的要点部分,\x01",
"最好记录在调查手册中。\x02\x03",
"#00003F好……我们已经收集到\x01",
"不少可以证明敏涅斯\x01",
"行事可疑的证据了。\x02\x03",
"#00000F先回哈罗德\x01",
"先生家吧。\x02",
)
)
CloseMessageWindow()
#C0185
ChrTalk(
0x102,
"#00100F嗯,走吧。\x02",
)
CloseMessageWindow()
OP_29(0x87, 0x1, 0x4)
Jump("loc_336B")
label("loc_327F")
#C0186
ChrTalk(
0x101,
(
"#00006F(后面那些才是她的真心话吧……)\x02\x03",
"#00001F我、我明白了。\x01",
"既然如此,这个问题\x01",
"就留给大家继续思考……\x02\x03",
"至于这本资料中的要点部分,\x01",
"最好记录在调查手册中。\x02\x03",
"#00003F……接下来还要去IBC调查,\x01",
"尽快行动吧。\x02",
)
)
CloseMessageWindow()
#C0187
ChrTalk(
0x102,
"#00100F嗯,明白了。\x02",
)
CloseMessageWindow()
label("loc_336B")
FadeToDark(1000, 0, -1)
OP_0D()
SetScenarioFlags(0x177, 5)
SetChrPos(0x0, -43000, 60, 40720, 90)
OP_69(0xFF, 0x0)
SetChrFlags(0x4, 0x80)
SetChrBattleFlags(0x4, 0x8000)
SetChrFlags(0x5, 0x80)
SetChrBattleFlags(0x5, 0x8000)
EventEnd(0x5)
label("loc_33A4")
Return()
# Function_9_2859 end
def Function_10_33A5(): pass
label("Function_10_33A5")
EventBegin(0x0)
Fade(500)
OP_68(-45700, 1560, 2610, 0)
MoveCamera(38, 32, 0, 0)
OP_6E(340, 0)
SetCameraDistance(23160, 0)
SetChrPos(0x101, -44780, 60, 2500, 0)
SetChrPos(0x102, -45960, 0, 2500, 0)
SetChrPos(0x103, -46730, 0, 1660, 0)
SetChrPos(0x104, -44030, 0, 1620, 0)
SetChrPos(0x105, -45860, 0, 980, 0)
SetChrPos(0x109, -44820, 0, 940, 0)
ClearChrFlags(0x4, 0x80)
ClearChrBattleFlags(0x4, 0x8000)
ClearChrFlags(0x5, 0x80)
ClearChrBattleFlags(0x5, 0x8000)
OP_4B(0x9, 0xFF)
OP_93(0x9, 0xB4, 0x0)
OP_0D()
#C0188
ChrTalk(
0x9,
(
"啊……\x01",
"艾莉大小姐,各位……\x02",
)
)
CloseMessageWindow()
#C0189
ChrTalk(
0x105,
(
"#10300F(职业女性选秀活动中的『女仆』……\x01",
" 邀请她来担当如何?)\x02",
)
)
CloseMessageWindow()
#C0190
ChrTalk(
0x101,
(
"#00003F(是啊……\x01",
" 这主意不错。)\x02\x03",
"#00000F(艾莉,\x01",
" 你去问问她可以吗?)\x02",
)
)
CloseMessageWindow()
#C0191
ChrTalk(
0x102,
(
"#00102F(明白了,\x01",
" 不过我觉得很难成功……)\x02\x03",
"#00100F那个,乔安娜,\x01",
"有件事情想请你帮忙……\x02",
)
)
CloseMessageWindow()
#C0192
ChrTalk(
0x9,
(
"好的……\x01",
"只要是艾莉大小姐的请求,\x01",
"无论要我做什么都可以。\x02",
)
)
CloseMessageWindow()
SetChrName("")
#A0193
AnonymousTalk(
0xFF,
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"艾莉邀请乔安娜参加\x01",
"慈善宴会中的职业女性选秀活动。\x07\x00\x02",
)
)
CloseMessageWindow()
OP_57(0x0)
OP_5A()
#C0194
ChrTalk(
0x9,
(
"……啊……\x01",
"选、选秀………………\x02",
)
)
CloseMessageWindow()
OP_63(0x9, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)
Sleep(2000)
OP_64(0x9)
OP_82(0x64, 0x0, 0xBB8, 0x12C)
#C0195
ChrTalk(
0x9,
"#4S……咦咦咦咦咦咦咦!?\x02",
)
CloseMessageWindow()
#C0196
ChrTalk(
0x103,
"#00205F……好像很吃惊呢。\x02",
)
CloseMessageWindow()
#C0197
ChrTalk(
0x9,
(
"唔、唔唔,我不行的……\x01",
"我怎么能参加什么职业女性选秀……\x02",
)
)
CloseMessageWindow()
#C0198
ChrTalk(
0x104,
(
"#00302F不不不,绝对没问题的,\x01",
"大哥哥我可以向你保证。\x02",
)
)
CloseMessageWindow()
#C0199
ChrTalk(
0x101,
"#00006F你拿什么保证啊……\x02",
)
CloseMessageWindow()
#C0200
ChrTalk(
0x102,
(
"#00105F那、那个,乔安娜,\x01",
"不用太介意哦。\x02\x03",
"#00103F我们再去找找,\x01",
"应该会有其他的\x01",
"女仆小姐愿意参加……\x02",
)
)
CloseMessageWindow()
OP_63(0x9, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
Sound(28, 0, 100, 0)
Sleep(1000)
#C0201
ChrTalk(
0x9,
"………………………………\x02",
)
CloseMessageWindow()
#C0202
ChrTalk(
0x9,
(
"……那个,我…………\x01",
"还是让我参加吧。\x02",
)
)
CloseMessageWindow()
#C0203
ChrTalk(
0x109,
(
"#10105F主、主意变得好快啊。\x01",
"这虽然再好不过,可是你……\x02",
)
)
CloseMessageWindow()
#C0204
ChrTalk(
0x9,
(
"因、因为我才是……\x01",
"……大小姐的女仆…………\x02",
)
)
CloseMessageWindow()
#C0205
ChrTalk(
0x102,
(
"#00102F呵呵,谢谢啦,乔安娜。\x01",
"不过没必要勉强自己哦。\x02",
)
)
CloseMessageWindow()
#C0206
ChrTalk(
0x9,
(
"……活动开始前请通知我吧,\x01",
"我会立刻赶过去的…………\x02",
)
)
CloseMessageWindow()
#C0207
ChrTalk(
0x101,
"#00000F嗯,拜托你了。\x02",
)
CloseMessageWindow()
OP_29(0x8F, 0x1, 0x3)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x199, 4)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x199, 5)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x199, 7)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_3956")
#C0208
ChrTalk(
0x101,
(
"#00003F好,我们总算\x01",
"把参选者找齐了。\x02\x03",
"#00000F这就去市民会馆,\x01",
"向洛依先生他们报告吧。\x02",
)
)
CloseMessageWindow()
OP_29(0x8F, 0x1, 0x5)
label("loc_3956")
SetScenarioFlags(0x199, 6)
OP_4C(0x9, 0xFF)
OP_93(0x9, 0x0, 0x0)
OP_69(0xFF, 0x0)
SetChrPos(0x0, -45350, 60, 2400, 180)
SetChrFlags(0x4, 0x80)
SetChrBattleFlags(0x4, 0x8000)
SetChrFlags(0x5, 0x80)
SetChrBattleFlags(0x5, 0x8000)
EventEnd(0x5)
Return()
# Function_10_33A5 end
def Function_11_3990(): pass
label("Function_11_3990")
FadeToDark(300, 0, 100)
OP_0D()
SetMessageWindowPos(-1, -1, -1, -1)
Sound(18, 0, 100, 0)
Sleep(300)
SetChrName("")
#A0209
AnonymousTalk(
0xFF,
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"#3S……本公司身为糕点制造业界的领头羊,\x01",
"为了糕点制造业的未来,始终在不断钻研。\x01",
"本手册将会为您展示\x01",
"本公司的部分方面。\x07\x00\x02",
)
)
CloseMessageWindow()
OP_57(0x0)
Sound(18, 0, 100, 0)
SetChrName("")
#A0210
AnonymousTalk(
0xFF,
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"#3S对于糕点而言,\x01",
"最重要的就是\x01",
"能否让食用者感到『美味』。\x01",
"为此,本公司在\x01",
"『提高糕点的品质』\x01",
"这一点上绝对不会妥协。\x07\x00\x02",
)
)
CloseMessageWindow()
OP_57(0x0)
Sound(18, 0, 100, 0)
SetChrName("")
#A0211
AnonymousTalk(
0xFF,
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"#3S糕点制造工厂中配备了最新型设备,\x01",
"卫生方面也采取了最完善的处理措施,\x01",
"这些基本条件自不必说。\x01",
"至于糕点原材料的品质与产地,\x01",
"本公司也有着严格的要求。\x01",
"此外,关于商品开发这一过程,\x01",
"本公司也制定了严谨的步骤与基准。\x07\x00\x02",
)
)
CloseMessageWindow()
OP_57(0x0)
Sound(18, 0, 100, 0)
SetChrName("")
#A0212
AnonymousTalk(
0xFF,
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"#3S董事要亲自试吃开发中的商品,\x01",
"以便判断其上市销售的可行性。\x01",
"之后,还要经过多次企划会议讨论,\x01",
"才会正式投入到生产线。\x01",
"这都是为了能给顾客献上\x01",
"最美味的糕点,给顾客最好的享受,\x01",
"而从公司初创时便一直继承下来的传统。\x07\x00\x02",
)
)
CloseMessageWindow()
OP_57(0x0)
Sound(18, 0, 100, 0)
SetChrName("")
#A0213
AnonymousTalk(
0xFF,
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"#3S昆西公司正是因为长期\x01",
"给顾客提供高品质的糕点,\x01",
"才能获得如今的成就……\x07\x00\x02",
)
)
CloseMessageWindow()
OP_57(0x0)
Sound(18, 0, 100, 0)
FadeToBright(300, 0)
OP_5A()
SetMessageWindowPos(14, 280, 60, 3)
Return()
# Function_11_3990 end
SaveToFile()
Try(main)
|
[
"zj.yang@qq.com"
] |
zj.yang@qq.com
|
11f4876869aa2dad11abfebfef87a6527d005b2b
|
06fe16763861e556eeb71a8ae81ceff9b59581db
|
/autotools/optimizetask.py
|
8ed63647627df6f08deabde0bffd56f178897caf
|
[] |
no_license
|
hehaoslj/FSimulator
|
9961dc8fee41e1f7fb5862e127294fef0b39c85c
|
6f4b79c23f3265ea11b31b09f70c822d8704e8fb
|
refs/heads/master
| 2020-05-29T08:48:47.889577
| 2017-06-12T08:01:56
| 2017-06-12T08:01:56
| 69,776,028
| 2
| 0
| null | 2017-06-11T17:21:02
| 2016-10-02T03:03:29
|
C++
|
UTF-8
|
Python
| false
| false
| 918
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cffi
import os
import sys
ffibuilder = cffi.FFI()
ffibuilder.set_source("optimizetask", """
""")
init_code = """#!/usr/bin/env python
# -*- coding: utf-8 -*-
from optimizetask import ffi
"""
init_api=""
replace_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "base_config.py")
code_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "optimizehandle.py")
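# Build the embedding interface from optimizehandle.py: a "def" line that
# directly follows a "#" comment is treated as an exported callback. The
# comment text (minus the "#") becomes its prototype for embedding_api(),
# and @ffi.def_extern() is prepended to the function in the init code.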
with open(code_file, "r") as f:
last_ln=""
for ln in f.readlines():
if ln[:4] =="def " and last_ln[:1] == "#":
init_code += "@ffi.def_extern()\n"
init_api += last_ln.replace("#", "")
init_code += ln
last_ln = ln
print(init_api)
ffibuilder.embedding_init_code(init_code)
ffibuilder.embedding_api(init_api)
ffibuilder.compile(target="liboptimizetask-1.0.*", verbose=True)
|
[
"hehaoslj@sina.com"
] |
hehaoslj@sina.com
|
ddff6a97ebf3aaffa32bcca7a455e9df667bac7b
|
1c7483b7f679d41850642101b77267c88e4d2252
|
/book/implement/lucky_strike.py
|
14a45e787e90b690e103b88880fac705577bbf54
|
[] |
no_license
|
julianne03/ready-coding-test
|
1864e460823f41f68c3c7491f397c80037a08919
|
27b0e66b4b69800fa41b96562dac5103fee3251f
|
refs/heads/master
| 2023-04-26T16:20:35.588176
| 2021-05-19T08:37:02
| 2021-05-19T08:37:02
| 358,656,208
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
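# "Lucky straight" check: the ticket is LUCKY when the digits in the left
# half of the input sum to the same value as the digits in the right half.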
numbers = input()
num_sum = 0
for i in range(len(numbers) // 2) :
num_sum += int(numbers[i])
for i in range(len(numbers) // 2, len(numbers)) :
num_sum -= int(numbers[i])
if num_sum == 0 :
print('LUCKY')
else :
print('READY')
|
[
""
] | |
e7d34241c79b9cfc264743c12b1575090c6fe6ab
|
3169075a364c2d713e5cae0566f81e02c0afe044
|
/lecture_length.py
|
a9ea39d7dbec6f949929d0c8159ed03fdcb8db94
|
[] |
no_license
|
dkok97/average_length_of_lectures
|
4145ea70c7656d9f3700e1472e7e5b6e4db6add8
|
4e7bc2676d377d52ff3b02c9da1a64e04f021860
|
refs/heads/master
| 2021-01-24T01:28:28.007216
| 2018-04-10T05:09:19
| 2018-04-10T05:09:19
| 122,808,507
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,894
|
py
|
import json
import time
import csv
from pprint import pprint
base = {
'major': {
'school': 'Engineering',
'NorthOrSouth': 'South',
'Fall': {
'upper': {
'avg_lecture_size': 0,
'avg_lecture_length_day': 0,
'avg_num_lectures_week': 0,
'avg_lecture_length_week': 0,
},
'lower': {
'avg_lecture_size': 0,
'avg_lecture_length_day': 0,
'avg_num_lectures_week': 0,
'avg_lecture_length_week': 0
},
},
'Winter': {
'upper': {
'avg_lecture_size': 0,
'avg_lecture_length_day': 0,
'avg_num_lectures_week': 0,
'avg_lecture_length_week': 0
},
'lower': {
'avg_lecture_size': 0,
'avg_lecture_length_day': 0,
'avg_num_lectures_week': 0,
'avg_lecture_length_week': 0
},
},
'Spring': {
'upper': {
'avg_lecture_size': 0,
'avg_lecture_length_day': 0,
'avg_num_lectures_week': 0,
'avg_lecture_length_week': 0
},
'lower': {
'avg_lecture_size': 0,
'avg_lecture_length_day': 0,
'avg_num_lectures_week': 0,
'avg_lecture_length_week': 0
}
}
}
}
output = {}
majors = {}
data = json.load(open('Spring2018.json'))
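# Convert two [hour, "MMam"/"MMpm"] token lists (the result of splitting an
# "H:MMam" string on ":") into minutes and return the difference in minutes.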
def get_length(from_time, to_time):
if (len(from_time)==1):
from_time.append('00'+from_time[0][-2:])
from_time[0]=from_time[0][:-2]
if (len(to_time)==1):
to_time.append('00'+to_time[0][-2:])
to_time[0]=to_time[0][:-2]
from_time.append(from_time[1][-2:])
to_time.append(to_time[1][-2:])
from_time[1]=int(from_time[1][:2])
to_time[1]=int(to_time[1][:2])
from_time[0]=int(from_time[0])
to_time[0]=int(to_time[0])
if (from_time[2]=='pm' and from_time[0]!=12):
from_time[0]+=12
if (to_time[2]=='pm' and to_time[0]!=12):
to_time[0]+=12
time_diff=(to_time[0]*60 + to_time[1])-(from_time[0]*60 + from_time[1])
return time_diff
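# Parse a day/time cell such as "MWF 9:00am-9:50am" and return a pair:
# (minutes per meeting, number of meeting days per week).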
def get_lecture_length(time_range):
days=[]
from_time=""
to_time=""
time_and_day=list(time_range)
for i in time_range:
if (i.isupper() and (i=='M' or i=='T' or i=='W' or i=='R' or i=='F')):
days.append(i)
time_and_day.remove(i)
if (i=='\n' or i.isdigit()):
break
for i in time_range:
if (i.isspace()):
time_and_day.remove(i)
temp = ''.join(time_and_day)
for i in range (0,len(temp)):
if (temp[i].isdigit()):
break
temp=temp[i:]
time_range_list=temp.split('-')
time_range_list=[time_range_list[0], time_range_list[1]]
for i in range(0,len(time_range_list[1])):
if (time_range_list[1][i]=='m'):
break
time_range_list[1]=time_range_list[1][0:i+1]
return get_length(time_range_list[0].split(':'), time_range_list[1].split(':')), len(days)
def get_lectures_disc(lec):
lec_and_disc=lec.split("|")
lec_and_disc = [str(lec_and_disc[x]) for x in range(len(lec_and_disc))]
while '*' in lec_and_disc:
lec_and_disc.remove('*')
return lec_and_disc
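# Pull the numeric course number out of a course key such as
# "COM SCI 180 - ..."; returns -1 when no digits are found.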
def getCourseNumber(course):
end = course.find("-") - 1
#edge case HIN-URD
if course[end] != ' ':
end = course.find("-", end + 5) - 1
start = course.rfind(" ", 0, end - 1)
val = course[start+1:end]
courseNum = ''.join(list(filter(lambda x: x.isdigit(), val)))
if(len(courseNum) == 0):
return -1
return int(courseNum)
def createJson():
for n,l in majors.items():
output[n] = base["major"]
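    # NOTE: every entry in output aliases the same template dict; the averages
    # computed below are written to majors and the CSV, not into this skeleton.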
i=0
total_lec_length_upper=0.0
total_lec_length_lower=0.0
total_number_of_days_upper=0
total_number_of_days_lower=0
no_of_lectures_upper=0.0
no_of_lectures_lower=0.0
while (i!=len(data)):
major_name = str(data[i]['fields']['subject'])
majors[major_name]=[]
curr_major=major_name
while (major_name==curr_major and i!=len(data)):
day_times=data[i]['fields']['day_times']
if (day_times==""):
i+=1
elif (day_times[0]=='M' or day_times[0]=='T' or day_times[0]=='W' or day_times[0]=='R' or day_times[0]=='F'):
course_num = getCourseNumber(data[i]["pk"].encode('utf-8'))
times = get_lectures_disc(day_times)
time_diff, no_of_days=get_lecture_length(times[0])
if (course_num >= 1 and course_num <= 99):
total_lec_length_lower+=time_diff
total_number_of_days_lower+=no_of_days
no_of_lectures_lower+=1
elif (course_num >= 100 and course_num <= 199):
total_lec_length_upper+=time_diff
total_number_of_days_upper+=no_of_days
no_of_lectures_upper+=1
i+=1
else:
i+=1
if i < len(data):
curr_major = str(data[i]['fields']['subject'])
for j in range(0,4):
majors[major_name].append(0)
if (no_of_lectures_lower!=0):
majors[major_name][0]=(total_lec_length_lower/no_of_lectures_lower)
majors[major_name][1]=(total_number_of_days_lower/no_of_lectures_lower)
if (no_of_lectures_upper!=0):
majors[major_name][2]=(total_lec_length_upper/no_of_lectures_upper)
majors[major_name][3]=(total_number_of_days_upper/no_of_lectures_upper)
total_lec_length_upper=0.0
total_lec_length_lower=0.0
total_number_of_days_upper=0
total_number_of_days_lower=0
no_of_lectures_upper=0.0
no_of_lectures_lower=0.0
with open('Spring2018-data.csv', 'wb') as csvfile:
lec_writer = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL)
lec_writer.writerow(['major','average lecture time (one day) - lowerdiv','average num of days a week - lowerdiv', 'average lecture time (one week) - lowerdiv', 'average lecture time (one day) - upperdiv','average num of days a week - upperdiv', 'average lecture time (one week) - upperdiv'])
lec_writer.writerow(['','','','','','',''])
for n,l in majors.items():
lec_writer.writerow([n,round(l[0],2), round(l[1],2), round(l[0]*l[1],2), round(l[2],2), round(l[3],2), round(l[2]*l[3],2)])
# with open('data.csv', 'wb') as csvfile:
# lec_writer = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL)
# #lec_writer.writerow(['major','average lecture time (one day)','average num of days a week', 'average lecture time (one week)'])
# #lec_writer.writerow(['','','', ''])
# lec_writer.writerow(['name','value'])
# for n,l in majors.items():
# lec_writer.writerow([n,int(round(l[0],2)/30)])
with open('data.json', 'w') as f:
createJson()
json.dump(output, f)
print len(output)
|
[
"dinkarkhattar@gmail.com"
] |
dinkarkhattar@gmail.com
|
91ef1503ce75661dbbe6b7d791eda966a31b1c1d
|
81eabe15995a6426b285b2312b73c0bde7bb61bc
|
/paleomix/tools/zonkey/common.py
|
81ad379b116d4e6692319c1a2c4afc9f055ff3ca
|
[] |
no_license
|
fvangef/paleomix
|
3a732d8cd99177809b25bd09dde6efd261b10cad
|
826fb866ae9c26cb7b49fc6a96fb618a3daaffcc
|
refs/heads/master
| 2020-04-15T22:05:02.249220
| 2018-11-05T19:56:49
| 2018-11-05T19:56:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,897
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import collections
import paleomix.yaml
import paleomix.common.versions as versions
# Format number for database file; is incremented when the format is changed.
# The 'revision' field specifies updates to the table that do not change the
# format of the database (see below).
_SUPPORTED_DB_FORMAT = 1
RSCRIPT_VERSION = versions.Requirement(call=("Rscript", "--version"),
search="version (\d+)\.(\d+)\.(\d+)",
checks=versions.GE(3, 0, 0),
priority=10)
class DBFileError(RuntimeError):
pass
def get_sample_names(handle):
samples = []
for readgroup in handle.header.get("RG", ()):
if "SM" in readgroup:
samples.append(readgroup["SM"])
return frozenset(samples)
def contig_name_to_plink_name(chrom):
"""Converts chromosome / contig name to the values expected by 'plink',
namely a digit or X/Y, or returns None if the chromosome could not be
identified.
"""
    if chrom.isdigit():
        return chrom
elif chrom.upper() in "XY":
return chrom.upper()
elif chrom.lower().startswith("chr") and chrom[3:].isdigit():
return chrom[3:]
elif chrom.lower() in ("chrx", "chry"):
return chrom[3].upper()
else:
return None
def read_summary(filename, default="[MISSING VALUE!]"):
results = collections.defaultdict(lambda: default)
with open(filename) as makefile:
string = makefile.read()
data = paleomix.yaml.safe_load(string)
if not isinstance(data, dict):
raise DBFileError('Summary file does not contain dictionary')
results.update(data)
return results
|
[
"MikkelSch@gmail.com"
] |
MikkelSch@gmail.com
|
49e3ff0f5f62bc19306f05b2fd7a489cf70d2013
|
56552161d554899961141d3233315115e75dcc6e
|
/apps/account/models/__init__.py
|
72e52fd08a9003ee8b2ac95bd06b4854b4acc3f5
|
[
"MIT"
] |
permissive
|
kutera/pyerp
|
7029b04d3e0bf9761b63ce5cadc764f710abe9d9
|
bac9d8c2f88a95e6be6d1a08d74a248dd3b2e501
|
refs/heads/master
| 2020-09-02T20:00:27.070049
| 2019-10-30T14:15:33
| 2019-10-30T14:15:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
"""Account Models
"""
from .move import PyAccountMove, PyAccountMoveDetail
from .plan import PyAccountPlan
from .invoice import PyInvoice, PyInvoiceDetail
from .journal import PyJournal
|
[
"gvizquel@gmail.com"
] |
gvizquel@gmail.com
|
02bcb1015f4ae20df9471e74fcbe8a96a2da1508
|
c20e1f67bb879ae1c3bcaaca45491e26208e921d
|
/tests/test_model/test_task.py
|
bd261b3287ce78fbddfa65a6ab58f7917882e71c
|
[] |
no_license
|
lesunb/Knapsack-Planning-MPERS
|
457f97ad062c6dac4d5fec109444849276d69b8f
|
62e4543fd18219dcaf9ee5efff69ca11d659abfc
|
refs/heads/master
| 2023-08-30T17:11:06.525671
| 2021-10-26T01:50:15
| 2021-10-26T01:50:15
| 359,919,416
| 0
| 0
| null | 2021-05-05T14:58:56
| 2021-04-20T18:49:06
|
Python
|
UTF-8
|
Python
| false
| false
| 6,057
|
py
|
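# Unit tests for PragmaticPlanning: provided-quality lookups and
# abidesByInterpretation checks, exercised against the MPERS example model.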
from planning.common.model.task import Task
from planning.common.model.context import Context
from planning.common.model.decomposition import Decomposition
from planning.common.model.comparison import Comparison
from planning.common.model.quality_constraint import QualityConstraint
from tests.test_data.mpers_metric import MpersMetrics
from tests.test_data.mpers_model import MpersModel
from planning.common.model.pragmatic import Pragmatic
from planning.algorithm.pragmatic.pragmatic_planning import PragmaticPlanning
import pytest
@pytest.fixture
def mpers():
mpers = MpersModel()
return mpers
def test_shouldProvideCorrectValueForMetric():
task = Task("T1")
currentContext = Context("C1")
fullContext = []
fullContext.append(currentContext)
task.setProvidedQuality(currentContext, MpersMetrics.METERS, 30)
assert 30 == PragmaticPlanning().myProvidedQuality(task, MpersMetrics.METERS, fullContext)
def test_shouldProvideMetricForBaseline():
task = Task("t1")
current = Context("C1")
fullContext = []
fullContext.append(current)
task.setProvidedQuality(None, MpersMetrics.METERS, 30.0)
assert 30.0 == PragmaticPlanning().myProvidedQuality(task, MpersMetrics.METERS, fullContext)
def test_metricNotFound():
task = Task("T1")
currentContext = Context("C1")
fullContext = []
fullContext.append(currentContext)
task.setProvidedQuality(currentContext, MpersMetrics.METERS, 30.0)
result = PragmaticPlanning().myProvidedQuality(task, MpersMetrics.SECONDS, fullContext)
assert result is None
def test_OnlyBaselineDefined():
task = Task("T1")
baseline = Context(None)
fullContext = []
fullContext.append(baseline)
task.setProvidedQuality(baseline, MpersMetrics.METERS, 50.0)
assert 50.0 == PragmaticPlanning().myProvidedQuality(task, MpersMetrics.METERS, fullContext)
def test_shouldProvideSpecificContextMetric():
task = Task("T2")
currentContext = Context("C1")
baseline = None
fullContext = []
fullContext.append(currentContext)
fullContext.append(baseline)
task.setProvidedQuality(currentContext, MpersMetrics.METERS, 50)
task.setProvidedQuality(baseline, MpersMetrics.METERS, 30)
assert 50 == PragmaticPlanning().myProvidedQuality(task, MpersMetrics.METERS, fullContext)
def test_abidesByInterpretation_passing_baseline(mpers):
isNotifiedAboutEmergencyGoal = mpers.goals.isNotifiedAboutEmergencyGoal
notifyByMobileVibrationTask = mpers.tasks.notifyByMobileVibrationTask
c1 = mpers.contexts.c1
c9 = mpers.contexts.c9
context = [c1]
result = PragmaticPlanning().abidesByInterpretation(notifyByMobileVibrationTask,
isNotifiedAboutEmergencyGoal.interp, context)
assert result == True
context = [c9]
result = PragmaticPlanning().abidesByInterpretation(notifyByMobileVibrationTask,
isNotifiedAboutEmergencyGoal.interp, context)
assert result == True
def test_abidesByInterpretation_not_passing_baseline(mpers):
isNotifiedAboutEmergencyGoal = mpers.goals.isNotifiedAboutEmergencyGoal
notifyBySoundAlertTask = mpers.tasks.notifyBySoundAlertTask
c6 = mpers.contexts.c6
c1 = mpers.contexts.c1
context = [c6]
result = PragmaticPlanning().abidesByInterpretation(notifyBySoundAlertTask,
isNotifiedAboutEmergencyGoal.interp, context)
assert result == True
context = [c1]
result = PragmaticPlanning().abidesByInterpretation(notifyBySoundAlertTask,
isNotifiedAboutEmergencyGoal.interp, context)
assert result == False
def test_abidesByInterpretation_only_baseline(mpers):
considerLastKnownLocationTask = mpers.tasks.considerLastKnownLocationTask
locationIsIdentifiedGoal = mpers.goals.locationIsIdentifiedGoal
context = []
result = PragmaticPlanning().abidesByInterpretation(considerLastKnownLocationTask,
locationIsIdentifiedGoal.interp, context)
assert result == True
def test_abidesByInterpretation_only_baseline_context(mpers):
considerLastKnownLocationTask = mpers.tasks.considerLastKnownLocationTask
locationIsIdentifiedGoal = mpers.goals.locationIsIdentifiedGoal
c1 = mpers.contexts.c1
c2 = mpers.contexts.c2
c3 = mpers.contexts.c3
context = [c1, c2, c3]
result = PragmaticPlanning().abidesByInterpretation(considerLastKnownLocationTask,
locationIsIdentifiedGoal.interp, context)
assert result == True
def test_abidesByInterpretation_context_not_passing(mpers):
identifyLocationByVoiceCallTask = mpers.tasks.identifyLocationByVoiceCallTask
locationIsIdentifiedGoal = mpers.goals.locationIsIdentifiedGoal
c5 = mpers.contexts.c5
context = []
result = PragmaticPlanning().abidesByInterpretation(identifyLocationByVoiceCallTask,
locationIsIdentifiedGoal.interp, context)
assert result == True
context.append(c5)
result = PragmaticPlanning().abidesByInterpretation(identifyLocationByVoiceCallTask,
locationIsIdentifiedGoal.interp, context)
assert result == False
def test_abidesByInterpretation_only_baseline_not_passing(mpers):
locationIsIdentifiedGoal = mpers.goals.locationIsIdentifiedGoal
context = []
LongSecondsTask = Task("LongSecondsTask")
LongSecondsTask.setProvidedQuality(
None, MpersMetrics.SECONDS, 1500)
result = PragmaticPlanning().abidesByInterpretation(LongSecondsTask,
locationIsIdentifiedGoal.interp, context)
assert result == False
def test_myQualityBaseline(mpers):
accessLocationFromTriangulationTask = mpers.tasks.accessLocationFromTriangulationTask
c2 = mpers.contexts.c2
c11 = mpers.contexts.c11
context = [c2]
result = PragmaticPlanning().myProvidedQuality(accessLocationFromTriangulationTask,
MpersMetrics.DISTANCE_ERROR, context)
assert result == 40
context.append(c11)
result = PragmaticPlanning().myProvidedQuality(accessLocationFromTriangulationTask,
MpersMetrics.DISTANCE_ERROR, context)
assert result == 400
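# To run just this module from the repository root (standard pytest usage,
# assuming the layout shown in the file path above):
#     pytest tests/test_model/test_task.py -v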
# [end of file; author: biachiarelli@gmail.com]