sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def _update_frames(self, written):
    """Refresh the cached frame count after a write."""
    if not self.seekable():
        # Non-seekable stream: just accumulate the frames written so far.
        self._info.frames += written
        return
    # Seekable stream: ask the file for its true length, restoring the
    # cursor position afterwards so the caller's state is untouched.
    position = self.tell()
    self._info.frames = self.seek(0, SEEK_END)
    self.seek(position, SEEK_SET)
def _prepare_read(self, start, stop, frames):
    """Validate the requested read window, seek to its start and return its length."""
    if start != 0 and not self.seekable():
        raise ValueError("start is only allowed for seekable files")
    if frames >= 0 and stop is not None:
        raise TypeError("Only one of {frames, stop} may be used")
    # Normalize negative / None bounds against the total frame count.
    start, stop, _ = slice(start, stop).indices(self.frames)
    stop = max(stop, start)
    if frames < 0:
        frames = stop - start
    if self.seekable():
        self.seek(start, SEEK_SET)
    return frames
def ensure_dir(dirname):
    """Creates the directory dirname if it does not already exist,
    taking into account concurrent 'creation' on the grid.
    An exception is thrown if a file (rather than a directory) already
    exists.

    :param dirname: path of the directory to create
    """
    # exist_ok=True tolerates an already-existing *directory* (including
    # one created concurrently by another grid job), while still raising
    # FileExistsError when the path exists but is a regular file --
    # exactly the semantics of the previous try/except + isdir() dance.
    os.makedirs(dirname, exist_ok=True)
def probes_used_generate_vector(probe_files_full, probe_files_model):
    """Generates a boolean vector indicating which probes belong to the model.

    :param probe_files_full: dict of all probe files, keyed by probe id
    :param probe_files_model: dict of the probe files used by this model
    :return: 1-D boolean numpy array, one entry per key of
        ``probe_files_full`` (iterated in sorted key order), True where
        the key is also present in ``probe_files_model``
    """
    import numpy as np
    # dict.has_key() was removed in Python 3; the `in` operator is the
    # portable membership test.
    return np.array(
        [k in probe_files_model for k in sorted(probe_files_full.keys())],
        dtype='bool',
    )
def probes_used_extract_scores(full_scores, same_probes):
    """Extracts a matrix of scores for a model, given a probes_used row vector of boolean.

    :param full_scores: 2-D score matrix (models x probes)
    :param same_probes: 1-D boolean vector, one entry per probe column
    :return: float64 matrix keeping only the columns where
        ``same_probes`` is True
    :raises ValueError: if the number of columns does not match the
        length of ``same_probes``
    """
    import numpy as np
    if full_scores.shape[1] != same_probes.shape[0]:
        # The original raised a bare string, which is a TypeError on
        # Python 3; raise a proper exception instead.
        raise ValueError("Size mismatch")
    # Boolean-mask column selection replaces the explicit copy loops.
    return full_scores[:, np.asarray(same_probes, dtype=bool)].astype('float64')
def read(filename):
    """Read a WAV audio file.

    :param filename: path of the WAV file to read
    :return: tuple ``(rate, data)`` with the sampling rate (int) and the
        samples as float64 (values keep their native integer scale; no
        normalization to [-1, 1] is performed)
    """
    # Deprecated entry point: prefer load() from
    # bob.bio.spear.database.AudioBioFile, which can also handle other
    # formats (e.g. NIST Sphere) once the sox bindings are updated.
    import scipy.io.wavfile
    rate, audio = scipy.io.wavfile.read(filename)
    # We consider there is only 1 channel in the audio file.
    # astype replaces numpy.cast['float'], an alias removed in NumPy 2.0.
    data = audio.astype('float64')
    return rate, data
def normalize_std_array(vector):
    """Applies a zero-mean, unit-variance normalization to an arrayset.

    :param vector: sequence of length-1 numpy arrays (one sample each)
    :return: 2-D float64 array of shape (n_samples, 1) with each sample
        normalized by the set's mean and standard deviation
    """
    n_samples = len(vector)
    # Accumulate sum and sum of squares, then derive mean and std via
    # E[x^2] - E[x]^2 (population variance).
    total = numpy.zeros((1,), 'float64')
    total_sq = numpy.zeros((1,), 'float64')
    for sample in vector:
        values = sample.astype('float64')
        total += values
        total_sq += values ** 2
    mean = total / n_samples
    std = (total_sq / n_samples - mean ** 2) ** 0.5
    normalized = numpy.ndarray(shape=(n_samples, mean.shape[0]),
                               dtype=numpy.float64)
    for idx in range(n_samples):
        normalized[idx, :] = (vector[idx] - mean) / std
    return normalized
def smoothing(labels, smoothing_window):
    """ Applies a smoothing on VAD

    Removes isolated single-frame flips, then inverts any run of identical
    labels shorter than ``smoothing_window`` frames when its neighbouring
    runs are each longer than the window.  ``labels`` is modified in place
    and also returned.

    :param labels: mutable 0/1 per-frame speech labels (modified in place)
    :param smoothing_window: minimum run length, in frames, kept as-is
    :return: the smoothed labels
    """
    # Too few speech frames overall: nothing worth smoothing.
    if numpy.sum(labels)< smoothing_window:
        return labels
    segments = []
    # Fill one-frame gaps: a 0 between two 1s becomes 1.
    for k in range(1,len(labels)-1):
        if labels[k]==0 and labels[k-1]==1 and labels[k+1]==1 :
            labels[k]=1
    # Remove one-frame spikes: a 1 between two 0s becomes 0.
    for k in range(1,len(labels)-1):
        if labels[k]==1 and labels[k-1]==0 and labels[k+1]==0 :
            labels[k]=0
    # Run-length encode the labels as [start, end, label] triples.
    seg = numpy.array([0,0,labels[0]])
    for k in range(1,len(labels)):
        if labels[k] != labels[k-1]:
            seg[1]=k-1
            segments.append(seg)
            seg = numpy.array([k,k,labels[k]])
    seg[1]=len(labels)-1
    segments.append(seg)
    if len(segments) < 2:
        return labels
    curr = segments[0]
    next = segments[1]
    # Look at the first segment. If it's short enough, just change its labels
    if (curr[1]-curr[0]+1) < smoothing_window and (next[1]-next[0]+1) > smoothing_window:
        if curr[2]==1:
            labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
            curr[2]=0
        else: #curr[2]==0
            labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
            curr[2]=1
    # Middle segments: invert a short run only when flanked by long runs
    # on both sides.
    for k in range(1,len(segments)-1):
        prev = segments[k-1]
        curr = segments[k]
        next = segments[k+1]
        if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window and (next[1]-next[0]+1) > smoothing_window:
            if curr[2]==1:
                labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
                curr[2]=0
            else: #curr[2]==0
                labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
                curr[2]=1
    # Last segment: same rule, but only its left neighbour exists.
    prev = segments[-2]
    curr = segments[-1]
    if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window:
        if curr[2]==1:
            labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
            curr[2]=0
        else: #if curr[2]==0
            labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
            curr[2]=1
    return labels
def _conversion(self, input_signal, vad_file):
    """
    Converts an external VAD to follow the Spear convention.
    Energy is used in order to avoid out-of-bound array indexes.

    :param input_signal: tuple of (sampling rate, wave signal)
    :param vad_file: path to the external VAD label file
    :return: the converted speech/non-speech labels
    """
    # Fix: the original body referenced the undefined name
    # `rate_wavsample`; the parameter is called `input_signal`.
    e = bob.ap.Energy(input_signal[0], self.win_length_ms, self.win_shift_ms)
    energy_array = e(input_signal[1])
    labels = self.use_existing_vad(energy_array, vad_file)
    return labels
def mod_4hz(self, rate_wavsample):
    """Computes and returns the 4Hz modulation energy features for the given input wave file

    :param rate_wavsample: tuple of (sampling rate, wave signal)
    :return: tuple ``(labels, energy_array, mod_4hz)`` -- the smoothed
        speech/non-speech frame labels, the per-frame energy, and the
        averaged 4Hz modulation energy
    """
    # Set parameters
    wl = self.win_length_ms
    ws = self.win_shift_ms
    nf = self.n_filters
    f_min = self.f_min
    f_max = self.f_max
    pre = self.pre_emphasis_coef
    # Configure the spectrogram to output linear (non-log) filter-bank
    # band energies.
    c = bob.ap.Spectrogram(rate_wavsample[0], wl, ws, nf, f_min, f_max, pre)
    c.energy_filter=True
    c.log_filter=False
    c.energy_bands=True
    sig = rate_wavsample[1]
    energy_bands = c(sig)
    # Band-pass the band energies around 4 Hz, compute the per-frame
    # modulation energy, then average it.
    filtering_res = self.pass_band_filtering(energy_bands, rate_wavsample[0])
    mod_4hz = self.modulation_4hz(filtering_res, rate_wavsample)
    mod_4hz = self.averaging(mod_4hz)
    # Frame energies feed the VAD decision together with the 4Hz feature.
    e = bob.ap.Energy(rate_wavsample[0], wl, ws)
    energy_array = e(rate_wavsample[1])
    labels = self._voice_activity_detection(energy_array, mod_4hz)
    labels = utils.smoothing(labels,self.smoothing_window) # discard isolated speech less than 100ms
    logger.info("After Mod-4Hz based VAD there are %d frames remaining over %d", numpy.sum(labels), len(labels))
    return labels, energy_array, mod_4hz
def read_matlab_files(self, biofile, directory, extension):
    """Read pre-computed CQCC Matlab features for the given biofile.

    :param biofile: the sample whose features should be loaded
    :param directory: base directory of the pre-computed features
    :param extension: file extension of the feature files
    :return: the numpy array read from the feature file
    """
    # Importing bob.io.matlab registers the Matlab codec with bob.io.base.
    import bob.io.matlab
    feature_path = biofile.make_path(directory, extension)
    return bob.io.base.load(feature_path)
def write_data(self, data, data_file, compression=0):
    """Writes the given *preprocessed* data to a file with the given name.

    :param data: 3-tuple of (sampling rate, signal, VAD labels), stored
        under the HDF5 keys "rate", "data" and "labels" respectively
    :param data_file: path of the HDF5 file to create ('w' truncates)
    :param compression: compression level forwarded to ``HDF5File.set``

    NOTE(review): the HDF5File handle is never explicitly closed here;
    flushing relies on the object being garbage collected -- confirm that
    bob.io.base.HDF5File flushes on destruction.
    """
    f = bob.io.base.HDF5File(data_file, 'w')
    f.set("rate", data[0], compression=compression)
    f.set("data", data[1], compression=compression)
    f.set("labels", data[2], compression=compression)
def command_line_arguments(command_line_parameters):
    """Defines the command line parameters that are accepted.

    :param command_line_parameters: list of argument strings to parse
        (pass ``None`` to let argparse read ``sys.argv``)
    :return: the parsed argument namespace
    """
    # create parser
    parser = argparse.ArgumentParser(description='Execute baseline algorithms with default parameters', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # add parameters
    # - the algorithm to execute
    parser.add_argument('-a', '--algorithms', choices = all_algorithms, default = ('gmm-voxforge',), nargs = '+', help = 'Select one (or more) algorithms that you want to execute.')
    parser.add_argument('--all', action = 'store_true', help = 'Select all algorithms.')
    # - the database to choose
    parser.add_argument('-d', '--database', choices = available_databases, default = 'voxforge', help = 'The database on which the baseline algorithm is executed.')
    # - the sub-directory where baseline results are stored
    parser.add_argument('-b', '--baseline-directory', default = 'baselines', help = 'The sub-directory, where the baseline results are stored.')
    # - the directory to write
    parser.add_argument('-f', '--directory', help = 'The directory to write the data of the experiment into. If not specified, the default directories of the verify.py script are used (see verify.py --help).')
    # - use the Idiap grid -- option is only useful if you are at Idiap
    parser.add_argument('-g', '--grid', action = 'store_true', help = 'Execute the algorithm in the SGE grid.')
    # - run in parallel on the local machine
    parser.add_argument('-l', '--parallel', type=int, help = 'Run the algorithms in parallel on the local machine, using the given number of parallel threads')
    # - perform ZT-normalization
    # NOTE(review): 'store_false' means ZT-norm defaults to True and the
    # -z flag *disables* it, which contradicts the help text -- confirm.
    parser.add_argument('-z', '--zt-norm', action = 'store_false', help = 'Compute the ZT norm for the files (might not be availabe for all databases).')
    # - just print?
    parser.add_argument('-q', '--dry-run', action = 'store_true', help = 'Just print the commands, but do not execute them.')
    # - evaluate the algorithm (after it has finished)
    parser.add_argument('-e', '--evaluate', nargs='+', choices = ('EER', 'HTER', 'ROC', 'DET', 'CMC', 'RR'), help = 'Evaluate the results of the algorithms (instead of running them) using the given evaluation techniques.')
    # TODO: add MIN-DCT measure
    # - other parameters that are passed to the underlying script
    parser.add_argument('parameters', nargs = argparse.REMAINDER, help = 'Parameters directly passed to the verify.py script.')
    # adds the -v/--verbose option
    bob.core.log.add_command_line_option(parser)
    args = parser.parse_args(command_line_parameters)
    if args.all:
        args.algorithms = all_algorithms
    bob.core.log.set_verbosity_level(logger, args.verbose)
    return args
def _compute_energy(self, rate_wavsample):
    """Retrieve the speech / non-speech labels for the speech sample
    given by the tuple (rate, wave signal).

    :param rate_wavsample: tuple of (sampling rate, wave signal)
    :return: smoothed per-frame 0/1 speech labels
    """
    # Per-frame energy feeds the 2-Gaussian speech/non-speech classifier.
    e = bob.ap.Energy(rate_wavsample[0], self.win_length_ms, self.win_shift_ms)
    energy_array = e(rate_wavsample[1])
    labels = self._voice_activity_detection(energy_array)
    # discard isolated speech a number of frames defined in smoothing_window
    labels = utils.smoothing(labels,self.smoothing_window)
    logger.info("After 2 Gaussian Energy-based VAD there are %d frames remaining over %d", numpy.sum(labels), len(labels))
    return labels
def calc_mean(c0, c1=None):
    """Calculates the column-wise mean of the data.

    :param c0: array-like of samples (one row per sample)
    :param c1: optional second class of samples; when given and
        non-empty, the result is the average of the two class means
        (balanced, independent of the class sizes)
    :return: numpy array with the mean of each column
    """
    # The old `c1 != []` test relied on a mutable default argument and
    # triggers elementwise comparison when c1 is a numpy array; an
    # explicit None/length check is equivalent and safe.
    if c1 is not None and len(c1) != 0:
        return (numpy.mean(c0, 0) + numpy.mean(c1, 0)) / 2.
    return numpy.mean(c0, 0)
def calc_std(c0, c1=None):
    """Calculates the column-wise standard deviation of the data.

    When both classes are given, the smaller class is replicated by an
    integer factor so that both contribute roughly equally before the
    joint standard deviation is computed.

    :param c0: array-like of samples (one row per sample)
    :param c1: optional second class of samples
    :return: numpy array with the standard deviation of each column
    """
    # None/length check replaces the mutable-default `c1 == []` test,
    # which also elementwise-compares numpy arrays.
    if c1 is None or len(c1) == 0:
        return numpy.std(c0, 0)
    prop = float(len(c0)) / float(len(c1))
    if prop < 1:
        p0 = int(math.ceil(1 / prop))
        p1 = 1
    else:
        p0 = 1
        p1 = int(math.ceil(prop))
    return numpy.std(numpy.vstack(p0 * [c0] + p1 * [c1]), 0)
def calc_mean_std(c0, c1=[], nonStdZero=False):
    """Calculates both the mean and the standard deviation of the data.

    :param c0: array-like of samples (one row per sample)
    :param c1: optional second class of samples, forwarded to
        ``calc_mean`` / ``calc_std``
    :param nonStdZero: when True, zero entries of the standard deviation
        are replaced by one (avoids division by zero in normalization)
    :return: tuple ``(mean, std)``
    """
    data_mean = calc_mean(c0, c1)
    data_std = calc_std(c0, c1)
    if nonStdZero:
        data_std[data_std == 0] = 1
    return data_mean, data_std
def vad_filter_features(vad_labels, features, filter_frames="trim_silence"):
    """ Trim the spectrogram to remove silent head/tails from the speech sample.
    Keep all remaining frames or either speech or non-speech only

    @param: vad_labels: per-frame voice-activity labels (1 == speech)
    @param: features: feature matrix, one row per frame
    @param: filter_frames: the value is either 'silence_only' (keep the speech, remove everything else),
    'speech_only' (only keep the silent parts), 'trim_silence' (trim silent heads and tails),
    or 'no_filter' (no filter is applied)
    @return: the filtered feature matrix
    """
    if not features.size:
        raise ValueError("vad_filter_features(): data sample is empty, no features extraction is possible")
    vad_labels = numpy.asarray(vad_labels, dtype=numpy.int8)
    features = numpy.asarray(features, dtype=numpy.float64)
    # Force one feature row per VAD label (raises if sizes are
    # incompatible).
    features = numpy.reshape(features, (vad_labels.shape[0], -1))
    # logger.info("RatioVectorExtractor, vad_labels shape: %s", str(vad_labels.shape))
    # print ("RatioVectorExtractor, features max: %f and min: %f" %(numpy.max(features), numpy.min(features)))
    # first, take the whole thing, in case there are problems later
    filtered_features = features
    # if VAD detection worked on this sample
    if vad_labels is not None and filter_frames != "no_filter":
        # make sure the size of VAD labels and sectrogram lenght match
        if len(vad_labels) == len(features):
            # take only speech frames, as in VAD speech frames are 1 and silence are 0
            speech, = numpy.nonzero(vad_labels)
            silences = None
            if filter_frames == "silence_only":
                # take only silent frames - those for which VAD gave zeros
                silences, = numpy.nonzero(vad_labels == 0)
            if len(speech):
                nzstart = speech[0]  # index of the first non-zero
                nzend = speech[-1]  # index of the last non-zero
                if filter_frames == "silence_only":  # extract only silent frames
                    # take only silent frames in-between the speech
                    silences = silences[silences > nzstart]
                    silences = silences[silences < nzend]
                    filtered_features = features[silences, :]
                elif filter_frames == "speech_only":
                    filtered_features = features[speech, :]
                else:  # when we take all
                    filtered_features = features[nzstart:nzend + 1, :]  # numpy slicing is a non-closed interval [)
        else:
            logger.error("vad_filter_features(): VAD labels should be the same length as energy bands")
    logger.info("vad_filter_features(): filtered_features shape: %s", str(filtered_features.shape))
    return filtered_features
def solve_sweep_structure(
    self,
    structures,
    sweep_param_list,
    filename="structure_n_effs.dat",
    plot=True,
    x_label="Structure number",
    fraction_mode_list=[],
):
    """
    Find the modes of many structures.
    Args:
        structures (list): A list of `Structures` to find the modes
            of.
        sweep_param_list (list): A list of the parameter-sweep sweep
            that was used. This is for plotting purposes only.
        filename (str): The nominal filename to use when saving the
            effective indices. Defaults to 'structure_n_effs.dat'.
        plot (bool): `True` if plots should be generates,
            otherwise `False`. Default is `True`.
        x_label (str): x-axis text to display in the plot.
        fraction_mode_list (list): A list of mode indices of the modes
            that should be included in the TE/TM mode fraction plot.
            If the list is empty, all modes will be included. The list
            is empty by default.
    Returns:
        list: A list of the effective indices found for each structure.
    """
    n_effs = []
    mode_types = []
    fractions_te = []
    fractions_tm = []
    # Solve every structure in turn, collecting effective indices and
    # the TE/TM character of each mode.
    for s in tqdm.tqdm(structures, ncols=70):
        self.solve(s)
        n_effs.append(np.real(self.n_effs))
        mode_types.append(self._get_mode_types())
        fractions_te.append(self.fraction_te)
        fractions_tm.append(self.fraction_tm)
    if filename:
        self._write_n_effs_to_file(
            n_effs, self._modes_directory + filename, sweep_param_list
        )
        # One "ModeN" column per mode, one row per structure.
        with open(self._modes_directory + "mode_types.dat", "w") as fs:
            header = ",".join(
                "Mode%i" % i for i, _ in enumerate(mode_types[0])
            )
            fs.write("# " + header + "\n")
            for mt in mode_types:
                txt = ",".join("%s %.2f" % pair for pair in mt)
                fs.write(txt + "\n")
        # TE fraction per mode, prefixed with the sweep parameter value.
        with open(self._modes_directory + "fraction_te.dat", "w") as fs:
            header = "fraction te"
            fs.write("# param sweep," + header + "\n")
            for param, fte in zip(sweep_param_list, fractions_te):
                txt = "%.6f," % param
                txt += ",".join("%.2f" % f for f in fte)
                fs.write(txt + "\n")
        # Same layout for the TM fractions.
        with open(self._modes_directory + "fraction_tm.dat", "w") as fs:
            header = "fraction tm"
            fs.write("# param sweep," + header + "\n")
            for param, ftm in zip(sweep_param_list, fractions_tm):
                txt = "%.6f," % param
                txt += ",".join("%.2f" % f for f in ftm)
                fs.write(txt + "\n")
    if plot:
        # MPL toggles between TeX-style and plain-text labels.
        if MPL:
            title = "$n_{eff}$ vs %s" % x_label
            y_label = "$n_{eff}$"
        else:
            title = "n_{effs} vs %s" % x_label
            y_label = "n_{eff}"
        self._plot_n_effs(
            self._modes_directory + filename, self._modes_directory + "fraction_te.dat", x_label, y_label, title
        )
        title = "TE Fraction vs %s" % x_label
        self._plot_fraction(
            self._modes_directory + "fraction_te.dat",
            x_label,
            "TE Fraction [%]",
            title,
            fraction_mode_list,
        )
        title = "TM Fraction vs %s" % x_label
        self._plot_fraction(
            self._modes_directory + "fraction_tm.dat",
            x_label,
            "TM Fraction [%]",
            title,
            fraction_mode_list,
        )
    return n_effs
def solve_sweep_wavelength(
    self,
    structure,
    wavelengths,
    filename="wavelength_n_effs.dat",
    plot=True,
):
    """
    Solve for the effective indices of a fixed structure at
    different wavelengths.
    Args:
        structure (Slabs): The target structure to solve
            for modes.
        wavelengths (list): A list of wavelengths to sweep
            over.
        filename (str): The nominal filename to use when saving the
            effective indices. Defaults to 'wavelength_n_effs.dat'.
        plot (bool): `True` if plots should be generates,
            otherwise `False`. Default is `True`.
    Returns:
        list: A list of the effective indices found for each wavelength.
    """
    n_effs = []
    # Re-solve the same structure at each wavelength of the sweep.
    for w in tqdm.tqdm(wavelengths, ncols=70):
        structure.change_wavelength(w)
        self.solve(structure)
        n_effs.append(np.real(self.n_effs))
    if filename:
        self._write_n_effs_to_file(
            n_effs, self._modes_directory + filename, wavelengths
        )
    if plot:
        if MPL:
            title = "$n_{eff}$ vs Wavelength"
            y_label = "$n_{eff}$"
        else:
            # Fix: the original interpolated an undefined `x_label` into
            # a format string with no placeholder, raising at runtime.
            title = "n_{effs} vs Wavelength"
            y_label = "n_{eff}"
        # Fix: pass the computed y_label (the original ignored it and
        # always used the plain-text form), matching
        # solve_sweep_structure.
        self._plot_n_effs(
            self._modes_directory + filename,
            self._modes_directory + "fraction_te.dat",
            "Wavelength",
            y_label,
            title,
        )
    return n_effs
def solve_ng(self, structure, wavelength_step=0.01, filename="ng.dat"):
    r"""
    Solve for the group index, :math:`n_g`, of a structure at a particular
    wavelength.
    Args:
        structure (Structure): The target structure to solve
            for modes.
        wavelength_step (float): The step to take below and
            above the nominal wavelength. This is used for
            approximating the gradient of :math:`n_\mathrm{eff}`
            at the nominal wavelength. Default is 0.01.
        filename (str): The nominal filename to use when saving the
            effective indices. Defaults to 'ng.dat'.
    Returns:
        list: A list of the group indices found for each mode.
    """
    wl_nom = structure._wl
    # Effective indices at the nominal wavelength...
    self.solve(structure)
    n_ctrs = self.n_effs
    # ...and one step below/above it, for a central finite difference.
    structure.change_wavelength(wl_nom - wavelength_step)
    self.solve(structure)
    n_bcks = self.n_effs
    structure.change_wavelength(wl_nom + wavelength_step)
    self.solve(structure)
    n_frws = self.n_effs
    # NOTE(review): the structure is left at wl_nom + wavelength_step
    # here, not restored to wl_nom -- confirm callers do not rely on it.
    n_gs = []
    # n_g = n_eff - lambda * d(n_eff)/d(lambda), central difference.
    for n_ctr, n_bck, n_frw in zip(n_ctrs, n_bcks, n_frws):
        n_gs.append(
            n_ctr - wl_nom * (n_frw - n_bck) / (2 * wavelength_step)
        )
    if filename:
        with open(self._modes_directory + filename, "w") as fs:
            fs.write("# Mode idx, Group index\n")
            for idx, n_g in enumerate(n_gs):
                fs.write("%i,%.3f\n" % (idx, np.round(n_g.real, 3)))
    return n_gs
def write_modes_to_file(self, filename="mode.dat", plot=True, analyse=True):
    """
    Writes the mode fields to a file and optionally plots them.
    Args:
        filename (str): The nominal filename to use for the saved
            data. The suffix will be automatically be changed to
            identifiy each mode number. Default is 'mode.dat'
        plot (bool): `True` if plots should be generates,
            otherwise `False`. Default is `True`.
        analyse (bool): `True` if an analysis on the fundamental
            mode should be performed. The analysis adds to the
            plot of the fundamental mode the power mode-field
            diameter (MFD) and marks it on the output, and it
            marks with a cross the maximum E-field value.
            Default is `True`.
    Returns:
        dict: A dictionary containing the effective indices
            and mode field profiles (if solved for).
    """
    modes_directory = "./modes_semi_vec/"
    # Create the output directory on first use.
    if not os.path.isdir(modes_directory):
        os.mkdir(modes_directory)
    filename = modes_directory + filename
    for i, mode in enumerate(self._ms.modes):
        filename_mode = self._get_mode_filename(
            self._semi_vectorial_method, i, filename
        )
        # Only the real part of the field is saved.
        self._write_mode_to_file(np.real(mode), filename_mode)
        if plot:
            if i == 0 and analyse:
                # Fit a Gaussian to the fundamental mode so the plot can
                # be annotated with the peak position and the MFD.
                A, centre, sigma_2 = anal.fit_gaussian(
                    self._structure.xc, self._structure.yc, np.abs(mode)
                )
                subtitle = (
                    "E_{max} = %.3f, (x_{max}, y_{max}) = (%.3f, %.3f), MFD_{x} = %.3f, "
                    "MFD_{y} = %.3f"
                ) % (A, centre[0], centre[1], sigma_2[0], sigma_2[1])
                self._plot_mode(
                    self._semi_vectorial_method,
                    i,
                    filename_mode,
                    self.n_effs[i],
                    subtitle,
                    sigma_2[0],
                    sigma_2[1],
                    centre[0],
                    centre[1],
                    wavelength=self._structure._wl,
                )
            else:
                # Higher-order modes (or analyse=False): plain plot.
                self._plot_mode(
                    self._semi_vectorial_method,
                    i,
                    filename_mode,
                    self.n_effs[i],
                    wavelength=self._structure._wl,
                )
    return self.modes
def write_modes_to_file(
    self,
    filename="mode.dat",
    plot=True,
    fields_to_write=("Ex", "Ey", "Ez", "Hx", "Hy", "Hz"),
):
    """
    Writes the mode fields to a file and optionally plots them.
    Args:
        filename (str): The nominal filename to use for the saved
            data. The suffix will be automatically be changed to
            identifiy each field and mode number. Default is
            'mode.dat'
        plot (bool): `True` if plots should be generates,
            otherwise `False`. Default is `True`.
        fields_to_write (tuple): A tuple of strings where the
            strings can be 'Ex', 'Ey', 'Ez', 'Hx', 'Hy' and 'Hz'
            defining what part of the mode should be saved and
            plotted. By default, all six components are written
            and plotted.
    Returns:
        dict: A dictionary containing the effective indices
            and mode field profiles (if solved for).
    """
    modes_directory = self._modes_directory
    # Mode info file: one summary row per solved mode.
    with open(modes_directory + "mode_info", "w") as fs:
        fs.write("# Mode idx, Mode type, % in major direction, n_eff\n")
        for i, (n_eff, (mode_type, percentage)) in enumerate(
            zip(self.n_effs, self.mode_types)
        ):
            mode_idx = str(i)
            line = "%s,%s,%.2f,%.3f" % (
                mode_idx,
                mode_type,
                percentage,
                n_eff.real,
            )
            fs.write(line + "\n")
    # Mode field plots: one sub-directory per mode, one file per
    # requested field component.
    for i, (mode, areas) in enumerate(zip(self._ms.modes, self.overlaps)):
        mode_directory = "%smode_%i/" % (modes_directory, i)
        if not os.path.isdir(mode_directory):
            os.mkdir(mode_directory)
        filename_full = mode_directory + filename
        for (field_name, field_profile), area in zip(
            mode.fields.items(), areas
        ):
            if field_name in fields_to_write:
                filename_mode = self._get_mode_filename(
                    field_name, i, filename_full
                )
                # Only the real part of the field is saved.
                self._write_mode_to_file(
                    np.real(field_profile), filename_mode
                )
                if plot:
                    self._plot_mode(
                        field_name,
                        i,
                        filename_mode,
                        self.n_effs[i],
                        area=area,
                        wavelength=self._structure._wl,
                    )
    return self.modes
def set_required(field, render_kw=None, force=False):
    """
    Returns *render_kw* with *required* set if the field is required.

    The *required* key is added when the field's `required` flag is set
    (mostly by validators); browsers use the attribute to mark required
    inputs.

    ..note::
        A *required* key already present is left untouched unless
        *force* is used.
    """
    render_kw = {} if render_kw is None else render_kw
    # An explicitly supplied 'required' key wins, unless forced.
    if not force and 'required' in render_kw:
        return render_kw
    if field.flags.required:
        render_kw['required'] = True
    return render_kw
def set_invalid(field, render_kw=None):
    """
    Returns *render_kw* with `invalid` added to *class* on validation errors.

    When the *field* carries any errors, 'invalid' is set as (or prepended
    to) the field's CSS class(es); browsers set the same class when they
    detect errors themselves.
    """
    render_kw = {} if render_kw is None else render_kw
    if field.errors:
        # Prefer an existing 'class' entry; otherwise consume a
        # 'class_' escape-hatch key (popped so it is not rendered twice).
        existing = render_kw.get('class') or render_kw.pop('class_', '')
        render_kw['class'] = (
            'invalid {}'.format(existing) if existing else 'invalid'
        )
    return render_kw
def set_minmax(field, render_kw=None, force=False):
    """
    Returns *render_kw* with *min* and *max* set if validators use them.

    Inspects the field's validators and copies the *min* / *max* bounds
    of any `Length` or `NumberRange` validator into the render keywords.

    ..note::
        Keys already present are kept unless *force* is used.
    """
    render_kw = {} if render_kw is None else render_kw
    for validator in field.validators:
        if not isinstance(validator, MINMAX_VALIDATORS):
            continue
        # -1 and None both mean "bound not set" on these validators.
        if force or 'min' not in render_kw:
            minimum = getattr(validator, 'min', -1)
            if minimum not in (-1, None):
                render_kw['min'] = minimum
        if force or 'max' not in render_kw:
            maximum = getattr(validator, 'max', -1)
            if maximum not in (-1, None):
                render_kw['max'] = maximum
    return render_kw
def set_title(field, render_kw=None):
    """
    Returns *render_kw* with a *title* derived from the field.

    When the field has a (truthy) *description* and no *title* key is
    present yet, the *title* is set to that description.
    """
    render_kw = {} if render_kw is None else render_kw
    description = getattr(field, 'description')
    if description and 'title' not in render_kw:
        render_kw['title'] = '{}'.format(description)
    return render_kw
def get_html5_kwargs(field, render_kw=None, force=False):
    """
    Returns a copy of *render_kw* with keys added for a bound *field*.

    A copy of *render_kw* (or a fresh dictionary if none is given) is
    passed through the auto-attribute helpers and returned.

    .. important::
        New keys may be added, but existing values are never changed
        unless *force* is used.

    Raises:
        ValueError: if *field* is an :cls:`UnboundField`.

    The following keys are set automatically:

    :required:
        Set if the field's `required` flag is on (mostly via
        validators); browsers use it to mark required inputs.
    :invalid:
        'invalid' is set as (or prepended to) the CSS class(es) when the
        field has errors; browsers do the same on detected errors.
    :min / max:
        Copied from a `Length` or `NumberRange` validator, if present.
    :title:
        Defaults to the field's *description* when no *title* is set.
    """
    if isinstance(field, UnboundField):
        msg = 'This function needs a bound field not: {}'
        raise ValueError(msg.format(field))
    auto_kw = dict(render_kw) if render_kw else {}
    auto_kw = set_required(field, auto_kw, force)  # is field required?
    auto_kw = set_invalid(field, auto_kw)  # is field invalid?
    auto_kw = set_minmax(field, auto_kw, force)  # validators' min/max
    auto_kw = set_title(field, auto_kw)  # missing title?
    return auto_kw
def render_field(self, field, render_kw):
"""
Returns the rendered field after adding auto–attributes.
Calls the field`s widget with the following kwargs:
1. the *render_kw* set on the field are used as based
2. and are updated with the *render_kw* arguments from the render call
3. this is used as an argument for a call to `get_html5_kwargs`
4. the return value of the call is used as final *render_kw*
"""
field_kw = getattr(field, 'render_kw', None)
if field_kw is not None:
render_kw = dict(field_kw, **render_kw)
render_kw = get_html5_kwargs(field, render_kw)
return field.widget(field, **render_kw) | Returns the rendered field after adding auto–attributes.
Calls the field`s widget with the following kwargs:
1. the *render_kw* set on the field are used as based
2. and are updated with the *render_kw* arguments from the render call
3. this is used as an argument for a call to `get_html5_kwargs`
4. the return value of the call is used as final *render_kw* | entailment |
def x(self):
'''
np.array: The grid points in x.
'''
if None not in (self.x_min, self.x_max, self.x_step) and \
self.x_min != self.x_max:
x = np.arange(self.x_min, self.x_max+self.x_step-self.y_step*0.1, self.x_step)
else:
x = np.array([])
return x | np.array: The grid points in x. | entailment |
def y(self):
'''
np.array: The grid points in y.
'''
if None not in (self.y_min, self.y_max, self.y_step) and \
self.y_min != self.y_max:
y = np.arange(self.y_min, self.y_max-self.y_step*0.1, self.y_step)
else:
y = np.array([])
return y | np.array: The grid points in y. | entailment |
def eps_func(self):
'''
function: a function that when passed a `x` and `y` values,
returns the permittivity profile of the structure,
interpolating if necessary.
'''
interp_real = interpolate.interp2d(self.x, self.y, self.eps.real)
interp_imag = interpolate.interp2d(self.x, self.y, self.eps.imag)
interp = lambda x, y: interp_real(x, y) + 1.j*interp_imag(x, y)
return interp | function: a function that when passed a `x` and `y` values,
returns the permittivity profile of the structure,
interpolating if necessary. | entailment |
def n_func(self):
'''
function: a function that when passed a `x` and `y` values,
returns the refractive index profile of the structure,
interpolating if necessary.
'''
return interpolate.interp2d(self.x, self.y, self.n) | function: a function that when passed a `x` and `y` values,
returns the refractive index profile of the structure,
interpolating if necessary. | entailment |
def _add_material(self, x_bot_left, y_bot_left, x_top_right, y_top_right,
n_material, angle=0):
'''
A low-level function that allows writing a rectangle refractive
index profile to a `Structure`.
Args:
x_bot_left (float): The bottom-left x-coordinate of the
rectangle.
y_bot_left (float): The bottom-left y-coordinate of the
rectangle.
x_top_right (float): The top-right x-coordinate of the
rectangle.
y_top_right (float): The top-right y-coordinate of the
rectangle.
n_material (float): The refractive index of the points
encompassed by the defined rectangle.
angle (float): The angle in degrees of the sidewalls
of the defined rectangle. Default is 0. This
is useful for creating a ridge with angled
sidewalls.
'''
x_mask = np.logical_and(x_bot_left<=self.x, self.x<=x_top_right)
y_mask = np.logical_and(y_bot_left<=self.y, self.y<=y_top_right)
xy_mask = np.kron(y_mask, x_mask).reshape((y_mask.size, x_mask.size))
self.n[xy_mask] = n_material
if angle:
self._add_triangular_sides(xy_mask, angle, y_top_right, y_bot_left,
x_top_right, x_bot_left, n_material)
return self.n | A low-level function that allows writing a rectangle refractive
index profile to a `Structure`.
Args:
x_bot_left (float): The bottom-left x-coordinate of the
rectangle.
y_bot_left (float): The bottom-left y-coordinate of the
rectangle.
x_top_right (float): The top-right x-coordinate of the
rectangle.
y_top_right (float): The top-right y-coordinate of the
rectangle.
n_material (float): The refractive index of the points
encompassed by the defined rectangle.
angle (float): The angle in degrees of the sidewalls
of the defined rectangle. Default is 0. This
is useful for creating a ridge with angled
sidewalls. | entailment |
def write_to_file(self, filename='material_index.dat', plot=True):
'''
Write the refractive index profile to file.
Args:
filename (str): The nominal filename the refractive
index data should be saved to.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
'''
path = os.path.dirname(sys.modules[__name__].__file__) + '/'
with open(filename, 'w') as fs:
for n_row in np.abs(self.n[::-1]):
n_str = ','.join([str(v) for v in n_row])
fs.write(n_str+'\n')
if plot:
filename_image_prefix, _ = os.path.splitext(filename)
filename_image = filename_image_prefix + '.png'
args = {
'title': 'Refractive Index Profile',
'x_pts': self.x_pts,
'y_pts': self.y_pts,
'x_min': self.x_min,
'x_max': self.x_max,
'y_min': self.y_min,
'y_max': self.y_max,
'filename_data': filename,
'filename_image': filename_image
}
if MPL:
heatmap = np.loadtxt(args['filename_data'], delimiter=',')
plt.clf()
plt.title(args['title'])
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.imshow(np.flipud(heatmap),
extent=(args['x_min'], args['x_max'], args['y_min'], args['y_max']),
aspect="auto")
plt.colorbar()
plt.savefig(filename_image)
else:
gp.gnuplot(path+'structure.gpi', args) | Write the refractive index profile to file.
Args:
filename (str): The nominal filename the refractive
index data should be saved to.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`. | entailment |
def add_slab(self, height, n_background=1., position='top'):
'''
Creates and adds a :class:`Slab` object.
Args:
height (float): Height of the slab.
n_background (float): The nominal refractive
index of the slab. Default is 1 (air).
Returns:
str: The name of the slab.
'''
assert position in ('top', 'bottom')
name = str(self.slab_count)
if not callable(n_background):
n_back = lambda wl: n_background
else:
n_back = n_background
height_discretised = self.y_step*((height // self.y_step) + 1)
y_min = self._next_start
y_max = y_min + height_discretised
self.slabs[name] = Slab(name, self.x_step, self.y_step, self.x_max,
y_max, self.x_min, y_min, n_back, self._wl)
self.y_max = y_max
self._next_start = y_min + height_discretised
self.slab_count += 1
if position == 'bottom':
slabs = {}
for k in self.slabs.keys():
slabs[str(int(k)+1)] = self.slabs[k]
slabs['0'] = slabs.pop(str(self.slab_count))
self.slabs = slabs
return name | Creates and adds a :class:`Slab` object.
Args:
height (float): Height of the slab.
n_background (float): The nominal refractive
index of the slab. Default is 1 (air).
Returns:
str: The name of the slab. | entailment |
def change_wavelength(self, wavelength):
'''
Changes the wavelength of the structure.
This will affect the mode solver and potentially
the refractive indices used (provided functions
were provided as refractive indices).
Args:
wavelength (float): The new wavelength.
'''
for name, slab in self.slabs.items():
const_args = slab._const_args
mat_args = slab._mat_params
const_args[8] = wavelength
s = Slab(*const_args)
for mat_arg in mat_args:
s.add_material(*mat_arg)
self.slabs[name] = s
self._wl = wavelength | Changes the wavelength of the structure.
This will affect the mode solver and potentially
the refractive indices used (provided functions
were provided as refractive indices).
Args:
wavelength (float): The new wavelength. | entailment |
def n(self):
'''
np.array: The refractive index profile matrix
of the current slab.
'''
try:
n_mat = self.slabs['0'].n
for s in range(1, self.slab_count):
n_mat = np.vstack((self.slabs[str(s)].n, n_mat))
except KeyError:
n_mat = None
return n_mat | np.array: The refractive index profile matrix
of the current slab. | entailment |
def add_material(self, x_min, x_max, n, angle=0):
'''
Add a refractive index between two x-points.
Args:
x_min (float): The start x-point.
x_max (float): The stop x-point.
n (float, function): Refractive index between
`x_min` and `x_max`. Either a constant (`float`), or
a function that accepts one parameters, the
wavelength, and returns a float of the refractive
index. This is useful when doing wavelength
sweeps and solving for the group velocity. The
function provided could be a Sellmeier equation.
angle (float): Angle in degrees of the slope of the
sidewalls at `x_min` and `x_max`. This is useful
for defining a ridge with angled sidewalls.
'''
self._mat_params.append([x_min, x_max, n, angle])
if not callable(n):
n_mat = lambda wl: n
else:
n_mat = n
Structure._add_material(self, x_min, self.y_min, x_max, self.y_max, n_mat(self._wl), angle)
return self.n | Add a refractive index between two x-points.
Args:
x_min (float): The start x-point.
x_max (float): The stop x-point.
n (float, function): Refractive index between
`x_min` and `x_max`. Either a constant (`float`), or
a function that accepts one parameters, the
wavelength, and returns a float of the refractive
index. This is useful when doing wavelength
sweeps and solving for the group velocity. The
function provided could be a Sellmeier equation.
angle (float): Angle in degrees of the slope of the
sidewalls at `x_min` and `x_max`. This is useful
for defining a ridge with angled sidewalls. | entailment |
def write_to_file(self, filename='material_index.dat', plot=True):
'''
Write the refractive index profile to file.
Args:
filename (str): The nominal filename the refractive
index data should be saved to.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
'''
path = os.path.dirname(sys.modules[__name__].__file__) + '/'
dir_plot = 'material_index/'
if not os.path.exists(dir_plot):
os.makedirs(dir_plot)
for axis, name in zip(self.axes, self.axes_str):
root, ext = os.path.splitext(filename)
fn = dir_plot + root + '_'+ name + ext
with open(fn, 'w') as fs:
for n_row in np.abs(axis.n[::-1]):
n_str = ','.join([str(v) for v in n_row])
fs.write(n_str+'\n')
if plot:
filename_image_prefix, _ = os.path.splitext(fn)
filename_image = filename_image_prefix + '.png'
args = {
'title': 'Refractive Index Profile: %s' % name,
'x_pts': self.xx.x_pts,
'y_pts': self.xx.y_pts,
'x_min': self.xx.x_min,
'x_max': self.xx.x_max,
'y_min': self.xx.y_min,
'y_max': self.xx.y_max,
'filename_data': fn,
'filename_image': filename_image
}
if MPL:
heatmap = np.loadtxt(args['filename_data'], delimiter=',')
plt.clf()
plt.title(args['title'])
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.imshow(np.flipud(heatmap),
extent=(args['x_min'], args['x_max'], args['y_min'], args['y_max']),
aspect="auto")
plt.colorbar()
plt.savefig(filename_image)
else:
gp.gnuplot(path+'structure.gpi', args, silent=False) | Write the refractive index profile to file.
Args:
filename (str): The nominal filename the refractive
index data should be saved to.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`. | entailment |
def change_wavelength(self, wavelength):
'''
Changes the wavelength of the structure.
This will affect the mode solver and potentially
the refractive indices used (provided functions
were provided as refractive indices).
Args:
wavelength (float): The new wavelength.
'''
for axis in self.axes:
if issubclass(type(axis), Slabs):
axis.change_wavelength(wavelength)
self.xx, self.xy, self.yx, self.yy, self.zz = self.axes
self._wl = wavelength | Changes the wavelength of the structure.
This will affect the mode solver and potentially
the refractive indices used (provided functions
were provided as refractive indices).
Args:
wavelength (float): The new wavelength. | entailment |
def covstr(s):
""" convert string to int or float. """
try:
ret = int(s)
except ValueError:
ret = float(s)
return ret | convert string to int or float. | entailment |
def output(self):
#re = "{%(time)s} %(name)s %(stock_no)s %(c)s %(range)+.2f(%(pp)+.2f%%) %(value)s" % {
'''
re = """<table>
<tr><td>%(name)s</td><td>%(c)s</td><td>%(range)+.2f(%(pp)+.2f%%)</td></tr>
<tr><td>%(stock_no)s</td><td>%(value)s</td><td>%(time)s</td></tr></table>""" % {
'''
if covstr(self.g['range']) > 0:
css = "red"
elif covstr(self.g['range']) < 0:
css = "green"
else:
css = "gray"
re = {
'name': self.g['name'],
'stock_no': self.g['no'],
'time': self.g['time'],
'open': self.g['open'],
'h': self.g['h'],
'l': self.g['l'],
'c': self.g['c'],
'max': self.g['max'],
'min': self.g['min'],
'range': covstr(self.g['range']),
'ranges': self.g['ranges'],
'value': self.g['value'],
'pvalue': self.g['pvalue'],
'pp': covstr(self.g['pp']),
'top5buy': self.g['top5buy'],
'top5sell': self.g['top5sell'],
'crosspic': self.g['crosspic'],
'css': css
}
return re | re = """<table>
<tr><td>%(name)s</td><td>%(c)s</td><td>%(range)+.2f(%(pp)+.2f%%)</td></tr>
<tr><td>%(stock_no)s</td><td>%(value)s</td><td>%(time)s</td></tr></table>""" % { | entailment |
def Rt_display(stock_no):
""" For real time stock display
即時盤用,顯示目前查詢各股的股價資訊。
"""
a = twsk(stock_no).real
if a:
re = "{%(time)s} %(stock_no)s %(c)s %(range)+.2f(%(pp)+.2f%%) %(value)s" % {
'stock_no': stock_no,
'time': a['time'],
'c': a['c'],
'range': covstr(a['range']),
'value': a['value'],
'pp': covstr(a['pp'])
}
return re
else:
return a | For real time stock display
即時盤用,顯示目前查詢各股的股價資訊。 | entailment |
def ckinv(self,oo):
""" check the value is date or not
檢查是否為日期格式
"""
pattern = re.compile(r"[0-9]{2}/[0-9]{2}/[0-9]{2}")
b = re.search(pattern, oo[0])
try:
b.group()
return True
except:
return False | check the value is date or not
檢查是否為日期格式 | entailment |
def high_or_low(self,one,two,rev=0):
""" Return ↑↓- for high, low or equal.
回傳漲跌標示
rev = 0
回傳 ↑↓-
rev = 1
回傳 1 -1 0
"""
if rev == 0:
if one > two:
re = '↑'.decode('utf-8')
elif one < two:
re = '↓'.decode('utf-8')
else:
re = '-'.decode('utf-8')
else:
if one > two:
re = 1
elif one < two:
re = -1
else:
re = 0
return re | Return ↑↓- for high, low or equal.
回傳漲跌標示
rev = 0
回傳 ↑↓-
rev = 1
回傳 1 -1 0 | entailment |
def goback(self,days = 1):
""" Go back days
刪除最新天數資料數據
days 代表刪除多少天數(倒退幾天)
"""
for i in xrange(days):
self.raw_data.pop()
self.data_date.pop()
self.stock_range.pop()
self.stock_vol.pop()
self.stock_open.pop()
self.stock_h.pop()
self.stock_l.pop() | Go back days
刪除最新天數資料數據
days 代表刪除多少天數(倒退幾天) | entailment |
def fetch_data(self, stock_no, nowdatetime, firsttime = 1):
""" Fetch data from twse.com.tw
return list.
從 twse.com.tw 下載資料,回傳格式為 list
"""
url = 'http://www.twse.com.tw/ch/trading/exchange/STOCK_DAY/STOCK_DAY_print.php?genpage=genpage/Report%(year)d%(mon)02d/%(year)d%(mon)02d_F3_1_8_%(stock)s.php&type=csv&r=%(rand)s' % {'year': nowdatetime.year, 'mon': nowdatetime.month, 'stock': stock_no, 'rand': random.randrange(1,1000000)}
self.debug_print(url)
logging.info(url)
#print cc.info().headers
# set memcache expire
now = TWTime().now
if now >= datetime(now.year, now.month, now.day, 14, 45):
addday = 1
else:
addday = 0
endtime = datetime(now.year, now.month, now.day, 14, 00) + timedelta(days = addday) ## change from 13:35 to 14:00
logging.info('endtime: %s' % str(endtime))
if firsttime == 0:
if endtime <= now:
expire = 'ALUP' ## always update.
else:
expire = (endtime - now).seconds
else:
expire = 0 ## never expire.
logging.info('expire: %s' % expire)
## get memcache
memname = '%(stock)s%(year)d%(mon)02d' % {'year': nowdatetime.year, 'mon': nowdatetime.month,'stock': stock_no}
stkm = memcache.get(memname)
if stkm:
csv_read = csv.reader(stkm)
logging.info('#MemcacheGet: %s' % memname)
else:
cc = urllib2.urlopen(url)
cc_read = cc.readlines()
csv_read = csv.reader(cc_read)
if expire != 'ALUP':
memcache.add(memname, cc_read, expire)
else:
memcache.delete(memname)
memcache.add('time%s' % memname, '%s %s' % (now, expire))
logging.info('#MemcacheAdd: %s' % memname)
return csv_read | Fetch data from twse.com.tw
return list.
從 twse.com.tw 下載資料,回傳格式為 list | entailment |
def list_data(self, csv_read):
""" 將資料 list 化
return dictionary:
[stock_price]: Closing price (list)
收盤價格
[stock_name]: Stock name (str) and encode form big5 to utf-8
該股名稱,big5 → UTF-8
[data_date]: Stock date (list)
數據日期資訊
[stock_range]: Stock range price (list)
該鼓漲跌價格
[stock_vol]: Stock Volue (list)
成交量
[stock_open]: Stock open price (list)
開盤價
[stock_h]: Stock high price (list)
最高價
[stock_l]: Stock low price (list)
最低價
"""
getr = []
getdate = []
getrange = []
getvol = []
getopen = []
geth = []
getl = []
otherinfo = []
fetch_data_raw = 1
for i in csv_read:
if self.ckinv(i):
#if len(i) > 1:
self.debug_print(i)
getr.append(self.covstr(i[6]))
getdate.append(i[0].replace(' ',''))
getrange.append(i[-2])
getvol.append(int(i[1].replace(',','')))
getopen.append(self.covstr(i[3]))
geth.append(self.covstr(i[4]))
getl.append(self.covstr(i[5]))
else:
otherinfo.append(i[0])
fetch_data_raw += 1
if fetch_data_raw >= 3:
#stock_name = otherinfo[0].split(' ')[2].decode('big5').encode('utf-8')
stock_name = unicode(otherinfo[0].split(' ')[2],'cp950')
else:
pass
return_value = {
'stock_price': getr,
'stock_name': stock_name,
'data_date': getdate,
'stock_range': getrange,
'stock_vol': getvol,
'stock_open': getopen,
'stock_h': geth,
'stock_l': getl
}
self.debug_print(otherinfo)
self.debug_print(stock_name)
return return_value | 將資料 list 化
return dictionary:
[stock_price]: Closing price (list)
收盤價格
[stock_name]: Stock name (str) and encode form big5 to utf-8
該股名稱,big5 → UTF-8
[data_date]: Stock date (list)
數據日期資訊
[stock_range]: Stock range price (list)
該鼓漲跌價格
[stock_vol]: Stock Volue (list)
成交量
[stock_open]: Stock open price (list)
開盤價
[stock_h]: Stock high price (list)
最高價
[stock_l]: Stock low price (list)
最低價 | entailment |
def range_per(self):
""" Range percentage
計算最新日之漲跌幅度百分比
"""
rp = float((self.raw_data[-1] - self.raw_data[-2]) / self.raw_data[-2] * 100)
return rp | Range percentage
計算最新日之漲跌幅度百分比 | entailment |
def SD(self, days=45):
""" Standard Deviation.
計算 days 日內之標準差,預設 45 日
"""
if len(self.raw_data) >= days:
data = self.raw_data[-days:]
data_avg = float(sum(data) / days)
data2 = []
for x in data:
data2.append((x - data_avg ) ** 2)
return math.sqrt(sum(data2) / len(data2))
else:
return 0 | Standard Deviation.
計算 days 日內之標準差,預設 45 日 | entailment |
def SDAVG(self, days=45):
""" the last 45 days average.
計算 days 日內之平均數,預設 45 日
"""
if len(self.raw_data) >= days:
data = self.raw_data[-days:]
data_avg = float(sum(data) / days)
return data_avg
else:
return 0 | the last 45 days average.
計算 days 日內之平均數,預設 45 日 | entailment |
def CV(self, days=45):
""" Coefficient of Variation.
計算 days 日內之變異數,預設 45 日
"""
if len(self.raw_data) >= days:
data_avg = sum(self.raw_data[-days:]) / days
return self.SD / data_avg
else:
return 0 | Coefficient of Variation.
計算 days 日內之變異數,預設 45 日 | entailment |
def TimeinOpen(self):
""" In open market time.
在當日開市時刻,9 - 14
"""
now = TWTime().now.hour
if now >= 9 and now <= 14:
return True
else:
return False | In open market time.
在當日開市時刻,9 - 14 | entailment |
def MAC(self,days,rev = 0):
""" Comparing yesterday price is high, low or equal.
return ↑,↓ or -
與前一天 days 日收盤價移動平均比較
rev = 0
回傳 ↑,↓ or -
rev = 1
回傳 1,-1 or 0
"""
yesterday = self.raw_data[:]
yesterday.pop()
yes_MA = float(sum(yesterday[-days:]) / days)
today_MA = self.MA(days)
return self.high_or_low(today_MA, yes_MA, rev) | Comparing yesterday price is high, low or equal.
return ↑,↓ or -
與前一天 days 日收盤價移動平均比較
rev = 0
回傳 ↑,↓ or -
rev = 1
回傳 1,-1 or 0 | entailment |
def MA_serial(self,days,rev=0):
""" see make_serial()
收盤價移動平均 list 化,資料格式請見 def make_serial()
"""
return self.make_serial(self.raw_data,days,rev) | see make_serial()
收盤價移動平均 list 化,資料格式請見 def make_serial() | entailment |
def MACVOL(self,days,rev=0):
""" Comparing yesterday volume is high, low or equal.
return ↑,↓ or -
與前一天 days 日成交量移動平均比較
rev = 0
回傳 ↑,↓ or -
rev = 1
回傳 1,-1 or 0
"""
yesterday = self.stock_vol[:]
yesterday.pop()
yes_MAVOL = float(sum(yesterday[-days:]) / days)
today_MAVOL = self.MAVOL(days)
return self.high_or_low(today_MAVOL, yes_MAVOL,rev) | Comparing yesterday volume is high, low or equal.
return ↑,↓ or -
與前一天 days 日成交量移動平均比較
rev = 0
回傳 ↑,↓ or -
rev = 1
回傳 1,-1 or 0 | entailment |
def MAVOL_serial(self,days,rev=0):
""" see make_serial()
成較量移動平均 list 化,資料格式請見 def make_serial()
"""
return self.make_serial(self.stock_vol,days,rev=0) | see make_serial()
成較量移動平均 list 化,資料格式請見 def make_serial() | entailment |
def VOLMAX3(self):
""" Volume is the max in last 3 days.
三日內最大成交量
"""
if self.stock_vol[-1] > self.stock_vol[-2] and self.stock_vol[-1] > self.stock_vol[-3]:
return True
else:
return False | Volume is the max in last 3 days.
三日內最大成交量 | entailment |
def MAO(self,day1,day2,rev=0):
""" This is MAO(Moving Average Oscillator), not BIAS.
It's only 'MAday1 - MAday2'.
乖離率,MAday1 - MAday2 兩日之移動平均之差
return list:
[0] is the times of high, low or equal
[0] is times
[1] is the MAO data
[1] rev=0:↑ ↓ or -,rev=1:1 -1 0
回傳:
[0]
[0] 回傳次數
[1] MAO 資料數據
[1] 漲跌標示,rev=0:↑ ↓ or -,rev=1:1 -1 0
"""
day1MA = self.MA_serial(day1)[1]
day2MA = self.MA_serial(day2)[1]
bw = abs(day1-day2)
if len(day1MA) > len(day2MA):
day1MAs = day1MA[bw:]
day2MAs = day2MA[:]
elif len(day1MA) < len(day2MA):
day1MAs = day1MA[:]
day2MAs = day2MA[bw:]
else:
day1MAs = day1MA[:]
day2MAs = day2MA[:]
serial = []
for i in xrange(len(day1MAs)):
serial.append(day1MAs[i]-day2MAs[i])
cum = self.make_serial(serial,1,rev)
#return [day1MAs,day2MAs,serial,cum,self.high_or_low(cum[-1],cum[-2])]
return [cum,self.high_or_low(day1MAs[-1]-day2MAs[-1],day1MAs[-2]-day2MAs[-2],rev)] | This is MAO(Moving Average Oscillator), not BIAS.
It's only 'MAday1 - MAday2'.
乖離率,MAday1 - MAday2 兩日之移動平均之差
return list:
[0] is the times of high, low or equal
[0] is times
[1] is the MAO data
[1] rev=0:↑ ↓ or -,rev=1:1 -1 0
回傳:
[0]
[0] 回傳次數
[1] MAO 資料數據
[1] 漲跌標示,rev=0:↑ ↓ or -,rev=1:1 -1 0 | entailment |
def ckMAO(self,data,s=5,pm=False):
"""判斷正負乖離位置
s = 取樣判斷區間
pm = True(正)/False(負) 乖離
return [T/F, 第幾個轉折日, 乖離值]
"""
c = data[-s:]
if pm:
ckvalue = max(c)
preckvalue = max(c) > 0
else:
ckvalue = min(c)
preckvalue = max(c) < 0
return [s - c.index(ckvalue) < 4 and c.index(ckvalue) != s-1 and preckvalue, s - c.index(ckvalue) - 1, ckvalue] | 判斷正負乖離位置
s = 取樣判斷區間
pm = True(正)/False(負) 乖離
return [T/F, 第幾個轉折日, 乖離值] | entailment |
def RABC(self):
""" Return ABC
轉折點 ABC
"""
A = self.raw_data[-3]*2 - self.raw_data[-6]
B = self.raw_data[-2]*2 - self.raw_data[-5]
C = self.raw_data[-1]*2 - self.raw_data[-4]
return '(%.2f,%.2f,%.2f)' % (A,B,C) | Return ABC
轉折點 ABC | entailment |
def make_serial(self,data,days,rev=0):
""" make data in list
if data enough, will return:
[0] is the times of high, low or equal
[1] is the serial of data.
or return '?'
資料數據 list 化,days 移動平均值
[0] 回傳次數
[1] 回傳數據
"""
raw = data[:]
result = []
try:
while len(raw) >= days:
result.append(float(sum(raw[-days:]) / days))
raw.pop()
self.debug_print(len(result))
result.reverse()
re = [self.cum_serial(result,rev), result]
return re
except:
return '?' | make data in list
if data enough, will return:
[0] is the times of high, low or equal
[1] is the serial of data.
or return '?'
資料數據 list 化,days 移動平均值
[0] 回傳次數
[1] 回傳數據 | entailment |
def cum_serial(self, raw,rev=0):
""" Cumulate serial data
and return times(int)
計算數據重複(持續)次數
"""
org = raw[1:]
diff = raw[:-1]
result = []
for i in xrange(len(org)):
result.append(self.high_or_low(org[i], diff[i],rev))
times = 0
try:
if result[-1] == result[-2]:
signal = result[-1]
re_signal = result[:]
try:
while signal == re_signal[-1]:
re_signal.pop()
times += 1
except:
pass
else:
times += 1
except:
times = '?'
if self.debug:
for i in result:
print i
self.debug_print(times)
return times | Cumulate serial data
and return times(int)
計算數據重複(持續)次數 | entailment |
def display(self,*arg):
""" For simple Demo
測試用顯示樣式。
"""
print self.stock_name,self.stock_no
print '%s %s %s(%+.2f%%)' % (self.data_date[-1],self.raw_data[-1],self.stock_range[-1],self.range_per)
for i in arg:
print ' - MA%02s %.2f %s(%s)' % (i,self.MA(i),self.MAC(i),self.MA_serial(i)[0])
print ' - Volume: %s %s(%s)' % (self.MAVOL(1)/1000,self.MACVOL(1),self.MAVOL_serial(1)[0])
MAO = self.MAO(3,6)
print ' - MAO(3-6): %.2f %s(%s)' % (MAO[0][1][-1], MAO[1], MAO[0][0])
print ' - RABC: %s' % self.RABC | For simple Demo
測試用顯示樣式。 | entailment |
def XMPP_display(self,*arg):
""" For XMPP Demo
輸出到 XMPP 之樣式。
"""
MA = ''
for i in arg:
MAs = '- MA%02s: %.2f %s(%s)\n' % (
unicode(i),
self.MA(i),
self.MAC(i),
unicode(self.MA_serial(i)[0])
)
MA = MA + MAs
vol = '- Volume: %s %s(%s)' % (
unicode(self.MAVOL(1)/1000),
unicode(self.MACVOL(1)),
unicode(self.MAVOL_serial(1)[0])
)
MAO = self.MAO(3,6)
re = """%(stock_name)s %(stock_no)s
%(stock_date)s: %(stock_price)s %(stock_range)s(%(range_per)+.2f%%)
%(MA)s%(vol)s
- MAO(3-6): %(MAO_v).2f %(MAO_c)s(%(MAO_times)s)
- RABC: %(RABC)s""" % {
'stock_name': unicode(self.stock_name),
'stock_no': unicode(self.stock_no),
'stock_date': unicode(self.data_date[-1]),
'stock_price': unicode(self.raw_data[-1]),
'stock_range': unicode(self.stock_range[-1]),
'range_per': self.range_per,
'MA': MA,
'vol': vol,
'MAO_v': MAO[0][1][-1],
'MAO_c': unicode(MAO[1]),
'MAO_times': unicode(MAO[0][0]),
'RABC': self.RABC
}
return re | For XMPP Demo
輸出到 XMPP 之樣式。 | entailment |
def Task_display(self):
""" For Task overall stock display
顯示資訊樣式之一,兩行資訊。
"""
re = """%(stock_name)s %(stock_no)s %(stock_date)s
Today: %(stock_price)s %(stock_range)s
=-=-=-=""" % {
'stock_name': unicode(self.stock_name),
'stock_no': unicode(self.stock_no),
'stock_date': unicode(self.data_date[-1]),
'stock_price': unicode(self.raw_data[-1]),
'stock_range': unicode(self.stock_range[-1]),
}
return re | For Task overall stock display
顯示資訊樣式之一,兩行資訊。 | entailment |
def Cmd_display(self):
""" For Task overall stock display
一行顯示資訊,用於終端機顯示樣式。
"""
re = "%(stock_no)s %(stock_name)s %(stock_date)s %(stock_price)s %(stock_range)s %(stock_range_per).2f%% %(RABC)s %(stock_vol)s" % {
'stock_name': unicode(self.stock_name),
'stock_no': unicode(self.stock_no),
'stock_date': unicode(self.data_date[-1]),
'stock_price': unicode(self.raw_data[-1]),
'stock_range': unicode(self.stock_range[-1]),
'stock_range_per': self.range_per,
'stock_vol': self.stock_vol[-1]/1000,
'RABC': self.RABC
}
return re | For Task overall stock display
一行顯示資訊,用於終端機顯示樣式。 | entailment |
def gchart(self, s = 0, size = [], candle = 20):
""" Chart for serious stocks
輸出 Google Chart 圖表。
s = 資料筆數
size = 圖表寬度、高度 [寬度,高度]
candle = K 棒的寬度
"""
if s == 0:
s = len(self.raw_data)
if len(size) == 2:
sw,sh = size
else:
sh = 300
sw = 25 * s
if sw > 1000:
sw = 1000
candle = 950/s
stc = ''
for i in self.raw_data[-s:]:
stc += str(i) + ','
sto = ''
for i in self.stock_open[-s:]:
sto += str(i) + ','
sth = ''
for i in self.stock_h[-s:]:
sth += str(i) + ','
stl = ''
for i in self.stock_l[-s:]:
stl += str(i) + ','
stdate = ''
for i in self.data_date[-s:]:
stdate += str(i[-2:]) + '|'
stmax = max(self.stock_h[-s:])
stmin = min(self.stock_l[-s:])
strange = (stmax-stmin) / 10
re = "http://%(rand)s.chart.apis.google.com/chart?chs=%(sw)sx%(sh)s&cht=lc&chd=t1:0,0,0|0,%(h)s0|0,%(c)s0|0,%(o)s0|0,%(l)s0&chm=F,,1,1:-1,%(candle)s&chxt=y,x&chds=%(min)s,%(max)s&chxr=0,%(min)s,%(max)s,%(range)s&chg=20,%(chg)s&chtt=%(chtt)s&chxl=1:||%(chxl)s" % {
'h': sth,
'c': stc,
'o': sto,
'l': stl,
'min': stmin,
'max': stmax,
'sw': sw,
'sh': sh,
'range': strange,
'candle': candle,
'chg': 10,
'rand': random.randint(0,9),
'chxl': stdate,
'chtt': '%s %s' % (self.stock_name,self.stock_no)
}
return re | Chart for serious stocks
輸出 Google Chart 圖表。
s = 資料筆數
size = 圖表寬度、高度 [寬度,高度]
candle = K 棒的寬度 | entailment |
def buy(self, no, price, value):
''' 買 '''
self.money += -price*value
try:
self.store[no] += value
except:
self.store[no] = value
try:
self.avgprice[no]['buy'] += [price]
except:
try:
self.avgprice[no]['buy'] = [price]
except:
self.avgprice[no] = {}
self.avgprice[no]['buy'] = [price] | 買 | entailment |
def sell(self, no, price, value):
''' 賣 '''
self.money += price*value
try:
self.store[no] += -value
except:
self.store[no] = -value
try:
self.avgprice[no]['sell'] += [price]
except:
try:
self.avgprice[no]['sell'] = [price]
except:
self.avgprice[no] = {}
self.avgprice[no]['sell'] = [price] | 賣 | entailment |
def showinfo(self):
''' 總覽顯示 '''
print 'money:',self.money
print 'store:',self.store
print 'avgprice:',self.avgprice | 總覽顯示 | entailment |
def search(self,q):
""" Search. """
import re
pattern = re.compile("%s" % q)
result = {}
for i in self.allstockno:
b = re.search(pattern, self.allstockno[i])
try:
b.group()
result[i] = self.allstockno[i]
except:
pass
return result | Search. | entailment |
def trapz2(f, x=None, y=None, dx=1.0, dy=1.0):
"""Double integrate."""
return numpy.trapz(numpy.trapz(f, x=y, dx=dy), x=x, dx=dx) | Double integrate. | entailment |
def solve(self, neigs=4, tol=0, guess=None, mode_profiles=True, initial_mode_guess=None):
"""
This function finds the eigenmodes.
Parameters
----------
neigs : int
number of eigenmodes to find
tol : float
Relative accuracy for eigenvalues. The default value of 0 implies machine precision.
guess : float
a guess for the refractive index. Only finds eigenvectors with an effective refractive index
higher than this value.
Returns
-------
self : an instance of the VFDModeSolver class
obtain the fields of interest for specific modes using, for example:
solver = EMpy.modesolvers.FD.VFDModeSolver(wavelength, x, y, epsf, boundary).solve()
Ex = solver.modes[0].Ex
Ey = solver.modes[0].Ey
Ez = solver.modes[0].Ez
"""
from scipy.sparse.linalg import eigen
self.nmodes = neigs
self.tol = tol
A = self.build_matrix()
if guess is not None:
# calculate shift for eigs function
k = 2 * numpy.pi / self.wl
shift = (guess * k) ** 2
else:
shift = None
[eigvals, eigvecs] = eigen.eigs(A,
k=neigs,
which='LR',
tol=0.001,
ncv=None,
v0 = initial_mode_guess,
return_eigenvectors=mode_profiles,
sigma=shift)
neffs = self.wl * scipy.sqrt(eigvals) / (2 * numpy.pi)
if mode_profiles:
Hxs = []
Hys = []
nx = self.nx
ny = self.ny
for ieig in range(neigs):
Hxs.append(eigvecs[:nx * ny, ieig].reshape(nx, ny))
Hys.append(eigvecs[nx * ny:, ieig].reshape(nx, ny))
# sort the modes
idx = numpy.flipud(numpy.argsort(neffs))
neffs = neffs[idx]
self.neff = neffs
if mode_profiles:
tmpx = []
tmpy = []
for i in idx:
tmpx.append(Hxs[i])
tmpy.append(Hys[i])
Hxs = tmpx
Hys = tmpy
[Hzs, Exs, Eys, Ezs] = self.compute_other_fields(neffs, Hxs, Hys)
self.modes = []
for (neff, Hx, Hy, Hz, Ex, Ey, Ez) in zip(neffs, Hxs, Hys, Hzs, Exs, Eys, Ezs):
self.modes.append(
FDMode(self.wl, self.x, self.y, neff, Ey, Ex, Ez, Hy, Hx, Hz).normalize())
return self | This function finds the eigenmodes.
Parameters
----------
neigs : int
number of eigenmodes to find
tol : float
Relative accuracy for eigenvalues. The default value of 0 implies machine precision.
guess : float
a guess for the refractive index. Only finds eigenvectors with an effective refractive index
higher than this value.
Returns
-------
self : an instance of the VFDModeSolver class
obtain the fields of interest for specific modes using, for example:
solver = EMpy.modesolvers.FD.VFDModeSolver(wavelength, x, y, epsf, boundary).solve()
Ex = solver.modes[0].Ex
Ey = solver.modes[0].Ey
Ez = solver.modes[0].Ez | entailment |
def real(self):
""" Real time data """
try:
unch = sum([covstr(self.stock[3]),covstr(self.stock[4])])/2
re = {'name': unicode(self.stock[36].replace(' ',''), 'cp950'),
'no': self.stock[0],
'range': self.stock[1],
'time': self.stock[2],
'max': self.stock[3],
'min': self.stock[4],
'unch': '%.2f' % unch,
'pp': '%.2f' % ((covstr(self.stock[8]) - unch)/unch*100),
'open': self.stock[5],
'h': self.stock[6],
'l': self.stock[7],
'c': self.stock[8],
'value': self.stock[9],
'pvalue': self.stock[10],
'top5buy': [
(self.stock[11], self.stock[12]),
(self.stock[13], self.stock[14]),
(self.stock[15], self.stock[16]),
(self.stock[17], self.stock[18]),
(self.stock[19], self.stock[20])
],
'top5sell': [
(self.stock[21], self.stock[22]),
(self.stock[23], self.stock[24]),
(self.stock[25], self.stock[26]),
(self.stock[27], self.stock[28]),
(self.stock[29], self.stock[30])
]
}
if '-' in self.stock[1]:
re['ranges'] = False ## price down
else:
re['ranges'] = True ## price up
re['crosspic'] = "http://chart.apis.google.com/chart?chf=bg,s,ffffff&chs=20x50&cht=ls&chd=t1:0,0,0|0,%s,0|0,%s,0|0,%s,0|0,%s,0&chds=%s,%s&chm=F,,1,1:4,20" % (re['h'],re['c'],re['open'],re['l'],re['l'],re['h'])
re['top5buy'].sort()
re['top5sell'].sort()
return re
except:
return False | Real time data | entailment |
def now(self):
''' Display Taiwan Time now
顯示台灣此刻時間
'''
localtime = datetime.datetime.now()
return localtime + datetime.timedelta(hours = time.timezone/60/60 + self.TimeZone) | Display Taiwan Time now
顯示台灣此刻時間 | entailment |
def date(self):
''' Display Taiwan date now
顯示台灣此刻日期
'''
localtime = datetime.date.today()
return localtime + datetime.timedelta(hours = time.timezone/60/60 + self.TimeZone) | Display Taiwan date now
顯示台灣此刻日期 | entailment |
def allck():
''' 檢查所有股票買賣點,剔除$10以下、成交量小於1000張的股票。 '''
for i in twseno().allstockno:
a = goristock.goristock(i)
try:
if a.stock_vol[-1] > 1000*1000 and a.raw_data[-1] > 10:
#a.goback(3) ## 倒退天數
ck4m(a)
except:
pass | 檢查所有股票買賣點,剔除$10以下、成交量小於1000張的股票。 | entailment |
def directional_coupler_lc(wavelength_nm, n_eff_1, n_eff_2):
'''
Calculates the coherence length (100% power transfer) of a
directional coupler.
Args:
wavelength_nm (float): The wavelength in [nm] the
directional coupler should operate at.
n_eff_1 (float): n_eff of the fundamental (even)
supermode of the directional coupler.
n_eff_2 (float): n_eff of the first-order (odd)
supermode of the directional coupler.
Returns:
float: The length [um] the directional coupler
needs to be to achieve 100% power transfer.
'''
wavelength_m = wavelength_nm * 1.e-9
dn_eff = (n_eff_1 - n_eff_2).real
lc_m = wavelength_m / (2. * dn_eff)
lc_um = lc_m * 1.e6
return lc_um | Calculates the coherence length (100% power transfer) of a
directional coupler.
Args:
wavelength_nm (float): The wavelength in [nm] the
directional coupler should operate at.
n_eff_1 (float): n_eff of the fundamental (even)
supermode of the directional coupler.
n_eff_2 (float): n_eff of the first-order (odd)
supermode of the directional coupler.
Returns:
float: The length [um] the directional coupler
needs to be to achieve 100% power transfer. | entailment |
def grating_coupler_period(wavelength,
n_eff,
n_clad,
incidence_angle_deg,
diffration_order=1):
'''
Calculate the period needed for a grating coupler.
Args:
wavelength (float): The target wavelength for the
grating coupler.
n_eff (float): The effective index of the mode
of a waveguide with the width of the grating
coupler.
n_clad (float): The refractive index of the cladding.
incidence_angle_deg (float): The incidence angle
the grating coupler should operate at [degrees].
diffration_order (int): The grating order the coupler
should work at. Default is 1st order (1).
Returns:
float: The period needed for the grating coupler
in the same units as the wavelength was given at.
'''
k0 = 2. * np.pi / wavelength
beta = n_eff.real * k0
n_inc = n_clad
grating_period = (2.*np.pi*diffration_order) \
/ (beta - k0*n_inc*np.sin(np.radians(incidence_angle_deg)))
return grating_period | Calculate the period needed for a grating coupler.
Args:
wavelength (float): The target wavelength for the
grating coupler.
n_eff (float): The effective index of the mode
of a waveguide with the width of the grating
coupler.
n_clad (float): The refractive index of the cladding.
incidence_angle_deg (float): The incidence angle
the grating coupler should operate at [degrees].
diffration_order (int): The grating order the coupler
should work at. Default is 1st order (1).
Returns:
float: The period needed for the grating coupler
in the same units as the wavelength was given at. | entailment |
def oop(aa):
""" For cmd output. """
return ('%s %s %s %.2f %+.2f %s %s %s %s %+.2f %s %s %.2f %.4f %.4f' % (aa.stock_no, aa.stock_name, aa.data_date[-1], aa.raw_data[-1], aa.range_per, aa.MAC(3), aa.MAC(6), aa.MAC(18), aa.MAO(3,6)[1], aa.MAO(3,6)[0][1][-1], aa.MAO(3,6)[0][0], aa.RABC, aa.stock_vol[-1]/1000, aa.SD, aa.CV)).encode('utf-8') | For cmd output. | entailment |
def overall(goback = 0, case = 1):
""" To run all over the stock and to find who match the 'case'
'goback' is back to what days ago.
0 is the last day.
"""
from twseno import twseno
for i in twseno().allstock:
#timetest(i)
try:
if case == 1:
try:
a = goristock(i)
if goback:
a.goback(goback)
if a.MAO(3,6)[1] == '↑'.decode('utf-8') and (a.MAO(3,6)[0][1][-1] < 0 or ( a.MAO(3,6)[0][1][-1] < 1 and a.MAO(3,6)[0][1][-1] > 0 and a.MAO(3,6)[0][1][-2] < 0 and a.MAO(3,6)[0][0] == 3)) and a.VOLMAX3 and a.stock_vol[-1] > 1000*1000 and a.raw_data[-1] > 10:
#print a.Cmd_display
print 'buy-: ' + oop(a)
elif a.MAO(3,6)[1] == '↓'.decode('utf-8') and a.MAO(3,6)[0][1][-1] > 0 and a.MAO(3,6)[0][0] <= 3:
print 'sell: ' + oop(a)
except KeyboardInterrupt:
print '::KeyboardInterrupt'
break
except IndexError:
print i
elif case == 2:
try:
a = goristock(i)
if goback:
a.goback(goback)
if a.MAO(3,6)[1] == '↑'.decode('utf-8') and (a.MAO(3,6)[0][1][-1] < 0 or ( a.MAO(3,6)[0][1][-1] < 1 and a.MAO(3,6)[0][1][-1] > 0 and a.MAO(3,6)[0][1][-2] < 0 and a.MAO(3,6)[0][0] == 3)) and a.stock_vol[-1] >= 1000*1000 and a.raw_data[-1] > 10 and (sum(a.stock_vol[-45:])/45) <= 1000*1000:
#print a.Cmd_display
print 'buy-: ' + oop(a)
except KeyboardInterrupt:
print '::KeyboardInterrupt'
break
except IndexError:
print i
elif case == 3:
try:
a = goristock(i)
if goback:
a.goback(goback)
if a.MA(3) > a.raw_data[-1] and a.MA(6) <= a.raw_data[-1] and a.MA(6) > a.MA(18):
#print a.Cmd_display
print 'buy-: ' + oop(a)
except KeyboardInterrupt:
print '::KeyboardInterrupt'
break
except IndexError:
print i
except KeyboardInterrupt:
print 'KeyboardInterrupt'
break | To run all over the stock and to find who match the 'case'
'goback' is back to what days ago.
0 is the last day. | entailment |
def read_file(filename, prepend_paths=[]):
"""
Returns the contents of *filename* (UTF-8).
If *prepend_paths* is set, join those before the *fielname*.
If it is `True`, prepend the path to `setup.py`.
"""
if prepend_paths is True:
prepend_paths = [
os.path.abspath(os.path.dirname(__file__)),
]
if prepend_paths:
prepend_paths.append(filename)
filename = os.path.join(*prepend_paths)
print(filename)
with open(filename, encoding='utf-8') as f:
return f.read() | Returns the contents of *filename* (UTF-8).
If *prepend_paths* is set, join those before the *fielname*.
If it is `True`, prepend the path to `setup.py`. | entailment |
def loaddate(self):
''' 載入檔案
檔案依據 http://www.twse.com.tw/ch/trading/trading_days.php
'''
ld = csv.reader(open('./%s/opendate.csv' % _CSVFILEPATH, 'r'))
re = {}
re['close'] = []
re['open'] = []
for i in ld:
''' 0 = 休市, 1 = 開市 '''
if i[1] == '0':
re['close'] += [datetime.strptime(i[0],'%Y/%m/%d').date()]
elif i[1] == '1':
re['open'] += [datetime.strptime(i[0],'%Y/%m/%d').date()]
else:
pass
return re | 載入檔案
檔案依據 http://www.twse.com.tw/ch/trading/trading_days.php | entailment |
def ooc(self):
''' Open or close
回傳 True:開市,False:休市。
'''
if self.ptime.date() in self.ocdate['close']: ## 判對是否為法定休市
return False
elif self.ptime.date() in self.ocdate['open']: ## 判對是否為法定開市
return True
else:
''' 判斷是否為每週開休市 '''
if self.ptime.weekday() <= 4:
return True
else:
return False | Open or close
回傳 True:開市,False:休市。 | entailment |
def ck_portf_001(self):
''' 3-6負乖離且向上,三日內最大量,成交量大於 1000 張,收盤價大於 10 元。(較嚴謹的選股)'''
return self.a.MAO(3,6)[1] == '↑'.decode('utf-8') and (self.a.MAO(3,6)[0][1][-1] < 0 or ( self.a.MAO(3,6)[0][1][-1] < 1 and self.a.MAO(3,6)[0][1][-1] > 0 and self.a.MAO(3,6)[0][1][-2] < 0 and self.a.MAO(3,6)[0][0] == 3)) and self.a.VOLMAX3 and self.a.stock_vol[-1] > 1000*1000 and self.a.raw_data[-1] > 10 | 3-6負乖離且向上,三日內最大量,成交量大於 1000 張,收盤價大於 10 元。(較嚴謹的選股) | entailment |
def ck_portf_002(self):
''' 3日均價大於6日均價,6日均價大於18日均價。(短中長線呈現多頭的態勢) '''
return self.a.MA(3) > self.a.MA(6) > self.a.MA(18) and self.a.MAC(18) == '↑'.decode('utf-8') and self.a.stock_vol[-1] > 1000*1000 and self.a.raw_data[-1] > 10 | 3日均價大於6日均價,6日均價大於18日均價。(短中長線呈現多頭的態勢) | entailment |
def ck_portf_003(self):
''' 當日成交量,大於前三天的總成交量。(短線多空動能) '''
return self.a.stock_vol[-1] > sum(self.a.stock_vol[-4:-1]) and self.a.stock_vol[-1] > 1000*1000 and self.a.raw_data[-1] > 10 | 當日成交量,大於前三天的總成交量。(短線多空動能) | entailment |
def ck_portf_004(self):
''' 價走平一個半月。(箱型整理、盤整) '''
return self.a.SD < 0.25 and self.a.stock_vol[-1] > 1000*1000 and self.a.raw_data[-1] > 10 | 價走平一個半月。(箱型整理、盤整) | entailment |
def GLI(self, pm=False):
''' 判斷乖離 '''
return list(self.a.ckMAO(self.a.MAO(3,6)[0][1], pm=pm))[0] | 判斷乖離 | entailment |
def B1(self):
''' 量大收紅 '''
return self.a.stock_vol[-1] > self.a.stock_vol[-2] and self.a.PUPTY | 量大收紅 | entailment |
def B2(self):
''' 量縮價不跌 '''
return self.a.stock_vol[-1] < self.a.stock_vol[-2] and self.a.PUPTY | 量縮價不跌 | entailment |
def S1(self):
''' 量大收黑 '''
return self.a.stock_vol[-1] > self.a.stock_vol[-2] and not self.a.PUPTY | 量大收黑 | entailment |
def S2(self):
''' 量縮價跌 '''
return self.a.stock_vol[-1] < self.a.stock_vol[-2] and not self.a.PUPTY | 量縮價跌 | entailment |
def B4PB(self):
''' 判斷是否為四大買點 '''
return self.ckMinsGLI and (self.B1 or self.B2 or self.B3 or self.B4) | 判斷是否為四大買點 | entailment |
def B4PS(self):
''' 判斷是否為四大賣點 '''
return self.ckPlusGLI and (self.S1 or self.S2 or self.S3 or self.S4) | 判斷是否為四大賣點 | entailment |
def reflection(n1, n2):
'''
Calculate the power reflection at the interface
of two refractive index materials.
Args:
n1 (float): Refractive index of material 1.
n2 (float): Refractive index of material 2.
Returns:
float: The percentage of reflected power.
'''
r = abs((n1-n2) / (n1+n2))**2
return r | Calculate the power reflection at the interface
of two refractive index materials.
Args:
n1 (float): Refractive index of material 1.
n2 (float): Refractive index of material 2.
Returns:
float: The percentage of reflected power. | entailment |
def coupling_efficiency(mode_solver, fibre_mfd,
fibre_offset_x=0, fibre_offset_y=0,
n_eff_fibre=1.441):
'''
Finds the coupling efficiency between a solved
fundamental mode and a fibre of given MFD.
Args:
mode_solver (_ModeSolver): Mode solver that
has found a fundamental mode.
fibre_mfd (float): The mode-field diameter
(MFD) of the fibre.
fibre_offset_x (float): Offset the fibre
from the centre position of the window
in x. Default is 0 (no offset).
fibre_offset_y (float): Offset the fibre
from the centre position of the window
in y. Default is 0 (no offset).
n_eff_fibre (float): The effective index
of the fibre mode. Default is 1.441.
Returns:
float: The power coupling efficiency.
'''
etas = []
gaus = _make_gaussian(mode_solver._structure.xc, mode_solver._structure.yc,
fibre_mfd, fibre_offset_x, fibre_offset_y)
for mode, n_eff in zip(mode_solver.modes, mode_solver.n_effs):
o = abs(_overlap(mode, gaus))
t = abs(transmission(n_eff, n_eff_fibre))
eta = o * t
etas.append(eta)
return etas | Finds the coupling efficiency between a solved
fundamental mode and a fibre of given MFD.
Args:
mode_solver (_ModeSolver): Mode solver that
has found a fundamental mode.
fibre_mfd (float): The mode-field diameter
(MFD) of the fibre.
fibre_offset_x (float): Offset the fibre
from the centre position of the window
in x. Default is 0 (no offset).
fibre_offset_y (float): Offset the fibre
from the centre position of the window
in y. Default is 0 (no offset).
n_eff_fibre (float): The effective index
of the fibre mode. Default is 1.441.
Returns:
float: The power coupling efficiency. | entailment |
def getIndex(reference):
'''
Find the reference folder using the location of the script file
Create the index, test if successful
'''
if reference:
reffas = reference
else:
parent_directory = path.dirname(path.abspath(path.dirname(__file__)))
reffas = path.join(parent_directory, "reference/DNA_CS.fasta")
if not path.isfile(reffas):
logging.error("Could not find reference fasta for lambda genome.")
sys.exit("Could not find reference fasta for lambda genome.")
aligner = mp.Aligner(reffas, preset="map-ont") # build index
if not aligner:
logging.error("Failed to load/build index")
raise Exception("ERROR: failed to load/build index")
return aligner | Find the reference folder using the location of the script file
Create the index, test if successful | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.