Dataset columns:
  _id               string (2–7 chars)
  title             string (1–88 chars)
  partition         string (3 classes)
  text              string (75–19.8k chars)
  language          string (1 class)
  meta_information  dict
q9200
Collection.set_default_subject
train
def set_default_subject(self, subject):
    """
    Sets the subject's location media URL as a link. It displays as the
    default subject on PFE.

    - **subject** can be a single :py:class:`.Subject` instance or a single
      subject ID.

    Examples::

        collection.set_default_subject(1234)
        collection.set_default_subject(Subject(1234))
    """
    if not (
        isinstance(subject, Subject)
        or isinstance(subject, (int, str,))
    ):
        raise TypeError

    if isinstance(subject, Subject):
        _subject_id = subject.id
    else:
        _subject_id = str(subject)

    self.http_post(
        '{}/links/default_subject'.format(self.id),
        json={'default_subject': _subject_id},
    )
python
{ "resource": "" }
q9201
Workflow.retire_subjects
train
def retire_subjects(self, subjects, reason='other'):
    """
    Retires subjects in this workflow.

    - **subjects** can be a list of :py:class:`Subject` instances, a list
      of subject IDs, a single :py:class:`Subject` instance, or a single
      subject ID.
    - **reason** gives the reason the :py:class:`Subject` has been retired.
      Defaults to **other**.

    Examples::

        workflow.retire_subjects(1234)
        workflow.retire_subjects([1, 2, 3, 4])
        workflow.retire_subjects(Subject(1234))
        workflow.retire_subjects([Subject(12), Subject(34)])
    """
    subjects = [s.id if isinstance(s, Subject) else s for s in subjects]

    return Workflow.http_post(
        '{}/retired_subjects'.format(self.id),
        json={
            'subject_ids': subjects,
            'retirement_reason': reason
        }
    )
python
{ "resource": "" }
q9202
PanoptesObject.where
train
def where(cls, **kwargs):
    """
    Returns a generator which yields instances matching the given query
    arguments.

    For example, this would yield all :py:class:`.Project`::

        Project.where()

    And this would yield all launch approved :py:class:`.Project`::

        Project.where(launch_approved=True)
    """
    _id = kwargs.pop('id', '')
    return cls.paginated_results(*cls.http_get(_id, params=kwargs))
python
{ "resource": "" }
q9203
PanoptesObject.reload
train
def reload(self):
    """
    Re-fetches the object from the API, discarding any local changes.
    Returns without doing anything if the object is new.
    """
    if not self.id:
        return
    reloaded_object = self.__class__.find(self.id)
    self.set_raw(
        reloaded_object.raw,
        reloaded_object.etag
    )
python
{ "resource": "" }
q9204
PanoptesObject.delete
train
def delete(self):
    """
    Deletes the object. Returns without doing anything if the object is
    new.
    """
    if not self.id:
        return
    if not self._loaded:
        self.reload()
    return self.http_delete(self.id, etag=self.etag)
python
{ "resource": "" }
q9205
LinkCollection.add
train
def add(self, objs):
    """
    Adds the given `objs` to this `LinkCollection`.

    - **objs** can be a list of :py:class:`.PanoptesObject` instances, a
      list of object IDs, a single :py:class:`.PanoptesObject` instance,
      or a single object ID.

    Examples::

        organization.links.projects.add(1234)
        organization.links.projects.add(Project(1234))
        workflow.links.subject_sets.add([1, 2, 3, 4])
        workflow.links.subject_sets.add([Project(12), Project(34)])
    """
    if self.readonly:
        raise NotImplementedError(
            '{} links can\'t be modified'.format(self._slug)
        )
    if not self._parent.id:
        raise ObjectNotSavedException(
            "Links can not be modified before the object has been saved."
        )

    _objs = [obj for obj in self._build_obj_list(objs) if obj not in self]
    if not _objs:
        return

    self._parent.http_post(
        '{}/links/{}'.format(self._parent.id, self._slug),
        json={self._slug: _objs},
        retry=True,
    )
    self._linked_object_ids.extend(_objs)
python
{ "resource": "" }
q9206
LinkCollection.remove
train
def remove(self, objs):
    """
    Removes the given `objs` from this `LinkCollection`.

    - **objs** can be a list of :py:class:`.PanoptesObject` instances, a
      list of object IDs, a single :py:class:`.PanoptesObject` instance,
      or a single object ID.

    Examples::

        organization.links.projects.remove(1234)
        organization.links.projects.remove(Project(1234))
        workflow.links.subject_sets.remove([1, 2, 3, 4])
        workflow.links.subject_sets.remove([Project(12), Project(34)])
    """
    if self.readonly:
        raise NotImplementedError(
            '{} links can\'t be modified'.format(self._slug)
        )
    if not self._parent.id:
        raise ObjectNotSavedException(
            "Links can not be modified before the object has been saved."
        )

    _objs = [obj for obj in self._build_obj_list(objs) if obj in self]
    if not _objs:
        return

    _obj_ids = ",".join(_objs)
    self._parent.http_delete(
        '{}/links/{}/{}'.format(self._parent.id, self._slug, _obj_ids),
        retry=True,
    )
    self._linked_object_ids = [
        obj for obj in self._linked_object_ids if obj not in _objs
    ]
python
{ "resource": "" }
q9207
write
train
def write(nml, nml_path, force=False, sort=False):
    """Save a namelist to disk using either a file object or its file path.

    File object usage:

    >>> with open(nml_path, 'w') as nml_file:
    >>>     f90nml.write(nml, nml_file)

    File path usage:

    >>> f90nml.write(nml, 'data.nml')

    This function is equivalent to the ``write`` function of the
    ``Namelist`` object ``nml``.

    >>> nml.write('data.nml')

    By default, ``write`` will not overwrite an existing file.  To override
    this, use the ``force`` flag.

    >>> nml.write('data.nml', force=True)

    To alphabetically sort the ``Namelist`` keys, use the ``sort`` flag.

    >>> nml.write('data.nml', sort=True)
    """
    # Promote dicts to Namelists
    if not isinstance(nml, Namelist) and isinstance(nml, dict):
        nml_in = Namelist(nml)
    else:
        nml_in = nml

    nml_in.write(nml_path, force=force, sort=sort)
python
{ "resource": "" }
q9208
patch
train
def patch(nml_path, nml_patch, out_path=None):
    """Create a new namelist based on an input namelist and reference dict.

    >>> f90nml.patch('data.nml', nml_patch, 'patched_data.nml')

    This function is equivalent to the ``read`` function of the ``Parser``
    object with the patch output arguments.

    >>> parser = f90nml.Parser()
    >>> nml = parser.read('data.nml', nml_patch, 'patched_data.nml')

    A patched namelist file will retain any formatting or comments from the
    original namelist file.  Any modified values will be formatted based on
    the settings of the ``Namelist`` object.
    """
    parser = Parser()
    return parser.read(nml_path, nml_patch, out_path)
python
{ "resource": "" }
q9209
Canvas.convert
train
def convert(self, format="png", **kwargs):
    """
    png, ps, pdf, gif, jpg, svg

    returns image in format as bytes
    """
    if format.upper() in cairosvg.SURFACES:
        surface = cairosvg.SURFACES[format.upper()]
    else:
        raise Exception("'%s' image format unavailable: use one of %s" %
                        (format.upper(), list(cairosvg.SURFACES.keys())))
    return surface.convert(bytestring=str(self), **kwargs)
python
{ "resource": "" }
q9210
Canvas.toPIL
train
def toPIL(self, **attribs):
    """
    Convert canvas to a PIL image
    """
    import PIL.Image
    bytes = self.convert("png")
    sfile = io.BytesIO(bytes)
    pil = PIL.Image.open(sfile)
    return pil
python
{ "resource": "" }
q9211
Canvas.toGIF
train
def toGIF(self, **attribs):
    """
    Convert canvas to GIF bytes
    """
    im = self.toPIL(**attribs)
    sfile = io.BytesIO()
    im.save(sfile, format="gif")
    return sfile.getvalue()
python
{ "resource": "" }
q9212
Canvas.getPixels
train
def getPixels(self):
    """
    Return a stream of pixels from current Canvas.
    """
    array = self.toArray()
    (width, height, depth) = array.size
    for x in range(width):
        for y in range(height):
            yield Pixel(array, x, y)
python
{ "resource": "" }
q9213
Circle.getP1
train
def getP1(self):
    """
    Left, upper point
    """
    return Point(self.center[0] - self.radius,
                 self.center[1] - self.radius)
python
{ "resource": "" }
q9214
Circle.getP2
train
def getP2(self):
    """
    Right, lower point
    """
    return Point(self.center[0] + self.radius,
                 self.center[1] + self.radius)
python
{ "resource": "" }
q9215
Simulation.start_sim
train
def start_sim(self, gui=True, set_values={}, error=None): """ Run the simulation in the background, showing the GUI by default. """ self.error = error if not self.is_running.is_set(): def loop(): self.need_to_stop.clear() self.is_running.set() for robot in self.robots: if robot.brain: self.runBrain(robot.brain) count = 0 while not self.need_to_stop.isSet(): if not self.paused.is_set(): self.clock += self.sim_time for robot in self.robots: try: robot.update() except Exception as exc: self.need_to_stop.set() if error: error.value = "Error: %s. Now stopping simulation." % str(exc) else: raise if gui: self.draw() if count % self.gui_update == 0: if "canvas" in set_values: set_values["canvas"].value = str(self.render()) if "energy" in set_values: if len(self.robots) > 0: set_values["energy"].value = str(self.robots[0].energy) count += 1 self.realsleep(self.sim_time) if self.robots[0].energy <= 0: self.need_to_stop.set() self.is_running.clear() for robot in self.robots: robot.stop() threading.Thread(target=loop).start()
python
{ "resource": "" }
q9216
Simulation.draw
train
def draw(self):
    """
    Render and draw the world and robots.
    """
    from calysto.display import display, clear_output
    canvas = self.render()
    clear_output(wait=True)
    display(canvas)
python
{ "resource": "" }
q9217
Simulation.sleep
train
def sleep(self, seconds):
    """
    Sleep in simulated time.
    """
    start = self.time()
    while (self.time() - start < seconds and
           not self.need_to_stop.is_set()):
        self.need_to_stop.wait(self.sim_time)
python
{ "resource": "" }
q9218
Simulation.runBrain
train
def runBrain(self, f):
    """
    Run a brain program in the background.
    """
    if self.error:
        self.error.value = ""
    def wrapper():
        self.brain_running.set()
        try:
            f()
        except KeyboardInterrupt:
            # Just stop
            pass
        except Exception as e:
            if self.error:
                self.error.value = ("<pre style='background: #fdd'>" +
                                    traceback.format_exc() + "</pre>")
            else:
                raise
        finally:
            self.brain_running.clear()  # Otherwise, will show error
    threading.Thread(target=wrapper).start()
python
{ "resource": "" }
q9219
Robot.forward
train
def forward(self, seconds, vx=5):
    """
    Move continuously in simulator for seconds and velocity vx.
    """
    self.vx = vx
    self.sleep(seconds)
    self.vx = 0
python
{ "resource": "" }
q9220
DNARobot.codon2weight
train
def codon2weight(self, codon):
    """
    Turn a codon of "000" to "999" to a number between -5.0 and 5.0.
    """
    length = len(codon)
    retval = int(codon)
    return retval / (10 ** (length - 1)) - 5.0
python
{ "resource": "" }
q9221
DNARobot.weight2codon
train
def weight2codon(self, weight, length=None):
    """
    Given a weight between -5 and 5, turn it into a codon, eg "000" to "999"
    """
    if length is None:
        length = self.clen
    retval = 0
    weight = min(max(weight + 5.0, 0), 10.0) * (10 ** (length - 1))
    for i in range(length):
        if i == length - 1:  # last one
            d = int(round(weight / (10 ** (length - i - 1))))
        else:
            d = int(weight / (10 ** (length - i - 1)))
        weight = weight % (10 ** (length - i - 1))
        retval += d * (10 ** (length - i - 1))
    return ("%0" + str(length) + "d") % retval
python
{ "resource": "" }
q9222
is_nullable_list
train
def is_nullable_list(val, vtype):
    """Return True if list contains either values of type `vtype` or None."""
    return (isinstance(val, list) and
            any(isinstance(v, vtype) for v in val) and
            all((isinstance(v, vtype) or v is None) for v in val))
python
{ "resource": "" }
q9223
Namelist.column_width
train
def column_width(self, width):
    """Validate and set the column width."""
    if isinstance(width, int):
        if width >= 0:
            self._column_width = width
        else:
            raise ValueError('Column width must be nonnegative.')
    else:
        raise TypeError('Column width must be a nonnegative integer.')
python
{ "resource": "" }
q9224
Namelist.indent
train
def indent(self, value): """Validate and set the indent width.""" # Explicit indent setting if isinstance(value, str): if value.isspace() or len(value) == 0: self._indent = value else: raise ValueError('String indentation can only contain ' 'whitespace.') # Set indent width elif isinstance(value, int): if value >= 0: self._indent = value * ' ' else: raise ValueError('Indentation spacing must be nonnegative.') else: raise TypeError('Indentation must be specified by string or space ' 'width.')
python
{ "resource": "" }
q9225
Namelist.end_comma
train
def end_comma(self, value):
    """Validate and set the comma termination flag."""
    if not isinstance(value, bool):
        raise TypeError('end_comma attribute must be a logical type.')
    self._end_comma = value
python
{ "resource": "" }
q9226
Namelist.index_spacing
train
def index_spacing(self, value):
    """Validate and set the index_spacing flag."""
    if not isinstance(value, bool):
        raise TypeError('index_spacing attribute must be a logical type.')
    self._index_spacing = value
python
{ "resource": "" }
q9227
Namelist.uppercase
train
def uppercase(self, value):
    """Validate and set the uppercase flag."""
    if not isinstance(value, bool):
        raise TypeError('uppercase attribute must be a logical type.')
    self._uppercase = value
python
{ "resource": "" }
q9228
Namelist.float_format
train
def float_format(self, value):
    """Validate and set the floating point format string."""
    if isinstance(value, str):
        # Duck-test the format string; raise ValueError on fail
        '{0:{1}}'.format(1.23, value)
        self._float_format = value
    else:
        raise TypeError('Floating point format code must be a string.')
python
{ "resource": "" }
q9229
Namelist.logical_repr
train
def logical_repr(self, value):
    """Set the string representation of logical values."""
    if not any(isinstance(value, t) for t in (list, tuple)):
        raise TypeError("Logical representation must be a tuple with "
                        "a valid true and false value.")
    if not len(value) == 2:
        raise ValueError("List must contain two values.")

    self.false_repr = value[0]
    self.true_repr = value[1]
python
{ "resource": "" }
q9230
Namelist.false_repr
train
def false_repr(self, value):
    """Validate and set the logical false representation."""
    if isinstance(value, str):
        if not (value.lower().startswith('f') or
                value.lower().startswith('.f')):
            raise ValueError("Logical false representation must start "
                             "with 'F' or '.F'.")
        else:
            self._logical_repr[0] = value
    else:
        raise TypeError('Logical false representation must be a string.')
python
{ "resource": "" }
q9231
Namelist.start_index
train
def start_index(self, value):
    """Validate and set the vector start index."""
    # TODO: Validate contents? (May want to set before adding the data.)
    if not isinstance(value, dict):
        raise TypeError('start_index attribute must be a dict.')
    self._start_index = value
python
{ "resource": "" }
q9232
Namelist.write
train
def write(self, nml_path, force=False, sort=False):
    """Write Namelist to a Fortran 90 namelist file.

    >>> nml = f90nml.read('input.nml')
    >>> nml.write('out.nml')
    """
    nml_is_file = hasattr(nml_path, 'read')
    if not force and not nml_is_file and os.path.isfile(nml_path):
        raise IOError('File {0} already exists.'.format(nml_path))

    nml_file = nml_path if nml_is_file else open(nml_path, 'w')
    try:
        self._writestream(nml_file, sort)
    finally:
        if not nml_is_file:
            nml_file.close()
python
{ "resource": "" }
q9233
Namelist.patch
train
def patch(self, nml_patch):
    """Update the namelist from another partial or full namelist.

    This is different from the intrinsic `update()` method, which replaces
    a namelist section.  Rather, it updates the values within a section.
    """
    for sec in nml_patch:
        if sec not in self:
            self[sec] = Namelist()
        self[sec].update(nml_patch[sec])
python
{ "resource": "" }
q9234
Namelist.groups
train
def groups(self):
    """Return an iterator that spans values with group and variable names.

    Elements of the iterator consist of a tuple containing two values.  The
    first is an inner tuple containing the current namelist group and its
    variable name.  The second element of the returned tuple is the value
    associated with the current group and variable.
    """
    for key, value in self.items():
        for inner_key, inner_value in value.items():
            yield (key, inner_key), inner_value
python
{ "resource": "" }
q9235
Namelist._write_nmlgrp
train
def _write_nmlgrp(self, grp_name, grp_vars, nml_file, sort=False): """Write namelist group to target file.""" if self._newline: print(file=nml_file) self._newline = True if self.uppercase: grp_name = grp_name.upper() if sort: grp_vars = Namelist(sorted(grp_vars.items(), key=lambda t: t[0])) print('&{0}'.format(grp_name), file=nml_file) for v_name, v_val in grp_vars.items(): v_start = grp_vars.start_index.get(v_name, None) for v_str in self._var_strings(v_name, v_val, v_start=v_start): nml_line = self.indent + '{0}'.format(v_str) print(nml_line, file=nml_file) print('/', file=nml_file)
python
{ "resource": "" }
q9236
Namelist.todict
train
def todict(self, complex_tuple=False): """Return a dict equivalent to the namelist. Since Fortran variables and names cannot start with the ``_`` character, any keys starting with this token denote metadata, such as starting index. The ``complex_tuple`` flag is used to convert complex data into an equivalent 2-tuple, with metadata stored to flag the variable as complex. This is primarily used to facilitate the storage of the namelist into an equivalent format which does not support complex numbers, such as JSON or YAML. """ # TODO: Preserve ordering nmldict = OrderedDict(self) # Search for namelists within the namelist # TODO: Move repeated stuff to new functions for key, value in self.items(): if isinstance(value, Namelist): nmldict[key] = value.todict(complex_tuple) elif isinstance(value, complex) and complex_tuple: nmldict[key] = [value.real, value.imag] try: nmldict['_complex'].append(key) except KeyError: nmldict['_complex'] = [key] elif isinstance(value, list): complex_list = False for idx, entry in enumerate(value): if isinstance(entry, Namelist): nmldict[key][idx] = entry.todict(complex_tuple) elif isinstance(entry, complex) and complex_tuple: nmldict[key][idx] = [entry.real, entry.imag] complex_list = True if complex_list: try: nmldict['_complex'].append(key) except KeyError: nmldict['_complex'] = [key] # Append the start index if present if self.start_index: nmldict['_start_index'] = self.start_index return nmldict
python
{ "resource": "" }
q9237
Namelist._f90repr
train
def _f90repr(self, value): """Convert primitive Python types to equivalent Fortran strings.""" if isinstance(value, bool): return self._f90bool(value) elif isinstance(value, numbers.Integral): return self._f90int(value) elif isinstance(value, numbers.Real): return self._f90float(value) elif isinstance(value, numbers.Complex): return self._f90complex(value) elif isinstance(value, basestring): return self._f90str(value) elif value is None: return '' else: raise ValueError('Type {0} of {1} cannot be converted to a Fortran' ' type.'.format(type(value), value))
python
{ "resource": "" }
q9238
Namelist._f90complex
train
def _f90complex(self, value):
    """Return a Fortran 90 representation of a complex number."""
    return '({0:{fmt}}, {1:{fmt}})'.format(value.real, value.imag,
                                           fmt=self.float_format)
python
{ "resource": "" }
q9239
Namelist._f90str
train
def _f90str(self, value):
    """Return a Fortran 90 representation of a string."""
    # Replace Python quote escape sequence with Fortran
    result = repr(str(value)).replace("\\'", "''").replace('\\"', '""')

    # Un-escape the Python backslash escape sequence
    result = result.replace('\\\\', '\\')

    return result
python
{ "resource": "" }
q9240
Cursor.extract_data
train
def extract_data(self, page):
    """Extract the AppNexus object or list of objects from the response"""
    response_keys = set(page.keys())
    uncommon_keys = response_keys - self.common_keys

    for possible_data_key in uncommon_keys:
        element = page[possible_data_key]
        if isinstance(element, dict):
            return [self.representation(self.client, self.service_name,
                                        element)]
        if isinstance(element, list):
            return [self.representation(self.client, self.service_name, x)
                    for x in element]
python
{ "resource": "" }
q9241
Cursor.first
train
def first(self):
    """Extract the first AppNexus object present in the response"""
    page = self.get_page(num_elements=1)
    data = self.extract_data(page)
    if data:
        return data[0]
python
{ "resource": "" }
q9242
Cursor.size
train
def size(self):
    """Return the number of elements of the cursor with skip and limit"""
    initial_count = self.count()
    count_with_skip = max(0, initial_count - self._skip)
    size = min(count_with_skip, self._limit)
    return size
python
{ "resource": "" }
q9243
sumMerge
train
def sumMerge(dict1, dict2):
    """
    Adds two dictionaries together, and merges into the first, dict1.
    Returns first dict.
    """
    for key in dict2:
        dict1[key] = list(map(lambda a, b: a + b,
                              dict1.get(key, [0, 0, 0, 0]), dict2[key]))
    return dict1
python
{ "resource": "" }
q9244
loadNetworkFromFile
train
def loadNetworkFromFile(filename, mode = 'pickle'): """ Deprecated. Use loadNetwork instead. """ if mode == 'pickle': import pickle fp = open(filename) network = pickle.load(fp) fp.close() return network elif mode in ['plain', 'conx']: fp = open(filename, "r") line = fp.readline() network = None while line: if line.startswith("layer,"): # layer, name, size temp, name, sizeStr = line.split(",") name = name.strip() size = int(sizeStr) network.addLayer(name, size) line = fp.readline() weights = [float(f) for f in line.split()] for i in range(network[name].size): network[name].weight[i] = weights[i] elif line.startswith("connection,"): # connection, fromLayer, toLayer temp, nameFrom, nameTo = line.split(",") nameFrom, nameTo = nameFrom.strip(), nameTo.strip() network.connect(nameFrom, nameTo) for i in range(network[nameFrom].size): line = fp.readline() weights = [float(f) for f in line.split()] for j in range(network[nameTo].size): network[nameFrom, nameTo].weight[i][j] = weights[j] elif line.startswith("parameter,"): temp, exp = line.split(",") exec(exp) # network is the neural network object elif line.startswith("network,"): temp, netType = line.split(",") netType = netType.strip().lower() if netType == "cascornetwork": from pyrobot.brain.cascor import CascorNetwork network = CascorNetwork() elif netType == "network": network = Network() elif netType == "srn": network = SRN() else: raise AttributeError("unknown network type: '%s'" % netType) line = fp.readline() return network
python
{ "resource": "" }
q9245
randomArray2
train
def randomArray2(size, bound):
    """
    Returns an array initialized to random values between -bound and bound
    distributed in a gaussian probability distribution more appropriate for
    a Tanh activation function.
    """
    if type(size) == type(1):
        size = (size,)
    temp = Numeric.array(ndim(*size), thunk=lambda: random.gauss(0, 1)) * (2.0 * bound)
    return temp - bound
python
{ "resource": "" }
q9246
randomArray
train
def randomArray(size, bound):
    """
    Returns an array initialized to random values between -bound and bound.
    """
    if type(size) == type(1):
        size = (size,)
    temp = Numeric.array(ndim(*size)) * (2.0 * bound)
    return temp - bound
python
{ "resource": "" }
q9247
writeArray
train
def writeArray(fp, a, delim=" ", nl=1):
    """
    Writes a sequence a of floats to file pointed to by file pointer.
    """
    for i in a:
        fp.write("%f%s" % (i, delim))
    if nl:
        fp.write("\n")
python
{ "resource": "" }
q9248
Layer.RMSError
train
def RMSError(self):
    """
    Returns Root Mean Squared Error for this layer's pattern.
    """
    tss = self.TSSError()
    return math.sqrt(tss / self.size)
python
{ "resource": "" }
q9249
Layer.getCorrect
train
def getCorrect(self, tolerance):
    """
    Returns the number of nodes within tolerance of the target.
    """
    return Numeric.add.reduce(Numeric.fabs(self.target - self.activation) < tolerance)
python
{ "resource": "" }
q9250
Layer.setLog
train
def setLog(self, fileName, writeName=False): """ Opens a log file with name fileName. """ self.log = 1 self.logFile = fileName self._logPtr = open(fileName, "w") if writeName: self._namePtr = open(fileName + ".name", "w")
python
{ "resource": "" }
q9251
Layer.closeLog
train
def closeLog(self): """ Closes the log file. """ self._logPtr.close() if self._namePtr: self._namePtr.close() self.log = 0
python
{ "resource": "" }
q9252
Layer.writeLog
train
def writeLog(self, network): """ Writes to the log file. """ if self.log: writeArray(self._logPtr, self.activation) if self._namePtr: self._namePtr.write(network.getWord(self.activation)) self._namePtr.write("\n")
python
{ "resource": "" }
q9253
Layer.toString
train
def toString(self): """ Returns a string representation of Layer instance. """ string = "Layer '%s': (Kind: %s, Size: %d, Active: %d, Frozen: %d)\n" % ( self.name, self.kind, self.size, self.active, self.frozen) if (self.type == 'Output'): string += toStringArray('Target ', self.target, self.displayWidth) string += toStringArray('Activation', self.activation, self.displayWidth) if (self.type != 'Input' and self._verbosity > 1): string += toStringArray('Error ', self.error, self.displayWidth) if (self._verbosity > 4 and self.type != 'Input'): string += toStringArray('weight ', self.weight, self.displayWidth) string += toStringArray('dweight ', self.dweight, self.displayWidth) string += toStringArray('delta ', self.delta, self.displayWidth) string += toStringArray('netinput ', self.netinput, self.displayWidth) string += toStringArray('wed ', self.wed, self.displayWidth) return string
python
{ "resource": "" }
q9254
Layer.display
train
def display(self): """ Displays the Layer instance to the screen. """ if self.displayWidth == 0: return print("=============================") print("Layer '%s': (Kind: %s, Size: %d, Active: %d, Frozen: %d)" % ( self.name, self.kind, self.size, self.active, self.frozen)) if (self.type == 'Output'): displayArray('Target ', self.target, self.displayWidth) displayArray('Activation', self.activation, self.displayWidth) if (self.type != 'Input' and self._verbosity > 1): displayArray('Error ', self.error, self.displayWidth) if (self._verbosity > 4 and self.type != 'Input'): print(" ", end=" "); displayArray('weight', self.weight) print(" ", end=" "); displayArray('dweight', self.dweight) print(" ", end=" "); displayArray('delta', self.delta) print(" ", end=" "); displayArray('netinput', self.netinput) print(" ", end=" "); displayArray('wed', self.wed)
python
{ "resource": "" }
q9255
Layer.copyActivations
train
def copyActivations(self, arr, reckless = 0): """ Copies activations from the argument array into layer activations. """ array = Numeric.array(arr) if not len(array) == self.size: raise LayerError('Mismatched activation size and layer size in call to copyActivations()', \ (len(array), self.size)) if self.verify and not self.activationSet == 0: if not reckless: raise LayerError('Activation flag not reset before call to copyActivations()', \ self.activationSet) self.activation = array self.activationSet = 1
python
{ "resource": "" }
q9256
Layer.copyTargets
train
def copyTargets(self, arr): """ Copies the targets of the argument array into the self.target attribute. """ array = Numeric.array(arr) if not len(array) == self.size: raise LayerError('Mismatched target size and layer size in call to copyTargets()', \ (len(array), self.size)) # Removed this because both propagate and backprop (via compute_error) set targets #if self.verify and not self.targetSet == 0: # if not self.warningIssued: # print 'Warning! Targets have already been set and no intervening backprop() was called.', \ # (self.name, self.targetSet) # print "(Warning will not be issued again)" # self.warningIssued = 1 if Numeric.add.reduce(array < self.minTarget) or Numeric.add.reduce(array > self.maxTarget): print(self.name, self.minTarget, self.maxTarget) raise LayerError('Targets for this layer are out of range.', (self.name, array)) self.target = array self.targetSet = 1
python
{ "resource": "" }
q9257
Connection.initialize
train
def initialize(self): """ Initializes self.dweight and self.wed to zero matrices. """ self.randomize() self.dweight = Numeric.zeros((self.fromLayer.size, \ self.toLayer.size), 'f') self.wed = Numeric.zeros((self.fromLayer.size, \ self.toLayer.size), 'f') self.wedLast = Numeric.zeros((self.fromLayer.size, \ self.toLayer.size), 'f')
python
{ "resource": "" }
q9258
Connection.display
train
def display(self): """ Displays connection information to the screen. """ if self.toLayer._verbosity > 4: print("wed: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'") for j in range(self.toLayer.size): print(self.toLayer.name, "[", j, "]", end=" ") print('') for i in range(self.fromLayer.size): print(self.fromLayer.name, "[", i, "]", ": ", end=" ") for j in range(self.toLayer.size): print(self.wed[i][j], end=" ") print('') print('') print("dweight: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'") for j in range(self.toLayer.size): print(self.toLayer.name, "[", j, "]", end=" ") print('') for i in range(self.fromLayer.size): print(self.fromLayer.name, "[", i, "]", ": ", end=" ") for j in range(self.toLayer.size): print(self.dweight[i][j], end=" ") print('') print('') if self.toLayer._verbosity > 2: print("Weights: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'") print(" ", end=" ") for j in range(self.toLayer.size): print(self.toLayer.name, "[", j, "]", end=" ") print('') for i in range(self.fromLayer.size): print(self.fromLayer.name, "[", i, "]", ": ", end=" ") for j in range(self.toLayer.size): print(self.weight[i][j], end=" ") print('') print('')
python
{ "resource": "" }
q9259
Connection.toString
train
def toString(self): """ Connection information as a string. """ string = "" if self.toLayer._verbosity > 4: string += "wed: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'\n" string += " " for j in range(self.toLayer.size): string += " " + self.toLayer.name + "[" + str(j) + "]" string += '\n' for i in range(self.fromLayer.size): string += self.fromLayer.name+ "["+ str(i)+ "]"+ ": " for j in range(self.toLayer.size): string += " " + str(self.wed[i][j]) string += '\n' string += '\n' string += "dweight: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'\n" string += " " for j in range(self.toLayer.size): string += " " + self.toLayer.name+ "["+ str(j)+ "]" string += '\n' for i in range(self.fromLayer.size): string += self.fromLayer.name+ "["+ str(i)+ "]"+ ": " for j in range(self.toLayer.size): string += " " + str(self.dweight[i][j]) string += '\n' string += '\n' if self.toLayer._verbosity > 2: string += "Weights: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'\n" string += " " for j in range(self.toLayer.size): string += " " + self.toLayer.name+ "["+ str(j)+ "]" string += '\n' for i in range(self.fromLayer.size): string += self.fromLayer.name+ "["+ str(i)+ "]"+ ": " for j in range(self.toLayer.size): string += " " + str(self.weight[i][j]) string += '\n' string += '\n' return string
python
{ "resource": "" }
q9260
Network.getLayerIndex
train
def getLayerIndex(self, layer): """ Given a reference to a layer, returns the index of that layer in self.layers. """ for i in range(len(self.layers)): if layer == self.layers[i]: # shallow cmp return i return -1 # not in list
python
{ "resource": "" }
q9261
Network.isConnected
train
def isConnected(self, fromName, toName): """ Are these two layers connected this way? """ for c in self.connections: if (c.fromLayer.name == fromName and c.toLayer.name == toName): return 1 return 0
python
{ "resource": "" }
q9262
Network.connect
train
def connect(self, *names): """ Connects a list of names, one to the next. """ fromName, toName, rest = names[0], names[1], names[2:] self.connectAt(fromName, toName) if len(rest) != 0: self.connect(toName, *rest)
python
{ "resource": "" }
q9263
Network.connectAt
train
def connectAt(self, fromName, toName, position = None): """ Connects two layers by instantiating an instance of Connection class. Allows a position number, indicating the ordering of the connection. """ fromLayer = self.getLayer(fromName) toLayer = self.getLayer(toName) if self.getLayerIndex(fromLayer) >= self.getLayerIndex(toLayer): raise NetworkError('Layers out of order.', (fromLayer.name, toLayer.name)) if (fromLayer.type == 'Output'): fromLayer.type = 'Hidden' fromLayer.patternReport = 0 # automatically turned off for hidden layers if fromLayer.kind == 'Output': fromLayer.kind = 'Hidden' elif (fromLayer.type == 'Undefined'): fromLayer.type = 'Input' fromLayer.patternReport = 0 # automatically turned off for input layers if fromLayer.kind == 'Undefined': fromLayer.kind = 'Input' if (toLayer.type == 'Input'): raise NetworkError('Connections out of order', (fromLayer.name, toLayer.name)) elif (toLayer.type == 'Undefined'): toLayer.type = 'Output' toLayer.patternReport = 1 # automatically turned on for output layers if toLayer.kind == 'Undefined': toLayer.kind = 'Output' if position == None: self.connections.append(Connection(fromLayer, toLayer)) else: self.connections.insert(position, Connection(fromLayer, toLayer))
python
{ "resource": "" }
q9264
Network.addLayerNode
train
def addLayerNode(self, layerName, bias = None, weights = {}): """ Adds a new node to a layer, and puts in new weights. Adds node on the end. Weights will be random, unless specified. bias = the new node's bias weight weights = dict of {connectedLayerName: [weights], ...} Example: >>> net = Network() # doctest: +ELLIPSIS Conx using seed: ... >>> net.addLayers(2, 5, 1) >>> net.addLayerNode("hidden", bias = -0.12, weights = {"input": [1, 0], "output": [0]}) """ self.changeLayerSize(layerName, self[layerName].size + 1) if bias != None: self[layerName].weight[-1] = bias for name in list(weights.keys()): for c in self.connections: if c.fromLayer.name == name and c.toLayer.name == layerName: for i in range(self[name].size): self[name, layerName].weight[i][-1] = weights[name][i] elif c.toLayer.name == name and c.fromLayer.name == layerName: for j in range(self[name].size): self[layerName, name].weight[-1][j] = weights[name][j]
python
{ "resource": "" }
q9265
Network.changeLayerSize
train
def changeLayerSize(self, layername, newsize): """ Changes layer size. Newsize must be greater than zero. """ # for all connection from to this layer, change matrix: if self.sharedWeights: raise AttributeError("shared weights broken") for connection in self.connections: if connection.fromLayer.name == layername: connection.changeSize( newsize, connection.toLayer.size ) if connection.toLayer.name == layername: connection.changeSize( connection.fromLayer.size, newsize ) # then, change the actual layer size: self.getLayer(layername).changeSize(newsize)
python
{ "resource": "" }
q9266
Network.getActivationsDict
train
def getActivationsDict(self, nameList): """ Returns a dictionary of layer names that map to a list of activations. """ retval = {} for name in nameList: retval[name] = self.layersByName[name].getActivationsList() return retval
python
{ "resource": "" }
q9267
Network.setSeed
train
def setSeed(self, value):
    """
    Sets the seed to value.
    """
    self.seed = value
    random.seed(self.seed)
    if self.verbosity >= 0:
        print("Conx using seed:", self.seed)
python
{ "resource": "" }
q9268
Network.setVerbosity
train
def setVerbosity(self, value): """ Sets network self._verbosity and each layer._verbosity to value. """ self._verbosity = value for layer in self.layers: layer._verbosity = value
python
{ "resource": "" }
q9269
Network.setOrderedInputs
train
def setOrderedInputs(self, value): """ Sets self.orderedInputs to value. Specifies if inputs should be ordered and if so orders the inputs. """ self.orderedInputs = value if self.orderedInputs: self.loadOrder = [0] * len(self.inputs) for i in range(len(self.inputs)): self.loadOrder[i] = i
python
{ "resource": "" }
q9270
Network.verifyArguments
train
def verifyArguments(self, arg): """ Verifies that arguments to setInputs and setTargets are appropriately formatted. """ for l in arg: if not type(l) == list and \ not type(l) == type(Numeric.array([0.0])) and \ not type(l) == tuple and \ not type(l) == dict: return 0 if type(l) == dict: for i in l: if not type(i) == str and i not in list(self.layers.keys()): return 0 else: for i in l: if not type(i) == float and not type(i) == int: return 0 return 1
python
{ "resource": "" }
q9271
Network.setTargets
train
def setTargets(self, targets): """ Sets the targets. """ if not self.verifyArguments(targets) and not self.patterned: raise NetworkError('setTargets() requires [[...],[...],...] or [{"layerName": [...]}, ...].', targets) self.targets = targets
python
{ "resource": "" }
q9272
Network.copyActivations
train
def copyActivations(self, layer, vec, start = 0): """ Copies activations in vec to the specified layer, replacing patterns if necessary. """ vector = self.replacePatterns(vec, layer.name) if self.verbosity > 4: print("Copying Activations: ", vector[start:start+layer.size]) layer.copyActivations(vector[start:start+layer.size])
python
{ "resource": "" }
q9273
Network.getDataMap
train
def getDataMap(self, intype, pos, name, offset = 0): """ Hook defined to lookup a name, and get it from a vector. Can be overloaded to get it from somewhere else. """ if intype == "input": vector = self.inputs elif intype == "target": vector = self.targets else: raise AttributeError("invalid map type '%s'" % intype) return vector[pos][offset:offset+self[name].size]
python
{ "resource": "" }
q9274
Network.getData
train
def getData(self, pos): """ Returns dictionary with input and target given pos. """ retval = {} if pos >= len(self.inputs): raise IndexError('getData() pattern beyond range.', pos) if self.verbosity >= 1: print("Getting input", pos, "...") if len(self.inputMap) == 0: if type(self.inputs[pos]) == dict: # allow inputs to be a dict retval.update(self.inputs[pos]) else: retval[self.layers[0].name] = self.inputs[pos] else: # mapInput set manually for vals in self.inputMap: (name, offset) = vals retval[name] = self.getDataMap("input", pos, name, offset) if self.verbosity > 1: print("Loading target", pos, "...") if len(self.targets) == 0: pass # ok, no targets elif len(self.targetMap) == 0: if type(self.targets[pos]) == dict: # allow targets to be a dict retval.update(self.targets[pos]) else: retval[self.layers[len(self.layers)-1].name] = self.targets[pos] else: # set manually for vals in self.targetMap: (name, offset) = vals retval[name] = self.getDataMap("target", pos, name, offset) return retval
python
{ "resource": "" }
q9275
Network.RMSError
train
def RMSError(self):
    """
    Returns Root Mean Squared Error for all output layers in this network.
    """
    tss = 0.0
    size = 0
    for layer in self.layers:
        if layer.type == 'Output':
            tss += layer.TSSError()
            size += layer.size
    return math.sqrt(tss / size)
python
{ "resource": "" }
q9276
Network.numConnects
train
def numConnects(self, layerName): """ Number of incoming weights, including bias. Assumes fully connected. """ count = 0 if self[layerName].active: count += 1 # 1 = bias for connection in self.connections: if connection.active and connection.fromLayer.active and connection.toLayer.name == layerName: count += connection.fromLayer.size return count
python
{ "resource": "" }
q9277
Network.prop_from
train
def prop_from(self, startLayers): """ Start propagation from the layers in the list startLayers. Make sure startLayers are initialized with the desired activations. NO ERROR CHECKING. """ if self.verbosity > 2: print("Partially propagating network:") # find all the layers involved in the propagation propagateLayers = [] # propagateLayers should not include startLayers (no loops) for startLayer in startLayers: for layer in self.layers: if self.path(startLayer, layer): propagateLayers.append(layer) for layer in propagateLayers: if layer.active: layer.netinput = (layer.weight).copy() for layer in propagateLayers: if layer.active: for connection in self.connections: if connection.active and connection.toLayer.name == layer.name: connection.toLayer.netinput = connection.toLayer.netinput + \ Numeric.matrixmultiply(connection.fromLayer.activation,\ connection.weight) # propagate! if layer.type != 'Input': layer.activation = self.activationFunction(layer.netinput) for layer in propagateLayers: if layer.log and layer.active: layer.writeLog(self)
python
{ "resource": "" }
q9278
Network.propagateTo
train
def propagateTo(self, toLayer, **args): """ Propagates activation to a layer. Optionally, takes input layer names as keywords, and their associated activations. Returns the toLayer's activation. Examples: >>> net = Network() # doctest: +ELLIPSIS Conx using seed: ... >>> net.addLayers(2, 5, 1) >>> len(net.propagateTo("output")) 1 >>> len(net.propagateTo("hidden")) 5 >>> len(net.propagateTo("hidden", input = [0, 0])) 5 """ for layerName in args: self[layerName].activationSet = 0 # force it to be ok self[layerName].copyActivations(args[layerName]) # init toLayer: self[toLayer].netinput = (self[toLayer].weight).copy() # for each connection, in order: for connection in self.connections: if connection.active and connection.toLayer.name == toLayer and connection.fromLayer.active: connection.toLayer.netinput = connection.toLayer.netinput + \ Numeric.matrixmultiply(connection.fromLayer.activation,\ connection.weight) # propagate! if self[toLayer].type != 'Input': self[toLayer].activation = self.activationFunction(self[toLayer].netinput) return self[toLayer].activation.copy()
python
{ "resource": "" }
q9279
Network.activationFunctionASIG
train
def activationFunctionASIG(self, x):
    """
    Determine the activation of a node based on that node's net input.
    """
    def act(v):
        if v < -15.0:
            return 0.0
        elif v > 15.0:
            return 1.0
        else:
            return 1.0 / (1.0 + Numeric.exp(-v))
    return Numeric.array(list(map(act, x)), 'f')
python
{ "resource": "" }
q9280
Network.actDerivASIG
train
def actDerivASIG(self, x):
    """
    Only works on scalars.
    """
    def act(v):
        if v < -15.0:
            return 0.0
        elif v > 15.0:
            return 1.0
        else:
            return 1.0 / (1.0 + Numeric.exp(-v))
    return (act(x) * (1.0 - act(x))) + self.sigmoid_prime_offset
python
{ "resource": "" }
q9281
Network.useTanhActivationFunction
train
def useTanhActivationFunction(self): """ Change the network to use the hyperbolic tangent activation function for all layers. Must be called after all layers have been added. """ self.activationFunction = self.activationFunctionTANH self.ACTPRIME = self.ACTPRIMETANH self.actDeriv = self.actDerivTANH for layer in self: layer.minTarget, layer.minActivation = -1.7159, -1.7159 layer.maxTarget, layer.maxActivation = 1.7159, 1.7159
python
{ "resource": "" }
q9282
Network.useFahlmanActivationFunction
train
def useFahlmanActivationFunction(self): """ Change the network to use Fahlman's default activation function for all layers. Must be called after all layers have been added. """ self.activationFunction = self.activationFunctionFahlman self.ACTPRIME = self.ACTPRIME_Fahlman self.actDeriv = self.actDerivFahlman for layer in self: layer.minTarget, layer.minActivation = -0.5, -0.5 layer.maxTarget, layer.maxActivation = 0.5, 0.5
python
{ "resource": "" }
q9283
Network.backprop
train
def backprop(self, **args):
    """
    Computes error and wed for back propagation of error.
    """
    retval = self.compute_error(**args)
    if self.learning:
        self.compute_wed()
    return retval
python
{ "resource": "" }
q9284
Network.errorFunction
train
def errorFunction(self, t, a): """ Using a hyperbolic arctan on the error slightly exaggerates the actual error non-linearly. Return t - a to just use the difference. t - target vector a - activation vector """ def difference(v): if not self.hyperbolicError: #if -0.1 < v < 0.1: return 0.0 #else: return v else: if v < -0.9999999: return -17.0 elif v > 0.9999999: return 17.0 else: return math.log( (1.0 + v) / (1.0 - v) ) #else: return Numeric.arctanh(v) # half that above return list(map(difference, t - a))
python
{ "resource": "" }
q9285
Network.ce_init
train
def ce_init(self): """ Initializes error computation. Calculates error for output layers and initializes hidden layer error to zero. """ retval = 0.0; correct = 0; totalCount = 0 for layer in self.layers: if layer.active: if layer.type == 'Output': layer.error = self.errorFunction(layer.target, layer.activation) totalCount += layer.size retval += Numeric.add.reduce((layer.target - layer.activation) ** 2) correct += Numeric.add.reduce(Numeric.fabs(layer.target - layer.activation) < self.tolerance) elif (layer.type == 'Hidden'): for i in range(layer.size): # do it this way so you don't break reference links layer.error[i] = 0.0 return (retval, correct, totalCount)
python
{ "resource": "" }
q9286
Network.compute_error
train
def compute_error(self, **args): """ Computes error for all non-output layers backwards through all projections. """ for key in args: layer = self.getLayer(key) if layer.kind == 'Output': self.copyTargets(layer, args[key]) self.verifyTargets() # better have targets set error, correct, total = self.ce_init() pcorrect = {} # go backwards through each proj but don't redo output errors! if len(self.cacheConnections) != 0: changeConnections = self.cacheConnections else: changeConnections = self.connections for connect in reverse(changeConnections): if connect.active and connect.toLayer.active and connect.fromLayer.active: connect.toLayer.delta = (connect.toLayer.error * (self.ACTPRIME(connect.toLayer.activation))) connect.fromLayer.error = connect.fromLayer.error + \ Numeric.matrixmultiply(connect.weight, connect.toLayer.delta) # now all errors are set on all layers! pcorrect = self.getLayerErrors() return (error, correct, total, pcorrect)
python
{ "resource": "" }
q9287
Network.compute_wed
train
def compute_wed(self): """ Computes weight error derivative for all connections in self.connections starting with the last connection. """ if len(self.cacheConnections) != 0: changeConnections = self.cacheConnections else: changeConnections = self.connections for connect in reverse(changeConnections): if connect.active and connect.fromLayer.active and connect.toLayer.active: connect.wed = connect.wed + Numeric.outerproduct(connect.fromLayer.activation, connect.toLayer.delta) if len(self.cacheLayers) != 0: changeLayers = self.cacheLayers else: changeLayers = self.layers for layer in changeLayers: if layer.active: layer.wed = layer.wed + layer.delta
python
{ "resource": "" }
q9288
Network.toString
train
def toString(self):
    """
    Returns the network layers as a string.
    """
    output = ""
    for layer in reverse(self.layers):
        output += layer.toString()
    return output
python
{ "resource": "" }
q9289
Network.arrayify
train
def arrayify(self): """ Returns an array of node bias values and connection weights for use in a GA. """ gene = [] for layer in self.layers: if layer.type != 'Input': for i in range(layer.size): gene.append( layer.weight[i] ) for connection in self.connections: for i in range(connection.fromLayer.size): for j in range(connection.toLayer.size): gene.append( connection.weight[i][j] ) return gene
python
{ "resource": "" }
q9290
Network.unArrayify
train
def unArrayify(self, gene): """ Copies gene bias values and weights to network bias values and weights. """ g = 0 # if gene is too small an IndexError will be thrown for layer in self.layers: if layer.type != 'Input': for i in range(layer.size): layer.weight[i] = float( gene[g]) g += 1 for connection in self.connections: for i in range(connection.fromLayer.size): for j in range(connection.toLayer.size): connection.weight[i][j] = gene[g] g += 1 # if gene is too long we may have a problem if len(gene) > g: raise IndexError('Argument to unArrayify is too long.', len(gene))
python
{ "resource": "" }
q9291
Network.saveWeightsToFile
train
def saveWeightsToFile(self, filename, mode='pickle', counter=None):
    """
    Deprecated. Use saveWeights instead.
    """
    self.saveWeights(filename, mode, counter)
python
{ "resource": "" }
q9292
Network.saveWeights
train
def saveWeights(self, filename, mode='pickle', counter=None): """ Saves weights to file in pickle, plain, or tlearn mode. """ # modes: pickle/conx, plain, tlearn if "?" in filename: # replace ? pattern in filename with epoch number import re char = "?" match = re.search(re.escape(char) + "+", filename) if match: num = self.epoch if counter != None: num = counter elif self.totalEpoch != 0: # use a total epoch, if one: num = self.totalEpoch fstring = "%%0%dd" % len(match.group()) filename = filename[:match.start()] + \ fstring % self.epoch + \ filename[match.end():] self.lastAutoSaveWeightsFilename = filename if mode == 'pickle': mylist = self.arrayify() import pickle fp = open(filename, "w") pickle.dump(mylist, fp) fp.close() elif mode in ['plain', 'conx']: fp = open(filename, "w") fp.write("# Biases\n") for layer in self.layers: if layer.type != 'Input': fp.write("# Layer: " + layer.name + "\n") for i in range(layer.size): fp.write("%f " % layer.weight[i] ) fp.write("\n") fp.write("# Weights\n") for connection in self.connections: fp.write("# from " + connection.fromLayer.name + " to " + connection.toLayer.name + "\n") for i in range(connection.fromLayer.size): for j in range(connection.toLayer.size): fp.write("%f " % connection.weight[i][j] ) fp.write("\n") fp.close() elif mode == 'tlearn': fp = open(filename, "w") fp.write("NETWORK CONFIGURED BY TLEARN\n") fp.write("# weights after %d sweeps\n" % self.epoch) fp.write("# WEIGHTS\n") cnt = 1 for lto in self.layers: if lto.type != 'Input': for j in range(lto.size): fp.write("# TO NODE %d\n" % cnt) fp.write("%f\n" % lto.weight[j] ) for lfrom in self.layers: try: connection = self.getConnection(lfrom.name,lto.name) for i in range(connection.fromLayer.size): fp.write("%f\n" % connection.weight[i][j]) except NetworkError: # should return an exception here for i in range(lfrom.size): fp.write("%f\n" % 0.0) cnt += 1 fp.close() else: raise ValueError('Unknown mode in saveWeights().', mode)
python
{ "resource": "" }
q9293
Network.saveNetwork
train
def saveNetwork(self, filename, makeWrapper=1, mode="pickle", counter=None):
    """
    Saves network to file using pickle.
    """
    self.saveNetworkToFile(filename, makeWrapper, mode, counter)
python
{ "resource": "" }
q9294
Network.loadInputPatterns
train
def loadInputPatterns(self, filename, cols=None, everyNrows=1, delim=' ', checkEven=1):
    """
    Loads inputs as patterns from file.
    """
    self.loadInputPatternsFromFile(filename, cols, everyNrows, delim, checkEven)
python
{ "resource": "" }
q9295
Network.loadInputs
train
def loadInputs(self, filename, cols=None, everyNrows=1, delim=' ', checkEven=1):
    """
    Loads inputs from file. Patterning is lost.
    """
    self.loadInputsFromFile(filename, cols, everyNrows, delim, checkEven)
python
{ "resource": "" }
q9296
Network.loadTargetPatterns
train
def loadTargetPatterns(self, filename, cols=None, everyNrows=1, delim=' ', checkEven=1):
    """
    Loads targets as patterns from file.
    """
    self.loadTargetPatternsFromFile(filename, cols, everyNrows, delim, checkEven)
python
{ "resource": "" }
q9297
Network.replacePatterns
train
def replacePatterns(self, vector, layer = None): """ Replaces patterned inputs or targets with activation vectors. """ if not self.patterned: return vector if type(vector) == str: return self.replacePatterns(self.lookupPattern(vector, layer), layer) elif type(vector) != list: return vector # should be a vector if we made it here vec = [] for v in vector: if type(v) == str: retval = self.replacePatterns(self.lookupPattern(v, layer), layer) if type(retval) == list: vec.extend( retval ) else: vec.append( retval ) else: vec.append( v ) return vec
python
{ "resource": "" }
q9298
Network.patternVector
train
def patternVector(self, vector): """ Replaces vector with patterns. Used for loading inputs or targets from a file and still preserving patterns. """ if not self.patterned: return vector if type(vector) == int: if self.getWord(vector) != '': return self.getWord(vector) else: return vector elif type(vector) == float: if self.getWord(vector) != '': return self.getWord(vector) else: return vector elif type(vector) == str: return vector elif type(vector) == list: if self.getWord(vector) != '': return self.getWord(vector) # should be a list vec = [] for v in vector: if self.getWord(v) != '': retval = self.getWord(v) vec.append( retval ) else: retval = self.patternVector(v) vec.append( retval ) return vec
python
{ "resource": "" }
q9299
Network.getPattern
train
def getPattern(self, word):
    """
    Returns the pattern with key word.

    Example: net.getPattern("tom") => [0, 0, 0, 1]
    """
    if word in self.patterns:
        return self.patterns[word]
    else:
        raise ValueError('Unknown pattern in getPattern().', word)
python
{ "resource": "" }