id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
25,700 | dividertray.py | florianfesti_boxes/boxes/generators/dividertray.py | # Copyright (C) 2013-2014 Florian Festi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import math
from functools import partial
from boxes import Boxes, boolarg, edges, lids
class NotchSettings(edges.Settings):
"""Settings for Notches on the Dividers

Values:

* absolute

 * upper_radius : 1 : radius at the top corners of the notch (in mm)
 * lower_radius : 8 : radius at the bottom corners of the notch (in mm)
 * depth : 15 : depth of the notch below the divider's top edge (in mm)
"""
absolute_params = {
"upper_radius": 1,
"lower_radius": 8,
"depth": 15,
}
class SlotSettings(edges.Settings):
"""Settings for Divider Slots
Values:
* absolute
* depth : 20 : depth of the slot in mm
* angle : 0 : angle at which slots are generated, in degrees. 0° is vertical.
* radius : 2 : radius of the slot entrance in mm
* extra_slack : 0.2 : extra slack (in addition to thickness and kerf) to help insert dividers in mm"""
# Consumed by SlotDescriptionsGenerator / DividerTray as Slot_* attributes.
absolute_params = {
"depth": 20,
"angle": 0,
"radius": 2,
"extra_slack": 0.2,
}
class DividerSettings(edges.Settings):
"""Settings for Dividers
Values:
* absolute_params
* bottom_margin : 0 : margin between box's bottom and divider's in mm
* relative (in multiples of thickness)
* play : 0.05 : play to avoid them clamping onto the walls (in multiples of thickness)
"""
absolute_params = {
"bottom_margin": 0,
}
# relative_params are multiplied by the material thickness at load time.
relative_params = {
"play": 0.05,
}
class DividerTray(Boxes):
"""Divider tray - rows and dividers"""
description = """
Adding '0:' at the start of the sy parameter adds a slot at the very back. Adding ':0' at the end of sy adds a slot meeting the bottom at the very front. This is especially useful if slot angle is set above zero.
There are 4 different sets of dividers rendered:
* With asymmetric tabs so the tabs fit on top of each other
* With tabs of half wall thickness that can go side by side
* With tabs of a full wall thickness
* One single divider spanning across all columns
You will likely need to cut each of the dividers you want multiple times.
"""
ui_group = "Tray"
def __init__(self) -> None:
Boxes.__init__(self)
self.addSettingsArgs(edges.FingerJointSettings)
self.addSettingsArgs(edges.HandleEdgeSettings)
self.addSettingsArgs(lids.LidSettings)
self.buildArgParser("sx", "sy", "h", "outside")
self.addSettingsArgs(SlotSettings)
self.addSettingsArgs(NotchSettings)
self.addSettingsArgs(DividerSettings)
self.argparser.add_argument(
"--notches_in_wall",
type=boolarg,
default=True,
help="generate the same notches on the walls that are on the dividers",
)
self.argparser.add_argument(
"--left_wall",
type=boolarg,
default=True,
help="generate wall on the left side",
)
self.argparser.add_argument(
"--right_wall",
type=boolarg,
default=True,
help="generate wall on the right side",
)
self.argparser.add_argument(
"--bottom", type=boolarg, default=False, help="generate wall on the bottom",
)
self.argparser.add_argument(
"--handle", type=boolarg, default=False, help="add handle to the bottom",
)
def render(self):
# Draw all parts: two facing walls, the slotted side walls, optional
# bottom and lid, then the four divider variants described above.
side_walls_number = len(self.sx) - 1 + sum([self.left_wall, self.right_wall])
if side_walls_number == 0:
raise ValueError("You need at least one side wall to generate this tray")
# We need to adjust height before slot generation
if self.outside:
if self.bottom:
self.h -= self.thickness
else:
# If the parameter 'h' is the inner height of the content itself,
# then the actual tray height needs to be adjusted with the angle
self.h = self.h * math.cos(math.radians(self.Slot_angle))
slot_descriptions = SlotDescriptionsGenerator().generate_all_same_angles(
self.sy,
self.thickness,
self.Slot_extra_slack,
self.Slot_depth,
self.h,
self.Slot_angle,
self.Slot_radius,
)
# If measures are outside, we need to readjust slots afterwards
if self.outside:
self.sx = self.adjustSize(self.sx, self.left_wall, self.right_wall)
side_wall_target_length = sum(self.sy) - 2 * self.thickness
slot_descriptions.adjust_to_target_length(side_wall_target_length)
self.ctx.save()
# Facing walls (outer) with finger holes to support side walls
facing_wall_length = sum(self.sx) + self.thickness * (len(self.sx) - 1)
side_edge = lambda with_wall: "F" if with_wall else "e"
bottom_edge = lambda with_wall, with_handle: ("f" if with_handle else "F") if with_wall else "e"
upper_edge = (
DividerNotchesEdge(
self,
list(reversed(self.sx)),
)
if self.notches_in_wall
else "e"
)
for _ in range(2):
self.rectangularWall(
facing_wall_length,
self.h,
[
# _ is 0 for the back wall, 1 for the front (handle only there)
bottom_edge(self.bottom, _ and self.handle),
side_edge(self.right_wall),
upper_edge,
side_edge(self.left_wall),
],
callback=[partial(self.generate_finger_holes, self.h)],
move="up", label = "Front" if _ else "Back",
)
# Side walls (outer & inner) with slots to support dividers
side_wall_length = slot_descriptions.total_length()
for _ in range(side_walls_number):
if _ < side_walls_number - (len(self.sx) - 1):
be = "F" if self.bottom else "e"
else:
be = "f" if self.bottom else "e"
se = DividerSlotsEdge(self, slot_descriptions.descriptions)
self.rectangularWall(
side_wall_length, self.h, [be, "f", se, "f"], move="up", label="Sidepiece " + str(_ + 1)
)
self.lid(facing_wall_length, side_wall_length)
# Switch to right side of the file
self.ctx.restore()
self.rectangularWall(
max(facing_wall_length, side_wall_length), self.h, "ffff", move="right only", label="invisible"
)
# Bottom piece.
if self.bottom:
self.rectangularWall(
facing_wall_length,
side_wall_length,
[
"f",
"f" if self.right_wall else "e",
"Y" if self.handle else "f",
"f" if self.left_wall else "e",
],
callback=[partial(self.generate_finger_holes, side_wall_length)],
move="up", label="Bottom",
)
# Dividers
divider_height = (
# h, with angle adjustment
self.h / math.cos(math.radians(self.Slot_angle))
# removing what exceeds in the width of the divider
- self.thickness * math.tan(math.radians(self.Slot_angle))
# with margin
- self.Divider_bottom_margin
)
# One single divider spanning across all columns
self.generate_divider(
self.sx, divider_height, "up",
first_tab_width=self.thickness if self.left_wall else 0,
second_tab_width=self.thickness if self.right_wall else 0
)
# Three per-column variants: full-thickness tabs, half-thickness tabs,
# and asymmetric tabs (split 0.5/0.5 so neighbours can overlap).
for tabs, asymmetric_tabs in [(self.thickness, None),
(self.thickness / 2, None),
(self.thickness, 0.5),]:
with self.saved_context():
for i, length in enumerate(self.sx):
self.generate_divider(
[length],
divider_height,
"right",
first_tab_width=tabs if self.left_wall or i>0 else 0,
second_tab_width=tabs if self.right_wall or i<(len(self.sx) - 1) else 0,
asymmetric_tabs=asymmetric_tabs,
)
if asymmetric_tabs:
self.moveTo(-tabs, self.spacing)
self.generate_divider(self.sx, divider_height, "up only")
if self.debug:
debug_info = ["Debug"]
debug_info.append(f"Slot_edge_outer_length:{slot_descriptions.total_length() + 2 * self.thickness:.2f}")
debug_info.append(
"Slot_edge_inner_lengths:{}".format(
str.join(
"|",
[
f"{e.useful_length():.2f}"
for e in slot_descriptions.get_straight_edges()
],
)
)
)
debug_info.append(f"Face_edge_outer_length:{facing_wall_length + self.thickness * sum([self.left_wall, self.right_wall]):.2f}")
debug_info.append("Face_edge_inner_lengths:{}".format(str.join("|", [f"{e:.2f}" for e in self.sx])))
debug_info.append(f"Tray_height:{self.h:.2f}")
debug_info.append(f"Content_height:{self.h / math.cos(math.radians(self.Slot_angle)):.2f}")
self.text(str.join("\n", debug_info), x=5, y=5, align="bottom left")
def generate_finger_holes(self, length):
"""Drill finger-joint hole rows for each inner side wall along sx."""
posx = -0.5 * self.thickness
for x in self.sx[:-1]:
posx += x + self.thickness
self.fingerHolesAt(posx, 0, length)
def generate_divider(
self, widths, height, move,
first_tab_width=0, second_tab_width=0,
asymmetric_tabs=None):
"""Draw one divider spanning the given column widths.

first_tab_width/second_tab_width are the widths of the side tabs that
rest in the wall slots (0 when there is no wall on that side).
asymmetric_tabs, if given, is the fraction of Slot_depth used by the
left tab; the right tab gets the remainder so two dividers can share
one slot.
"""
total_width = sum(widths) + (len(widths)-1) * self.thickness + first_tab_width + second_tab_width
if self.move(total_width, height, move, True):
return
play = self.Divider_play
left_tab_height = right_tab_height = self.Slot_depth
if asymmetric_tabs:
left_tab_height = left_tab_height * asymmetric_tabs - play
right_tab_height = right_tab_height * (1 - asymmetric_tabs)
# Upper: first tab width
if asymmetric_tabs:
self.moveTo(first_tab_width - play)
else:
self.edge(first_tab_width - play)
# Upper edge with a finger notch
for nr, width in enumerate(widths):
if nr > 0:
self.edge(self.thickness)
DividerNotchesEdge(
self,
[width],
)(width)
self.polyline(
# Upper: second tab width if needed
second_tab_width - play,
# First side, with tab depth only if there is 2 walls
90,
left_tab_height,
90,
second_tab_width,
-90,
height - left_tab_height,
90,
)
# Lower edge
for width in reversed(widths[1:]):
self.polyline(
width - 2 * play,
90,
height - self.Slot_depth,
-90,
self.thickness + 2 * play,
-90,
height - self.Slot_depth,
90,
)
self.polyline(
# Second side tab
widths[0] - 2 * play,
90,
height - self.Slot_depth,
-90,
first_tab_width,
90,
right_tab_height,
90
)
if asymmetric_tabs:
self.polyline(
first_tab_width - play,
-90,
self.Slot_depth-right_tab_height,
90
)
# Move for next piece
self.move(total_width, height, move, label="Divider")
class SlottedEdgeDescriptions:
"""Ordered sequence of slot and straight-edge descriptions for one side wall."""
def __init__(self) -> None:
# Holds SlotDescription and StraightEdgeDescription objects in drawing
# order (the original ``list[str]`` annotation was inaccurate).
self.descriptions: "list[SlotDescription | StraightEdgeDescription]" = []
def add(self, description: "SlotDescription | StraightEdgeDescription") -> None:
self.descriptions.append(description)
def get_straight_edges(self):
# Only the straight runs; slots are skipped.
return [x for x in self.descriptions if isinstance(x, StraightEdgeDescription)]
def get_last_edge(self):
return self.descriptions[-1]
def adjust_to_target_length(self, target_length):
# Shrink the straight edges proportionally so the whole edge traces
# exactly target_length (used when measurements are outside sizes).
actual_length = sum([d.tracing_length() for d in self.descriptions])
compensation = actual_length - target_length
compensation_ratio = compensation / sum(
[d.asked_length for d in self.get_straight_edges()]
)
for edge in self.get_straight_edges():
edge.outside_ratio = 1 - compensation_ratio
def total_length(self):
"""Total length traced by all descriptions."""
return sum([x.tracing_length() for x in self.descriptions])
class StraightEdgeDescription:
    """A straight run between slots on a slotted side-wall edge.

    Stores the nominal (asked) length together with the corrections imposed
    by neighbouring rounded slot entrances (``round_edge_compensation``),
    the slot angle (``angle_compensation``) and outside-measurement
    shrinking (``outside_ratio``).
    """

    def __init__(
        self,
        asked_length,
        round_edge_compensation=0,
        outside_ratio=1,
        angle_compensation=0,
    ) -> None:
        self.asked_length = asked_length
        self.round_edge_compensation = round_edge_compensation
        self.outside_ratio = outside_ratio
        self.angle_compensation = angle_compensation

    def __repr__(self) -> str:
        return (
            f"StraightEdgeDescription({self.asked_length}"
            f", round_edge_compensation={self.round_edge_compensation}"
            f", angle_compensation={self.angle_compensation}"
            f", outside_ratio={self.outside_ratio})"
        )

    def tracing_length(self):
        """
        How much length should take tracing this straight edge
        """
        scaled = self.asked_length * self.outside_ratio
        return scaled - self.round_edge_compensation + self.angle_compensation

    def useful_length(self):
        """
        Part of the length which might be used by the content of the tray
        """
        return self.asked_length * self.outside_ratio
class Memoizer(dict):
    """Dict that lazily computes and caches values for missing keys.

    ``computation`` is a one-argument callable; the first lookup of a key
    stores ``computation(key)`` in the dict, so later lookups are plain
    dict hits.
    """

    def __init__(self, computation) -> None:
        self.computation = computation

    def __missing__(self, key):
        value = self.computation(key)
        self[key] = value
        return value
class SlotDescription:
"""Geometry of a single (possibly angled, rounded) divider slot."""
# Shared caches: the trig values only depend on the angle, which is the
# same for every slot in a tray.
_div_by_cos_cache = Memoizer(lambda a: 1 / math.cos(math.radians(a)))
_tan_cache = Memoizer(lambda a: math.tan(math.radians(a)))
def __init__(
self, width, depth=20, angle=0, radius=0, start_radius=None, end_radius=None
) -> None:
self.depth = depth
self.width = width
# Per-end radii default to the common radius when not given explicitly.
self.start_radius = radius if start_radius is None else start_radius
self.end_radius = radius if end_radius is None else end_radius
self.angle = angle
def __repr__(self) -> str:
return f"SlotDescription({self.width}, depth={self.depth}, angle={self.angle}, start_radius={self.start_radius}, end_radius={self.end_radius})"
def _div_by_cos(self):
return SlotDescription._div_by_cos_cache[self.angle]
def _tan(self):
return SlotDescription._tan_cache[self.angle]
def angle_corrected_width(self):
"""
returns how much width is the slot when measured horizontally, since the angle makes it bigger.
It's the same as the slot entrance width when radius is 0°.
"""
return self.width * self._div_by_cos()
def round_edge_start_correction(self):
"""
returns by how much we need to stop tracing our straight lines at the start of the slot
in order to do a curve line instead
"""
return self.start_radius * (self._div_by_cos() - self._tan())
def round_edge_end_correction(self):
"""
returns by how much we need to stop tracing our straight lines at the end of the slot
in order to do a curve line instead
"""
return self.end_radius * (self._div_by_cos() + self._tan())
def _depth_angle_correction(self):
"""
The angle makes one side of the slot deeper than the other.
"""
extra_depth = self.width * self._tan()
return extra_depth
def corrected_start_depth(self):
"""
Returns the depth of the straight part of the slot starting side
"""
extra_depth = self._depth_angle_correction()
return self.depth + max(0, extra_depth) - self.round_edge_start_correction()
def corrected_end_depth(self):
"""
Returns the depth of the straight part of the slot ending side
"""
extra_depth = self._depth_angle_correction()
return self.depth + max(0, -extra_depth) - self.round_edge_end_correction()
def tracing_length(self):
"""
How much length this slot takes on an edge
"""
return (
self.round_edge_start_correction()
+ self.angle_corrected_width()
+ self.round_edge_end_correction()
)
class SlotDescriptionsGenerator:
"""Builds the alternating slot/straight-edge sequence for a side wall."""
def generate_all_same_angles(
self, sections, thickness, extra_slack, depth, height, angle, radius=2,
):
"""Return a SlottedEdgeDescriptions for the given sy sections.

All slots share the same angle, depth and radius; the slot width is
thickness + extra_slack.
"""
width = thickness + extra_slack
descriptions = SlottedEdgeDescriptions()
# Special case: if first slot start at 0, then radius is 0
first_correction = 0
current_section = 0
if sections[0] == 0:
slot = SlotDescription(
width, depth=depth, angle=angle, start_radius=0, end_radius=radius,
)
descriptions.add(slot)
first_correction = slot.round_edge_end_correction()
current_section += 1
first_length = sections[current_section]
current_section += 1
descriptions.add(
StraightEdgeDescription(
first_length, round_edge_compensation=first_correction
)
)
for l in sections[current_section:]:
slot = SlotDescription(width, depth=depth, angle=angle, radius=radius,)
# Fix previous edge length
previous_edge = descriptions.get_last_edge()
previous_edge.round_edge_compensation += slot.round_edge_start_correction()
# Add this slot
descriptions.add(slot)
# Add the straight edge after this slot
descriptions.add(
StraightEdgeDescription(l, slot.round_edge_end_correction())
)
# We need to add extra space for the divider (or the actual content)
# to slide all the way down to the bottom of the tray in spite of walls
end_length = height * math.tan(math.radians(angle))
descriptions.get_last_edge().angle_compensation += end_length
return descriptions
class DividerNotchesEdge(edges.BaseEdge):
"""Edge with multiple notches for easier access to dividers"""
description = "Edge with multiple notches for easier access to dividers"
def __init__(self, boxes, sx) -> None:
super().__init__(boxes, None)
# sx: list of section widths, one notch per section.
self.sx = sx
def __call__(self, _, **kw):
# Length argument is ignored; the widths in self.sx drive the drawing.
first = True
for width in self.sx:
if first:
first = False
else:
# Skip over the divider/wall thickness between sections.
self.edge(self.thickness)
self.edge_with_notch(width)
def edge_with_notch(self, width):
# width (with notch if possible)
# Notch uses the middle third of the section; the radii eat the rest.
upper_third = (
width - 2 * self.Notch_upper_radius - 2 * self.Notch_lower_radius
) / 3
if upper_third > 0:
straightHeight = (
self.Notch_depth - self.Notch_upper_radius - self.Notch_lower_radius
)
self.polyline(
upper_third,
(90, self.Notch_upper_radius),
straightHeight,
(-90, self.Notch_lower_radius),
upper_third,
(-90, self.Notch_lower_radius),
straightHeight,
(90, self.Notch_upper_radius),
upper_third,
)
else:
# if there isn't enough room for the radius, we don't use it
self.edge(width)
class DividerSlotsEdge(edges.BaseEdge):
"""Edge with multiple angled rounded slots for dividers"""
description = "Edge with multiple angled rounded slots for dividers"
def __init__(self, boxes, descriptions) -> None:
super().__init__(boxes, None)
# descriptions: SlotDescription / StraightEdgeDescription objects in order.
self.descriptions = descriptions
def __call__(self, length, **kw):
self.ctx.save()
for description in self.descriptions:
if isinstance(description, SlotDescription):
self.do_slot(description)
elif isinstance(description, StraightEdgeDescription):
self.do_straight_edge(description)
# rounding errors might accumulate :
# restore context and redo the move straight
self.ctx.restore()
self.moveTo(length)
def do_straight_edge(self, straight_edge):
self.edge(straight_edge.tracing_length())
def do_slot(self, slot):
self.ctx.save()
# Down one side, across the bottom, back up — with the per-end radii
# and the angle-corrected depths from the description.
self.polyline(
0,
(90 - slot.angle, slot.start_radius),
slot.corrected_start_depth(),
-90,
slot.width,
-90,
slot.corrected_end_depth(),
(90 + slot.angle, slot.end_radius),
)
# rounding errors might accumulate :
# restore context and redo the move straight
self.ctx.restore()
self.moveTo(slot.tracing_length())
| 21,934 | Python | .py | 530 | 30.579245 | 212 | 0.580104 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,701 | gear.py | florianfesti_boxes/boxes/generators/gear.py | # Copyright (C) 2013-2016 Florian Festi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from boxes import *
class Gears(Boxes):
    """Generate a matching pair of gears.

    Renders the larger gear (teeth2) and the smaller one (teeth1) with
    D-shaped shaft holes, plus two sample shaft holes at the correct axis
    distance and a text label with the resulting measurements.
    """

    ui_group = "Part"

    def __init__(self) -> None:
        Boxes.__init__(self)
        self.argparser.add_argument(
            "--teeth1", action="store", type=int, default=12,
            help="number of teeth")
        self.argparser.add_argument(
            "--shaft1", action="store", type=float, default=6.,
            help="diameter of the shaft 1")
        self.argparser.add_argument(
            "--dpercentage1", action="store", type=float, default=75,
            help="percent of the D section of shaft 1 (100 for round shaft)")
        self.argparser.add_argument(
            "--teeth2", action="store", type=int, default=32,
            help="number of teeth in the other set of gears")
        self.argparser.add_argument(
            "--shaft2", action="store", type=float, default=0.0,
            help="diameter of the shaft2 (zero for same as shaft 1)")
        self.argparser.add_argument(
            "--dpercentage2", action="store", type=float, default=0,
            # fixed copy-paste error: this is the D section of shaft 2
            help="percent of the D section of shaft 2 (0 for same as shaft 1)")
        self.argparser.add_argument(
            "--modulus", action="store", type=float, default=2,
            help="size of teeth (diameter / #teeth) in mm")
        self.argparser.add_argument(
            "--pressure_angle", action="store", type=float, default=20,
            help="angle of the teeth touching (in degrees)")
        self.argparser.add_argument(
            "--profile_shift", action="store", type=float, default=20,
            help="in percent of the modulus")

    def render(self):
        t = self.thickness
        # A gear needs at least two teeth to be generated.
        self.teeth1 = max(2, self.teeth1)
        self.teeth2 = max(2, self.teeth2)
        # Fall back to shaft 1's values when shaft 2 parameters are left at 0.
        if not self.shaft2:
            self.shaft2 = self.shaft1
        if not self.dpercentage2:
            self.dpercentage2 = self.dpercentage1
        self.gears(teeth=self.teeth2, dimension=self.modulus,
                   angle=self.pressure_angle, profile_shift=self.profile_shift,
                   callback=lambda: self.dHole(0, 0, d=self.shaft2,
                                               rel_w=self.dpercentage2 / 100.),
                   move="up")
        # sizes() yields three values; only the first and last are used here.
        # (The original code unpacked into a duplicate name: ``r2, d2, d2``.)
        r2, _, d2 = self.gears.sizes(
            teeth=self.teeth2, dimension=self.modulus,
            angle=self.pressure_angle, profile_shift=self.profile_shift)
        self.gears(teeth=self.teeth1, dimension=self.modulus,
                   angle=self.pressure_angle, profile_shift=self.profile_shift,
                   callback=lambda: self.dHole(0, 0, d=self.shaft1,
                                               rel_w=self.dpercentage1 / 100.),
                   move="up")
        r1, _, d1 = self.gears.sizes(
            teeth=self.teeth1, dimension=self.modulus,
            angle=self.pressure_angle, profile_shift=self.profile_shift)
        # Two sample shaft holes spaced at the axis distance for test fitting.
        r = max(self.shaft1, self.shaft2) / 2
        self.hole(t + r, t + r, self.shaft1 / 2)
        self.hole(t + r + r1 + r2, t + r, self.shaft2 / 2)
        self.moveTo(0, 2 * r + t)
        self.text(f"Pitch radius 1: {r1:.1f}mm\n"
                  f"Outer diameter 1: {d1:.1f}mm\n"
                  f"Pitch radius 2: {r2:.1f}mm\n"
                  f"Outer diameter 2: {d2:.1f}mm\n"
                  f"Axis distance: {r1 + r2:.1f}mm\n",
                  align="bottom left")
| 4,098 | Python | .py | 82 | 39.060976 | 79 | 0.59955 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,702 | wallslottedholder.py | florianfesti_boxes/boxes/generators/wallslottedholder.py | # Copyright (C) 2013-2019 Florian Festi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from boxes import *
from boxes.walledges import _WallMountedBox
class FrontEdge(edges.Edge):
    """Top edge of the holder plate: one rounded tool slot per section."""

    def __call__(self, length, **kw):
        slot_w = self.slot_width
        tool_w = self.tool_width
        depth = self.slot_depth
        # Outer radius is capped by the slot depth and the material left
        # between slots; inner radius by the slot width itself.
        outer_r = min(self.radius, depth / 2, (tool_w - slot_w) / 2)
        inner_r = min(self.radius, slot_w / 2)
        shoulder = (tool_w - slot_w) / 2 - outer_r
        segments = (
            shoulder, (90, outer_r),
            depth - outer_r - inner_r, (-90, inner_r),
            slot_w - 2 * inner_r, (-90, inner_r),
            depth - outer_r - inner_r, (90, outer_r),
            shoulder,
        )
        for _ in range(self.number):
            self.polyline(*segments)
class WallSlottedHolder(_WallMountedBox):
    """Wall tool holder with slots"""

    def __init__(self) -> None:
        super().__init__()
        self.buildArgParser(h=120)
        self.argparser.add_argument(
            "--slot_depth", action="store", type=float, default=50.,
            help="depth of slots from the front")
        self.argparser.add_argument(
            "--additional_depth", action="store", type=float, default=50.,
            # fixed typo: "lots" -> "slots"
            help="depth behind the slots")
        self.argparser.add_argument(
            "--slot_width", action="store", type=float, default=5.,
            help="width of slots")
        self.argparser.add_argument(
            "--tool_width", action="store", type=float, default=35.,
            help="overall width for the tools")
        #self.argparser.add_argument(
        #    "--angle", action="store", type=float, default=0.,
        #    help="angle of the top - positive for leaning backwards")
        self.argparser.add_argument(
            "--radius", action="store", type=float, default=5.,
            help="radius of the slots at the front")
        self.argparser.add_argument(
            "--number", action="store", type=int, default=6,
            help="number of tools/slots")
        self.argparser.add_argument(
            "--hooks", action="store", type=str, default="all",
            choices=("all", "odds", "everythird"),
            help="amount of hooks / braces")

    def brace(self, i):
        """Return True if brace number i (0..number) should be generated."""
        n = self.number
        # The two outermost braces are always needed.
        if i in (0, n):
            return True
        # fold for symmetry
        #if i > n//2:
        #    i = n - i
        if self.hooks == "all":
            return True
        elif self.hooks == "odds":
            return not (i % 2)
        elif self.hooks == "everythird":
            return not (i % 3)
        # Explicit fallback so braces() always sums booleans (the implicit
        # None return would break sum() for an unexpected hooks value).
        return False

    def braces(self):
        """Number of braces that will actually be generated."""
        return sum(self.brace(i) for i in range(self.number+1))

    def backCB(self):
        """Callback: wall-edge hole rows on the back plate for each brace."""
        n = self.number
        ws = self.slot_width
        wt = self.tool_width
        t = self.thickness
        # Outermost rows are pulled inwards so they stay within the plate.
        d = min(2*t, (wt-ws)/4.)
        self.wallHolesAt(d, 0, self.h, 90)
        self.wallHolesAt(n*wt-d, 0, self.h, 90)
        for i in range(1, n):
            if self.brace(i):
                self.wallHolesAt(i*wt, 0, self.h, 90)

    def topCB(self):
        """Callback: finger-joint hole rows on the top plate for each brace."""
        n = self.number
        ws = self.slot_width
        wt = self.tool_width
        t = self.thickness
        l = self.additional_depth + self.slot_depth
        d = min(2*t, (wt-ws)/4.)
        self.fingerHolesAt(d, 0, l, 90)
        self.fingerHolesAt(n*wt-d, 0, l, 90)
        for i in range(1, n):
            if self.brace(i):
                self.fingerHolesAt(i*wt, 0, l, 90)

    def render(self):
        self.generateWallEdges()

        t = self.thickness
        l1, l2 = self.additional_depth, self.slot_depth
        ws = self.slot_width
        wt = self.tool_width
        n = self.number

        # Back plate, slotted top plate, and the triangular braces.
        self.rectangularWall(n*wt, self.h, "eeee", callback=[self.backCB], move="up")
        self.rectangularWall(n*wt, l1+l2, [FrontEdge(self, None), "e", "e", "e"], callback=[self.topCB], move="up")
        self.moveTo(0, t)
        self.rectangularTriangle(l1+l2, self.h, "fbe", r=3*t, num=self.braces())
| 4,415 | Python | .py | 106 | 32.981132 | 113 | 0.582517 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,703 | filltest.py | florianfesti_boxes/boxes/generators/filltest.py | # Copyright (C) 2013-2016 Florian Festi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
from boxes import *
class FillTest(Boxes): # Change class name!
"""Piece for testing different settings for hole filling"""
ui_group = "Part"
def __init__(self) -> None:
Boxes.__init__(self)
self.addSettingsArgs(fillHolesSettings, fill_pattern="hex")
self.buildArgParser(x=320, y=220)
def xHoles(self):
# Draw an irregular test polygon (scaled from a 320x220 reference
# shape) and fill it with holes using the current fill settings.
# border = [(5, 10), (245, 10), (225, 150), (235, 150), (255, 10), (290, 10), (270, 190), (45, 190), (45, 50), (35, 50), (35, 190), (5, 190)]
x, y = self.x, self.y
border = [
( 5/320*x, 10/220*y),
(245/320*x, 10/220*y),
(225/320*x, 150/220*y),
(235/320*x, 150/220*y),
(255/320*x, 10/220*y),
(290/320*x, 10/220*y),
(270/320*x, 190/220*y),
( 45/320*x, 190/220*y),
( 45/320*x, 50/220*y),
( 35/320*x, 50/220*y),
( 35/320*x, 190/220*y),
( 5/320*x, 190/220*y),
]
self.showBorderPoly(border)
self.text("Area to be filled", x/2, 190/220*y, align="bottom center", color=Color.ANNOTATIONS)
# Timing scaffold kept for local benchmarking (print is commented out).
start_time = time.time()
self.fillHoles(
pattern=self.fillHoles_fill_pattern,
border=border,
max_radius=self.fillHoles_hole_max_radius,
hspace=self.fillHoles_space_between_holes,
bspace=self.fillHoles_space_to_border,
min_radius=self.fillHoles_hole_min_radius,
style=self.fillHoles_hole_style,
bar_length=self.fillHoles_bar_length,
max_random=self.fillHoles_max_random
)
end_time = time.time()
# print('fillHoles - Execution time:', (end_time-start_time)*1000, 'ms ', self.fillHoles_fill_pattern)
def render(self):
self.rectangularWall(self.x, self.y, "eeee", callback=[self.xHoles, None, None, None],)
| 2,617 | Python | .py | 58 | 37.206897 | 148 | 0.606216 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,704 | laserclamp.py | florianfesti_boxes/boxes/generators/laserclamp.py | # Copyright (C) 2013-2016 Florian Festi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from boxes import *
class LaserClamp(Boxes):
"""A clamp to hold down material to a knife table"""
description = """You need a tension spring of the proper length to make the clamp work.
Increase extraheight to get more space for the spring and to make the
sliding mechanism less likely to bind. You may need to add some wax on the
parts sliding on each other to reduce friction.
"""
ui_group = "Misc"
def __init__(self) -> None:
Boxes.__init__(self)
self.addSettingsArgs(edges.FingerJointSettings, surroundingspaces=0)
self.argparser.add_argument(
"--minheight", action="store", type=float, default=25.,
help="minimal clamping height in mm")
self.argparser.add_argument(
"--maxheight", action="store", type=float, default=50.,
help="maximal clamping height in mm")
self.argparser.add_argument(
"--extraheight", action="store", type=float, default=0.,
help="extra height to make operation smoother in mm")
def topPart(self, l, move=None):
# Sliding top piece; l is the length of its slide travel.
# All measures are in multiples of the material thickness t.
t = self. thickness
tw, th = 12*t, l+4*t
if self.move(tw, th, move, True):
return
self.moveTo(8*t, 0)
self.rectangularHole(t, 2*t+l/2, 1.05*t, l)
self.polyline(2*t, (90, t), l+1.5*t, (-90, 0.5*t),
2*t, -90, 0, (180, 0.5*t), 0,
(90, 1.5*t), 9*t,
(180, 4*t), 2*t, (-90, t))
# Presumably the anchor hole for the tension spring — TODO confirm.
self.hole(-5*t, -3*t, 2.5*t)
self.polyline(l-5.5*t, (90, t))
self.move(tw, th, move)
def bottomPart(self, h_min, h_extra, move=None):
# Fixed bottom piece the top part slides against.
t = self. thickness
tw, th = 14*t, h_min+4*t
if self.move(tw, th, move, True):
return
ls = t/2*(2**.5)
self.moveTo(2*t, 0)
self.fingerHolesAt(3*t, 2*t, h_min+h_extra, 90)
if h_extra:
self.polyline(4*t, (90,t), h_extra-2*t, (-90, t))
else:
self.polyline(6*t)
self.polyline(4*t, (90, 2*t), 3*t, 135, 2*ls, 45, 1*t, -90, 6*t, -90)
self.polyline(h_min, (90, t), 2*t, (90, t),
h_min+h_extra-0*t, (-90, t), t, (180, t),
0, 90, 0, (-180, 0.5*t), 0 , 90)
self.move(tw, th, move)
def render(self):
t = self. thickness
h_max, h_min, h_extra = self.maxheight, self.minheight,self.extraheight
# Any non-zero extra height needs room for the guiding curve.
if h_extra and h_extra < 2*t:
h_extra = 2*t
self.topPart(h_max+h_extra, move="right")
self.bottomPart(h_min, h_extra, move="right")
self.roundedPlate(4*t, h_min+h_extra+4*t, edge="e", r=t,
extend_corners=False, move="right",
callback=[lambda: self.fingerHolesAt(1*t, 2*t, h_min+h_extra)])
# Slightly oversized (1.1*t) spacer so the slide does not bind.
self.rectangularWall(1.1*t, h_min+h_extra, "efef")
| 3,548 | Python | .py | 77 | 37.493506 | 91 | 0.591423 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,705 | lamp.py | florianfesti_boxes/boxes/generators/lamp.py | # Copyright (C) 2013-2014 Florian Festi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from boxes import *
"""
22x7.5x7cm
D=23cm, d=21cm
d = 8" D = 9"
"""
class RoundedTriangleSettings(edges.Settings):
"""Settings for the RoundedTriangle edge (angle, corner radius, optional hole)."""
absolute_params = {
"angle": 60,
"radius": 30,
"r_hole": 0.0,
}
class RoundedTriangle(edges.Edge):
"""Edge that traces an isosceles triangle with rounded corners over the base length."""
char = "t"
def __call__(self, length, **kw):
angle = self.settings.angle
r = self.settings.radius
if self.settings.r_hole:
# Optional hole at the triangle's apex.
x = 0.5 * (length - 2 * r) * math.tan(math.radians(angle))
y = 0.5 * (length)
self.hole(x, y, self.settings.r_hole)
# Length of each slanted side between the rounded corners.
l = 0.5 * (length - 2 * r) / math.cos(math.radians(angle))
self.corner(90 - angle, r)
self.edge(l)
self.corner(2 * angle, r)
self.edge(l)
self.corner(90 - angle, r)
def startAngle(self) -> float:
return 90.0
def endAngle(self) -> float:
return 90.0
class Lamp(Boxes):
# Not exposed in the web UI.
webinterface = False
def __init__(self) -> None:
Boxes.__init__(self)
self.addSettingsArgs(edges.FingerJointSettings)
self.buildArgParser(x=220, y=75, h=70)
self.argparser.add_argument(
"--radius", action="store", type=float, default="105",
help="radius of the lamp")
self.argparser.add_argument(
"--width", action="store", type=float, default="10",
help="width of the ring")
def side(self, y, h):
# NOTE(review): the immediate `return` makes everything below
# unreachable — this looks like a left-over draft of the side piece.
return
self.edges["f"](y)
self.corner(90)
self.edges["f"](h)
self.roundedTriangle(y, 75, 25)
self.edges["f"](h)
self.corner(90)
def render(self):
"""
r : radius of lamp
w : width of surrounding ring
x : length box
y : width box
h : height box
"""
# self.edges["f"].settings = (5, 5) # XXX
x, y, h = self.x, self.y, self.h
r, w = self.radius, self.width
s = RoundedTriangleSettings(self.thickness, angle=72, r_hole=2)
self.addPart(RoundedTriangle(self, s))
self.flexSettings = (3, 5.0, 20.0)
self.edges["f"].settings.setValues(self.thickness, finger=5, space=5, relative=False)
d = 2 * (r + w)
# Ring plates (top with cut-out, bottom with holes margin).
self.roundedPlate(d, d, r, move="right", callback=[
lambda: self.hole(w, r + w, r), ])
# dist = ((2**0.5)*r-r) / (2**0.5) + 4
# pos = (w-dist, dist)
self.roundedPlate(d, d, r, holesMargin=w / 2.0) # , callback=[
# lambda: self.hole(pos[0], pos[1], 7),])
self.roundedPlate(d, d, r, move="only left up")
hole = lambda: self.hole(w, 70, 2)
# Flexible band wrapped around the ring.
self.surroundingWall(d, d, r, 120, top='h', bottom='h', callback=[
None, hole, None, hole], move="up")
with self.saved_context():
self.rectangularWall(x, y, edges="fFfF", holesMargin=5, move="right")
self.rectangularWall(x, y, edges="fFfF", holesMargin=5, move="right")
# sides
self.rectangularWall(y, h, "fftf", move="right")
self.rectangularWall(y, h, "fftf")
self.rectangularWall(x, y, edges="fFfF", holesMargin=5,
move="up only")
self.rectangularWall(x, h, edges='hFFF', holesMargin=5, move="right")
self.rectangularWall(x, h, edges='hFFF', holesMargin=5)
| 4,002 | Python | .py | 101 | 31.80198 | 93 | 0.586625 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,706 | keypad.py | florianfesti_boxes/boxes/generators/keypad.py | """Generator for keypads with mechanical switches."""
from copy import deepcopy
from boxes import Boxes, boolarg
from boxes.edges import FingerJointSettings
from .keyboard import Keyboard
class Keypad(Boxes, Keyboard):
    """Generator for keypads with mechanical switches."""
    description = "Note that top layers use a different material thickness according to the top1_thickness and top2_thickness (if enabled)."
    ui_group = 'Box'

    # cutout size for one cherry-style switch in mm
    btn_size = 15.6
    # gap between neighbouring switch cutouts in mm
    space_between_btn = 4
    # margin between the outermost switches and the box walls in mm
    box_padding = 10
    # leg length of the screw-mount triangles in mm
    triangle = 25.0

    def __init__(self) -> None:
        super().__init__()
        self.argparser.add_argument(
            '--h', action='store', type=int, default=30,
            help='height of the box'
        )
        self.argparser.add_argument(
            '--top1_thickness', action='store', type=float, default=1.5,
            help=('thickness of the button hold layer, cherry like switches '
                  'need 1.5mm or smaller to snap in')
        )
        self.argparser.add_argument(
            '--top2_enable', action='store', type=boolarg, default=False,
            help=('enables another top layer that can hold CPG151101S11 '
                  'hotswap sockets')
        )
        self.argparser.add_argument(
            '--top2_thickness', action='store', type=float, default=1.5,
            help=('thickness of the hotplug layer, CPG151101S11 hotswap '
                  'sockets need 1.2mm to 1.5mm')
        )

        # Add parameter common with other keyboard projects
        self.add_common_keyboard_parameters(
            # Hotswap already depends on top2_enable setting, a second parameter
            # for it would be useless
            add_hotswap_parameter=False,
            # By default, 3 columns of 4 rows
            default_columns_definition="4x3"
        )

        self.addSettingsArgs(FingerJointSettings, surroundingspaces=1)

    def _get_x_y(self):
        """Gets the keypad's size based on the number of buttons.

        Returns the (width, depth) of the keypad plates in mm, derived
        from the configured column layout plus the padding around it.
        """
        # center-to-center pitch of the switches
        spacing = self.btn_size + self.space_between_btn
        # padding on both sides minus the trailing inter-key gap
        border = 2*self.box_padding - self.space_between_btn
        x = len(self.columns_definition) * spacing + border
        # depth is governed by the tallest column (offset + its keys)
        y = max(offset + keys * spacing for (offset, keys) in self.columns_definition) + border
        return x, y

    def render(self):
        """Renders the keypad: four walls, the stacked top plates and the
        screw-mount triangles."""
        # deeper edge for top to add multiple layers: the side-wall fingers
        # must reach through the base thickness plus the stacked top layers
        deep_edge = deepcopy(self.edges['f'].settings)
        deep_edge.thickness = self.thickness + self.top1_thickness
        if self.top2_enable:
            deep_edge.thickness += self.top2_thickness
        # register the deeper finger-joint edge variants as 'g', 'G', 'H'
        deep_edge.edgeObjects(self, 'gGH', True)

        # screw hole diameters in mm (d1 for the triangles, d2 for the lid)
        d1, d2 = 2., 3.
        x, y = self._get_x_y()
        h = self.h

        # box sides
        self.rectangularWall(x, h, "GFEF", callback=[self.wallx_cb], move="right")
        self.rectangularWall(y, h, "GfEf", callback=[self.wally_cb], move="up")
        self.rectangularWall(y, h, "GfEf", callback=[self.wally_cb])
        self.rectangularWall(x, h, "GFEF", callback=[self.wallx_cb], move="left up")

        # keypad lids
        self.rectangularWall(x, y, "ffff", callback=self.to_grid_callback(self.support_hole), move="right")
        self.rectangularWall(x, y, "ffff", callback=self.to_grid_callback(self.key_hole), move="up")
        if self.top2_enable:
            self.rectangularWall(x, y, "ffff", callback=self.to_grid_callback(self.hotplug))

        # screwable bottom plate and the corner triangles it screws into
        tr = self.triangle
        trh = tr / 3
        self.rectangularWall(
            x, y,
            callback=[lambda: self.hole(trh, trh, d=d2)] * 4,
            move='left up'
        )
        self.rectangularTriangle(
            tr, tr, "ffe", num=4,
            callback=[None, lambda: self.hole(trh, trh, d=d1)]
        )

    def to_grid_callback(self, inner_callback):
        """Wrap *inner_callback* so it is applied once per key position."""
        def callback():
            # move to first key center
            key_margin = self.box_padding + self.btn_size / 2
            self.moveTo(key_margin, key_margin)
            self.apply_callback_on_columns(
                inner_callback, self.columns_definition, self.btn_size + self.space_between_btn
            )
        return [callback]

    def hotplug(self):
        """Callback for the key stabilizers."""
        self.pcb_holes(
            with_pcb_mount=self.pcb_mount_enable,
            with_diode=self.diode_enable,
            with_led=self.led_enable,
        )

    def support_hole(self):
        # cutout for the support layer (below the key plate)
        self.configured_plate_cutout(support=True)

    def key_hole(self):
        # cutout for a single switch in the key plate
        self.configured_plate_cutout()

    # adapted from the electronics-box generator
    def wallx_cb(self):
        """Callback for triangle holes on x-side."""
        x, _ = self._get_x_y()
        t = self.thickness
        self.fingerHolesAt(0, self.h - 1.5 * t, self.triangle, 0)
        self.fingerHolesAt(x, self.h - 1.5 * t, self.triangle, 180)

    # adapted from the electronics-box generator
    def wally_cb(self):
        """Callback for triangle holes on y-side."""
        _, y = self._get_x_y()
        t = self.thickness
        self.fingerHolesAt(0, self.h - 1.5 * t, self.triangle, 0)
        self.fingerHolesAt(y, self.h - 1.5 * t, self.triangle, 180)
| 5,168 | Python | .py | 117 | 34.717949 | 140 | 0.608791 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,707 | keyholder.py | florianfesti_boxes/boxes/generators/keyholder.py | # Copyright (C) 2013-2014 Florian Festi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from boxes import *
class HangerEdge(edges.BaseEdge):
    """Edge that draws a wall hook along its length.

    Hook dimensions come from the generator's hook_height and
    hook_thickness attributes (resolved through the Boxes object).
    """
    char = "H"

    def margin(self) -> float:
        return self.hook_height * 0.7

    def __call__(self, l, **kw):
        r_out = self.hook_height * 0.5      # radius of the hook's outer bend
        r_in = r_out - self.hook_thickness  # matching inner radius
        r_burr = 1.5                        # small radius to soften sharp corners
        inner_h = self.hook_height * 0.7
        hook_len = self.hook_height * 0.7

        half_t = self.hook_thickness / 2

        self.polyline(0, -90)               # turn into drawing orientation
        self.edge(hook_len - r_out)         # bottom line
        self.corner(90, r_out)              # outer bend
        self.edge(inner_h - r_out - half_t)  # up the outside
        self.corner(180, half_t)            # rounded tip
        # back down the inside of the hook
        self.edge(inner_h - self.hook_thickness - half_t - r_in)
        self.corner(-90, r_in)              # inner bend
        # bottom line inside the hook
        self.edge(hook_len - self.hook_thickness - 2 * r_burr - r_in)
        self.corner(-90, r_burr)
        # back up to the edge
        self.edge(self.hook_height - self.hook_thickness - 2 * r_burr)
        self.corner(90, r_burr)
        self.polyline(0, -90)               # restore orientation
class KeyHolder(Boxes):
    """Wall organizer with hooks for keys or similar small items"""

    description = """Example for a KeyHolder with a slightly larger backplate and 8 hooks. This uses 6mm plywood for extra stability.
Closeup:

Full picture:
"""

    ui_group = "WallMounted"

    def __init__(self) -> None:
        Boxes.__init__(self)
        self.argparser.add_argument(
            "--num_hooks", action="store", type=int, default=7, help="Number of hooks"
        )
        self.argparser.add_argument(
            "--hook_distance",
            action="store",
            type=float,
            default=20,
            help="Distance between hooks",
        )
        self.argparser.add_argument(
            "--hook_thickness",
            action="store",
            type=float,
            default=5,
            help="Thickness of hook",
        )
        self.argparser.add_argument(
            "--hook_height",
            action="store",
            type=float,
            default=20,
            help="Height of back part of hook",
        )
        # Padding around the hooks to define the size of the back plate
        self.argparser.add_argument(
            "--padding_top",
            action="store",
            type=float,
            default=10,
            help="Padding above hooks",
        )
        self.argparser.add_argument(
            "--padding_left_right",
            action="store",
            type=float,
            default=5,
            help="Padding left/right from hooks",
        )
        self.argparser.add_argument(
            "--padding_bot",
            action="store",
            type=float,
            default=30,
            help="Padding below hooks",
        )
        self.argparser.add_argument(
            "--mounting",
            action="store",
            type=boolarg,
            default=False,
            help="Add mounting holes",
        )
        self.addSettingsArgs(
            edges.FingerJointSettings, surroundingspaces=0.0, finger=1.0, space=1.0
        )
        self.addSettingsArgs(edges.MountingSettings)

    def yHoles(self):
        """
        Holes for hooks to attach to
        """
        # first hook sits half a material thickness past the left padding
        posx = 0.5 * self.thickness + self.padding_left_right
        for _ in range(self.num_hooks):
            self.fingerHolesAt(posx, self.padding_bot, self.hook_height)
            posx += self.hook_distance + self.thickness

    def render(self):
        # HangerEdge uses no settings, so register it with None
        # (previously a meaningless magic integer was passed)
        self.addPart(HangerEdge(self, None))

        # Total height and width of the backplate
        h = self.hook_height + self.padding_bot + self.padding_top
        w = (
            (self.padding_left_right * 2)
            + self.num_hooks * self.thickness
            + (self.num_hooks - 1) * self.hook_distance
        )

        # Back plate (optionally with mounting-hole edge at the top)
        self.rectangularWall(
            w,
            h,
            "eeGe" if self.mounting else "eeee",
            callback=[self.yHoles, None, None, None],
            move="up",
        )

        # Hooks
        for _ in range(self.num_hooks):
            self.rectangularWall(
                self.hook_thickness, self.hook_height, "eHef", move="right"
            )
| 5,382 | Python | .py | 152 | 26.085526 | 133 | 0.582259 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,708 | boxes.py.pot | florianfesti_boxes/po/boxes.py.pot | # SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2023-07-16 21:47+0200\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"Language: \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
#. e edge description
#: boxes/edges.py
msgid "Straight Edge"
msgstr ""
#. e edge description
#: boxes/edges.py
msgid "e Straight Edge"
msgstr ""
#. E edge description
#: boxes/edges.py
msgid "Straight Edge (outset by thickness)"
msgstr ""
#. E edge description
#: boxes/edges.py
msgid "E Straight Edge (outset by thickness)"
msgstr ""
#. G edge description
#: boxes/edges.py
msgid "Edge with pear shaped mounting holes"
msgstr ""
#. G edge description
#: boxes/edges.py
msgid "G Edge with pear shaped mounting holes"
msgstr ""
#. z edge description
#: boxes/edges.py
msgid "Edge with grooves"
msgstr ""
#. z edge description
#: boxes/edges.py
msgid "z Edge with grooves"
msgstr ""
#. Z edge description
#: boxes/edges.py
msgid "Edge with grooves (opposing side)"
msgstr ""
#. Z edge description
#: boxes/edges.py
msgid "Z Edge with grooves (opposing side)"
msgstr ""
#. g edge description
#: boxes/edges.py
msgid "Corrugated edge useful as an gipping area"
msgstr ""
#. g edge description
#: boxes/edges.py
msgid "g Corrugated edge useful as an gipping area"
msgstr ""
#. f edge description
#: boxes/edges.py
msgid "Finger Joint"
msgstr ""
#. f edge description
#: boxes/edges.py
msgid "f Finger Joint"
msgstr ""
#. F edge description
#: boxes/edges.py
msgid "Finger Joint (opposing side)"
msgstr ""
#. F edge description
#: boxes/edges.py
msgid "F Finger Joint (opposing side)"
msgstr ""
#. h edge description
#: boxes/edges.py
msgid "Edge (parallel Finger Joint Holes)"
msgstr ""
#. h edge description
#: boxes/edges.py
msgid "h Edge (parallel Finger Joint Holes)"
msgstr ""
#. | edge description
#: boxes/edges.py
msgid "Edge (orthogonal Finger Joint Holes)"
msgstr ""
#. | edge description
#: boxes/edges.py
msgid "| Edge (orthogonal Finger Joint Holes)"
msgstr ""
#. s edge description
#: boxes/edges.py
msgid "Stackable (bottom, finger joint holes)"
msgstr ""
#. s edge description
#: boxes/edges.py
msgid "s Stackable (bottom, finger joint holes)"
msgstr ""
#. S edge description
#: boxes/edges.py
msgid "Stackable (top)"
msgstr ""
#. S edge description
#: boxes/edges.py
msgid "S Stackable (top)"
msgstr ""
#. Å¡ edge description
#: boxes/edges.py
msgid "Stackable feet (bottom)"
msgstr ""
#. Å¡ edge description
#: boxes/edges.py
msgid "Å¡ Stackable feet (bottom)"
msgstr ""
#. Å edge description
#: boxes/edges.py
msgid "Stackable edge with finger holes (top)"
msgstr ""
#. Å edge description
#: boxes/edges.py
msgid "Å Stackable edge with finger holes (top)"
msgstr ""
#. i edge description
#: boxes/edges.py
msgid "Straight edge with hinge eye"
msgstr ""
#. i edge description
#: boxes/edges.py
msgid "i Straight edge with hinge eye"
msgstr ""
#. I edge description
#: boxes/edges.py
msgid "Edge with hinge pin"
msgstr ""
#. I edge description
#: boxes/edges.py
msgid "I Edge with hinge pin"
msgstr ""
#. o edge description
#: boxes/edges.py
msgid "Edge with chest hinge"
msgstr ""
#. o edge description
#: boxes/edges.py
msgid "o Edge with chest hinge"
msgstr ""
#. p edge description
#: boxes/edges.py
msgid "p Edge with chest hinge"
msgstr ""
#. q edge description
#: boxes/edges.py
msgid "Edge with pins for an chest hinge"
msgstr ""
#. q edge description
#: boxes/edges.py
msgid "q Edge with pins for an chest hinge"
msgstr ""
#. Q edge description
#: boxes/edges.py
msgid "Edge opposing a chest hinge"
msgstr ""
#. Q edge description
#: boxes/edges.py
msgid "Q Edge opposing a chest hinge"
msgstr ""
#. u edge description
#: boxes/edges.py
msgid "Edge with cabinet hinges"
msgstr ""
#. u edge description
#: boxes/edges.py
msgid "u Edge with cabinet hinges"
msgstr ""
#. l edge description
#: boxes/edges.py
msgid "Edge for slide on lid (back)"
msgstr ""
#. l edge description
#: boxes/edges.py
msgid "l Edge for slide on lid (back)"
msgstr ""
#. L edge description
#: boxes/edges.py
msgid "Edge for slide on lid (box back)"
msgstr ""
#. L edge description
#: boxes/edges.py
msgid "L Edge for slide on lid (box back)"
msgstr ""
#. n edge description
#: boxes/edges.py
msgid "Edge for slide on lid (right)"
msgstr ""
#. n edge description
#: boxes/edges.py
msgid "n Edge for slide on lid (right)"
msgstr ""
#. m edge description
#: boxes/edges.py
msgid "Edge for slide on lid (left)"
msgstr ""
#. m edge description
#: boxes/edges.py
msgid "m Edge for slide on lid (left)"
msgstr ""
#. N edge description
#: boxes/edges.py
msgid "Edge for slide on lid (box right)"
msgstr ""
#. N edge description
#: boxes/edges.py
msgid "N Edge for slide on lid (box right)"
msgstr ""
#. M edge description
#: boxes/edges.py
msgid "Edge for slide on lid (box left)"
msgstr ""
#. M edge description
#: boxes/edges.py
msgid "M Edge for slide on lid (box left)"
msgstr ""
#. c edge description
#: boxes/edges.py
msgid "Click on (bottom side)"
msgstr ""
#. c edge description
#: boxes/edges.py
msgid "c Click on (bottom side)"
msgstr ""
#. C edge description
#: boxes/edges.py
msgid "Click on (top)"
msgstr ""
#. C edge description
#: boxes/edges.py
msgid "C Click on (top)"
msgstr ""
#. d edge description
#: boxes/edges.py
msgid "Dove Tail Joint"
msgstr ""
#. d edge description
#: boxes/edges.py
msgid "d Dove Tail Joint"
msgstr ""
#. D edge description
#: boxes/edges.py
msgid "Dove Tail Joint (opposing side)"
msgstr ""
#. D edge description
#: boxes/edges.py
msgid "D Dove Tail Joint (opposing side)"
msgstr ""
#. X edge description
#: boxes/edges.py
msgid "Flex cut"
msgstr ""
#. X edge description
#: boxes/edges.py
msgid "X Flex cut"
msgstr ""
#. R edge description
#: boxes/edges.py
msgid "Rack (and pinion) Edge"
msgstr ""
#. R edge description
#: boxes/edges.py
msgid "R Rack (and pinion) Edge"
msgstr ""
#. t edge description
#: boxes/edges.py
msgid "Triangle for handle"
msgstr ""
#. t edge description
#: boxes/edges.py
msgid "t Triangle for handle"
msgstr ""
#. T edge description
#: boxes/edges.py
msgid "T Triangle for handle"
msgstr ""
#. y edge description
#: boxes/edges.py
msgid "Handle for e.g. a drawer"
msgstr ""
#. y edge description
#: boxes/edges.py
msgid "y Handle for e.g. a drawer"
msgstr ""
#. Y edge description
#: boxes/edges.py
msgid "Handle with holes for parallel finger joint"
msgstr ""
#. Y edge description
#: boxes/edges.py
msgid "Y Handle with holes for parallel finger joint"
msgstr ""
#. j edge description
#: boxes/edges.py
msgid "Straight edge with hinge eye (other end)"
msgstr ""
#. j edge description
#: boxes/edges.py
msgid "j Straight edge with hinge eye (other end)"
msgstr ""
#. J edge description
#: boxes/edges.py
msgid "Edge with hinge pin (other end)"
msgstr ""
#. J edge description
#: boxes/edges.py
msgid "J Edge with hinge pin (other end)"
msgstr ""
#. k edge description
#: boxes/edges.py
msgid "Straight edge with hinge eye (both ends)"
msgstr ""
#. k edge description
#: boxes/edges.py
msgid "k Straight edge with hinge eye (both ends)"
msgstr ""
#. K edge description
#: boxes/edges.py
msgid "Edge with hinge pin (both ends)"
msgstr ""
#. K edge description
#: boxes/edges.py
msgid "K Edge with hinge pin (both ends)"
msgstr ""
#. O edge description
#: boxes/edges.py
msgid "Edge with chest hinge (other end)"
msgstr ""
#. O edge description
#: boxes/edges.py
msgid "O Edge with chest hinge (other end)"
msgstr ""
#. P edge description
#: boxes/edges.py
msgid "P Edge with chest hinge (other end)"
msgstr ""
#. U edge description
#: boxes/edges.py
msgid "Edge with cabinet hinges top side"
msgstr ""
#. U edge description
#: boxes/edges.py
msgid "U Edge with cabinet hinges top side"
msgstr ""
#. v edge description
#: boxes/edges.py
msgid "Edge with cabinet hinges for 90° lid"
msgstr ""
#. v edge description
#: boxes/edges.py
msgid "v Edge with cabinet hinges for 90° lid"
msgstr ""
#. V edge description
#: boxes/edges.py
msgid "Edge with cabinet hinges 90° lid"
msgstr ""
#. V edge description
#: boxes/edges.py
msgid "V Edge with cabinet hinges 90° lid"
msgstr ""
#. name of generator group
#: boxes/generators/__init__.py
msgid "Box"
msgstr ""
#. title of group Box
#: boxes/generators/__init__.py
msgid "Boxes"
msgstr ""
#. name of generator group
#: boxes/generators/__init__.py
msgid "FlexBox"
msgstr ""
#. title of group FlexBox
#: boxes/generators/__init__.py
msgid "Boxes with flex"
msgstr ""
#. name of generator group
#: boxes/generators/__init__.py
msgid "Tray"
msgstr ""
#. title of group Tray
#: boxes/generators/__init__.py
msgid "Trays and Drawer Inserts"
msgstr ""
#. name of generator group
#: boxes/generators/__init__.py
msgid "Shelf"
msgstr ""
#. title of group Shelf
#: boxes/generators/__init__.py
msgid "Shelves"
msgstr ""
#. name of generator group
#: boxes/generators/__init__.py
msgid "WallMounted"
msgstr ""
#. name of generator group
#: boxes/generators/__init__.py
msgid "Holes"
msgstr ""
#. title of group Holes
#: boxes/generators/__init__.py
msgid "Hole patterns"
msgstr ""
#. name of generator group
#: boxes/generators/__init__.py
msgid "Part"
msgstr ""
#. title of group Part
#: boxes/generators/__init__.py
msgid "Parts and Samples"
msgstr ""
#. name of generator group
#: boxes/generators/__init__.py
msgid "Misc"
msgstr ""
#. name of generator group
#: boxes/generators/__init__.py
msgid "Unstable"
msgstr ""
#. description of group Unstable
#: boxes/generators/__init__.py
msgid "Generators are still untested or need manual adjustment to be useful."
msgstr ""
msgid "DefaultParams Settings"
msgstr ""
#. parameter name
msgid "x"
msgstr ""
#. help for parameter x
msgid "inner width in mm (unless outside selected)"
msgstr ""
#. parameter name
msgid "y"
msgstr ""
#. help for parameter y
msgid "inner depth in mm (unless outside selected)"
msgstr ""
#. parameter name
msgid "h"
msgstr ""
#. help for parameter h
msgid "inner height in mm (unless outside selected)"
msgstr ""
#. parameter name
msgid "hi"
msgstr ""
#. help for parameter hi
msgid ""
"inner height of inner walls in mm (unless outside selected)(leave to zero "
"for same as outer walls)"
msgstr ""
#. parameter name
msgid "sx"
msgstr ""
#. help for parameter sx
msgid ""
"sections left to right in mm [🛈](https://florianfesti.github.io/boxes/html/"
"usermanual.html#section-parameters)"
msgstr ""
#. parameter name
msgid "sy"
msgstr ""
#. help for parameter sy
msgid ""
"sections back to front in mm [🛈](https://florianfesti.github.io/boxes/html/"
"usermanual.html#section-parameters)"
msgstr ""
#. parameter name
msgid "sh"
msgstr ""
#. help for parameter sh
msgid ""
"sections bottom to top in mm [🛈](https://florianfesti.github.io/boxes/html/"
"usermanual.html#section-parameters)"
msgstr ""
#. parameter name
msgid "bottom_edge"
msgstr ""
#. help for parameter bottom_edge
msgid "edge type for bottom edge"
msgstr ""
#. possible choice for bottom_edge
msgid "F"
msgstr ""
#. possible choice for bottom_edge
msgid "s"
msgstr ""
#. possible choice for bottom_edge
msgid "e"
msgstr ""
#. parameter name
msgid "top_edge"
msgstr ""
#. help for parameter top_edge
msgid "edge type for top edge"
msgstr ""
#. possible choice for top_edge
msgid "f"
msgstr ""
#. possible choice for top_edge
msgid "c"
msgstr ""
#. possible choice for top_edge
msgid "E"
msgstr ""
#. possible choice for top_edge
msgid "S"
msgstr ""
#. possible choice for top_edge
msgid "Å "
msgstr ""
#. possible choice for top_edge
msgid "i"
msgstr ""
#. possible choice for top_edge
msgid "k"
msgstr ""
#. possible choice for top_edge
msgid "v"
msgstr ""
#. possible choice for top_edge
msgid "L"
msgstr ""
#. possible choice for top_edge
msgid "t"
msgstr ""
#. possible choice for top_edge
msgid "G"
msgstr ""
#. possible choice for top_edge
msgid "Y"
msgstr ""
#. parameter name
msgid "outside"
msgstr ""
#. help for parameter outside
msgid ""
"treat sizes as outside measurements [🛈](https://florianfesti.github.io/boxes/"
"html/usermanual.html#outside)"
msgstr ""
#. parameter name
msgid "nema_mount"
msgstr ""
#. help for parameter nema_mount
msgid "NEMA size of motor"
msgstr ""
msgid "Default Settings"
msgstr ""
#. parameter name
msgid "thickness"
msgstr ""
#. help for parameter thickness
msgid ""
"thickness of the material (in mm) [🛈](https://florianfesti.github.io/boxes/"
"html/usermanual.html#thickness)"
msgstr ""
#. parameter name
msgid "format"
msgstr ""
#. help for parameter format
msgid ""
"format of resulting file [🛈](https://florianfesti.github.io/boxes/html/"
"usermanual.html#format)"
msgstr ""
#. possible choice for format
msgid "dxf"
msgstr ""
#. possible choice for format
msgid "gcode"
msgstr ""
#. possible choice for format
msgid "lbrn2"
msgstr ""
#. possible choice for format
msgid "pdf"
msgstr ""
#. possible choice for format
msgid "plt"
msgstr ""
#. possible choice for format
msgid "ps"
msgstr ""
#. possible choice for format
msgid "svg"
msgstr ""
#. possible choice for format
msgid "svg_Ponoko"
msgstr ""
#. parameter name
msgid "tabs"
msgstr ""
#. help for parameter tabs
msgid ""
"width of tabs holding the parts in place (in mm)(not supported everywhere) "
"[🛈](https://florianfesti.github.io/boxes/html/usermanual.html#tabs)"
msgstr ""
#. parameter name
msgid "qr_code"
msgstr ""
#. help for parameter qr_code
msgid "Add a QR Code with link or command line to the generated output"
msgstr ""
#. parameter name
msgid "debug"
msgstr ""
#. help for parameter debug
msgid ""
"print surrounding boxes for some structures [🛈](https://florianfesti.github."
"io/boxes/html/usermanual.html#debug)"
msgstr ""
#. parameter name
msgid "labels"
msgstr ""
#. help for parameter labels
msgid "label the parts (where available)"
msgstr ""
#. parameter name
msgid "reference"
msgstr ""
#. help for parameter reference
msgid ""
"print reference rectangle with given length (in mm)(zero to disable) [🛈]"
"(https://florianfesti.github.io/boxes/html/usermanual.html#reference)"
msgstr ""
#. parameter name
msgid "inner_corners"
msgstr ""
#. help for parameter inner_corners
msgid ""
"style for inner corners [🛈](https://florianfesti.github.io/boxes/html/"
"usermanual.html#inner-corners)"
msgstr ""
#. possible choice for inner_corners
msgid "loop"
msgstr ""
#. possible choice for inner_corners
msgid "corner"
msgstr ""
#. possible choice for inner_corners
msgid "backarc"
msgstr ""
#. parameter name
msgid "burn"
msgstr ""
#. help for parameter burn
msgid ""
"burn correction (in mm)(bigger values for tighter fit) [🛈](https://"
"florianfesti.github.io/boxes/html/usermanual.html#burn)"
msgstr ""
msgid "Settings for Finger Joints"
msgstr ""
#. parameter name for FingerJoint
msgid "angle"
msgstr ""
#. parameter name for FingerJoint
msgid "style"
msgstr ""
#. help for parameter style
msgid "style of the fingers"
msgstr ""
#. possible choice for style
msgid "rectangular"
msgstr ""
#. possible choice for style
msgid "springs"
msgstr ""
#. possible choice for style
msgid "barbs"
msgstr ""
#. possible choice for style
msgid "snap"
msgstr ""
#. parameter name for FingerJoint
msgid "surroundingspaces"
msgstr ""
#. help for parameter surroundingspaces
msgid "space at the start and end in multiple of normal spaces"
msgstr ""
#. parameter name for FingerJoint
msgid "bottom_lip"
msgstr ""
#. help for parameter bottom_lip
msgid ""
"height of the bottom lips sticking out (multiples of thickness) "
"FingerHoleEdge only!"
msgstr ""
#. parameter name for FingerJoint
msgid "edge_width"
msgstr ""
#. help for parameter edge_width
msgid "space below holes of FingerHoleEdge (multiples of thickness)"
msgstr ""
#. parameter name for FingerJoint
msgid "extra_length"
msgstr ""
#. help for parameter extra_length
msgid "extra material to grind away burn marks (multiples of thickness)"
msgstr ""
#. parameter name for FingerJoint
msgid "finger"
msgstr ""
#. help for parameter finger
msgid "width of the fingers (multiples of thickness)"
msgstr ""
#. parameter name for FingerJoint
msgid "play"
msgstr ""
#. help for parameter play
msgid "extra space to allow finger move in and out (multiples of thickness)"
msgstr ""
#. parameter name for FingerJoint
msgid "space"
msgstr ""
#. help for parameter space
msgid "space between fingers (multiples of thickness)"
msgstr ""
#. parameter name for FingerJoint
msgid "width"
msgstr ""
#. help for parameter width
msgid "width of finger holes (multiples of thickness)"
msgstr ""
msgid "Settings for Stackable Edges"
msgstr ""
#. help for parameter angle
msgid "inside angle of the feet"
msgstr ""
#. parameter name for Stackable
msgid "bottom_stabilizers"
msgstr ""
#. help for parameter bottom_stabilizers
msgid ""
"height of strips to be glued to the inside of bottom edges (multiples of "
"thickness)"
msgstr ""
#. parameter name for Stackable
msgid "height"
msgstr ""
#. help for parameter height
msgid "height of the feet (multiples of thickness)"
msgstr ""
#. parameter name for Stackable
msgid "holedistance"
msgstr ""
#. help for parameter holedistance
msgid "distance from finger holes to bottom edge (multiples of thickness)"
msgstr ""
#. help for parameter width
msgid "width of the feet (multiples of thickness)"
msgstr ""
msgid "Settings for Hinges and HingePins"
msgstr ""
#. parameter name for Hinge
msgid "grip_percentage"
msgstr ""
#. parameter name for Hinge
msgid "outset"
msgstr ""
#. help for parameter outset
msgid "have lid overlap at the sides (similar to OutSetEdge)"
msgstr ""
#. parameter name for Hinge
msgid "pinwidth"
msgstr ""
#. help for parameter pinwidth
msgid "set to lower value to get disks surrounding the pins"
msgstr ""
#. help for parameter style
msgid "\"outset\" or \"flush\""
msgstr ""
#. possible choice for style
msgid "flush"
msgstr ""
#. parameter name for Hinge
msgid "axle"
msgstr ""
#. help for parameter axle
msgid "diameter of the pin hole (multiples of thickness)"
msgstr ""
#. parameter name for Hinge
msgid "grip_length"
msgstr ""
#. help for parameter grip_length
msgid "fixed length of the grips on he lids (multiples of thickness)"
msgstr ""
#. parameter name for Hinge
msgid "hingestrength"
msgstr ""
#. help for parameter hingestrength
msgid "thickness of the arc holding the pin in place (multiples of thickness)"
msgstr ""
msgid "Settings for Slide-on Lids"
msgstr ""
#. parameter name for SlideOnLid
msgid "hole_width"
msgstr ""
#. help for parameter hole_width
msgid "width of the \"finger hole\" in mm"
msgstr ""
#. parameter name for SlideOnLid
msgid "second_pin"
msgstr ""
#. help for parameter second_pin
msgid "additional pin for better positioning"
msgstr ""
#. parameter name for SlideOnLid
msgid "spring"
msgstr ""
#. help for parameter spring
msgid "position(s) of the extra locking springs in the lid"
msgstr ""
#. possible choice for spring
msgid "both"
msgstr ""
#. possible choice for spring
msgid "none"
msgstr ""
#. possible choice for spring
msgid "left"
msgstr ""
#. possible choice for spring
msgid "right"
msgstr ""
msgid "Settings for Click-on Lids"
msgstr ""
#. help for parameter angle
msgid "angle of the hooks bending outward"
msgstr ""
#. parameter name for Click
msgid "bottom_radius"
msgstr ""
#. help for parameter bottom_radius
msgid "radius at the bottom (multiples of thickness)"
msgstr ""
#. parameter name for Click
msgid "depth"
msgstr ""
#. help for parameter depth
msgid "length of the hooks (multiples of thickness)"
msgstr ""
msgid "Settings for Flex"
msgstr ""
#. parameter name for Flex
msgid "stretch"
msgstr ""
#. help for parameter stretch
msgid "Hint of how much the flex part should be shortened"
msgstr ""
#. parameter name for Flex
msgid "connection"
msgstr ""
#. help for parameter connection
msgid "width of the gaps in the cuts (multiples of thickness)"
msgstr ""
#. parameter name for Flex
msgid "distance"
msgstr ""
#. help for parameter distance
msgid "width of the pattern perpendicular to the cuts (multiples of thickness)"
msgstr ""
#. help for parameter width
msgid "width of the pattern in direction of the cuts (multiples of thickness)"
msgstr ""
msgid "Settings for the Lid"
msgstr ""
#. parameter name for Lid
msgid "handle"
msgstr ""
#. help for parameter handle
msgid "type of handle"
msgstr ""
#. possible choice for handle
msgid "long_rounded"
msgstr ""
#. possible choice for handle
msgid "long_trapezoid"
msgstr ""
#. possible choice for handle
msgid "long_doublerounded"
msgstr ""
#. possible choice for handle
msgid "knob"
msgstr ""
#. help for parameter style
msgid "type of lid to create"
msgstr ""
#. possible choice for style
msgid "flat"
msgstr ""
#. possible choice for style
msgid "chest"
msgstr ""
#. possible choice for style
msgid "overthetop"
msgstr ""
#. possible choice for style
msgid "ontop"
msgstr ""
#. parameter name for Lid
msgid "handle_height"
msgstr ""
#. help for parameter handle_height
msgid "height of the handle (if applicable)"
msgstr ""
#. help for parameter height
msgid "height of the brim (if any)"
msgstr ""
#. help for parameter play
msgid "play when sliding the lid on (if applicable)"
msgstr ""
#. name of box generator
#: boxes/generators/abox.py
msgid "ABox"
msgstr ""
#. description of ABox
#: boxes/generators/abox.py
msgid "A simple Box"
msgstr ""
#. long description of ABox in markdown
#: boxes/generators/abox.py
msgid ""
"This box is kept simple on purpose. If you need more features have a look at "
"the UniversalBox."
msgstr ""
msgid "ABox Settings"
msgstr ""
#. name of box generator
#: boxes/generators/agricolainsert.py
msgid "AgricolaInsert"
msgstr ""
#. description of AgricolaInsert
#: boxes/generators/agricolainsert.py
msgid ""
"\n"
" Agricola Revised Edition game box insert, including some expansions.\n"
" "
msgstr ""
#. long description of AgricolaInsert in markdown
#: boxes/generators/agricolainsert.py
msgid ""
"\n"
"This insert was designed with 3 mm plywood in mind, and should work fine "
"with\n"
"materials around this thickness.\n"
"\n"
"This is an insert for the [Agricola Revised Edition](https://boardgamegeek."
"com/boardgame/200680/agricola-revised-edition)\n"
"board game. It is specifically designed around the [Farmers Of The Moor "
"expansion](https://boardgamegeek.com/boardgameexpansion/257344/agricola-"
"farmers-moor),\n"
"and should also store the [5-6 players expansion](https://boardgamegeek.com/"
"boardgameexpansion/210625/agricola-expansion-5-and-6-players)\n"
"(not tested, but I tried to take everything into account for it, please "
"inform\n"
"us if you tested it).\n"
"\n"
"It can be stored inside the original game box, including the 2 expansions,\n"
"with the lid slightly raised.\n"
"\n"
"The parts of a given element are mostly generated next to each other "
"vertically.\n"
"It should be straightforward to match them.\n"
"\n"
"Here are the different elements, from left to right in the generated file.\n"
"\n"
"#### Card tray\n"
"\n"
"The cards are all kept in a tray, with paper dividers to sort them easily. "
"When\n"
"the tray is not full of cards, wood dividers slides in slots in order to "
"keep\n"
"the cards from falling into the empty space.\n"
"\n"
"There should be enough space for the main game, Farmers Of The Moor, and the "
"5-6\n"
"player expansion, but not much more than that.\n"
"\n"
"To keep a lower profile, the cards are at a slight angle, and the paper "
"dividers\n"
"tabs are horizontal instead of vertical.\n"
"A small wall keeps the card against one side while the tabs protrude on the\n"
"other side, above the small wall.\n"
"\n"
"The wall with the big hole is the sloped one. It goes between the two\n"
"\"comb-like\" walls first, with its two small holes at the bottom. Then "
"there is a\n"
"low-height long wall with a sloped edge which should go from the sloped wall "
"to\n"
"the other side. You can finish the tray with the last wall at the end.\n"
"\n"
"#### Upper level trays\n"
"\n"
"4 trays with movable walls are used to store resources. They were designed "
"to\n"
"store them in this order:\n"
"\n"
"* Stone / Vegetable / Pig / Cow\n"
"* Reed / Grain / Sheep\n"
"* Wood / Clay\n"
"* Food / Fire\n"
"\n"
"The wall would probably be better if fixed instead of movable, but I would "
"like\n"
"to test with the 5-6 player expansion to be sure their positions are "
"correct\n"
"with it too.\n"
"\n"
"The little feet of the movable wall should be glued. The triangles are put\n"
"horizontally, with their bases towards the sides.\n"
"\n"
"#### Lower level tray\n"
"\n"
"The lower level tray is used to store the horses.\n"
"\n"
"#### Room/Field tiles\n"
"\n"
"Two boxes are generated to store the room/field tiles. One for the wood/"
"field,\n"
"the other for the clay/stone. They are stored with the main opening upside, "
"but\n"
"I prefer to use them during play with this face on the side.\n"
"\n"
"#### Moor/Forest and miscellaneous tiles\n"
"\n"
"A box is generated to store the Moor/Forest tiles, and some other tiles such "
"as\n"
"the \"multiple resources\" cardboard tokens.\n"
"\n"
"The Moor/Forest tiles are at the same height as the Room/Field, and the "
"upper\n"
"level trays are directly on them. The horse box and player box are slightly\n"
"lower. This Moor/Forest box have a lowered corner (the one for the "
"miscellaneous\n"
"tiles). Two cardboard pieces can be stored between the smaller boxes and "
"the\n"
"upper level trays (as seen on the picture).\n"
"\n"
"Be sure to match the pieces so that the walls with smaller heights are next "
"to\n"
"each other.\n"
"\n"
"#### Players bit boxes\n"
"\n"
"Each player has its own box where the bits of his color are stored.\n"
"The cardboard bed from Farmers Of The Moor is central to this box.\n"
"\n"
"* The fences are stored inside the bed\n"
"* The bed is placed in the box, with holes to keep it there (and to take "
"less\n"
" height)\n"
"* The stables are stored in the two corners\n"
"* The five farmers are stored between the bed and the three walls, "
"alternatively\n"
" head up and head down.\n"
"\n"
"During assembly, the small bars are put in the middle holes. The two bigger\n"
"holes at the ends are used for the bed feet. The bar keeps the bed from\n"
"protruding underneath.\n"
"\n"
msgstr ""
msgid "AgricolaInsert Settings"
msgstr ""
#. name of box generator
#: boxes/generators/alledges.py
msgid "AllEdges"
msgstr ""
#. description of AllEdges
#: boxes/generators/alledges.py
msgid "Showing all edge types"
msgstr ""
msgid "AllEdges Settings"
msgstr ""
#. help for parameter x
#: boxes/generators/alledges.py
msgid "inner width in mm"
msgstr ""
msgid "Settings for HandleEdge"
msgstr ""
#. help for parameter height
#: boxes/generators/alledges.py
msgid "height above the wall in mm"
msgstr ""
#. parameter name for HandleEdge
#: boxes/generators/alledges.py
msgid "hole_height"
msgstr ""
#. help for parameter hole_height
#: boxes/generators/alledges.py
msgid ""
"height of hole(s) in percentage of maximum hole height (handle height - 2 * "
"material thickness)"
msgstr ""
#. help for parameter hole_width
#: boxes/generators/alledges.py
msgid ""
"width of hole(s) in percentage of maximum hole width (width of edge - (n+1) "
"* material thickness)"
msgstr ""
#. parameter name for HandleEdge
#: boxes/generators/alledges.py
msgid "on_sides"
msgstr ""
#. help for parameter on_sides
#: boxes/generators/alledges.py
msgid ""
"added to side panels if checked, to front and back otherwise (only used with "
"top_edge parameter)"
msgstr ""
#. parameter name for HandleEdge
#: boxes/generators/alledges.py
msgid "radius"
msgstr ""
#. help for parameter radius
#: boxes/generators/alledges.py
msgid "radius of corners in mm"
msgstr ""
#. help for parameter outset
#: boxes/generators/alledges.py
msgid "extend the handle along the length of the edge (multiples of thickness)"
msgstr ""
#. name of box generator
#: boxes/generators/angledbox.py
msgid "AngledBox"
msgstr ""
#. description of AngledBox
#: boxes/generators/angledbox.py
msgid "Box with both ends cornered"
msgstr ""
msgid "AngledBox Settings"
msgstr ""
#. parameter name
#: boxes/generators/angledbox.py
msgid "n"
msgstr ""
#. help for parameter n
#: boxes/generators/angledbox.py
msgid "number of walls at one side (1+)"
msgstr ""
#. parameter name
#: boxes/generators/angledbox.py
msgid "top"
msgstr ""
#. help for parameter top
#: boxes/generators/angledbox.py
msgid "style of the top and lid"
msgstr ""
#. possible choice for top
#: boxes/generators/angledbox.py
msgid "angled hole"
msgstr ""
#. possible choice for top
#: boxes/generators/angledbox.py
msgid "angled lid"
msgstr ""
#. possible choice for top
#: boxes/generators/angledbox.py
msgid "angled lid2"
msgstr ""
#. name of box generator
#: boxes/generators/angledcutjig.py
msgid "AngledCutJig"
msgstr ""
#. description of AngledCutJig
#: boxes/generators/angledcutjig.py
msgid "Jig for making angled cuts in a laser cutter"
msgstr ""
msgid "AngledCutJig Settings"
msgstr ""
#. help for parameter y
#: boxes/generators/angledcutjig.py
msgid "inner depth in mm"
msgstr ""
#. help for parameter angle
#: boxes/generators/angledcutjig.py
msgid "Angle of the cut"
msgstr ""
#. name of box generator
#: boxes/generators/arcade.py
msgid "Arcade"
msgstr ""
#. description of Arcade
#: boxes/generators/arcade.py
msgid "Desktop Arcade Machine"
msgstr ""
msgid "Arcade Settings"
msgstr ""
#. help for parameter width
#: boxes/generators/arcade.py
msgid "inner width of the console"
msgstr ""
#. parameter name
#: boxes/generators/arcade.py
msgid "monitor_height"
msgstr ""
#. parameter name
#: boxes/generators/arcade.py
msgid "keyboard_depth"
msgstr ""
#. name of box generator
#: boxes/generators/atreus21.py
msgid "Atreus21"
msgstr ""
#. description of Atreus21
#: boxes/generators/atreus21.py
msgid "Generator for a split atreus keyboard."
msgstr ""
msgid "Atreus21 Settings"
msgstr ""
#. parameter name
#: boxes/generators/atreus21.py
msgid "hotswap_enable"
msgstr ""
#. help for parameter hotswap_enable
#: boxes/generators/atreus21.py
msgid "enlarge switches holes for hotswap pcb sockets"
msgstr ""
#. parameter name
#: boxes/generators/atreus21.py
msgid "pcb_mount_enable"
msgstr ""
#. help for parameter pcb_mount_enable
#: boxes/generators/atreus21.py
msgid "adds holes for pcb mount switches"
msgstr ""
#. parameter name
#: boxes/generators/atreus21.py
msgid "led_enable"
msgstr ""
#. help for parameter led_enable
#: boxes/generators/atreus21.py
msgid "adds pin holes under switches for leds"
msgstr ""
#. parameter name
#: boxes/generators/atreus21.py
msgid "diode_enable"
msgstr ""
#. help for parameter diode_enable
#: boxes/generators/atreus21.py
msgid "adds pin holes under switches for diodes"
msgstr ""
#. parameter name
#: boxes/generators/atreus21.py
msgid "cutout_type"
msgstr ""
#. help for parameter cutout_type
#: boxes/generators/atreus21.py
msgid ""
"Shape of the plate cutout: 'castle' allows for modding, and 'simple' is a "
"tighter and simpler square"
msgstr ""
#. parameter name
#: boxes/generators/atreus21.py
msgid "columns_definition"
msgstr ""
#. help for parameter columns_definition
#: boxes/generators/atreus21.py
msgid ""
"Each column is separated by '/', and is in the form 'nb_rows @ offset x "
"repeat_count'. Nb_rows is the number of rows for this column. The offset is "
"in mm and optional. Repeat_count is optional and repeats this column "
"multiple times. Spaces are not important. For example '3x2 / 4@11' means we "
"want 3 columns, the two first with 3 rows without offset, and the last with "
"4 rows starting at 11mm high."
msgstr ""
#. name of box generator
#: boxes/generators/basedbox.py
msgid "BasedBox"
msgstr ""
#. description of BasedBox
#: boxes/generators/basedbox.py
msgid "Fully closed box on a base"
msgstr ""
#. long description of BasedBox in markdown
#: boxes/generators/basedbox.py
msgid ""
"This box is more of a building block than a finished item.\n"
"Use a vector graphics program (like Inkscape) to add holes or adjust the "
"base\n"
"plate. The width of the \"brim\" can also be adjusted with the "
"**edge_width**\n"
" parameter in the **Finger Joints Settings**.\n"
" \n"
"See ClosedBox for variant without a base.\n"
msgstr ""
msgid "BasedBox Settings"
msgstr ""
#. name of box generator
#: boxes/generators/bayonetbox.py
msgid "BayonetBox"
msgstr ""
#. description of BayonetBox
#: boxes/generators/bayonetbox.py
msgid "Round box made from layers with twist on top"
msgstr ""
#. long description of BayonetBox in markdown
#: boxes/generators/bayonetbox.py
msgid ""
"Glue together - all outside rings to the bottom, all inside rings to the top."
msgstr ""
msgid "BayonetBox Settings"
msgstr ""
#. parameter name
#: boxes/generators/bayonetbox.py
msgid "diameter"
msgstr ""
#. help for parameter diameter
#: boxes/generators/bayonetbox.py
msgid "Diameter of the box in mm"
msgstr ""
#. parameter name
#: boxes/generators/bayonetbox.py
msgid "lugs"
msgstr ""
#. help for parameter lugs
#: boxes/generators/bayonetbox.py
msgid "number of locking lugs"
msgstr ""
#. parameter name
#: boxes/generators/bayonetbox.py
msgid "alignment_pins"
msgstr ""
#. help for parameter alignment_pins
#: boxes/generators/bayonetbox.py
msgid "diameter of the alignment pins"
msgstr ""
#. name of box generator
#: boxes/generators/bintray.py
msgid "BinTray"
msgstr ""
#. description of BinTray
#: boxes/generators/bintray.py
msgid "A Type tray variant to be used up right with sloped walls in front"
msgstr ""
msgid "BinTray Settings"
msgstr ""
#. parameter name
#: boxes/generators/bintray.py
msgid "hole_dD"
msgstr ""
#. help for parameter hole_dD
#: boxes/generators/bintray.py
msgid ""
"mounting hole diameter (shaft:head) in mm [🛈](https://florianfesti.github.io/"
"boxes/html/usermanual.html#mounting-holes)"
msgstr ""
#. parameter name
#: boxes/generators/bintray.py
msgid "front"
msgstr ""
#. help for parameter front
#: boxes/generators/bintray.py
msgid "fraction of bin height covered with slope"
msgstr ""
#. name of box generator
#: boxes/generators/birdhouse.py
msgid "BirdHouse"
msgstr ""
#. description of BirdHouse
#: boxes/generators/birdhouse.py
msgid "Simple Bird House"
msgstr ""
msgid "BirdHouse Settings"
msgstr ""
#. help for parameter h
#: boxes/generators/birdhouse.py
msgid "inner height in mm"
msgstr ""
#. parameter name
#: boxes/generators/birdhouse.py
msgid "roof_overhang"
msgstr ""
#. help for parameter roof_overhang
#: boxes/generators/birdhouse.py
msgid "overhang as fraction of the roof length"
msgstr ""
#. name of box generator
#: boxes/generators/bottlestack.py
msgid "BottleStack"
msgstr ""
#. description of BottleStack
#: boxes/generators/bottlestack.py
msgid "Stack bottles in a fridge"
msgstr ""
#. long description of BottleStack in markdown
#: boxes/generators/bottlestack.py
msgid ""
"When rendered with the \"double\" option the parts with the double slots get "
"connected by the shorter beams in the asymmetrical slots.\n"
"\n"
"Without the \"double\" option the stand is a bit more narrow.\n"
msgstr ""
msgid "BottleStack Settings"
msgstr ""
#. help for parameter diameter
#: boxes/generators/bottlestack.py
msgid "diameter of the bottles in mm"
msgstr ""
#. parameter name
#: boxes/generators/bottlestack.py
msgid "number"
msgstr ""
#. help for parameter number
#: boxes/generators/bottlestack.py
msgid "number of bottles to hold in the bottom row"
msgstr ""
#. help for parameter depth
#: boxes/generators/bottlestack.py
msgid "depth of the stand along the base of the bottles"
msgstr ""
#. parameter name
#: boxes/generators/bottlestack.py
msgid "double"
msgstr ""
#. help for parameter double
#: boxes/generators/bottlestack.py
msgid "two pieces that can be combined to up to double the width"
msgstr ""
#. name of box generator
#: boxes/generators/bottletag.py
msgid "BottleTag"
msgstr ""
#. description of BottleTag
#: boxes/generators/bottletag.py
msgid "Paper slip over bottle tag"
msgstr ""
msgid "BottleTag Settings"
msgstr ""
#. help for parameter width
#: boxes/generators/bottletag.py
msgid "width of neck tag"
msgstr ""
#. help for parameter height
#: boxes/generators/bottletag.py
msgid "height of neck tag"
msgstr ""
#. parameter name
#: boxes/generators/bottletag.py
msgid "min_diameter"
msgstr ""
#. help for parameter min_diameter
#: boxes/generators/bottletag.py
msgid "inner diameter of bottle neck hole"
msgstr ""
#. parameter name
#: boxes/generators/bottletag.py
msgid "max_diameter"
msgstr ""
#. help for parameter max_diameter
#: boxes/generators/bottletag.py
msgid "outer diameter of bottle neck hole"
msgstr ""
#. help for parameter radius
#: boxes/generators/bottletag.py
msgid "corner radius of bottom tag"
msgstr ""
#. parameter name
#: boxes/generators/bottletag.py
msgid "segment_width"
msgstr ""
#. help for parameter segment_width
#: boxes/generators/bottletag.py
msgid "inner segment width"
msgstr ""
#. name of box generator
#: boxes/generators/breadbox.py
msgid "BreadBox"
msgstr ""
#. description of BreadBox
#: boxes/generators/breadbox.py
msgid "A BreadBox with a gliding door"
msgstr ""
#. long description of BreadBox in markdown
#: boxes/generators/breadbox.py
msgid "Beware of the rolling shutter effect! Use wax on sliding surfaces.\n"
msgstr ""
msgid "BreadBox Settings"
msgstr ""
#. help for parameter radius
#: boxes/generators/breadbox.py
msgid "radius of the corners"
msgstr ""
#. name of box generator
#: boxes/generators/burntest.py
msgid "BurnTest"
msgstr ""
#. description of BurnTest
#: boxes/generators/burntest.py
msgid "Test different burn values"
msgstr ""
#. long description of BurnTest in markdown
#: boxes/generators/burntest.py
msgid ""
"This generator will make shapes that you can use to select\n"
"optimal value for burn parameter for other generators. After burning try to\n"
"attach sides with the same value and use best fitting one on real projects.\n"
"In this generator set burn in the Default Settings to the lowest value\n"
"to be tested. To get an idea cut a rectangle with known nominal size and\n"
"measure the shrinkage due to the width of the laser cut. Now you can\n"
"measure the burn value that you should use in other generators. It is half\n"
"the difference of the overall size as shrinkage is occurring on both\n"
"sides. You can use the reference rectangle as it is rendered without burn\n"
"correction.\n"
"\n"
"See also LBeam that can serve as compact BurnTest and FlexTest for testing "
"flex settings.\n"
msgstr ""
msgid "BurnTest Settings"
msgstr ""
#. parameter name
#: boxes/generators/burntest.py
msgid "step"
msgstr ""
#. help for parameter step
#: boxes/generators/burntest.py
msgid "increases in burn value between the sides"
msgstr ""
#. parameter name
#: boxes/generators/burntest.py
msgid "pairs"
msgstr ""
#. help for parameter pairs
#: boxes/generators/burntest.py
msgid "number of pairs (each testing four burn values)"
msgstr ""
#. name of box generator
#: boxes/generators/can_storage.py
msgid "CanStorage"
msgstr ""
#. description of CanStorage
#: boxes/generators/can_storage.py
msgid "Storage box for round containers"
msgstr ""
#. long description of CanStorage in markdown
#: boxes/generators/can_storage.py
msgid ""
"\n"
"for AA batteries:\n"
"\n"
"\n"
"\n"
"for canned tomatoes:\n"
msgstr ""
msgid "CanStorage Settings"
msgstr ""
#. possible choice for bottom_edge
#: boxes/generators/can_storage.py
msgid "š"
msgstr ""
#. parameter name
#: boxes/generators/can_storage.py
msgid "canDiameter"
msgstr ""
#. help for parameter canDiameter
#: boxes/generators/can_storage.py
msgid "outer diameter of the cans to be stored (in mm)"
msgstr ""
#. parameter name
#: boxes/generators/can_storage.py
msgid "canHeight"
msgstr ""
#. help for parameter canHeight
#: boxes/generators/can_storage.py
msgid "height of the cans to be stored (in mm)"
msgstr ""
#. parameter name
#: boxes/generators/can_storage.py
msgid "canNum"
msgstr ""
#. help for parameter canNum
#: boxes/generators/can_storage.py
msgid "number of cans to be stored"
msgstr ""
#. parameter name
#: boxes/generators/can_storage.py
msgid "chuteAngle"
msgstr ""
#. help for parameter chuteAngle
#: boxes/generators/can_storage.py
msgid "slope angle of the chutes"
msgstr ""
msgid "Settings for Hole filling"
msgstr ""
#. parameter name for fillHoles
#: boxes/generators/can_storage.py
msgid "bar_length"
msgstr ""
#. help for parameter bar_length
#: boxes/generators/can_storage.py
msgid "maximum length of bars"
msgstr ""
#. parameter name for fillHoles
#: boxes/generators/can_storage.py
msgid "fill_pattern"
msgstr ""
#. help for parameter fill_pattern
#: boxes/generators/can_storage.py
msgid "style of hole pattern"
msgstr ""
#. possible choice for fill_pattern
#: boxes/generators/can_storage.py
msgid "no fill"
msgstr ""
#. possible choice for fill_pattern
#: boxes/generators/can_storage.py
msgid "hex"
msgstr ""
#. possible choice for fill_pattern
#: boxes/generators/can_storage.py
msgid "square"
msgstr ""
#. possible choice for fill_pattern
#: boxes/generators/can_storage.py
msgid "random"
msgstr ""
#. possible choice for fill_pattern
#: boxes/generators/can_storage.py
msgid "hbar"
msgstr ""
#. possible choice for fill_pattern
#: boxes/generators/can_storage.py
msgid "vbar"
msgstr ""
#. parameter name for fillHoles
#: boxes/generators/can_storage.py
msgid "hole_max_radius"
msgstr ""
#. help for parameter hole_max_radius
#: boxes/generators/can_storage.py
msgid "maximum radius of generated holes (in mm)"
msgstr ""
#. parameter name for fillHoles
#: boxes/generators/can_storage.py
msgid "hole_min_radius"
msgstr ""
#. help for parameter hole_min_radius
#: boxes/generators/can_storage.py
msgid "minimum radius of generated holes (in mm)"
msgstr ""
#. parameter name for fillHoles
#: boxes/generators/can_storage.py
msgid "hole_style"
msgstr ""
#. help for parameter hole_style
#: boxes/generators/can_storage.py
msgid "style of holes (does not apply to fill patterns 'vbar' and 'hbar')"
msgstr ""
#. possible choice for hole_style
#: boxes/generators/can_storage.py
msgid "round"
msgstr ""
#. possible choice for hole_style
#: boxes/generators/can_storage.py
msgid "triangle"
msgstr ""
#. possible choice for hole_style
#: boxes/generators/can_storage.py
msgid "hexagon"
msgstr ""
#. possible choice for hole_style
#: boxes/generators/can_storage.py
msgid "octagon"
msgstr ""
#. parameter name for fillHoles
#: boxes/generators/can_storage.py
msgid "max_random"
msgstr ""
#. help for parameter max_random
#: boxes/generators/can_storage.py
msgid "maximum number of random holes"
msgstr ""
#. parameter name for fillHoles
#: boxes/generators/can_storage.py
msgid "space_between_holes"
msgstr ""
#. help for parameter space_between_holes
#: boxes/generators/can_storage.py
msgid "hole to hole spacing (in mm)"
msgstr ""
#. parameter name for fillHoles
#: boxes/generators/can_storage.py
msgid "space_to_border"
msgstr ""
#. help for parameter space_to_border
#: boxes/generators/can_storage.py
msgid "hole to border spacing (in mm)"
msgstr ""
#. name of box generator
#: boxes/generators/cardbox.py
msgid "CardBox"
msgstr ""
#. description of CardBox
#: boxes/generators/cardbox.py
msgid "Box for storage of playing cards, with versatile options"
msgstr ""
#. long description of CardBox in markdown
#: boxes/generators/cardbox.py
msgid ""
"\n"
"### Description\n"
"Versatile Box for Storage of playing cards. Multiple different styles of "
"storage are supportet, e.g. a flat storage or a trading card deck box style "
"storage. See images for ideas.\n"
"\n"
"#### Building instructions\n"
"Place inner walls on floor first (if any). Then add the outer walls. Glue "
"the two walls without finger joints to the inside of the side walls. Make "
"sure there is no squeeze out on top, as this is going to form the rail for "
"the lid.\n"
"\n"
"Add the top of the rails to the sides (front open) or to the back and front "
"(right side open) and the grip rail to the lid.\n"
"Details of the lid and rails\n"
"\n"
"Whole box (early version still missing grip rail on the lid):\n"
msgstr ""
msgid "CardBox Settings"
msgstr ""
#. parameter name
#: boxes/generators/cardbox.py
msgid "openingdirection"
msgstr ""
#. help for parameter openingdirection
#: boxes/generators/cardbox.py
msgid ""
"Direction in which the lid slides open. Lid length > Lid width recommended."
msgstr ""
#. parameter name
#: boxes/generators/cardbox.py
msgid "fingerhole"
msgstr ""
#. help for parameter fingerhole
#: boxes/generators/cardbox.py
msgid "Depth of cutout to grab the cards"
msgstr ""
#. possible choice for fingerhole
#: boxes/generators/cardbox.py
msgid "regular"
msgstr ""
#. possible choice for fingerhole
#: boxes/generators/cardbox.py
msgid "deep"
msgstr ""
#. possible choice for fingerhole
#: boxes/generators/cardbox.py
msgid "custom"
msgstr ""
#. parameter name
#: boxes/generators/cardbox.py
msgid "fingerhole_depth"
msgstr ""
#. help for parameter fingerhole_depth
#: boxes/generators/cardbox.py
msgid "Depth of cutout if fingerhole is set to 'custom'. Disabled otherwise."
msgstr ""
#. parameter name
#: boxes/generators/cardbox.py
msgid "add_lidtopper"
msgstr ""
#. help for parameter add_lidtopper
#: boxes/generators/cardbox.py
msgid "Add an additional lid topper for optical reasons and customisation"
msgstr ""
#. name of box generator
#: boxes/generators/cardholder.py
msgid "CardHolder"
msgstr ""
#. description of CardHolder
#: boxes/generators/cardholder.py
msgid "Shelf for holding (multiple) piles of playing cards / notes"
msgstr ""
msgid "CardHolder Settings"
msgstr ""
#. help for parameter angle
#: boxes/generators/cardholder.py
msgid "backward angle of floor"
msgstr ""
#. parameter name
#: boxes/generators/cardholder.py
msgid "stackable"
msgstr ""
#. help for parameter stackable
#: boxes/generators/cardholder.py
msgid "make holders stackable"
msgstr ""
msgid "Settings for Grooved Edge"
msgstr ""
#. parameter name for Grooved
#: boxes/generators/cardholder.py
msgid "arc_angle"
msgstr ""
#. help for parameter arc_angle
#: boxes/generators/cardholder.py
msgid "the angle of arc cuts"
msgstr ""
#. parameter name for Grooved
#: boxes/generators/cardholder.py
msgid "gap"
msgstr ""
#. help for parameter gap
#: boxes/generators/cardholder.py
msgid "the gap between grooves (fraction of the edge length)"
msgstr ""
#. parameter name for Grooved
#: boxes/generators/cardholder.py
msgid "interleave"
msgstr ""
#. help for parameter interleave
#: boxes/generators/cardholder.py
msgid "alternate the direction of grooves"
msgstr ""
#. parameter name for Grooved
#: boxes/generators/cardholder.py
msgid "inverse"
msgstr ""
#. help for parameter inverse
#: boxes/generators/cardholder.py
msgid "invert the groove directions"
msgstr ""
#. parameter name for Grooved
#: boxes/generators/cardholder.py
msgid "margin"
msgstr ""
#. help for parameter margin
#: boxes/generators/cardholder.py
msgid ""
"minimum space left and right without grooves (fraction of the edge length)"
msgstr ""
#. help for parameter style
#: boxes/generators/cardholder.py
msgid "the style of grooves"
msgstr ""
#. possible choice for style
#: boxes/generators/cardholder.py
msgid "arc"
msgstr ""
#. possible choice for style
#: boxes/generators/cardholder.py
msgid "softarc"
msgstr ""
#. parameter name for Grooved
#: boxes/generators/cardholder.py
msgid "tri_angle"
msgstr ""
#. help for parameter tri_angle
#: boxes/generators/cardholder.py
msgid "the angle of triangular cuts"
msgstr ""
#. help for parameter width
#: boxes/generators/cardholder.py
msgid "the width of each groove (fraction of the edge length)"
msgstr ""
#. name of box generator
#: boxes/generators/castle.py
msgid "Castle"
msgstr ""
#. description of Castle
#: boxes/generators/castle.py
msgid "Castle tower with two walls"
msgstr ""
#. long description of Castle in markdown
#: boxes/generators/castle.py
msgid ""
"This was done as a table decoration. May be at some point in the future "
"someone will create a proper castle\n"
"with towers and gates and walls that can be attached in multiple "
"configurations."
msgstr ""
msgid "Castle Settings"
msgstr ""
#. name of box generator
#: boxes/generators/closedbox.py
msgid "ClosedBox"
msgstr ""
#. description of ClosedBox
#: boxes/generators/closedbox.py
msgid "Fully closed box"
msgstr ""
#. long description of ClosedBox in markdown
#: boxes/generators/closedbox.py
msgid ""
"This box is more of a building block than a finished item.\n"
"Use a vector graphics program (like Inkscape) to add holes or adjust the "
"base\n"
"plate.\n"
"\n"
"See BasedBox for variant with a base."
msgstr ""
msgid "ClosedBox Settings"
msgstr ""
#. name of box generator
#: boxes/generators/coffeecapsulesholder.py
msgid "CoffeeCapsuleHolder"
msgstr ""
#. description of CoffeeCapsuleHolder
#: boxes/generators/coffeecapsulesholder.py
msgid ""
"\n"
" Coffee capsule holder\n"
" "
msgstr ""
#. long description of CoffeeCapsuleHolder in markdown
#: boxes/generators/coffeecapsulesholder.py
msgid ""
"\n"
" You can store your coffee capsule near your espresso machine with this. "
"It works both vertically, or upside down under a shelf.\n"
msgstr ""
msgid "CoffeeCapsuleHolder Settings"
msgstr ""
#. parameter name
#: boxes/generators/coffeecapsulesholder.py
msgid "columns"
msgstr ""
#. help for parameter columns
#: boxes/generators/coffeecapsulesholder.py
msgid "Number of columns of capsules."
msgstr ""
#. parameter name
#: boxes/generators/coffeecapsulesholder.py
msgid "rows"
msgstr ""
#. help for parameter rows
#: boxes/generators/coffeecapsulesholder.py
msgid "Number of capsules by columns."
msgstr ""
#. parameter name
#: boxes/generators/coffeecapsulesholder.py
msgid "backplate"
msgstr ""
#. help for parameter backplate
#: boxes/generators/coffeecapsulesholder.py
msgid "True if a backplate should be generated."
msgstr ""
#. name of box generator
#: boxes/generators/coindisplay.py
msgid "CoinDisplay"
msgstr ""
#. description of CoinDisplay
#: boxes/generators/coindisplay.py
msgid "A showcase for a single coin"
msgstr ""
msgid "CoinDisplay Settings"
msgstr ""
#. parameter name
#: boxes/generators/coindisplay.py
msgid "coin_d"
msgstr ""
#. help for parameter coin_d
#: boxes/generators/coindisplay.py
msgid "The diameter of the coin in mm"
msgstr ""
#. parameter name
#: boxes/generators/coindisplay.py
msgid "coin_plate"
msgstr ""
#. help for parameter coin_plate
#: boxes/generators/coindisplay.py
msgid "The size of the coin plate"
msgstr ""
#. parameter name
#: boxes/generators/coindisplay.py
msgid "coin_showcase_h"
msgstr ""
#. help for parameter coin_showcase_h
#: boxes/generators/coindisplay.py
msgid "The height of the coin showcase piece"
msgstr ""
#. help for parameter angle
#: boxes/generators/coindisplay.py
msgid "The angle that the coin will tilt as"
msgstr ""
#. name of box generator
#: boxes/generators/concaveknob.py
msgid "ConcaveKnob"
msgstr ""
#. description of ConcaveKnob
#: boxes/generators/concaveknob.py
msgid "Round knob serrated outside for better gripping"
msgstr ""
msgid "ConcaveKnob Settings"
msgstr ""
#. help for parameter diameter
#: boxes/generators/concaveknob.py
msgid "Diameter of the knob (mm)"
msgstr ""
#. parameter name
#: boxes/generators/concaveknob.py
msgid "serrations"
msgstr ""
#. help for parameter serrations
#: boxes/generators/concaveknob.py
msgid "Number of serrations"
msgstr ""
#. parameter name
#: boxes/generators/concaveknob.py
msgid "rounded"
msgstr ""
#. help for parameter rounded
#: boxes/generators/concaveknob.py
msgid "Amount of circumference used for non convex parts"
msgstr ""
#. help for parameter angle
#: boxes/generators/concaveknob.py
msgid "Angle between convex and concave parts"
msgstr ""
#. parameter name
#: boxes/generators/concaveknob.py
msgid "bolthole"
msgstr ""
#. help for parameter bolthole
#: boxes/generators/concaveknob.py
msgid "Diameter of the bolt hole (mm)"
msgstr ""
#. parameter name
#: boxes/generators/concaveknob.py
msgid "dhole"
msgstr ""
#. help for parameter dhole
#: boxes/generators/concaveknob.py
msgid "D-Flat in fraction of the diameter"
msgstr ""
#. parameter name
#: boxes/generators/concaveknob.py
msgid "hexhead"
msgstr ""
#. help for parameter hexhead
#: boxes/generators/concaveknob.py
msgid "Width of the hex bolt head (mm)"
msgstr ""
#. name of box generator
#: boxes/generators/console.py
msgid "Console"
msgstr ""
#. description of Console
#: boxes/generators/console.py
msgid "Console with slanted panel"
msgstr ""
#. long description of Console in markdown
#: boxes/generators/console.py
msgid ""
"\n"
"\n"
"Console Arcade Stick\n"
"\n"
"\n"
"\n"
"\n"
"\n"
"Keyboard enclosure:\n"
msgstr ""
msgid "Console Settings"
msgstr ""
#. parameter name
#: boxes/generators/console.py
msgid "front_height"
msgstr ""
#. help for parameter front_height
#: boxes/generators/console.py
msgid "height of the front below the panel (in mm)"
msgstr ""
#. help for parameter angle
#: boxes/generators/console.py
msgid "angle of the front panel (90°=upright)"
msgstr ""
#. name of box generator
#: boxes/generators/console2.py
msgid "Console2"
msgstr ""
#. description of Console2
#: boxes/generators/console2.py
msgid "Console with slanted panel and service hatches"
msgstr ""
#. long description of Console2 in markdown
#: boxes/generators/console2.py
msgid ""
"\n"
"This box is designed as a housing for electronic projects. It has hatches "
"that can be re-opened with simple tools. It intentionally cannot be opened "
"with bare hands - if build with thin enough material.\n"
"\n"
"#### Caution\n"
"There is a chance that the latches of the back wall or the back wall itself "
"interfere with the front panel or it's mounting frame/lips. The generator "
"does not check for this. So depending on the variant chosen you might need "
"to make the box deeper (increase y parameter) or the panel angle steeper "
"(increase angle parameter) until there is enough room.\n"
"\n"
"It's also possible that the frame of the panel interferes with the floor if "
"the hi parameter is too small.\n"
"\n"
"#### Assembly instructions\n"
"The main body is easy to assemble by starting with the floor and then adding "
"the four walls and (if present) the top piece.\n"
"\n"
"If the back wall is removable you need to add the lips and latches. The U-"
"shaped clamps holding the latches in place need to be clued in place without "
"also gluing the latches themselves. Make sure the springs on the latches "
"point inwards and the angled ends point to the side walls as shown here:\n"
"\n"
"\n"
"\n"
"If the panel is removable you need to add the springs with the tabs to the "
"side lips. This photo shows the variant which has the panel glued to the "
"frame:\n"
"\n"
"\n"
"\n"
"If space is tight you may consider not gluing the cross pieces in place and "
"remove them after the glue-up. This may prevent the latches of the back wall "
"and the panel from interfering with each other.\n"
"\n"
"The variant using finger joints only has the two side lips without the cross "
"bars.\n"
"\n"
"#### Re-Opening\n"
"\n"
"The latches at the back wall lock in place when closed. To open them they "
"need to be pressed in and can then be moved aside.\n"
"\n"
"To remove the panel you have to press in the four tabs at the side. It is "
"easiest to push them in and then pull the panel up a little bit so the tabs "
"stay in.\n"
msgstr ""
msgid "Console2 Settings"
msgstr ""
#. parameter name
#: boxes/generators/console2.py
msgid "removable_backwall"
msgstr ""
#. help for parameter removable_backwall
#: boxes/generators/console2.py
msgid "have latches at the backwall"
msgstr ""
#. parameter name
#: boxes/generators/console2.py
msgid "removable_panel"
msgstr ""
#. help for parameter removable_panel
#: boxes/generators/console2.py
msgid "The panel is held by tabs and can be removed"
msgstr ""
#. parameter name
#: boxes/generators/console2.py
msgid "glued_panel"
msgstr ""
#. help for parameter glued_panel
#: boxes/generators/console2.py
msgid "the panel is glued and not held by finger joints"
msgstr ""
#. name of box generator
#: boxes/generators/crypticfont.py
msgid "CrypticFont"
msgstr ""
#. description of CrypticFont
#: boxes/generators/crypticfont.py
msgid "DESCRIPTION"
msgstr ""
msgid "CrypticFont Settings"
msgstr ""
#. parameter name
#: boxes/generators/crypticfont.py
msgid "ctext"
msgstr ""
#. help for parameter ctext
#: boxes/generators/crypticfont.py
msgid "text to render"
msgstr ""
msgid "Settings for the Cryptic Font"
msgstr ""
#. parameter name for CrypticFont
#: boxes/generators/crypticfont.py
msgid "ratio"
msgstr ""
#. help for parameter ratio
#: boxes/generators/crypticfont.py
msgid "ratio of height to width"
msgstr ""
#. parameter name for CrypticFont
#: boxes/generators/crypticfont.py
msgid "spacing"
msgstr ""
#. help for parameter width
#: boxes/generators/crypticfont.py
msgid "width of the glyphs in mm"
msgstr ""
#. name of box generator
#: boxes/generators/desksign.py
msgid "Desksign"
msgstr ""
#. description of Desksign
#: boxes/generators/desksign.py
msgid "Simple diagonal plate with stands to show name or message"
msgstr ""
#. long description of Desksign in markdown
#: boxes/generators/desksign.py
msgid ""
"Text to be engraved can be generated by inputting the label and fontsize "
"fields.\n"
" height represents the area that can be used for writing "
"text, does not match the actual\n"
" height when standing. Generated text is put in the center. "
"Currently only a single\n"
" line of text is supported."
msgstr ""
msgid "Desksign Settings"
msgstr ""
#. help for parameter width
#: boxes/generators/desksign.py
msgid "plate width in mm (excluding holes)"
msgstr ""
#. help for parameter height
#: boxes/generators/desksign.py
msgid "plate height in mm"
msgstr ""
#. help for parameter angle
#: boxes/generators/desksign.py
msgid "plate angle in degrees (90 is vertical)"
msgstr ""
#. parameter name
#: boxes/generators/desksign.py
msgid "label"
msgstr ""
#. help for parameter label
#: boxes/generators/desksign.py
msgid "optional text to engrave (leave blank to omit)"
msgstr ""
#. parameter name
#: boxes/generators/desksign.py
msgid "fontsize"
msgstr ""
#. help for parameter fontsize
#: boxes/generators/desksign.py
msgid "height of text"
msgstr ""
#. parameter name
#: boxes/generators/desksign.py
msgid "feet"
msgstr ""
#. help for parameter feet
#: boxes/generators/desksign.py
msgid "add raised feet"
msgstr ""
#. parameter name
#: boxes/generators/desksign.py
msgid "mirror"
msgstr ""
#. help for parameter mirror
#: boxes/generators/desksign.py
msgid ""
"mirrors one of the stand so the same side of the material can be placed on "
"the outside"
msgstr ""
#. name of box generator
#: boxes/generators/dicebox.py
msgid "DiceBox"
msgstr ""
#. description of DiceBox
#: boxes/generators/dicebox.py
msgid "Box with lid and integrated hinge for storing dice"
msgstr ""
msgid "DiceBox Settings"
msgstr ""
#. parameter name
#: boxes/generators/dicebox.py
msgid "lidheight"
msgstr ""
#. help for parameter lidheight
#: boxes/generators/dicebox.py
msgid "height of lid in mm"
msgstr ""
#. parameter name
#: boxes/generators/dicebox.py
msgid "hex_hole_corner_radius"
msgstr ""
#. help for parameter hex_hole_corner_radius
#: boxes/generators/dicebox.py
msgid "The corner radius of the hexagonal dice holes, in mm"
msgstr ""
#. parameter name
#: boxes/generators/dicebox.py
msgid "magnet_diameter"
msgstr ""
#. help for parameter magnet_diameter
#: boxes/generators/dicebox.py
msgid "The diameter of magnets for holding the box closed, in mm"
msgstr ""
msgid "Settings for Chest Hinges"
msgstr ""
#. parameter name for ChestHinge
#: boxes/generators/dicebox.py
msgid "finger_joints_on_box"
msgstr ""
#. help for parameter finger_joints_on_box
#: boxes/generators/dicebox.py
msgid "whether to include finger joints on the edge with the box"
msgstr ""
#. parameter name for ChestHinge
#: boxes/generators/dicebox.py
msgid "finger_joints_on_lid"
msgstr ""
#. help for parameter finger_joints_on_lid
#: boxes/generators/dicebox.py
msgid "whether to include finger joints on the edge with the lid"
msgstr ""
#. parameter name for ChestHinge
#: boxes/generators/dicebox.py
msgid "hinge_strength"
msgstr ""
#. parameter name for ChestHinge
#: boxes/generators/dicebox.py
msgid "pin_height"
msgstr ""
#. help for parameter pin_height
#: boxes/generators/dicebox.py
msgid "radius of the disc rotating in the hinge (multiples of thickness)"
msgstr ""
#. name of box generator
#: boxes/generators/dinrailbox.py
msgid "DinRailBox"
msgstr ""
#. description of DinRailBox
#: boxes/generators/dinrailbox.py
msgid "Box for DIN rail used in electrical junction boxes"
msgstr ""
msgid "DinRailBox Settings"
msgstr ""
#. parameter name
#: boxes/generators/dinrailbox.py
msgid "rail_width"
msgstr ""
#. help for parameter rail_width
#: boxes/generators/dinrailbox.py
msgid "width of the rail (typically 35 or 15mm)"
msgstr ""
#. parameter name
#: boxes/generators/dinrailbox.py
msgid "rail_offset"
msgstr ""
#. help for parameter rail_offset
#: boxes/generators/dinrailbox.py
msgid "offset of the rail from the middle of the box (in mm)"
msgstr ""
#. name of box generator
#: boxes/generators/discrack.py
msgid "DiscRack"
msgstr ""
#. description of DiscRack
#: boxes/generators/discrack.py
msgid "A rack for storing disk-shaped objects vertically next to each other"
msgstr ""
msgid "DiscRack Settings"
msgstr ""
#. parameter name
#: boxes/generators/discrack.py
msgid "disc_diameter"
msgstr ""
#. help for parameter disc_diameter
#: boxes/generators/discrack.py
msgid "Disc diameter in mm"
msgstr ""
#. parameter name
#: boxes/generators/discrack.py
msgid "disc_thickness"
msgstr ""
#. help for parameter disc_thickness
#: boxes/generators/discrack.py
msgid "Thickness of the discs in mm"
msgstr ""
#. parameter name
#: boxes/generators/discrack.py
msgid "lower_factor"
msgstr ""
#. help for parameter lower_factor
#: boxes/generators/discrack.py
msgid "Position of the lower rack grids along the radius"
msgstr ""
#. parameter name
#: boxes/generators/discrack.py
msgid "rear_factor"
msgstr ""
#. help for parameter rear_factor
#: boxes/generators/discrack.py
msgid "Position of the rear rack grids along the radius"
msgstr ""
#. parameter name
#: boxes/generators/discrack.py
msgid "disc_outset"
msgstr ""
#. help for parameter disc_outset
#: boxes/generators/discrack.py
msgid "Additional space kept between the disks and the outbox of the rack"
msgstr ""
#. help for parameter angle
#: boxes/generators/discrack.py
msgid "Backwards slant of the rack"
msgstr ""
#. name of box generator
#: boxes/generators/dispenser.py
msgid "Dispenser"
msgstr ""
#. description of Dispenser
#: boxes/generators/dispenser.py
msgid "Dispenser for stackable (flat) items of same size"
msgstr ""
#. long description of Dispenser in markdown
#: boxes/generators/dispenser.py
msgid ""
"Set *bottomheight* to 0 for a wall mounting variant.\n"
"Please add mounting holes yourself."
msgstr ""
msgid "Dispenser Settings"
msgstr ""
#. parameter name
#: boxes/generators/dispenser.py
msgid "slotheight"
msgstr ""
#. help for parameter slotheight
#: boxes/generators/dispenser.py
msgid "height of the dispenser slot / items (in mm)"
msgstr ""
#. parameter name
#: boxes/generators/dispenser.py
msgid "bottomheight"
msgstr ""
#. help for parameter bottomheight
#: boxes/generators/dispenser.py
msgid "height underneath the dispenser (in mm)"
msgstr ""
#. parameter name
#: boxes/generators/dispenser.py
msgid "sideedges"
msgstr ""
#. help for parameter sideedges
#: boxes/generators/dispenser.py
msgid "edges used for holding the front panels and back"
msgstr ""
#. name of box generator
#: boxes/generators/display.py
msgid "Display"
msgstr ""
#. description of Display
#: boxes/generators/display.py
msgid "Display for flyers or leaflets"
msgstr ""
msgid "Display Settings"
msgstr ""
#. help for parameter radius
#: boxes/generators/display.py
msgid "radius of the corners in mm"
msgstr ""
#. help for parameter angle
#: boxes/generators/display.py
msgid "greater than zero for top wider than bottom"
msgstr ""
#. name of box generator
#: boxes/generators/displaycase.py
msgid "DisplayCase"
msgstr ""
#. description of DisplayCase
#: boxes/generators/displaycase.py
msgid ""
"Fully closed box intended to be cut from transparent acrylics and to serve "
"as a display case."
msgstr ""
msgid "DisplayCase Settings"
msgstr ""
#. parameter name
#: boxes/generators/displaycase.py
msgid "overhang"
msgstr ""
#. help for parameter overhang
#: boxes/generators/displaycase.py
msgid "overhang for joints in mm"
msgstr ""
#. name of box generator
#: boxes/generators/displayshelf.py
msgid "DisplayShelf"
msgstr ""
#. description of DisplayShelf
#: boxes/generators/displayshelf.py
msgid "Shelf with slanted floors"
msgstr ""
msgid "DisplayShelf Settings"
msgstr ""
#. parameter name
#: boxes/generators/displayshelf.py
msgid "num"
msgstr ""
#. help for parameter num
#: boxes/generators/displayshelf.py
msgid "number of shelves"
msgstr ""
#. parameter name
#: boxes/generators/displayshelf.py
msgid "front_wall_height"
msgstr ""
#. help for parameter front_wall_height
#: boxes/generators/displayshelf.py
msgid "height of front walls"
msgstr ""
#. help for parameter angle
#: boxes/generators/displayshelf.py
msgid "angle of floors (negative values for slanting backwards)"
msgstr ""
#. parameter name
#: boxes/generators/displayshelf.py
msgid "include_back"
msgstr ""
#. help for parameter include_back
#: boxes/generators/displayshelf.py
msgid "Include panel on the back of the shelf"
msgstr ""
#. parameter name
#: boxes/generators/displayshelf.py
msgid "slope_top"
msgstr ""
#. help for parameter slope_top
#: boxes/generators/displayshelf.py
msgid "Slope the sides and the top by front wall height"
msgstr ""
#. parameter name
#: boxes/generators/displayshelf.py
msgid "divider_wall_height"
msgstr ""
#. help for parameter divider_wall_height
#: boxes/generators/displayshelf.py
msgid "height of divider walls"
msgstr ""
#. name of box generator
#: boxes/generators/dividertray.py
msgid "DividerTray"
msgstr ""
#. description of DividerTray
#: boxes/generators/dividertray.py
msgid "Divider tray - rows and dividers"
msgstr ""
#. long description of DividerTray in markdown
#: boxes/generators/dividertray.py
msgid ""
"\n"
"Adding '0:' at the start of the sy parameter adds a slot at the very back. "
"Adding ':0' at the end of sy adds a slot meeting the bottom at the very "
"front. This is especially useful if slot angle is set above zero.\n"
"\n"
"There are 4 different sets of dividers rendered:\n"
"\n"
"* With asymmetric tabs so the tabs fit on top of each other\n"
"* With tabs of half wall thickness that can go side by side\n"
"* With tabs of a full wall thickness\n"
"* One single divider spanning across all columns\n"
"\n"
"You will likely need to cut each of the dividers you want multiple times.\n"
msgstr ""
msgid "DividerTray Settings"
msgstr ""
#. parameter name
#: boxes/generators/dividertray.py
msgid "notches_in_wall"
msgstr ""
#. help for parameter notches_in_wall
#: boxes/generators/dividertray.py
msgid "generate the same notches on the walls that are on the dividers"
msgstr ""
#. parameter name
#: boxes/generators/dividertray.py
msgid "left_wall"
msgstr ""
#. help for parameter left_wall
#: boxes/generators/dividertray.py
msgid "generate wall on the left side"
msgstr ""
#. parameter name
#: boxes/generators/dividertray.py
msgid "right_wall"
msgstr ""
#. help for parameter right_wall
#: boxes/generators/dividertray.py
msgid "generate wall on the right side"
msgstr ""
#. parameter name
#: boxes/generators/dividertray.py
msgid "bottom"
msgstr ""
#. help for parameter bottom
#: boxes/generators/dividertray.py
msgid "generate wall on the bottom"
msgstr ""
#. help for parameter handle
#: boxes/generators/dividertray.py
msgid "add handle to the bottom"
msgstr ""
msgid "Settings for Divider Slots"
msgstr ""
#. help for parameter angle
#: boxes/generators/dividertray.py
msgid "angle at which slots are generated, in degrees. 0° is vertical."
msgstr ""
#. help for parameter depth
#: boxes/generators/dividertray.py
msgid "depth of the slot in mm"
msgstr ""
#. parameter name for Slot
#: boxes/generators/dividertray.py
msgid "extra_slack"
msgstr ""
#. help for parameter extra_slack
#: boxes/generators/dividertray.py
msgid ""
"extra slack (in addition to thickness and kerf) to help insert dividers in mm"
msgstr ""
#. help for parameter radius
#: boxes/generators/dividertray.py
msgid "radius of the slot entrance in mm"
msgstr ""
msgid "Settings for Notches on the Dividers"
msgstr ""
#. parameter name for Notch
#: boxes/generators/dividertray.py
msgid "lower_radius"
msgstr ""
#. parameter name for Notch
#: boxes/generators/dividertray.py
msgid "upper_radius"
msgstr ""
msgid "Settings for Dividers"
msgstr ""
#. parameter name for Divider
#: boxes/generators/dividertray.py
msgid "bottom_margin"
msgstr ""
#. help for parameter bottom_margin
#: boxes/generators/dividertray.py
msgid "margin between box's bottom and divider's in mm"
msgstr ""
#. help for parameter play
#: boxes/generators/dividertray.py
msgid "play to avoid them clamping onto the walls (in multiples of thickness)"
msgstr ""
#. name of box generator
#: boxes/generators/doubleflexdoorbox.py
msgid "DoubleFlexDoorBox"
msgstr ""
#. description of DoubleFlexDoorBox
#: boxes/generators/doubleflexdoorbox.py
msgid "Box with two part lid with living hinges and round corners"
msgstr ""
msgid "DoubleFlexDoorBox Settings"
msgstr ""
#. help for parameter radius
#: boxes/generators/doubleflexdoorbox.py
msgid "Radius of the latch in mm"
msgstr ""
#. parameter name
#: boxes/generators/doubleflexdoorbox.py
msgid "latchsize"
msgstr ""
#. help for parameter latchsize
#: boxes/generators/doubleflexdoorbox.py
msgid "size of latch in multiples of thickness"
msgstr ""
#. name of box generator
#: boxes/generators/drillbox.py
msgid "DrillBox"
msgstr ""
#. description of DrillBox
#: boxes/generators/drillbox.py
msgid "A parametrized box for drills"
msgstr ""
#. long description of DrillBox in markdown
#: boxes/generators/drillbox.py
msgid " "
msgstr ""
msgid "DrillBox Settings"
msgstr ""
#. parameter name
#: boxes/generators/drillbox.py
msgid "holes"
msgstr ""
#. help for parameter holes
#: boxes/generators/drillbox.py
msgid "Number of holes for each size"
msgstr ""
#. parameter name
#: boxes/generators/drillbox.py
msgid "firsthole"
msgstr ""
#. help for parameter firsthole
#: boxes/generators/drillbox.py
msgid "Smallest hole"
msgstr ""
#. parameter name
#: boxes/generators/drillbox.py
msgid "holeincrement"
msgstr ""
#. help for parameter holeincrement
#: boxes/generators/drillbox.py
msgid "increment between holes"
msgstr ""
msgid "Settings for RoundedTriangleEdge"
msgstr ""
#. help for parameter height
#: boxes/generators/drillbox.py
msgid "height above the wall"
msgstr ""
#. parameter name for RoundedTriangleEdge
#: boxes/generators/drillbox.py
msgid "r_hole"
msgstr ""
#. help for parameter r_hole
#: boxes/generators/drillbox.py
msgid "radius of hole"
msgstr ""
#. help for parameter radius
#: boxes/generators/drillbox.py
msgid "radius of top corner"
msgstr ""
#. help for parameter outset
#: boxes/generators/drillbox.py
msgid ""
"extend the triangle along the length of the edge (multiples of thickness)"
msgstr ""
msgid "Settings for Mounting Edge"
msgstr ""
#. parameter name for Mounting
#: boxes/generators/drillbox.py
msgid "d_head"
msgstr ""
#. help for parameter d_head
#: boxes/generators/drillbox.py
msgid "head diameter of mounting screw (in mm)"
msgstr ""
#. parameter name for Mounting
#: boxes/generators/drillbox.py
msgid "d_shaft"
msgstr ""
#. help for parameter d_shaft
#: boxes/generators/drillbox.py
msgid "shaft diameter of mounting screw (in mm)"
msgstr ""
#. help for parameter margin
#: boxes/generators/drillbox.py
msgid ""
"minimum space left and right without holes (fraction of the edge length)"
msgstr ""
#. help for parameter num
#: boxes/generators/drillbox.py
msgid "number of mounting holes (integer)"
msgstr ""
#. parameter name for Mounting
#: boxes/generators/drillbox.py
msgid "side"
msgstr ""
#. help for parameter side
#: boxes/generators/drillbox.py
msgid "side of box (not all valid configurations make sense...)"
msgstr ""
#. possible choice for side
#: boxes/generators/drillbox.py
msgid "back"
msgstr ""
#. help for parameter style
#: boxes/generators/drillbox.py
msgid "edge style"
msgstr ""
#. possible choice for style
#: boxes/generators/drillbox.py
msgid "straight edge, within"
msgstr ""
#. possible choice for style
#: boxes/generators/drillbox.py
msgid "straight edge, extended"
msgstr ""
#. possible choice for style
#: boxes/generators/drillbox.py
msgid "mounting tab"
msgstr ""
#. name of box generator
#: boxes/generators/drillstand.py
msgid "DrillStand"
msgstr ""
#. description of DrillStand
#: boxes/generators/drillstand.py
msgid "Box for drills with each compartment of a different height"
msgstr ""
#. long description of DrillStand in markdown
#: boxes/generators/drillstand.py
msgid ""
"Note: `sh` gives the height of the rows front to back. It though should have "
"the same number of entries as `sy`. These heights are the one on the left "
"side and increase throughout the row. To have each compartment a bit higher "
"than the previous one the steps in `sh` should be a bit bigger than "
"`extra_height`.\n"
"\n"
"Assembly:\n"
"\n"
"\n"
"\n"
"Start with putting the slots of the inner walls together. Be especially "
"careful with adding the bottom. It is always asymmetrical and flush with the "
"right/lower side while being a little short on the left/higher side to not "
"protrude into the side wall.\n"
"\n"
"| | |\n"
"| ---- | ---- |\n"
"|  | !"
"[Assembly bottom](static/samples/DrillStand-assembly-2.jpg) |\n"
"| Then add the front and the back wall. | Add the very left and right walls "
"last. |\n"
"|  | !"
"[Assembly side walls](static/samples/DrillStand-assembly-4.jpg) |\n"
msgstr ""
msgid "DrillStand Settings"
msgstr ""
#. parameter name
#: boxes/generators/drillstand.py
msgid "extra_height"
msgstr ""
#. help for parameter extra_height
#: boxes/generators/drillstand.py
msgid "height difference left to right"
msgstr ""
#. name of box generator
#: boxes/generators/electronicsbox.py
msgid "ElectronicsBox"
msgstr ""
#. description of ElectronicsBox
#: boxes/generators/electronicsbox.py
msgid "Closed box with screw on top and mounting holes"
msgstr ""
msgid "ElectronicsBox Settings"
msgstr ""
#. help for parameter triangle
#: boxes/generators/electronicsbox.py
msgid "Sides of the triangles holding the lid in mm"
msgstr ""
#. parameter name
#: boxes/generators/electronicsbox.py
msgid "d1"
msgstr ""
#. help for parameter d1
#: boxes/generators/electronicsbox.py
msgid "Diameter of the inner lid screw holes in mm"
msgstr ""
#. parameter name
#: boxes/generators/electronicsbox.py
msgid "d2"
msgstr ""
#. help for parameter d2
#: boxes/generators/electronicsbox.py
msgid "Diameter of the lid screw holes in mm"
msgstr ""
#. parameter name
#: boxes/generators/electronicsbox.py
msgid "d3"
msgstr ""
#. help for parameter d3
#: boxes/generators/electronicsbox.py
msgid "Diameter of the mounting screw holes in mm"
msgstr ""
#. parameter name
#: boxes/generators/electronicsbox.py
msgid "outsidemounts"
msgstr ""
#. help for parameter outsidemounts
#: boxes/generators/electronicsbox.py
msgid "Add external mounting points"
msgstr ""
#. parameter name
#: boxes/generators/electronicsbox.py
msgid "holedist"
msgstr ""
#. help for parameter holedist
#: boxes/generators/electronicsbox.py
msgid "Distance of the screw holes from the wall in mm"
msgstr ""
#. name of box generator
#: boxes/generators/eurorackskiff.py
msgid "EuroRackSkiff"
msgstr ""
#. description of EuroRackSkiff
#: boxes/generators/eurorackskiff.py
msgid "3U Height case with adjustable width and height and included rails"
msgstr ""
msgid "EuroRackSkiff Settings"
msgstr ""
#. parameter name
#: boxes/generators/eurorackskiff.py
msgid "hp"
msgstr ""
#. help for parameter hp
#: boxes/generators/eurorackskiff.py
msgid "Width of the case in HP"
msgstr ""
#. name of box generator
#: boxes/generators/fanhole.py
msgid "FanHole"
msgstr ""
#. description of FanHole
#: boxes/generators/fanhole.py
msgid "Hole pattern for mounting a fan"
msgstr ""
msgid "FanHole Settings"
msgstr ""
#. help for parameter diameter
#: boxes/generators/fanhole.py
msgid "diameter of the fan hole"
msgstr ""
#. parameter name
#: boxes/generators/fanhole.py
msgid "mounting_holes"
msgstr ""
#. help for parameter mounting_holes
#: boxes/generators/fanhole.py
msgid "diameter of the fan mounting holes"
msgstr ""
#. parameter name
#: boxes/generators/fanhole.py
msgid "mounting_holes_inset"
msgstr ""
#. help for parameter mounting_holes_inset
#: boxes/generators/fanhole.py
msgid "distance of the fan mounting holes from the outside"
msgstr ""
#. parameter name
#: boxes/generators/fanhole.py
msgid "arms"
msgstr ""
#. help for parameter arms
#: boxes/generators/fanhole.py
msgid "number of arms"
msgstr ""
#. parameter name
#: boxes/generators/fanhole.py
msgid "inner_disc"
msgstr ""
#. help for parameter inner_disc
#: boxes/generators/fanhole.py
msgid "relative size of the inner disc"
msgstr ""
#. help for parameter style
#: boxes/generators/fanhole.py
msgid "Style of the fan hole"
msgstr ""
#. possible choice for style
#: boxes/generators/fanhole.py
msgid "CW Swirl"
msgstr ""
#. possible choice for style
#: boxes/generators/fanhole.py
msgid "CCW Swirl"
msgstr ""
#. possible choice for style
#: boxes/generators/fanhole.py
msgid "Hole"
msgstr ""
#. name of box generator
#: boxes/generators/filamentspool.py
msgid "FilamentSpool"
msgstr ""
#. description of FilamentSpool
#: boxes/generators/filamentspool.py
msgid "A two part spool for 3D printing filament"
msgstr ""
#. long description of FilamentSpool in markdown
#: boxes/generators/filamentspool.py
msgid ""
"\n"
"Use small nails to properly align the pieces of the bayonet latch. Glue the "
"parts of the bayonet latch before assembling the \"axle\". The inner parts "
"go at the side and the outer parts at the inside of the axle.\n"
""
msgstr ""
msgid "FilamentSpool Settings"
msgstr ""
#. parameter name
#: boxes/generators/filamentspool.py
msgid "outer_diameter"
msgstr ""
#. help for parameter outer_diameter
#: boxes/generators/filamentspool.py
msgid "diameter of the flanges"
msgstr ""
#. parameter name
#: boxes/generators/filamentspool.py
msgid "inner_diameter"
msgstr ""
#. help for parameter inner_diameter
#: boxes/generators/filamentspool.py
msgid "diameter of the center part"
msgstr ""
#. parameter name
#: boxes/generators/filamentspool.py
msgid "axle_diameter"
msgstr ""
#. help for parameter axle_diameter
#: boxes/generators/filamentspool.py
msgid "diameter of the axle hole"
msgstr ""
#. parameter name
#: boxes/generators/filamentspool.py
msgid "sides"
msgstr ""
#. help for parameter sides
#: boxes/generators/filamentspool.py
msgid "number of pieces for the center part"
msgstr ""
#. name of box generator
#: boxes/generators/filltest.py
msgid "FillTest"
msgstr ""
#. description of FillTest
#: boxes/generators/filltest.py
msgid "Piece for testing different settings for hole filling"
msgstr ""
msgid "FillTest Settings"
msgstr ""
#. description of FlexBox
#: boxes/generators/flexbox.py
msgid "Box with living hinge and round corners"
msgstr ""
msgid "FlexBox Settings"
msgstr ""
#. name of box generator
#: boxes/generators/flexbox2.py
msgid "FlexBox2"
msgstr ""
#. description of FlexBox2
#: boxes/generators/flexbox2.py
msgid "Box with living hinge and top corners rounded"
msgstr ""
msgid "FlexBox2 Settings"
msgstr ""
#. help for parameter radius
#: boxes/generators/flexbox2.py
msgid "Radius of the corners in mm"
msgstr ""
#. name of box generator
#: boxes/generators/flexbox3.py
msgid "FlexBox3"
msgstr ""
#. description of FlexBox3
#: boxes/generators/flexbox3.py
msgid "Box with living hinge"
msgstr ""
msgid "FlexBox3 Settings"
msgstr ""
#. parameter name
#: boxes/generators/flexbox3.py
msgid "z"
msgstr ""
#. help for parameter z
#: boxes/generators/flexbox3.py
msgid "height of the box"
msgstr ""
#. help for parameter h
#: boxes/generators/flexbox3.py
msgid "height of the lid"
msgstr ""
#. help for parameter radius
#: boxes/generators/flexbox3.py
msgid "radius of the lids living hinge"
msgstr ""
#. help for parameter c
#: boxes/generators/flexbox3.py
msgid "clearance of the lid"
msgstr ""
#. name of box generator
#: boxes/generators/flexbox4.py
msgid "FlexBox4"
msgstr ""
#. description of FlexBox4
#: boxes/generators/flexbox4.py
msgid "Box with living hinge and left corners rounded"
msgstr ""
msgid "FlexBox4 Settings"
msgstr ""
#. name of box generator
#: boxes/generators/flexbox5.py
msgid "FlexBox5"
msgstr ""
msgid "FlexBox5 Settings"
msgstr ""
#. parameter name
#: boxes/generators/flexbox5.py
msgid "top_diameter"
msgstr ""
#. help for parameter top_diameter
#: boxes/generators/flexbox5.py
msgid "diameter at the top"
msgstr ""
#. parameter name
#: boxes/generators/flexbox5.py
msgid "bottom_diameter"
msgstr ""
#. help for parameter bottom_diameter
#: boxes/generators/flexbox5.py
msgid "diameter at the bottom"
msgstr ""
#. name of box generator
#: boxes/generators/flextest.py
msgid "FlexTest"
msgstr ""
#. description of FlexTest
#: boxes/generators/flextest.py
msgid "Piece for testing different flex settings"
msgstr ""
msgid "FlexTest Settings"
msgstr ""
#. name of box generator
#: boxes/generators/flextest2.py
msgid "FlexTest2"
msgstr ""
#. description of FlexTest2
#: boxes/generators/flextest2.py
msgid "Piece for testing 2D flex settings"
msgstr ""
msgid "FlexTest2 Settings"
msgstr ""
#. parameter name
#: boxes/generators/flextest2.py
msgid "fw"
msgstr ""
#. help for parameter fw
#: boxes/generators/flextest2.py
msgid "distance of flex cuts in multiples of thickness"
msgstr ""
#. name of box generator
#: boxes/generators/folder.py
msgid "Folder"
msgstr ""
#. description of Folder
#: boxes/generators/folder.py
msgid "Book cover with flex for the spine"
msgstr ""
msgid "Folder Settings"
msgstr ""
#. parameter name
#: boxes/generators/folder.py
msgid "r"
msgstr ""
#. name of box generator
#: boxes/generators/frontpanel.py
msgid "FrontPanel"
msgstr ""
#. description of FrontPanel
#: boxes/generators/frontpanel.py
msgid "Mounting Holes and cutouts for all your holy needs."
msgstr ""
#. long description of FrontPanel in markdown
#: boxes/generators/frontpanel.py
msgid ""
"\n"
"<script type=\"module\" src=\"https://md-block.verou.me/md-block.js\"></"
"script>\n"
"<md-block>\n"
"\n"
"\n"
"This will help you create front (and side and top) panels for your\n"
"boxes that are pre-configured for all the bits and bobs you'd like to\n"
"install\n"
"\n"
" The layout can create several types of holes including rectangles,\n"
" circles and mounting holes. The default shows an example layout with "
"all\n"
" currently supported objects.\n"
"\n"
"#### \n"
"`rect x y w h [cr=0] [cx=True] [cy=True]`\n"
"\n"
" x: x position\n"
" y: y position\n"
" w: width\n"
" h: height\n"
" cr: optional, Corner radius, default=0\n"
" cx: optional, Center x. the x position denotes the center of the "
"rectangle.\n"
" accepts t, T, 1, or other true-like values.\n"
" cy: optional, Center y. the y position denotes the center of the "
"rectangle.\n"
"\n"
"#### outline\n"
"`rect w h`\n"
"\n"
" w: width\n"
" h: height\n"
"\n"
"`outline` has a special meaning: You can create multiple panel outlines with "
"one command. \n"
"This has the effect of making it easy to manage all the holes on all the "
"sides of\n"
"your boxes.\n"
"\n"
"#### circle\n"
"`circle x y r`\n"
"\n"
" x: x position\n"
" y: y position\n"
" r: radius\n"
"\n"
"#### mountinghole\n"
"mountinghole x y d_shaft [d_head=0] [angle=0]\n"
"\n"
" x: x position\n"
" y: y position\n"
" d_shaft: diameter of the shaft part of the mounting hole\n"
" d_head: optional. diameter of the head\n"
" angle: optional. angle of the mounting hole\n"
"\n"
"#### text\n"
"`text x y size \"some text\" [angle=0] [align=bottom|left]`\n"
"\n"
" x: x position\n"
" y: y position\n"
" size: size, in mm\n"
" text: text to render. This *must* be in quotation marks\n"
" angle: angle (in degrees)\n"
" align: string with combinations of (top|middle|bottom) and (left|center|"
"right),\n"
" separated by '|'. Default is 'bottom|left'\n"
"\n"
"\n"
"\n"
"#### nema\n"
"`nema x y size [screwhole_size=0]`\n"
"\n"
" x: x position (center of shaft)\n"
" y: y position (center of shaft)\n"
" size: nema size. One of [8, 11, 14, 16, 17, 23, 24, 34, 42]\n"
" screw: screw size, in mm. Optional. Default=0, which means the default "
"size\n"
"</md-block>\n"
" "
msgstr ""
msgid "FrontPanel Settings"
msgstr ""
#. parameter name
#: boxes/generators/frontpanel.py
msgid "layout"
msgstr ""
#. name of box generator
#: boxes/generators/gear.py
msgid "Gears"
msgstr ""
msgid "Gears Settings"
msgstr ""
#. parameter name
#: boxes/generators/gear.py
msgid "teeth1"
msgstr ""
#. help for parameter teeth1
#: boxes/generators/gear.py
msgid "number of teeth"
msgstr ""
#. parameter name
#: boxes/generators/gear.py
msgid "shaft1"
msgstr ""
#. help for parameter shaft1
#: boxes/generators/gear.py
msgid "diameter of the shaft 1"
msgstr ""
#. parameter name
#: boxes/generators/gear.py
msgid "dpercentage1"
msgstr ""
#. help for parameter dpercentage1
#: boxes/generators/gear.py
msgid "percent of the D section of shaft 1 (100 for round shaft)"
msgstr ""
#. parameter name
#: boxes/generators/gear.py
msgid "teeth2"
msgstr ""
#. help for parameter teeth2
#: boxes/generators/gear.py
msgid "number of teeth in the other size of gears"
msgstr ""
#. parameter name
#: boxes/generators/gear.py
msgid "shaft2"
msgstr ""
#. help for parameter shaft2
#: boxes/generators/gear.py
msgid "diameter of the shaft2 (zero for same as shaft 1)"
msgstr ""
#. parameter name
#: boxes/generators/gear.py
msgid "dpercentage2"
msgstr ""
#. help for parameter dpercentage2
#: boxes/generators/gear.py
msgid "percent of the D section of shaft 1 (0 for same as shaft 1)"
msgstr ""
#. parameter name
#: boxes/generators/gear.py
msgid "modulus"
msgstr ""
#. help for parameter modulus
#: boxes/generators/gear.py
msgid "size of teeth (diameter / #teeth) in mm"
msgstr ""
#. parameter name
#: boxes/generators/gear.py
msgid "pressure_angle"
msgstr ""
#. help for parameter pressure_angle
#: boxes/generators/gear.py
msgid "angle of the teeth touching (in degrees)"
msgstr ""
#. parameter name
#: boxes/generators/gear.py
msgid "profile_shift"
msgstr ""
#. help for parameter profile_shift
#: boxes/generators/gear.py
msgid "in percent of the modulus"
msgstr ""
#. name of box generator
#: boxes/generators/gearbox.py
msgid "GearBox"
msgstr ""
#. description of GearBox
#: boxes/generators/gearbox.py
msgid "Gearbox with multiple identical stages"
msgstr ""
msgid "GearBox Settings"
msgstr ""
#. help for parameter teeth1
#: boxes/generators/gearbox.py
msgid "number of teeth on ingoing shaft"
msgstr ""
#. help for parameter teeth2
#: boxes/generators/gearbox.py
msgid "number of teeth on outgoing shaft"
msgstr ""
#. help for parameter modulus
#: boxes/generators/gearbox.py
msgid "modulus of the teeth in mm"
msgstr ""
#. parameter name
#: boxes/generators/gearbox.py
msgid "shaft"
msgstr ""
#. help for parameter shaft
#: boxes/generators/gearbox.py
msgid "diameter of the shaft"
msgstr ""
#. parameter name
#: boxes/generators/gearbox.py
msgid "stages"
msgstr ""
#. help for parameter stages
#: boxes/generators/gearbox.py
msgid "number of stages in the gear reduction"
msgstr ""
#. name of box generator
#: boxes/generators/gridfinitybase.py
msgid "GridfinityBase"
msgstr ""
#. description of GridfinityBase
#: boxes/generators/gridfinitybase.py
msgid "A parameterized Gridfinity base"
msgstr ""
#. long description of GridfinityBase in markdown
#: boxes/generators/gridfinitybase.py
msgid ""
"This is a configurable gridfinity base. This\n"
" design is based on \n"
" <a href=\"https://www.youtube.com/watch?app=desktop&v=ra_9zU-mnl8\">Zach "
"Freedman's Gridfinity system</a>"
msgstr ""
msgid "GridfinityBase Settings"
msgstr ""
#. help for parameter x
#: boxes/generators/gridfinitybase.py
msgid "number of grids in X direction"
msgstr ""
#. help for parameter y
#: boxes/generators/gridfinitybase.py
msgid "number of grids in Y direction"
msgstr ""
#. help for parameter h
#: boxes/generators/gridfinitybase.py
msgid "height of sidewalls of the tray (mm)"
msgstr ""
#. parameter name
#: boxes/generators/gridfinitybase.py
msgid "m"
msgstr ""
#. help for parameter m
#: boxes/generators/gridfinitybase.py
msgid ""
"Extra margin around the gridfinity base to allow it to drop into the carrier "
"(mm)"
msgstr ""
#. parameter name
#: boxes/generators/gridfinitybase.py
msgid "pitch"
msgstr ""
#. help for parameter pitch
#: boxes/generators/gridfinitybase.py
msgid "The Gridfinity pitch, in mm. Should always be 42."
msgstr ""
#. parameter name
#: boxes/generators/gridfinitybase.py
msgid "opening"
msgstr ""
#. help for parameter opening
#: boxes/generators/gridfinitybase.py
msgid "The cutout for each grid opening. Typical is 38."
msgstr ""
#. name of box generator
#: boxes/generators/traylayout.py
msgid "TrayLayout"
msgstr ""
#. description of TrayLayout
#: boxes/generators/traylayout.py
msgid "Generate a typetray from a layout file."
msgstr ""
#. long description of TrayLayout in markdown
#: boxes/generators/traylayout.py
msgid ""
"This is a two step process. This is step 2.\n"
"Edit the layout text graphics to adjust your tray.\n"
"Put in the sizes for each column and row. You can replace the hyphens and\n"
"vertical bars representing the walls with a space character to remove the "
"walls.\n"
"You can replace the space characters representing the floor by a \"X\" to "
"remove the floor for this compartment.\n"
msgstr ""
msgid "TrayLayout Settings"
msgstr ""
#. name of box generator
#: boxes/generators/gridfinitytraylayout.py
msgid "GridfinityTrayLayout"
msgstr ""
#. description of GridfinityTrayLayout
#: boxes/generators/gridfinitytraylayout.py
msgid "A Gridfinity Tray Generator based on TrayLayout"
msgstr ""
#. long description of GridfinityTrayLayout in markdown
#: boxes/generators/gridfinitytraylayout.py
msgid ""
"\n"
"This is a general purpose gridfinity tray generator. You can create\n"
"somewhat arbitrarily shaped trays, or just do nothing for simple grid\n"
"shaped trays. \n"
"\n"
"The dimensions are automatically calculated to fit perfectly into a\n"
"gridfinity grid (like the GridfinityBase, or any other Gridfinity\n"
"based base).\n"
"\n"
"Edit the layout text graphics to adjust your tray.\n"
"You can replace the hyphens and vertical bars representing the walls\n"
"with a space character to remove the walls. You can replace the space\n"
"characters representing the floor by a \"X\" to remove the floor for\n"
"this compartment. \n"
msgstr ""
msgid "GridfinityTrayLayout Settings"
msgstr ""
#. help for parameter hi
#: boxes/generators/gridfinitytraylayout.py
msgid ""
"inner height of inner walls in mm (leave to zero for same as outer walls)"
msgstr ""
#. parameter name
#: boxes/generators/gridfinitytraylayout.py
msgid "nx"
msgstr ""
#. help for parameter nx
#: boxes/generators/gridfinitytraylayout.py
msgid "number of gridfinity grids in X direction"
msgstr ""
#. parameter name
#: boxes/generators/gridfinitytraylayout.py
msgid "ny"
msgstr ""
#. help for parameter ny
#: boxes/generators/gridfinitytraylayout.py
msgid "number of gridfinity grids in Y direction"
msgstr ""
#. parameter name
#: boxes/generators/gridfinitytraylayout.py
msgid "countx"
msgstr ""
#. help for parameter countx
#: boxes/generators/gridfinitytraylayout.py
msgid "split x into this many grid sections. 0 means same as --nx"
msgstr ""
#. parameter name
#: boxes/generators/gridfinitytraylayout.py
msgid "county"
msgstr ""
#. help for parameter county
#: boxes/generators/gridfinitytraylayout.py
msgid "split y into this many grid sections. 0 means same as --ny"
msgstr ""
#. help for parameter margin
#: boxes/generators/gridfinitytraylayout.py
msgid "Leave this much total margin on the outside, in mm"
msgstr ""
#. help for parameter layout
#: boxes/generators/gridfinitytraylayout.py
msgid "You can hand edit this before generating"
msgstr ""
#. name of box generator
#: boxes/generators/halfbox.py
msgid "HalfBox"
msgstr ""
#. description of HalfBox
#: boxes/generators/halfbox.py
msgid ""
"Configurable half of a box which can be: a bookend, a hanging shelf, an "
"angle clamping jig, ..."
msgstr ""
#. long description of HalfBox in markdown
#: boxes/generators/halfbox.py
msgid ""
"This can be used to create:\n"
"\n"
"* a hanging shelf:\n"
"\n"
"\n"
"* an angle clamping jig:\n"
"\n"
"\n"
"* a bookend:\n"
"\n"
"\n"
"and many more...\n"
"\n"
msgstr ""
msgid "HalfBox Settings"
msgstr ""
#. parameter name
#: boxes/generators/halfbox.py
msgid "Clamping"
msgstr ""
#. help for parameter Clamping
#: boxes/generators/halfbox.py
msgid "add clamping holes"
msgstr ""
#. parameter name
#: boxes/generators/halfbox.py
msgid "ClampingSize"
msgstr ""
#. help for parameter ClampingSize
#: boxes/generators/halfbox.py
msgid "diameter of clamping holes"
msgstr ""
#. parameter name
#: boxes/generators/halfbox.py
msgid "Mounting"
msgstr ""
#. help for parameter Mounting
#: boxes/generators/halfbox.py
msgid "add mounting holes"
msgstr ""
#. parameter name
#: boxes/generators/halfbox.py
msgid "Sturdy"
msgstr ""
#. help for parameter Sturdy
#: boxes/generators/halfbox.py
msgid "create sturdy construction (e.g. shelf, clamping jig, ...)"
msgstr ""
#. name of box generator
#: boxes/generators/heart.py
msgid "HeartBox"
msgstr ""
#. description of HeartBox
#: boxes/generators/heart.py
msgid "Box in the form of a heart"
msgstr ""
msgid "HeartBox Settings"
msgstr ""
#. possible choice for top
#: boxes/generators/heart.py
msgid "closed"
msgstr ""
#. possible choice for top
#: boxes/generators/heart.py
msgid "hole"
msgstr ""
#. possible choice for top
#: boxes/generators/heart.py
msgid "lid"
msgstr ""
#. name of box generator
#: boxes/generators/hingebox.py
msgid "HingeBox"
msgstr ""
#. description of HingeBox
#: boxes/generators/hingebox.py
msgid "Box with lid attached by cabinet hinges"
msgstr ""
#. long description of HingeBox in markdown
#: boxes/generators/hingebox.py
msgid ""
"Needs (metal) pins as hinge axles. Pieces of nails will\n"
"do fine. They need to be cut to length as they are captured as soon as the\n"
"hinges are assembled.\n"
"\n"
"Assemble the box and the lid separately. Then insert the axle into the "
"hinges.\n"
"Then attach the hinges on the inside of the box and then connect them to "
"lid.\n"
msgstr ""
msgid "HingeBox Settings"
msgstr ""
#. parameter name
#: boxes/generators/hingebox.py
msgid "splitlid"
msgstr ""
#. help for parameter splitlid
#: boxes/generators/hingebox.py
msgid "split the lid in y direction (mm)"
msgstr ""
msgid "Settings for Cabinet Hinges"
msgstr ""
#. parameter name for CabinetHinge
#: boxes/generators/hingebox.py
msgid "bore"
msgstr ""
#. help for parameter bore
#: boxes/generators/hingebox.py
msgid "diameter of the pin hole in mm"
msgstr ""
#. parameter name for CabinetHinge
#: boxes/generators/hingebox.py
msgid "eyes_per_hinge"
msgstr ""
#. help for parameter eyes_per_hinge
#: boxes/generators/hingebox.py
msgid "pieces per hinge"
msgstr ""
#. parameter name for CabinetHinge
#: boxes/generators/hingebox.py
msgid "hinges"
msgstr ""
#. help for parameter hinges
#: boxes/generators/hingebox.py
msgid "number of hinges per edge"
msgstr ""
#. help for parameter style
#: boxes/generators/hingebox.py
msgid "style of hinge used"
msgstr ""
#. possible choice for style
#: boxes/generators/hingebox.py
msgid "inside"
msgstr ""
#. parameter name for CabinetHinge
#: boxes/generators/hingebox.py
msgid "eye"
msgstr ""
#. help for parameter eye
#: boxes/generators/hingebox.py
msgid "radius of the eye (multiples of thickness)"
msgstr ""
#. help for parameter play
#: boxes/generators/hingebox.py
msgid "space between eyes (multiples of thickness)"
msgstr ""
#. help for parameter spacing
#: boxes/generators/hingebox.py
msgid "minimum space around the hinge (multiples of thickness)"
msgstr ""
#. name of box generator
#: boxes/generators/holepattern.py
msgid "HolePattern"
msgstr ""
#. description of HolePattern
#: boxes/generators/holepattern.py
msgid "Generate hole patterns in different simple shapes"
msgstr ""
msgid "HolePattern Settings"
msgstr ""
#. parameter name
#: boxes/generators/holepattern.py
msgid "shape"
msgstr ""
#. help for parameter shape
#: boxes/generators/holepattern.py
msgid "Shape of the hole pattern"
msgstr ""
#. possible choice for shape
#: boxes/generators/holepattern.py
msgid "rectangle"
msgstr ""
#. possible choice for shape
#: boxes/generators/holepattern.py
msgid "ellipse"
msgstr ""
#. possible choice for shape
#: boxes/generators/holepattern.py
msgid "oval"
msgstr ""
#. name of box generator
#: boxes/generators/hooks.py
msgid "Hook"
msgstr ""
#. description of Hook
#: boxes/generators/hooks.py
msgid "A hook with a rectangular mouth to mount at the wall"
msgstr ""
msgid "Hook Settings"
msgstr ""
#. help for parameter width
#: boxes/generators/hooks.py
msgid "width of the hook (back plate is a bit wider)"
msgstr ""
#. help for parameter height
#: boxes/generators/hooks.py
msgid "inner height of the hook"
msgstr ""
#. help for parameter depth
#: boxes/generators/hooks.py
msgid "inner depth of the hook"
msgstr ""
#. parameter name
#: boxes/generators/hooks.py
msgid "strength"
msgstr ""
#. help for parameter strength
#: boxes/generators/hooks.py
msgid "width of the hook from the side"
msgstr ""
#. help for parameter angle
#: boxes/generators/hooks.py
msgid "angle of the support underneath"
msgstr ""
#. name of box generator
#: boxes/generators/integratedhingebox.py
msgid "IntegratedHingeBox"
msgstr ""
#. description of IntegratedHingeBox
#: boxes/generators/integratedhingebox.py
msgid "Box with lid and integraded hinge"
msgstr ""
msgid "IntegratedHingeBox Settings"
msgstr ""
#. name of box generator
#: boxes/generators/j.py
msgid "J"
msgstr ""
msgid "J Settings"
msgstr ""
#. parameter name
#: boxes/generators/j.py
msgid "XX"
msgstr ""
#. name of box generator
#: boxes/generators/jointpanel.py
msgid "JointPanel"
msgstr ""
#. description of JointPanel
#: boxes/generators/jointpanel.py
msgid ""
"Create pieces larger than your laser cutter by joining them with Dove Tails"
msgstr ""
#. long description of JointPanel in markdown
#: boxes/generators/jointpanel.py
msgid ""
"This can be used to just create a big panel in a smaller laser cutter. But "
"the actual use is to split large parts into multiple smaller pieces. Copy "
"the outline onto the sheet and then use the pieces to cut it into multiple "
"parts that each can fit your laser cutter. Note that each piece must be cut "
"with the sheet surrounding it to ensure the burn correction (aka kerf) is "
"correct. Depending on your vector graphics software you may need to "
"duplicate your part multiple times and then generate the intersection "
"between one copy and each rectangular part.\n"
"\n"
"The Boxes.py drawings assume that the laser is cutting in the center of the "
"line and the width of the line represents the material that is cut away. "
"Make sure your changes work the same way and you do not cutting away the "
"kerf.\n"
"\n"
"Small dove tails make it easier to fit parts in without problems. Lookout "
"for pieces cut loose where the dove tails meet the edge of the parts. Move "
"your part if necessary to avoid dove tails or details of your part colliding "
"in a weird way.\n"
"\n"
"For plywood this method works well with a very stiff press fit. Aim for "
"needing a hammer to join the pieces together. This way they will feel like "
"they have been welder together.\n"
"\n"
msgstr ""
msgid "JointPanel Settings"
msgstr ""
#. parameter name
#: boxes/generators/jointpanel.py
msgid "separate"
msgstr ""
#. help for parameter separate
#: boxes/generators/jointpanel.py
msgid "draw pieces apart so they can be cut to form a large sheet"
msgstr ""
msgid "Settings for Dove Tail Joints"
msgstr ""
#. help for parameter angle
#: boxes/generators/jointpanel.py
msgid "how much should fingers widen (-80 to 80)"
msgstr ""
#. help for parameter depth
#: boxes/generators/jointpanel.py
msgid ""
"how far the dove tails stick out of/into the edge (multiples of thickness)"
msgstr ""
#. help for parameter radius
#: boxes/generators/jointpanel.py
msgid "radius used on all four corners (multiples of thickness)"
msgstr ""
#. parameter name for DoveTail
#: boxes/generators/jointpanel.py
msgid "size"
msgstr ""
#. help for parameter size
#: boxes/generators/jointpanel.py
msgid "from one middle of a dove tail to another (multiples of thickness)"
msgstr ""
#. name of box generator
#: boxes/generators/keypad.py
msgid "Keypad"
msgstr ""
#. description of Keypad
#: boxes/generators/keypad.py
msgid "Generator for keypads with mechanical switches."
msgstr ""
#. long description of Keypad in markdown
#: boxes/generators/keypad.py
msgid ""
"Note that top layers use a different material thickness according to the "
"top1_thickness and top2_thickness (if enabled)."
msgstr ""
msgid "Keypad Settings"
msgstr ""
#. parameter name
#: boxes/generators/keypad.py
msgid "top1_thickness"
msgstr ""
#. help for parameter top1_thickness
#: boxes/generators/keypad.py
msgid ""
"thickness of the button hold layer, cherry like switches need 1.5mm or "
"smaller to snap in"
msgstr ""
#. parameter name
#: boxes/generators/keypad.py
msgid "top2_enable"
msgstr ""
#. help for parameter top2_enable
#: boxes/generators/keypad.py
msgid "enables another top layer that can hold CPG151101S11 hotswap sockets"
msgstr ""
#. parameter name
#: boxes/generators/keypad.py
msgid "top2_thickness"
msgstr ""
#. help for parameter top2_thickness
#: boxes/generators/keypad.py
msgid ""
"thickness of the hotplug layer, CPG151101S11 hotswap sockets need 1.2mm to "
"1.5mm"
msgstr ""
#. name of box generator
#: boxes/generators/laptopstand.py
msgid "LaptopStand"
msgstr ""
#. description of LaptopStand
#: boxes/generators/laptopstand.py
msgid "A simple X shaped frame to support a laptop on a given angle"
msgstr ""
msgid "LaptopStand Settings"
msgstr ""
#. parameter name
#: boxes/generators/laptopstand.py
msgid "l_depth"
msgstr ""
#. help for parameter l_depth
#: boxes/generators/laptopstand.py
msgid "laptop depth - front to back (mm)"
msgstr ""
#. parameter name
#: boxes/generators/laptopstand.py
msgid "l_thickness"
msgstr ""
#. help for parameter l_thickness
#: boxes/generators/laptopstand.py
msgid "laptop thickness (mm)"
msgstr ""
#. help for parameter angle
#: boxes/generators/laptopstand.py
msgid "desired tilt of keyboard (deg)"
msgstr ""
#. parameter name
#: boxes/generators/laptopstand.py
msgid "ground_offset"
msgstr ""
#. help for parameter ground_offset
#: boxes/generators/laptopstand.py
msgid ""
"desired height between bottom of laptop and ground at lowest point (front of "
"laptop stand)"
msgstr ""
#. parameter name
#: boxes/generators/laptopstand.py
msgid "nub_size"
msgstr ""
#. help for parameter nub_size
#: boxes/generators/laptopstand.py
msgid "desired thickness of the supporting edge"
msgstr ""
#. name of box generator
#: boxes/generators/laserclamp.py
msgid "LaserClamp"
msgstr ""
#. description of LaserClamp
#: boxes/generators/laserclamp.py
msgid "A clamp to hold down material to a knife table"
msgstr ""
#. long description of LaserClamp in markdown
#: boxes/generators/laserclamp.py
msgid ""
"You need a tension spring of the proper length to make the clamp work.\n"
"Increase extraheight to get more space for the spring and to make the\n"
"sliding mechanism less likely to bind. You may need to add some wax on the\n"
"parts sliding on each other to reduce friction.\n"
msgstr ""
msgid "LaserClamp Settings"
msgstr ""
#. parameter name
#: boxes/generators/laserclamp.py
msgid "minheight"
msgstr ""
#. help for parameter minheight
#: boxes/generators/laserclamp.py
msgid "minimal clamping height in mm"
msgstr ""
#. parameter name
#: boxes/generators/laserclamp.py
msgid "maxheight"
msgstr ""
#. help for parameter maxheight
#: boxes/generators/laserclamp.py
msgid "maximal clamping height in mm"
msgstr ""
#. parameter name
#: boxes/generators/laserclamp.py
msgid "extraheight"
msgstr ""
#. help for parameter extraheight
#: boxes/generators/laserclamp.py
msgid "extra height to make operation smoother in mm"
msgstr ""
#. name of box generator
#: boxes/generators/laserholdfast.py
msgid "LaserHoldfast"
msgstr ""
#. description of LaserHoldfast
#: boxes/generators/laserholdfast.py
msgid "A holdfast for honey comb tables of laser cutters"
msgstr ""
msgid "LaserHoldfast Settings"
msgstr ""
#. parameter name
#: boxes/generators/laserholdfast.py
msgid "hookheight"
msgstr ""
#. help for parameter hookheight
#: boxes/generators/laserholdfast.py
msgid "height of the top hook"
msgstr ""
#. parameter name
#: boxes/generators/laserholdfast.py
msgid "shaftwidth"
msgstr ""
#. help for parameter shaftwidth
#: boxes/generators/laserholdfast.py
msgid "width of the shaft"
msgstr ""
#. name of box generator
#: boxes/generators/laserlini.py
msgid "Laserlini"
msgstr ""
#. description of Laserlini
#: boxes/generators/laserlini.py
msgid "Mini cross bow"
msgstr ""
msgid "Laserlini Settings"
msgstr ""
#. name of box generator
#: boxes/generators/lbeam.py
msgid "LBeam"
msgstr ""
#. description of LBeam
#: boxes/generators/lbeam.py
msgid "Simple L-Beam: two pieces joined with a right angle"
msgstr ""
msgid "LBeam Settings"
msgstr ""
#. name of box generator
#: boxes/generators/lueftung.py
msgid "Lueftung"
msgstr ""
msgid "Lueftung Settings"
msgstr ""
#. name of box generator
#: boxes/generators/magazinefile.py
msgid "MagazineFile"
msgstr ""
#. description of MagazineFile
#: boxes/generators/magazinefile.py
msgid "Open magazine file"
msgstr ""
msgid "MagazineFile Settings"
msgstr ""
#. name of box generator
#: boxes/generators/makitapowersupply.py
msgid "MakitaPowerSupply"
msgstr ""
#. description of MakitaPowerSupply
#: boxes/generators/makitapowersupply.py
msgid ""
"Bench power supply powered with Maktia 18V battery or laptop power supply"
msgstr ""
#. long description of MakitaPowerSupply in markdown
#: boxes/generators/makitapowersupply.py
msgid ""
"\n"
"Vitamins: DSP5005 (or similar) power supply, two banana sockets, two 4.8mm "
"flat terminals with flat soldering tag\n"
"\n"
"To allow powering by laptop power supply: flip switch, Lenovo round socket "
"(or adjust right hole for different socket)\n"
msgstr ""
msgid "MakitaPowerSupply Settings"
msgstr ""
#. parameter name
#: boxes/generators/makitapowersupply.py
msgid "banana_socket_diameter"
msgstr ""
#. help for parameter banana_socket_diameter
#: boxes/generators/makitapowersupply.py
msgid "diameter of the banana socket mounting holes"
msgstr ""
#. parameter name
#: boxes/generators/makitapowersupply.py
msgid "flipswitch_diameter"
msgstr ""
#. help for parameter flipswitch_diameter
#: boxes/generators/makitapowersupply.py
msgid "diameter of the flipswitch mounting hole"
msgstr ""
#. name of box generator
#: boxes/generators/microrack.py
msgid "SBCMicroRack"
msgstr ""
#. description of SBCMicroRack
#: boxes/generators/microrack.py
msgid "Stackable rackable racks for SBC Pi-Style Computers"
msgstr ""
msgid "SBCMicroRack Settings"
msgstr ""
#. parameter name
#: boxes/generators/microrack.py
msgid "sbcs"
msgstr ""
#. help for parameter sbcs
#: boxes/generators/microrack.py
msgid "how many slots for sbcs"
msgstr ""
#. parameter name
#: boxes/generators/microrack.py
msgid "clearance_x"
msgstr ""
#. help for parameter clearance_x
#: boxes/generators/microrack.py
msgid "clearance for the board in the box (x) in mm"
msgstr ""
#. parameter name
#: boxes/generators/microrack.py
msgid "clearance_y"
msgstr ""
#. help for parameter clearance_y
#: boxes/generators/microrack.py
msgid "clearance for the board in the box (y) in mm"
msgstr ""
#. parameter name
#: boxes/generators/microrack.py
msgid "clearance_z"
msgstr ""
#. help for parameter clearance_z
#: boxes/generators/microrack.py
msgid "SBC Clearance in mm"
msgstr ""
#. parameter name
#: boxes/generators/microrack.py
msgid "hole_dist_edge"
msgstr ""
#. help for parameter hole_dist_edge
#: boxes/generators/microrack.py
msgid "hole distance from edge in mm"
msgstr ""
#. parameter name
#: boxes/generators/microrack.py
msgid "hole_grid_dimension_x"
msgstr ""
#. help for parameter hole_grid_dimension_x
#: boxes/generators/microrack.py
msgid "width of x hole area"
msgstr ""
#. parameter name
#: boxes/generators/microrack.py
msgid "hole_grid_dimension_y"
msgstr ""
#. help for parameter hole_grid_dimension_y
#: boxes/generators/microrack.py
msgid "width of y hole area"
msgstr ""
#. parameter name
#: boxes/generators/microrack.py
msgid "hole_diameter"
msgstr ""
#. help for parameter hole_diameter
#: boxes/generators/microrack.py
msgid "hole diameters"
msgstr ""
#. parameter name
#: boxes/generators/microrack.py
msgid "netusb_z"
msgstr ""
#. help for parameter netusb_z
#: boxes/generators/microrack.py
msgid "height of the net/usb hole mm"
msgstr ""
#. parameter name
#: boxes/generators/microrack.py
msgid "netusb_x"
msgstr ""
#. help for parameter netusb_x
#: boxes/generators/microrack.py
msgid "width of the net/usb hole in mm"
msgstr ""
#. parameter name
#: boxes/generators/microrack.py
msgid "stable"
msgstr ""
#. help for parameter stable
#: boxes/generators/microrack.py
msgid "draw some holes to put a 1/4\" dowel through at the base and top"
msgstr ""
#. parameter name
#: boxes/generators/microrack.py
msgid "switch"
msgstr ""
#. help for parameter switch
#: boxes/generators/microrack.py
msgid ""
"adds an additional vertical segment to hold the switch in place, works best "
"w/ --stable"
msgstr ""
#. name of box generator
#: boxes/generators/nemamount.py
msgid "NemaMount"
msgstr ""
#. description of NemaMount
#: boxes/generators/nemamount.py
msgid "Mounting bracket for a Nema motor"
msgstr ""
msgid "NemaMount Settings"
msgstr ""
#. help for parameter size
#: boxes/generators/nemamount.py
msgid "Nema size of the motor"
msgstr ""
#. name of box generator
#: boxes/generators/nemapattern.py
msgid "NemaPattern"
msgstr ""
#. description of NemaPattern
#: boxes/generators/nemapattern.py
msgid "Mounting holes for a Nema motor"
msgstr ""
msgid "NemaPattern Settings"
msgstr ""
#. parameter name
#: boxes/generators/nemapattern.py
msgid "screwholes"
msgstr ""
#. help for parameter screwholes
#: boxes/generators/nemapattern.py
msgid "Size of the screw holes in mm - 0 for default size"
msgstr ""
#. name of box generator
#: boxes/generators/notesholder.py
msgid "NotesHolder"
msgstr ""
#. description of NotesHolder
#: boxes/generators/notesholder.py
msgid "Box for holding a stack of paper, coasters etc"
msgstr ""
msgid "NotesHolder Settings"
msgstr ""
#. help for parameter opening
#: boxes/generators/notesholder.py
msgid "percent of front that's open"
msgstr ""
#. name of box generator
#: boxes/generators/openbox.py
msgid "OpenBox"
msgstr ""
#. description of OpenBox
#: boxes/generators/openbox.py
msgid "Box with top and front open"
msgstr ""
msgid "OpenBox Settings"
msgstr ""
#. parameter name
#: boxes/generators/openbox.py
msgid "edgetype"
msgstr ""
#. help for parameter edgetype
#: boxes/generators/openbox.py
msgid "edge type"
msgstr ""
#. name of box generator
#: boxes/generators/organpipe.py
msgid "OrganPipe"
msgstr ""
#. description of OrganPipe
#: boxes/generators/organpipe.py
msgid "Rectangular organ pipe based on pipecalc"
msgstr ""
msgid "OrganPipe Settings"
msgstr ""
#. possible choice for pitch
#: boxes/generators/organpipe.py
msgid "c#"
msgstr ""
#. possible choice for pitch
#: boxes/generators/organpipe.py
msgid "d"
msgstr ""
#. possible choice for pitch
#: boxes/generators/organpipe.py
msgid "d#"
msgstr ""
#. possible choice for pitch
#: boxes/generators/organpipe.py
msgid "f#"
msgstr ""
#. possible choice for pitch
#: boxes/generators/organpipe.py
msgid "g"
msgstr ""
#. possible choice for pitch
#: boxes/generators/organpipe.py
msgid "g#"
msgstr ""
#. possible choice for pitch
#: boxes/generators/organpipe.py
msgid "a"
msgstr ""
#. possible choice for pitch
#: boxes/generators/organpipe.py
msgid "a#"
msgstr ""
#. possible choice for pitch
#: boxes/generators/organpipe.py
msgid "b"
msgstr ""
#. parameter name
#: boxes/generators/organpipe.py
msgid "octave"
msgstr ""
#. help for parameter octave
#: boxes/generators/organpipe.py
msgid "Octave in International Pitch Notation (2 == C)"
msgstr ""
#. parameter name
#: boxes/generators/organpipe.py
msgid "intonation"
msgstr ""
#. help for parameter intonation
#: boxes/generators/organpipe.py
msgid "Intonation Number. 2 for max. efficiency, 3 max."
msgstr ""
#. parameter name
#: boxes/generators/organpipe.py
msgid "mouthratio"
msgstr ""
#. help for parameter mouthratio
#: boxes/generators/organpipe.py
msgid ""
"mouth to circumference ratio (0.1 to 0.45). Determines the width to depth "
"ratio"
msgstr ""
#. parameter name
#: boxes/generators/organpipe.py
msgid "cutup"
msgstr ""
#. help for parameter cutup
#: boxes/generators/organpipe.py
msgid "Cutup to mouth ratio"
msgstr ""
#. parameter name
#: boxes/generators/organpipe.py
msgid "mensur"
msgstr ""
#. help for parameter mensur
#: boxes/generators/organpipe.py
msgid "Distance in halftones in the Normalmensur by Töpfer"
msgstr ""
#. parameter name
#: boxes/generators/organpipe.py
msgid "windpressure"
msgstr ""
#. help for parameter windpressure
#: boxes/generators/organpipe.py
msgid "uses unit selected below"
msgstr ""
#. parameter name
#: boxes/generators/organpipe.py
msgid "windpressure_units"
msgstr ""
#. help for parameter windpressure_units
#: boxes/generators/organpipe.py
msgid "in Pa"
msgstr ""
#. possible choice for windpressure_units
#: boxes/generators/organpipe.py
msgid "Pa"
msgstr ""
#. possible choice for windpressure_units
#: boxes/generators/organpipe.py
msgid "mBar"
msgstr ""
#. possible choice for windpressure_units
#: boxes/generators/organpipe.py
msgid "mmHg"
msgstr ""
#. possible choice for windpressure_units
#: boxes/generators/organpipe.py
msgid "mmH2O"
msgstr ""
#. parameter name
#: boxes/generators/organpipe.py
msgid "stopped"
msgstr ""
#. help for parameter stopped
#: boxes/generators/organpipe.py
msgid "pipe is closed at the top"
msgstr ""
#. name of box generator
#: boxes/generators/ottobody.py
msgid "OttoBody"
msgstr ""
#. description of OttoBody
#: boxes/generators/ottobody.py
msgid "Otto LC - a laser cut chassis for Otto DIY - body"
msgstr ""
msgid "OttoBody Settings"
msgstr ""
#. name of box generator
#: boxes/generators/ottolegs.py
msgid "OttoLegs"
msgstr ""
#. description of OttoLegs
#: boxes/generators/ottolegs.py
msgid "Otto LC - a laser cut chassis for Otto DIY - legs"
msgstr ""
msgid "OttoLegs Settings"
msgstr ""
#. parameter name
#: boxes/generators/ottolegs.py
msgid "anklebolt1"
msgstr ""
#. help for parameter anklebolt1
#: boxes/generators/ottolegs.py
msgid "diameter for hole for ankle bolts - foot side"
msgstr ""
#. parameter name
#: boxes/generators/ottolegs.py
msgid "anklebolt2"
msgstr ""
#. help for parameter anklebolt2
#: boxes/generators/ottolegs.py
msgid "diameter for hole for ankle bolts - leg side"
msgstr ""
#. parameter name
#: boxes/generators/ottolegs.py
msgid "length"
msgstr ""
#. help for parameter length
#: boxes/generators/ottolegs.py
msgid "length of legs (34mm min)"
msgstr ""
#. name of box generator
#: boxes/generators/ottosoles.py
msgid "OttoSoles"
msgstr ""
#. description of OttoSoles
#: boxes/generators/ottosoles.py
msgid "Foam soles for the OttO bot"
msgstr ""
msgid "OttoSoles Settings"
msgstr ""
#. help for parameter width
#: boxes/generators/ottosoles.py
msgid "width of sole stripe"
msgstr ""
#. parameter name
#: boxes/generators/ottosoles.py
msgid "chamfer"
msgstr ""
#. help for parameter chamfer
#: boxes/generators/ottosoles.py
msgid "chamfer at the corners"
msgstr ""
#. help for parameter num
#: boxes/generators/ottosoles.py
msgid "number of soles"
msgstr ""
#. name of box generator
#: boxes/generators/paintbox.py
msgid "PaintStorage"
msgstr ""
#. description of PaintStorage
#: boxes/generators/paintbox.py
msgid "Stackable storage for hobby paint or other things"
msgstr ""
msgid "PaintStorage Settings"
msgstr ""
#. parameter name
#: boxes/generators/paintbox.py
msgid "canheight"
msgstr ""
#. help for parameter canheight
#: boxes/generators/paintbox.py
msgid "Height of the paintcans"
msgstr ""
#. parameter name
#: boxes/generators/paintbox.py
msgid "candiameter"
msgstr ""
#. help for parameter candiameter
#: boxes/generators/paintbox.py
msgid "Diameter of the paintcans"
msgstr ""
#. parameter name
#: boxes/generators/paintbox.py
msgid "minspace"
msgstr ""
#. help for parameter minspace
#: boxes/generators/paintbox.py
msgid "Minimum space between the paintcans"
msgstr ""
#. parameter name
#: boxes/generators/paintbox.py
msgid "additional_bottom"
msgstr ""
#. help for parameter additional_bottom
#: boxes/generators/paintbox.py
msgid "Additional bottom/floor with holes the paintcans go through"
msgstr ""
#. parameter name
#: boxes/generators/paintbox.py
msgid "additional_top"
msgstr ""
#. help for parameter additional_top
#: boxes/generators/paintbox.py
msgid "Additional top/floor with holes the paintcans go through"
msgstr ""
#. parameter name
#: boxes/generators/paintbox.py
msgid "hexpattern"
msgstr ""
#. help for parameter hexpattern
#: boxes/generators/paintbox.py
msgid "Use hexagonal arrangement for the holes instead of orthogonal"
msgstr ""
#. parameter name
#: boxes/generators/paintbox.py
msgid "drawer"
msgstr ""
#. help for parameter drawer
#: boxes/generators/paintbox.py
msgid "Create a stackable drawer instead"
msgstr ""
#. name of box generator
#: boxes/generators/paperbox.py
msgid "PaperBox"
msgstr ""
#. description of PaperBox
#: boxes/generators/paperbox.py
msgid ""
"\n"
" Box made of paper, with lid.\n"
" "
msgstr ""
#. long description of PaperBox in markdown
#: boxes/generators/paperbox.py
msgid ""
"\n"
"This box is made of paper.\n"
"\n"
"There are marks in the \"outside leftover paper\" to help see where to fold\n"
"(cutting with tabs helps use them). The cut is very precise, and could be "
"too tight if misaligned when glued. A plywood box (such as a simple "
"TypeTray) of the same size is a great guide during folding and gluing. Just "
"fold the box against it. Accurate quick and easy.\n"
"\n"
"A paper creaser (or bone folder) is also useful.\n"
msgstr ""
msgid "PaperBox Settings"
msgstr ""
#. parameter name
#: boxes/generators/paperbox.py
msgid "design"
msgstr ""
#. help for parameter design
#: boxes/generators/paperbox.py
msgid ""
"different design for paper consumption optimization. The tuckbox also has "
"locking cut for its lid."
msgstr ""
#. possible choice for design
#: boxes/generators/paperbox.py
msgid "automatic"
msgstr ""
#. possible choice for design
#: boxes/generators/paperbox.py
msgid "widebox"
msgstr ""
#. possible choice for design
#: boxes/generators/paperbox.py
msgid "tuckbox"
msgstr ""
#. parameter name
#: boxes/generators/paperbox.py
msgid "lid_height"
msgstr ""
#. help for parameter lid_height
#: boxes/generators/paperbox.py
msgid "Height of the lid (part which goes inside the box)"
msgstr ""
#. parameter name
#: boxes/generators/paperbox.py
msgid "lid_radius"
msgstr ""
#. help for parameter lid_radius
#: boxes/generators/paperbox.py
msgid "Angle, in radius, of the round corner of the lid"
msgstr ""
#. parameter name
#: boxes/generators/paperbox.py
msgid "lid_sides"
msgstr ""
#. help for parameter lid_sides
#: boxes/generators/paperbox.py
msgid "Width of the two sides upon which goes the lid"
msgstr ""
#. help for parameter margin
#: boxes/generators/paperbox.py
msgid "Margin for the glued sides"
msgstr ""
#. parameter name
#: boxes/generators/paperbox.py
msgid "mark_length"
msgstr ""
#. help for parameter mark_length
#: boxes/generators/paperbox.py
msgid "Length of the folding outside mark"
msgstr ""
#. parameter name
#: boxes/generators/paperbox.py
msgid "tab_angle_rad"
msgstr ""
#. help for parameter tab_angle_rad
#: boxes/generators/paperbox.py
msgid "Angle (in radian) of the sides which are to be glued inside the box"
msgstr ""
#. parameter name
#: boxes/generators/paperbox.py
msgid "finger_hole_diameter"
msgstr ""
#. help for parameter finger_hole_diameter
#: boxes/generators/paperbox.py
msgid "Diameter of the hole to help catch the lid"
msgstr ""
#. name of box generator
#: boxes/generators/phoneholder.py
msgid "PhoneHolder"
msgstr ""
#. description of PhoneHolder
#: boxes/generators/phoneholder.py
msgid ""
"\n"
" Smartphone desk holder\n"
" "
msgstr ""
#. long description of PhoneHolder in markdown
#: boxes/generators/phoneholder.py
msgid ""
"\n"
" This phone stand holds your phone between two tabs, with access to its\n"
" bottom, in order to connect a charger, headphones, and also not to "
"obstruct\n"
" the mic.\n"
"\n"
" Default values are currently based on Galaxy S7.\n"
msgstr ""
msgid "PhoneHolder Settings"
msgstr ""
#. parameter name
#: boxes/generators/phoneholder.py
msgid "phone_height"
msgstr ""
#. help for parameter phone_height
#: boxes/generators/phoneholder.py
msgid "Height of the phone."
msgstr ""
#. parameter name
#: boxes/generators/phoneholder.py
msgid "phone_width"
msgstr ""
#. help for parameter phone_width
#: boxes/generators/phoneholder.py
msgid "Width of the phone."
msgstr ""
#. parameter name
#: boxes/generators/phoneholder.py
msgid "phone_depth"
msgstr ""
#. help for parameter phone_depth
#: boxes/generators/phoneholder.py
msgid ""
"Depth of the phone. Used by the bottom support holding the phone, and the "
"side tabs depth as well. Should be at least your material thickness for "
"assembly reasons."
msgstr ""
#. help for parameter angle
#: boxes/generators/phoneholder.py
msgid "angle at which the phone stands, in degrees. 0° is vertical."
msgstr ""
#. help for parameter bottom_margin
#: boxes/generators/phoneholder.py
msgid "Height of the support below the phone"
msgstr ""
#. parameter name
#: boxes/generators/phoneholder.py
msgid "tab_size"
msgstr ""
#. help for parameter tab_size
#: boxes/generators/phoneholder.py
msgid "Length of the tabs holding the phone"
msgstr ""
#. parameter name
#: boxes/generators/phoneholder.py
msgid "bottom_support_spacing"
msgstr ""
#. help for parameter bottom_support_spacing
#: boxes/generators/phoneholder.py
msgid ""
"Spacing between the two bottom support. Choose a value big enough for the "
"charging cable, without getting in the way of other ports."
msgstr ""
#. name of box generator
#: boxes/generators/piratechest.py
msgid "PirateChest"
msgstr ""
#. description of PirateChest
#: boxes/generators/piratechest.py
msgid "Box with polygon lid with chest hinges."
msgstr ""
#. long description of PirateChest in markdown
#: boxes/generators/piratechest.py
msgid ""
"Do not assemble sides before attaching the lid! \n"
" Hinge of the lid has to be placed first because it is impossible \n"
" to get it in position without removing the side wall. The lid can \n"
" be a bit tricky to assemble. Keep track of how the parts fit together. \n"
" Part with label \"lid back\" is placed in the hinges"
msgstr ""
msgid "PirateChest Settings"
msgstr ""
#. help for parameter n
#: boxes/generators/piratechest.py
msgid "number of sides on the lid. n ≥ 3"
msgstr ""
#. name of box generator
#: boxes/generators/planetary.py
msgid "Planetary"
msgstr ""
#. description of Planetary
#: boxes/generators/planetary.py
msgid "Planetary Gear with possibly multiple identical stages"
msgstr ""
msgid "Planetary Settings"
msgstr ""
#. parameter name
#: boxes/generators/planetary.py
msgid "sunteeth"
msgstr ""
#. help for parameter sunteeth
#: boxes/generators/planetary.py
msgid "number of teeth on sun gear"
msgstr ""
#. parameter name
#: boxes/generators/planetary.py
msgid "planetteeth"
msgstr ""
#. help for parameter planetteeth
#: boxes/generators/planetary.py
msgid "number of teeth on planets"
msgstr ""
#. parameter name
#: boxes/generators/planetary.py
msgid "maxplanets"
msgstr ""
#. help for parameter maxplanets
#: boxes/generators/planetary.py
msgid "limit the number of planets (0 for as much as fit)"
msgstr ""
#. parameter name
#: boxes/generators/planetary.py
msgid "deltateeth"
msgstr ""
#. help for parameter deltateeth
#: boxes/generators/planetary.py
msgid "enable secondary ring with given delta to the ring gear"
msgstr ""
#. help for parameter modulus
#: boxes/generators/planetary.py
msgid "modulus of the teeth in mm"
msgstr ""
#. name of box generator
#: boxes/generators/planetary2.py
msgid "Planetary2"
msgstr ""
#. description of Planetary2
#: boxes/generators/planetary2.py
msgid "Balanced force Difference Planetary Gear (not yet working properly)"
msgstr ""
#. long description of Planetary2 in markdown
#: boxes/generators/planetary2.py
msgid ""
"Still has issues. The middle planetary gears set must not have a meshing sun "
"gear as it can't be a proper gear set."
msgstr ""
msgid "Planetary2 Settings"
msgstr ""
#. parameter name
#: boxes/generators/planetary2.py
msgid "profile"
msgstr ""
#. help for parameter profile
#: boxes/generators/planetary2.py
msgid "profile of the teeth/belt"
msgstr ""
#. possible choice for profile
#: boxes/generators/planetary2.py
msgid "40DP"
msgstr ""
#. possible choice for profile
#: boxes/generators/planetary2.py
msgid "AT5"
msgstr ""
#. possible choice for profile
#: boxes/generators/planetary2.py
msgid "GT2_2mm"
msgstr ""
#. possible choice for profile
#: boxes/generators/planetary2.py
msgid "GT2_3mm"
msgstr ""
#. possible choice for profile
#: boxes/generators/planetary2.py
msgid "GT2_5mm"
msgstr ""
#. possible choice for profile
#: boxes/generators/planetary2.py
msgid "H"
msgstr ""
#. possible choice for profile
#: boxes/generators/planetary2.py
msgid "HTD_3mm"
msgstr ""
#. possible choice for profile
#: boxes/generators/planetary2.py
msgid "HTD_5mm"
msgstr ""
#. possible choice for profile
#: boxes/generators/planetary2.py
msgid "HTD_8mm"
msgstr ""
#. possible choice for profile
#: boxes/generators/planetary2.py
msgid "MXL"
msgstr ""
#. possible choice for profile
#: boxes/generators/planetary2.py
msgid "T10"
msgstr ""
#. possible choice for profile
#: boxes/generators/planetary2.py
msgid "T2_5"
msgstr ""
#. possible choice for profile
#: boxes/generators/planetary2.py
msgid "T5"
msgstr ""
#. possible choice for profile
#: boxes/generators/planetary2.py
msgid "XL"
msgstr ""
#. parameter name
#: boxes/generators/planetary2.py
msgid "screw1"
msgstr ""
#. help for parameter screw1
#: boxes/generators/planetary2.py
msgid "diameter of lower part of the screw hole"
msgstr ""
#. parameter name
#: boxes/generators/planetary2.py
msgid "screw2"
msgstr ""
#. help for parameter screw2
#: boxes/generators/planetary2.py
msgid "diameter of upper part of the screw hole"
msgstr ""
#. parameter name
#: boxes/generators/planetary2.py
msgid "pinsize"
msgstr ""
#. help for parameter pinsize
#: boxes/generators/planetary2.py
msgid "diameter of alignment pins"
msgstr ""
#. name of box generator
#: boxes/generators/platonic.py
msgid "Platonic"
msgstr ""
#. description of Platonic
#: boxes/generators/platonic.py
msgid "Platonic solids generator"
msgstr ""
#. long description of Platonic in markdown
#: boxes/generators/platonic.py
msgid "\n"
msgstr ""
msgid "Platonic Settings"
msgstr ""
#. parameter name
#: boxes/generators/platonic.py
msgid "type"
msgstr ""
#. help for parameter type
#: boxes/generators/platonic.py
msgid "type of platonic solid"
msgstr ""
#. possible choice for type
#: boxes/generators/platonic.py
msgid "tetrahedron"
msgstr ""
#. possible choice for type
#: boxes/generators/platonic.py
msgid "cube"
msgstr ""
#. possible choice for type
#: boxes/generators/platonic.py
msgid "octahedron"
msgstr ""
#. possible choice for type
#: boxes/generators/platonic.py
msgid "dodecahedron"
msgstr ""
#. possible choice for type
#: boxes/generators/platonic.py
msgid "icosahedron"
msgstr ""
#. name of box generator
#: boxes/generators/polehook.py
msgid "PoleHook"
msgstr ""
#. description of PoleHook
#: boxes/generators/polehook.py
msgid "Hook for pole like things to be clamped to another pole"
msgstr ""
msgid "PoleHook Settings"
msgstr ""
#. help for parameter diameter
#: boxes/generators/polehook.py
msgid "diameter of the thing to hook"
msgstr ""
#. parameter name
#: boxes/generators/polehook.py
msgid "screw"
msgstr ""
#. help for parameter screw
#: boxes/generators/polehook.py
msgid "diameter of the screw in mm"
msgstr ""
#. parameter name
#: boxes/generators/polehook.py
msgid "screwhead"
msgstr ""
#. help for parameter screwhead
#: boxes/generators/polehook.py
msgid "width of the screw head in mm"
msgstr ""
#. parameter name
#: boxes/generators/polehook.py
msgid "screwheadheight"
msgstr ""
#. help for parameter screwheadheight
#: boxes/generators/polehook.py
msgid "height of the screw head in mm"
msgstr ""
#. parameter name
#: boxes/generators/polehook.py
msgid "pin"
msgstr ""
#. help for parameter pin
#: boxes/generators/polehook.py
msgid "diameter of the pin in mm"
msgstr ""
#. name of box generator
#: boxes/generators/pulley.py
msgid "Pulley"
msgstr ""
#. description of Pulley
#: boxes/generators/pulley.py
msgid "Timing belt pulleys for different profiles"
msgstr ""
msgid "Pulley Settings"
msgstr ""
#. parameter name
#: boxes/generators/pulley.py
msgid "teeth"
msgstr ""
#. help for parameter axle
#: boxes/generators/pulley.py
msgid "diameter of the axle"
msgstr ""
#. parameter name
#: boxes/generators/pulley.py
msgid "insideout"
msgstr ""
#. help for parameter insideout
#: boxes/generators/pulley.py
msgid "create a ring gear with the belt being pushed against from within"
msgstr ""
#. help for parameter top
#: boxes/generators/pulley.py
msgid "overlap of top rim (zero for none)"
msgstr ""
#. name of box generator
#: boxes/generators/rack19box.py
msgid "Rack19Box"
msgstr ""
#. description of Rack19Box
#: boxes/generators/rack19box.py
msgid "Closed box with screw on top for mounting in a 19\" rack."
msgstr ""
msgid "Rack19Box Settings"
msgstr ""
#. help for parameter height
#: boxes/generators/rack19box.py
msgid "height in rack units"
msgstr ""
#. name of box generator
#: boxes/generators/rack10box.py
msgid "Rack10Box"
msgstr ""
#. description of Rack10Box
#: boxes/generators/rack10box.py
msgid "Closed box with screw on top for mounting in a 10\" rack."
msgstr ""
msgid "Rack10Box Settings"
msgstr ""
#. name of box generator
#: boxes/generators/rack19halfwidth.py
msgid "Rack19HalfWidth"
msgstr ""
#. description of Rack19HalfWidth
#: boxes/generators/rack19halfwidth.py
msgid "Half width 19inch rack unit for musical equipment."
msgstr ""
msgid "Rack19HalfWidth Settings"
msgstr ""
#. parameter name
#: boxes/generators/rack19halfwidth.py
msgid "ru_count"
msgstr ""
#. help for parameter ru_count
#: boxes/generators/rack19halfwidth.py
msgid "number of rack units"
msgstr ""
#. help for parameter holes
#: boxes/generators/rack19halfwidth.py
msgid ""
"mounting patterns: x=xlr, m=midi, p=9v-power, w=6.5mm-wire, space=next row"
msgstr ""
#. help for parameter z
#: boxes/generators/rack19halfwidth.py
msgid "depth of the shorter (rackear) side"
msgstr ""
#. parameter name
#: boxes/generators/rack19halfwidth.py
msgid "deepz"
msgstr ""
#. help for parameter deepz
#: boxes/generators/rack19halfwidth.py
msgid "depth of the longer (screwed to another half sized thing) side"
msgstr ""
#. name of box generator
#: boxes/generators/rackbox.py
msgid "RackBox"
msgstr ""
msgid "RackBox Settings"
msgstr ""
#. name of box generator
#: boxes/generators/rectangularWall.py
msgid "RectangularWall"
msgstr ""
#. description of RectangularWall
#: boxes/generators/rectangularWall.py
msgid "Simple wall"
msgstr ""
msgid "RectangularWall Settings"
msgstr ""
#. possible choice for bottom_edge
#: boxes/generators/rectangularWall.py
msgid "C"
msgstr ""
#. possible choice for bottom_edge
#: boxes/generators/rectangularWall.py
msgid "D"
msgstr ""
#. possible choice for bottom_edge
#: boxes/generators/rectangularWall.py
msgid "I"
msgstr ""
#. possible choice for bottom_edge
#: boxes/generators/rectangularWall.py
msgid "j"
msgstr ""
#. possible choice for bottom_edge
#: boxes/generators/rectangularWall.py
msgid "K"
msgstr ""
#. possible choice for bottom_edge
#: boxes/generators/rectangularWall.py
msgid "l"
msgstr ""
#. possible choice for bottom_edge
#: boxes/generators/rectangularWall.py
msgid "M"
msgstr ""
#. possible choice for bottom_edge
#: boxes/generators/rectangularWall.py
msgid "N"
msgstr ""
#. possible choice for bottom_edge
#: boxes/generators/rectangularWall.py
msgid "o"
msgstr ""
#. possible choice for bottom_edge
#: boxes/generators/rectangularWall.py
msgid "O"
msgstr ""
#. possible choice for bottom_edge
#: boxes/generators/rectangularWall.py
msgid "p"
msgstr ""
#. possible choice for bottom_edge
#: boxes/generators/rectangularWall.py
msgid "P"
msgstr ""
#. possible choice for bottom_edge
#: boxes/generators/rectangularWall.py
msgid "q"
msgstr ""
#. possible choice for bottom_edge
#: boxes/generators/rectangularWall.py
msgid "Q"
msgstr ""
#. possible choice for bottom_edge
#: boxes/generators/rectangularWall.py
msgid "R"
msgstr ""
#. possible choice for bottom_edge
#: boxes/generators/rectangularWall.py
msgid "u"
msgstr ""
#. possible choice for bottom_edge
#: boxes/generators/rectangularWall.py
msgid "U"
msgstr ""
#. possible choice for bottom_edge
#: boxes/generators/rectangularWall.py
msgid "V"
msgstr ""
#. parameter name
#: boxes/generators/rectangularWall.py
msgid "right_edge"
msgstr ""
#. help for parameter right_edge
#: boxes/generators/rectangularWall.py
msgid "edge type for right edge"
msgstr ""
#. parameter name
#: boxes/generators/rectangularWall.py
msgid "left_edge"
msgstr ""
#. help for parameter left_edge
#: boxes/generators/rectangularWall.py
msgid "edge type for left edge"
msgstr ""
msgid "Settings for rack (and pinion) edge"
msgstr ""
#. help for parameter angle
#: boxes/generators/rectangularWall.py
msgid "pressure angle"
msgstr ""
#. parameter name for Gear
#: boxes/generators/rectangularWall.py
msgid "clearance"
msgstr ""
#. parameter name for Gear
#: boxes/generators/rectangularWall.py
msgid "dimension"
msgstr ""
#. help for parameter dimension
#: boxes/generators/rectangularWall.py
msgid "modulus of the gear (in mm)"
msgstr ""
#. help for parameter profile_shift
#: boxes/generators/rectangularWall.py
msgid "Profile shift"
msgstr ""
msgid "Settings for GrippingEdge"
msgstr ""
#. help for parameter outset
#: boxes/generators/rectangularWall.py
msgid "extend outward the straight edge"
msgstr ""
#. help for parameter style
#: boxes/generators/rectangularWall.py
msgid "\"wave\" or \"bumps\""
msgstr ""
#. possible choice for style
#: boxes/generators/rectangularWall.py
msgid "wave"
msgstr ""
#. possible choice for style
#: boxes/generators/rectangularWall.py
msgid "bumps"
msgstr ""
#. help for parameter depth
#: boxes/generators/rectangularWall.py
msgid "depth of the grooves"
msgstr ""
#. name of box generator
#: boxes/generators/regularbox.py
msgid "RegularBox"
msgstr ""
#. description of RegularBox
#: boxes/generators/regularbox.py
msgid "Box with regular polygon as base"
msgstr ""
#. long description of RegularBox in markdown
#: boxes/generators/regularbox.py
msgid ""
"For short side walls that don't fit a connecting finger reduce "
"*surroundingspaces* and *finger* in the Finger Joint Settings.\n"
"\n"
"The lids needs to be glued. For the bayonet lid all outside rings attach to "
"the bottom, all inside rings to the top.\n"
msgstr ""
msgid "RegularBox Settings"
msgstr ""
#. parameter name
#: boxes/generators/regularbox.py
msgid "radius_bottom"
msgstr ""
#. help for parameter radius_bottom
#: boxes/generators/regularbox.py
msgid "inner radius of the box bottom (at the corners)"
msgstr ""
#. parameter name
#: boxes/generators/regularbox.py
msgid "radius_top"
msgstr ""
#. help for parameter radius_top
#: boxes/generators/regularbox.py
msgid "inner radius of the box top (at the corners)"
msgstr ""
#. help for parameter n
#: boxes/generators/regularbox.py
msgid "number of sides"
msgstr ""
#. possible choice for top
#: boxes/generators/regularbox.py
msgid "round lid"
msgstr ""
#. possible choice for top
#: boxes/generators/regularbox.py
msgid "bayonet mount"
msgstr ""
#. help for parameter alignment_pins
#: boxes/generators/regularbox.py
msgid "diameter of the alignment pins for bayonet lid"
msgstr ""
#. help for parameter bottom
#: boxes/generators/regularbox.py
msgid "style of the bottom and bottom lid"
msgstr ""
#. name of box generator
#: boxes/generators/regularstarbox.py
msgid "RegularStarBox"
msgstr ""
#. description of RegularStarBox
#: boxes/generators/regularstarbox.py
msgid "Regular polygon boxes that form a star when closed"
msgstr ""
#. long description of RegularStarBox in markdown
#: boxes/generators/regularstarbox.py
msgid ""
msgstr ""
msgid "RegularStarBox Settings"
msgstr ""
#. help for parameter radius
#: boxes/generators/regularstarbox.py
msgid "inner radius of the box (center to corners)"
msgstr ""
#. name of box generator
#: boxes/generators/robotarm.py
msgid "RobotArm"
msgstr ""
#. description of RobotArm
#: boxes/generators/robotarm.py
msgid "Segments of servo powered robot arm"
msgstr ""
msgid "RobotArm Settings"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "type1"
msgstr ""
#. help for parameter type1
#: boxes/generators/robotarm.py
msgid "type of arm segment"
msgstr ""
#. possible choice for type1
#: boxes/generators/robotarm.py
msgid "RobotArmMM"
msgstr ""
#. possible choice for type1
#: boxes/generators/robotarm.py
msgid "RobotArmMm"
msgstr ""
#. possible choice for type1
#: boxes/generators/robotarm.py
msgid "RobotArmUU"
msgstr ""
#. possible choice for type1
#: boxes/generators/robotarm.py
msgid "RobotArmUu"
msgstr ""
#. possible choice for type1
#: boxes/generators/robotarm.py
msgid "RobotArmMu"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "servo1a"
msgstr ""
#. help for parameter servo1a
#: boxes/generators/robotarm.py
msgid "type of servo to use"
msgstr ""
#. possible choice for servo1a
#: boxes/generators/robotarm.py
msgid "Servo9g"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "servo1b"
msgstr ""
#. help for parameter servo1b
#: boxes/generators/robotarm.py
msgid "type of servo to use on second side (if different is supported)"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "length1"
msgstr ""
#. help for parameter length1
#: boxes/generators/robotarm.py
msgid "length of segment axle to axle"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "type2"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "servo2a"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "servo2b"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "length2"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "type3"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "servo3a"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "servo3b"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "length3"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "type4"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "servo4a"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "servo4b"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "length4"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "type5"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "servo5a"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "servo5b"
msgstr ""
#. parameter name
#: boxes/generators/robotarm.py
msgid "length5"
msgstr ""
#. name of box generator
#: boxes/generators/rollholder.py
msgid "RollHolder"
msgstr ""
#. description of RollHolder
#: boxes/generators/rollholder.py
msgid "Holder for kitchen rolls or other rolls"
msgstr ""
#. long description of RollHolder in markdown
#: boxes/generators/rollholder.py
msgid "Needs a dowel or pipe as axle."
msgstr ""
msgid "RollHolder Settings"
msgstr ""
#. help for parameter width
#: boxes/generators/rollholder.py
msgid "length of the axle in mm"
msgstr ""
#. help for parameter diameter
#: boxes/generators/rollholder.py
msgid "maximum diameter of the roll in mm (choose generously)"
msgstr ""
#. help for parameter height
#: boxes/generators/rollholder.py
msgid "height of mounting plate in mm"
msgstr ""
#. help for parameter axle
#: boxes/generators/rollholder.py
msgid "diameter of the axle in mm including play"
msgstr ""
#. parameter name
#: boxes/generators/rollholder.py
msgid "screw_holes"
msgstr ""
#. help for parameter screw_holes
#: boxes/generators/rollholder.py
msgid "diameter of mounting holes in mm"
msgstr ""
#. parameter name
#: boxes/generators/rollholder.py
msgid "one_piece"
msgstr ""
#. help for parameter one_piece
#: boxes/generators/rollholder.py
msgid "have a continuous back plate instead of two separate holders"
msgstr ""
#. name of box generator
#: boxes/generators/rotary.py
msgid "Rotary"
msgstr ""
#. description of Rotary
#: boxes/generators/rotary.py
msgid "Rotary Attachment for engraving cylindrical objects in a laser cutter"
msgstr ""
msgid "Rotary Settings"
msgstr ""
#. help for parameter diameter
#: boxes/generators/rotary.py
msgid "outer diameter of the wheels (including O rings)"
msgstr ""
#. parameter name
#: boxes/generators/rotary.py
msgid "rubberthickness"
msgstr ""
#. help for parameter rubberthickness
#: boxes/generators/rotary.py
msgid "diameter of the strings of the O rings"
msgstr ""
#. help for parameter axle
#: boxes/generators/rotary.py
msgid "diameter of the axles"
msgstr ""
#. parameter name
#: boxes/generators/rotary.py
msgid "knifethickness"
msgstr ""
#. help for parameter knifethickness
#: boxes/generators/rotary.py
msgid "thickness of the knives in mm. Use 0 for use with honey comb table."
msgstr ""
#. parameter name
#: boxes/generators/rotary.py
msgid "beamwidth"
msgstr ""
#. help for parameter beamwidth
#: boxes/generators/rotary.py
msgid "width of the (aluminium) profile connecting the parts"
msgstr ""
#. parameter name
#: boxes/generators/rotary.py
msgid "beamheight"
msgstr ""
#. help for parameter beamheight
#: boxes/generators/rotary.py
msgid "height of the (aluminium) profile connecting the parts"
msgstr ""
#. name of box generator
#: boxes/generators/roundedbox.py
msgid "RoundedBox"
msgstr ""
#. description of RoundedBox
#: boxes/generators/roundedbox.py
msgid "Box with vertical edges rounded"
msgstr ""
#. long description of RoundedBox in markdown
#: boxes/generators/roundedbox.py
msgid ""
"\n"
"Default: edge_style = f Finger Joint:\n"
"\n"
"\n"
"Alternative: edge_style = h Edge (parallel Finger Joint Holes):\n"
"\n"
"\n"
"With lid:\n"
msgstr ""
msgid "RoundedBox Settings"
msgstr ""
#. parameter name
#: boxes/generators/roundedbox.py
msgid "wallpieces"
msgstr ""
#. help for parameter wallpieces
#: boxes/generators/roundedbox.py
msgid "number of pieces for outer wall"
msgstr ""
#. parameter name
#: boxes/generators/roundedbox.py
msgid "edge_style"
msgstr ""
#. help for parameter edge_style
#: boxes/generators/roundedbox.py
msgid "edge type for top and bottom edges"
msgstr ""
#. name of box generator
#: boxes/generators/royalgame.py
msgid "RoyalGame"
msgstr ""
#. description of RoyalGame
#: boxes/generators/royalgame.py
msgid "The Royal Game of Ur"
msgstr ""
#. long description of RoyalGame in markdown
#: boxes/generators/royalgame.py
msgid ""
"Most of the blue lines need to be engraved by cutting with high speed and "
"low power. But there are three blue holes that actually need to be cut: The "
"grip hole in the lid and two tiny rectangles on the top and bottom for the "
"lid to grip into.\n"
"\n"
"\n"
"\n"
"\n"
"\n"
msgstr ""
msgid "RoyalGame Settings"
msgstr ""
#. name of box generator
#: boxes/generators/sevensegment.py
msgid "SevenSegmentPattern"
msgstr ""
#. description of SevenSegmentPattern
#: boxes/generators/sevensegment.py
msgid "Holepatterns and walls for a seven segment digit"
msgstr ""
#. long description of SevenSegmentPattern in markdown
#: boxes/generators/sevensegment.py
msgid ""
"This pattern is intended to be used with a LED stripe that is wound through "
"all segments in an S pattern while the stripe being upright on its side. It "
"can probably also be used for small pieces of LED stripes connected with "
"short wires for large enough sizes.\n"
"\n"
"Both is currently untested.\n"
msgstr ""
msgid "SevenSegmentPattern Settings"
msgstr ""
#. parameter name
#: boxes/generators/sevensegment.py
msgid "digit"
msgstr ""
#. help for parameter digit
#: boxes/generators/sevensegment.py
msgid "height of the digit (without walls) in mm"
msgstr ""
#. help for parameter h
#: boxes/generators/sevensegment.py
msgid "height separation walls in mm"
msgstr ""
#. name of box generator
#: boxes/generators/sevensegmentclock.py
msgid "SevenSegmentClock"
msgstr ""
#. description of SevenSegmentClock
#: boxes/generators/sevensegmentclock.py
msgid "Seven segment clock build with LED stripe"
msgstr ""
#. long description of SevenSegmentClock in markdown
#: boxes/generators/sevensegmentclock.py
msgid ""
"You need a LED stripe that is wound through all segments in an S pattern "
"and then continuing to the next digit while the stripe being upright on its "
"side. Selecting *debug* gives a better idea how things fit together. \n"
"\n"
"Adding a diffuser on top or at the bottom of the segment holes will probably "
"enhance the visuals. Just using paper may be enough.\n"
"\n"
"There is currently not a lot of space for electronics and this generator is "
"still untested. Good luck!\n"
msgstr ""
msgid "SevenSegmentClock Settings"
msgstr ""
#. help for parameter height
#: boxes/generators/sevensegmentclock.py
msgid "height of the front panel (with walls if outside is selected) in mm"
msgstr ""
#. help for parameter h
#: boxes/generators/sevensegmentclock.py
msgid "depth (with walls if outside is selected) in mm"
msgstr ""
#. name of box generator
#: boxes/generators/sharpeningjig.py
msgid "SharpeningJig"
msgstr ""
msgid "SharpeningJig Settings"
msgstr ""
#. name of box generator
#: boxes/generators/shoe.py
msgid "Shoe"
msgstr ""
#. description of Shoe
#: boxes/generators/shoe.py
msgid "Shoe shaped box"
msgstr ""
#. long description of Shoe in markdown
#: boxes/generators/shoe.py
msgid ""
"Shoe shaped box with flat sides and rounded top. \n"
" Works best if flex if under slight compression. \n"
" Make sure that the following conditions are met: \n"
" y > tophole + r + fronttop; \n"
" height > frontheight."
msgstr ""
msgid "Shoe Settings"
msgstr ""
#. help for parameter width
#: boxes/generators/shoe.py
msgid "width of the shoe"
msgstr ""
#. help for parameter length
#: boxes/generators/shoe.py
msgid "length front to back"
msgstr ""
#. help for parameter height
#: boxes/generators/shoe.py
msgid "height at the back of the shoe"
msgstr ""
#. parameter name
#: boxes/generators/shoe.py
msgid "frontheight"
msgstr ""
#. help for parameter frontheight
#: boxes/generators/shoe.py
msgid "height at the front of the shoe"
msgstr ""
#. parameter name
#: boxes/generators/shoe.py
msgid "fronttop"
msgstr ""
#. help for parameter fronttop
#: boxes/generators/shoe.py
msgid "length of the flat part at the front of the shoe"
msgstr ""
#. parameter name
#: boxes/generators/shoe.py
msgid "tophole"
msgstr ""
#. help for parameter tophole
#: boxes/generators/shoe.py
msgid "length of the opening at the top"
msgstr ""
#. help for parameter radius
#: boxes/generators/shoe.py
msgid "radius of the bend"
msgstr ""
#. name of box generator
#: boxes/generators/shutterbox.py
msgid "ShutterBox"
msgstr ""
#. description of ShutterBox
#: boxes/generators/shutterbox.py
msgid "Box with a rolling shutter made of flex"
msgstr ""
#. long description of ShutterBox in markdown
#: boxes/generators/shutterbox.py
msgid ""
"Beware of the rolling shutter effect! Use wax on sliding surfaces.\n"
"\n"
"\n"
"\n"
"\n"
msgstr ""
msgid "ShutterBox Settings"
msgstr ""
#. help for parameter style
#: boxes/generators/shutterbox.py
msgid "Number of rounded top corners"
msgstr ""
#. possible choice for style
#: boxes/generators/shutterbox.py
msgid "single"
msgstr ""
#. name of box generator
#: boxes/generators/sidedoorhousing.py
msgid "SideDoorHousing"
msgstr ""
#. description of SideDoorHousing
#: boxes/generators/sidedoorhousing.py
msgid "A box with service hatches at the sides"
msgstr ""
#. long description of SideDoorHousing in markdown
#: boxes/generators/sidedoorhousing.py
msgid ""
"\n"
"This box is designed as a housing for electronic projects. It has hatches "
"that can be re-opened with simple tools. It intentionally cannot be opened "
"with bare hands - if build with thin enough material. The hatches are at the "
"x sides.\n"
"\n"
"#### Assembly instructions\n"
"The main body is easy to assemble by starting with the floor and then adding "
"the four walls and the top piece.\n"
"\n"
"For the removable walls you need to add the lips and latches. The U-shaped "
"clamps holding the latches in place need to be glued in place without also "
"gluing the latches themselves. Make sure the springs on the latches point "
"inwards and the angled ends point to the side walls as shown here (showing a "
"different box type):\n"
"\n"
"\n"
"\n"
"#### Re-Opening\n"
"\n"
"The latches lock in place when closed. To open them they need to be pressed "
"in and can then be moved aside.\n"
msgstr ""
msgid "SideDoorHousing Settings"
msgstr ""
#. parameter name
#: boxes/generators/sidedoorhousing.py
msgid "double_door"
msgstr ""
#. help for parameter double_door
#: boxes/generators/sidedoorhousing.py
msgid "allow removing the backwall, too"
msgstr ""
#. name of box generator
#: boxes/generators/silverwarebox.py
msgid "Silverware"
msgstr ""
#. description of Silverware
#: boxes/generators/silverwarebox.py
msgid ""
"\n"
" Cuttlery stand with carrying grip\n"
" using flex for rounded corners\n"
" "
msgstr ""
msgid "Silverware Settings"
msgstr ""
#. parameter name
#: boxes/generators/silverwarebox.py
msgid "cornerradius"
msgstr ""
#. help for parameter cornerradius
#: boxes/generators/silverwarebox.py
msgid "Radius of the corners"
msgstr ""
#. parameter name
#: boxes/generators/silverwarebox.py
msgid "handleheight"
msgstr ""
#. help for parameter handleheight
#: boxes/generators/silverwarebox.py
msgid "Height of the handle"
msgstr ""
#. parameter name
#: boxes/generators/silverwarebox.py
msgid "handlewidth"
msgstr ""
#. help for parameter handlewidth
#: boxes/generators/silverwarebox.py
msgid "Width of the handle"
msgstr ""
#. name of box generator
#: boxes/generators/simpleflexure.py
msgid "SimpleFlexure"
msgstr ""
#. description of SimpleFlexure
#: boxes/generators/simpleflexure.py
msgid "Simple XY stage without separation of axis"
msgstr ""
msgid "SimpleFlexure Settings"
msgstr ""
#. parameter name
#: boxes/generators/simpleflexure.py
msgid "border"
msgstr ""
#. help for parameter border
#: boxes/generators/simpleflexure.py
msgid "width of the outside ground frame"
msgstr ""
#. name of box generator
#: boxes/generators/slantedtray.py
msgid "SlantedTray"
msgstr ""
#. description of SlantedTray
#: boxes/generators/slantedtray.py
msgid "One row tray with high back wall and low front wall"
msgstr ""
#. long description of SlantedTray in markdown
#: boxes/generators/slantedtray.py
msgid ""
"Can be used as a display or for cards or gaming tokens. Lay on the side to "
"get piles to draw from.\n"
" "
msgstr ""
msgid "SlantedTray Settings"
msgstr ""
#. help for parameter front_height
#: boxes/generators/slantedtray.py
msgid "height of the front as fraction of the total height"
msgstr ""
#. name of box generator
#: boxes/generators/slidelock.py
msgid "SlideLock"
msgstr ""
msgid "SlideLock Settings"
msgstr ""
#. parameter name
#: boxes/generators/slidelock.py
msgid "finger_width"
msgstr ""
#. help for parameter finger_width
#: boxes/generators/slidelock.py
msgid "width of the locking fingers in multiple of thickness"
msgstr ""
#. name of box generator
#: boxes/generators/slidingdrawer.py
msgid "SlidingDrawer"
msgstr ""
#. description of SlidingDrawer
#: boxes/generators/slidingdrawer.py
msgid "Sliding drawer box"
msgstr ""
msgid "SlidingDrawer Settings"
msgstr ""
#. help for parameter play
#: boxes/generators/slidingdrawer.py
msgid "play between the two parts as multiple of the wall thickness"
msgstr ""
#. name of box generator
#: boxes/generators/smallpartstray.py
msgid "SmallPartsTray"
msgstr ""
#. description of SmallPartsTray
#: boxes/generators/smallpartstray.py
msgid "Tray with slants to easier get out game tokens or screws"
msgstr ""
msgid "SmallPartsTray Settings"
msgstr ""
#. help for parameter angle
#: boxes/generators/smallpartstray.py
msgid "angle of the ramps"
msgstr ""
#. parameter name
#: boxes/generators/smallpartstray.py
msgid "rampheight"
msgstr ""
#. help for parameter rampheight
#: boxes/generators/smallpartstray.py
msgid "height of the ramps relative to to total height"
msgstr ""
#. parameter name
#: boxes/generators/smallpartstray.py
msgid "two_sided"
msgstr ""
#. help for parameter two_sided
#: boxes/generators/smallpartstray.py
msgid "have ramps on both sides. Enables sliding dividers"
msgstr ""
#. parameter name
#: boxes/generators/smallpartstray.py
msgid "front_panel"
msgstr ""
#. help for parameter front_panel
#: boxes/generators/smallpartstray.py
msgid "have a vertical wall at the ramp"
msgstr ""
#. name of box generator
#: boxes/generators/smallpartstray2.py
msgid "SmallPartsTray2"
msgstr ""
#. description of SmallPartsTray2
#: boxes/generators/smallpartstray2.py
msgid "A Type Tray variant with slopes toward the front"
msgstr ""
#. long description of SmallPartsTray2 in markdown
#: boxes/generators/smallpartstray2.py
msgid ""
"Assemble inside out. If there are inner front to back walls start with "
"attaching the floor boards to them. Then add the vertical inner left to "
"right walls. After sliding in the slopes attach the outer wall to fix "
"everything in place.\n"
"\n"
"If there are no inner front to back walls just add everything to one side "
"wall and then add the other one after that. Possibly saving the front and "
"back as last step."
msgstr ""
msgid "SmallPartsTray2 Settings"
msgstr ""
#. parameter name
#: boxes/generators/smallpartstray2.py
msgid "back_height"
msgstr ""
#. help for parameter back_height
#: boxes/generators/smallpartstray2.py
msgid "additional height of the back wall - e top edge only"
msgstr ""
#. help for parameter radius
#: boxes/generators/smallpartstray2.py
msgid "radius for strengthening side walls with back_height"
msgstr ""
#. help for parameter handle
#: boxes/generators/smallpartstray2.py
msgid "add handle to the bottom (changes bottom edge in the front)"
msgstr ""
#. name of box generator
#: boxes/generators/spicesrack.py
msgid "SpicesRack"
msgstr ""
#. description of SpicesRack
#: boxes/generators/spicesrack.py
msgid "Rack for cans of spices"
msgstr ""
msgid "SpicesRack Settings"
msgstr ""
#. help for parameter diameter
#: boxes/generators/spicesrack.py
msgid "diameter of spice cans"
msgstr ""
#. help for parameter height
#: boxes/generators/spicesrack.py
msgid "height of the cans that needs to be supported"
msgstr ""
#. help for parameter space
#: boxes/generators/spicesrack.py
msgid "space between cans"
msgstr ""
#. parameter name
#: boxes/generators/spicesrack.py
msgid "numx"
msgstr ""
#. help for parameter numx
#: boxes/generators/spicesrack.py
msgid "number of cans in a row"
msgstr ""
#. parameter name
#: boxes/generators/spicesrack.py
msgid "numy"
msgstr ""
#. help for parameter numy
#: boxes/generators/spicesrack.py
msgid "number of cans in a column"
msgstr ""
#. parameter name
#: boxes/generators/spicesrack.py
msgid "in_place_supports"
msgstr ""
#. help for parameter in_place_supports
#: boxes/generators/spicesrack.py
msgid "place supports pieces in holes (check for fit yourself)"
msgstr ""
#. help for parameter feet
#: boxes/generators/spicesrack.py
msgid "add feet so the rack can stand on the ground"
msgstr ""
#. name of box generator
#: boxes/generators/spool.py
msgid "Spool"
msgstr ""
#. description of Spool
#: boxes/generators/spool.py
msgid "A simple spool"
msgstr ""
msgid "Spool Settings"
msgstr ""
#. help for parameter axle_diameter
#: boxes/generators/spool.py
msgid "diameter of the axle hole (axle not part of drawing)"
msgstr ""
#. parameter name
#: boxes/generators/spool.py
msgid "reinforcements"
msgstr ""
#. help for parameter reinforcements
#: boxes/generators/spool.py
msgid "number of reinforcement ribs per side"
msgstr ""
#. parameter name
#: boxes/generators/spool.py
msgid "reinforcement_height"
msgstr ""
#. help for parameter reinforcement_height
#: boxes/generators/spool.py
msgid "height of reinforcement ribs on the flanges"
msgstr ""
#. name of box generator
#: boxes/generators/stachel.py
msgid "Stachel"
msgstr ""
#. description of Stachel
#: boxes/generators/stachel.py
msgid "Bass Recorder Endpin"
msgstr ""
msgid "Stachel Settings"
msgstr ""
#. parameter name
#: boxes/generators/stachel.py
msgid "flutediameter"
msgstr ""
#. help for parameter flutediameter
#: boxes/generators/stachel.py
msgid "diameter of the flutes bottom in mm"
msgstr ""
#. parameter name
#: boxes/generators/stachel.py
msgid "polediameter"
msgstr ""
#. help for parameter polediameter
#: boxes/generators/stachel.py
msgid "diameter if the pin in mm"
msgstr ""
#. parameter name
#: boxes/generators/stachel.py
msgid "wall"
msgstr ""
#. help for parameter wall
#: boxes/generators/stachel.py
msgid "width of the surrounding wall in mm"
msgstr ""
#. name of box generator
#: boxes/generators/storagerack.py
msgid "StorageRack"
msgstr ""
#. description of StorageRack
#: boxes/generators/storagerack.py
msgid "StorageRack to store boxes and trays which have their own floor"
msgstr ""
#. long description of StorageRack in markdown
#: boxes/generators/storagerack.py
msgid ""
"\n"
"\n"
"Drawers are not included:\n"
"\n"
"\n"
"\n"
"\n"
msgstr ""
msgid "StorageRack Settings"
msgstr ""
#. help for parameter depth
#: boxes/generators/storagerack.py
msgid "depth of the rack"
msgstr ""
#. parameter name
#: boxes/generators/storagerack.py
msgid "rail"
msgstr ""
#. name of box generator
#: boxes/generators/storageshelf.py
msgid "StorageShelf"
msgstr ""
#. description of StorageShelf
#: boxes/generators/storageshelf.py
msgid "StorageShelf can be used to store Typetray"
msgstr ""
#. long description of StorageShelf in markdown
#: boxes/generators/storageshelf.py
msgid "This is a simple shelf box."
msgstr ""
msgid "StorageShelf Settings"
msgstr ""
#. parameter name
#: boxes/generators/storageshelf.py
msgid "retainer"
msgstr ""
#. help for parameter retainer
#: boxes/generators/storageshelf.py
msgid "height of retaining wall at the front edges"
msgstr ""
#. parameter name
#: boxes/generators/storageshelf.py
msgid "retainer_hole_edge"
msgstr ""
#. help for parameter retainer_hole_edge
#: boxes/generators/storageshelf.py
msgid "use finger hole edge for retainer walls"
msgstr ""
#. name of box generator
#: boxes/generators/testcorners.py
msgid "TestCorners"
msgstr ""
msgid "TestCorners Settings"
msgstr ""
#. name of box generator
#: boxes/generators/tetris.py
msgid "Tetris"
msgstr ""
#. description of Tetris
#: boxes/generators/tetris.py
msgid "3D Tetris shapes"
msgstr ""
msgid "Tetris Settings"
msgstr ""
#. parameter name
#: boxes/generators/tetris.py
msgid "blocksize"
msgstr ""
#. help for parameter blocksize
#: boxes/generators/tetris.py
msgid "size of a square"
msgstr ""
#. help for parameter shape
#: boxes/generators/tetris.py
msgid "shape of the piece"
msgstr ""
#. possible choice for shape
#: boxes/generators/tetris.py
msgid "T"
msgstr ""
#. name of box generator
#: boxes/generators/trafficlight.py
msgid "TrafficLight"
msgstr ""
#. description of TrafficLight
#: boxes/generators/trafficlight.py
msgid "Traffic light"
msgstr ""
#. long description of TrafficLight in markdown
#: boxes/generators/trafficlight.py
msgid ""
"The traffic light was created to visualize the status of a Icinga monitored "
"system.\n"
"\n"
"When turned by 90°, it can be also used to create a bottle holder."
msgstr ""
msgid "TrafficLight Settings"
msgstr ""
#. help for parameter depth
#: boxes/generators/trafficlight.py
msgid "inner depth not including the shades"
msgstr ""
#. parameter name
#: boxes/generators/trafficlight.py
msgid "shades"
msgstr ""
#. help for parameter shades
#: boxes/generators/trafficlight.py
msgid "depth of the shaders"
msgstr ""
#. help for parameter n
#: boxes/generators/trafficlight.py
msgid "number of lights"
msgstr ""
#. parameter name
#: boxes/generators/trafficlight.py
msgid "upright"
msgstr ""
#. help for parameter upright
#: boxes/generators/trafficlight.py
msgid "stack lights upright (or side by side)"
msgstr ""
#. name of box generator
#: boxes/generators/trayinsert.py
msgid "TrayInsert"
msgstr ""
#. description of TrayInsert
#: boxes/generators/trayinsert.py
msgid ""
"Tray insert without floor and outer walls - allows only continuous walls"
msgstr ""
msgid "TrayInsert Settings"
msgstr ""
#. name of box generator
#: boxes/generators/trianglelamp.py
msgid "TriangleLamp"
msgstr ""
#. description of TriangleLamp
#: boxes/generators/trianglelamp.py
msgid "Triangle LED Lamp"
msgstr ""
msgid "TriangleLamp Settings"
msgstr ""
#. parameter name
#: boxes/generators/trianglelamp.py
msgid "cornersize"
msgstr ""
#. help for parameter cornersize
#: boxes/generators/trianglelamp.py
msgid "short side of the corner triangles"
msgstr ""
#. parameter name
#: boxes/generators/trianglelamp.py
msgid "screenholesize"
msgstr ""
#. help for parameter screenholesize
#: boxes/generators/trianglelamp.py
msgid "diameter of the holes in the screen"
msgstr ""
#. parameter name
#: boxes/generators/trianglelamp.py
msgid "screwholesize"
msgstr ""
#. help for parameter screwholesize
#: boxes/generators/trianglelamp.py
msgid "diameter of the holes in the wood"
msgstr ""
#. parameter name
#: boxes/generators/trianglelamp.py
msgid "sharpcorners"
msgstr ""
#. help for parameter sharpcorners
#: boxes/generators/trianglelamp.py
msgid "extend walls for 45° corners. Requires grinding a 22.5° bevel."
msgstr ""
#. name of box generator
#: boxes/generators/two_piece.py
msgid "TwoPiece"
msgstr ""
#. description of TwoPiece
#: boxes/generators/two_piece.py
msgid ""
"A two piece box where top slips over the bottom half to form \n"
" the enclosure.\n"
" "
msgstr ""
#. long description of TwoPiece in markdown
#: boxes/generators/two_piece.py
msgid ""
"\n"
"Set *hi* larger than *h* to leave gap between the inner and outer shell. "
"This can be used to make opening the box easier. Set *hi* smaller to only "
"have a small inner ridge that will allow the content to be more visible "
"after opening.\n"
"\n"
"\n"
msgstr ""
msgid "TwoPiece Settings"
msgstr ""
#. name of box generator
#: boxes/generators/typetray.py
msgid "TypeTray"
msgstr ""
#. description of TypeTray
#: boxes/generators/typetray.py
msgid "Type tray - allows only continuous walls"
msgstr ""
msgid "TypeTray Settings"
msgstr ""
#. parameter name
#: boxes/generators/typetray.py
msgid "gripheight"
msgstr ""
#. help for parameter gripheight
#: boxes/generators/typetray.py
msgid "height of the grip hole in mm"
msgstr ""
#. parameter name
#: boxes/generators/typetray.py
msgid "gripwidth"
msgstr ""
#. help for parameter gripwidth
#: boxes/generators/typetray.py
msgid "width of th grip hole in mm (zero for no hole)"
msgstr ""
#. name of box generator
#: boxes/generators/ubox.py
msgid "UBox"
msgstr ""
#. description of UBox
#: boxes/generators/ubox.py
msgid "Box various options for different stypes and lids"
msgstr ""
msgid "UBox Settings"
msgstr ""
#. help for parameter radius
#: boxes/generators/ubox.py
msgid "radius of bottom corners"
msgstr ""
#. name of box generator
#: boxes/generators/unevenheightbox.py
msgid "UnevenHeightBox"
msgstr ""
#. description of UnevenHeightBox
#: boxes/generators/unevenheightbox.py
msgid "Box with different height in each corner"
msgstr ""
msgid "UnevenHeightBox Settings"
msgstr ""
#. parameter name
#: boxes/generators/unevenheightbox.py
msgid "height0"
msgstr ""
#. help for parameter height0
#: boxes/generators/unevenheightbox.py
msgid "height of the front left corner in mm"
msgstr ""
#. parameter name
#: boxes/generators/unevenheightbox.py
msgid "height1"
msgstr ""
#. help for parameter height1
#: boxes/generators/unevenheightbox.py
msgid "height of the front right corner in mm"
msgstr ""
#. parameter name
#: boxes/generators/unevenheightbox.py
msgid "height2"
msgstr ""
#. help for parameter height2
#: boxes/generators/unevenheightbox.py
msgid "height of the right back corner in mm"
msgstr ""
#. parameter name
#: boxes/generators/unevenheightbox.py
msgid "height3"
msgstr ""
#. help for parameter height3
#: boxes/generators/unevenheightbox.py
msgid "height of the left back corner in mm"
msgstr ""
#. help for parameter lid
#: boxes/generators/unevenheightbox.py
msgid "add a lid (works best with high corners opposing each other)"
msgstr ""
#. help for parameter lid_height
#: boxes/generators/unevenheightbox.py
msgid "additional height of the lid"
msgstr ""
#. parameter name
#: boxes/generators/unevenheightbox.py
msgid "edge_types"
msgstr ""
#. help for parameter edge_types
#: boxes/generators/unevenheightbox.py
msgid ""
"which edges are flat (e) or grooved (z,Z), counter-clockwise from the front"
msgstr ""
#. name of box generator
#: boxes/generators/universalbox.py
msgid "UniversalBox"
msgstr ""
#. description of UniversalBox
#: boxes/generators/universalbox.py
msgid "Box with various options for different styles and lids"
msgstr ""
msgid "UniversalBox Settings"
msgstr ""
#. parameter name
#: boxes/generators/universalbox.py
msgid "vertical_edges"
msgstr ""
#. help for parameter vertical_edges
#: boxes/generators/universalbox.py
msgid "connections used for the vertical edges"
msgstr ""
#. possible choice for vertical_edges
#: boxes/generators/universalbox.py
msgid "finger joints"
msgstr ""
#. possible choice for vertical_edges
#: boxes/generators/universalbox.py
msgid "finger holes"
msgstr ""
#. name of box generator
#: boxes/generators/wavyknob.py
msgid "WavyKnob"
msgstr ""
msgid "WavyKnob Settings"
msgstr ""
#. parameter name
#: boxes/generators/wavyknob.py
msgid "serrationangle"
msgstr ""
#. help for parameter serrationangle
#: boxes/generators/wavyknob.py
msgid "higher values for deeper serrations (degrees)"
msgstr ""
#. name of box generator
#: boxes/generators/wallcaliperholder.py
msgid "WallCaliper"
msgstr ""
#. description of WallCaliper
#: boxes/generators/wallcaliperholder.py
msgid "Holds a single caliper to a wall"
msgstr ""
msgid "WallCaliper Settings"
msgstr ""
#. parameter name
#: boxes/generators/wallcaliperholder.py
msgid "walltype"
msgstr ""
#. help for parameter walltype
#: boxes/generators/wallcaliperholder.py
msgid "Type of wall system to attach to"
msgstr ""
#. possible choice for walltype
#: boxes/generators/wallcaliperholder.py
msgid "plain"
msgstr ""
#. possible choice for walltype
#: boxes/generators/wallcaliperholder.py
msgid "plain reinforced"
msgstr ""
#. possible choice for walltype
#: boxes/generators/wallcaliperholder.py
msgid "slatwall"
msgstr ""
#. possible choice for walltype
#: boxes/generators/wallcaliperholder.py
msgid "dinrail"
msgstr ""
#. possible choice for walltype
#: boxes/generators/wallcaliperholder.py
msgid "french cleat"
msgstr ""
#. help for parameter width
#: boxes/generators/wallcaliperholder.py
msgid "width of the long end"
msgstr ""
#. help for parameter height
#: boxes/generators/wallcaliperholder.py
msgid "height of the body"
msgstr ""
msgid "Settings for plain WallEdges"
msgstr ""
msgid "Settings for SlatWallEdges"
msgstr ""
#. parameter name for SlatWall
#: boxes/generators/wallcaliperholder.py
msgid "bottom_hook"
msgstr ""
#. help for parameter bottom_hook
#: boxes/generators/wallcaliperholder.py
msgid "\"spring\", \"stud\" or \"none\""
msgstr ""
#. possible choice for bottom_hook
#: boxes/generators/wallcaliperholder.py
msgid "hook"
msgstr ""
#. possible choice for bottom_hook
#: boxes/generators/wallcaliperholder.py
msgid "stud"
msgstr ""
#. parameter name for SlatWall
#: boxes/generators/wallcaliperholder.py
msgid "hook_depth"
msgstr ""
#. help for parameter hook_depth
#: boxes/generators/wallcaliperholder.py
msgid "horizontal width of the hook"
msgstr ""
#. parameter name for SlatWall
#: boxes/generators/wallcaliperholder.py
msgid "hook_distance"
msgstr ""
#. help for parameter hook_distance
#: boxes/generators/wallcaliperholder.py
msgid "horizontal space to the hook"
msgstr ""
#. parameter name for SlatWall
#: boxes/generators/wallcaliperholder.py
msgid "hook_height"
msgstr ""
#. help for parameter hook_height
#: boxes/generators/wallcaliperholder.py
msgid "height of the horizontal bar of the hook"
msgstr ""
#. parameter name for SlatWall
#: boxes/generators/wallcaliperholder.py
msgid "hook_overall_height"
msgstr ""
#. help for parameter hook_overall_height
#: boxes/generators/wallcaliperholder.py
msgid "height of the hook top to bottom"
msgstr ""
#. help for parameter pitch
#: boxes/generators/wallcaliperholder.py
msgid "vertical spacing of slots middle to middle (in mm)"
msgstr ""
#. parameter name for SlatWall
#: boxes/generators/wallcaliperholder.py
msgid "hook_extra_height"
msgstr ""
#. help for parameter hook_extra_height
#: boxes/generators/wallcaliperholder.py
msgid "space surrounding connectors (multiples of thickness)"
msgstr ""
msgid "Settings for DinRailEdges"
msgstr ""
#. help for parameter bottom
#: boxes/generators/wallcaliperholder.py
msgid "\"stud\" or \"none\""
msgstr ""
msgid "Settings for FrenchCleatEdges"
msgstr ""
#. help for parameter angle
#: boxes/generators/wallcaliperholder.py
msgid "angle of the cut (0 for horizontal)"
msgstr ""
#. help for parameter bottom
#: boxes/generators/wallcaliperholder.py
msgid "\"stud\", \"hook\" or \"none\""
msgstr ""
#. help for parameter depth
#: boxes/generators/wallcaliperholder.py
msgid "horizontal width of the hook in mm"
msgstr ""
#. help for parameter spacing
#: boxes/generators/wallcaliperholder.py
msgid "distance of the cleats in mm (for bottom hook)"
msgstr ""
#. name of box generator
#: boxes/generators/wallchiselholder.py
msgid "WallChiselHolder"
msgstr ""
#. description of WallChiselHolder
#: boxes/generators/wallchiselholder.py
msgid "Wall tool holder for chisels, files and similar tools"
msgstr ""
msgid "WallChiselHolder Settings"
msgstr ""
#. parameter name
#: boxes/generators/wallchiselholder.py
msgid "tooldiameter"
msgstr ""
#. help for parameter tooldiameter
#: boxes/generators/wallchiselholder.py
msgid "diameter of the tool including space to grab"
msgstr ""
#. parameter name
#: boxes/generators/wallchiselholder.py
msgid "holediameter"
msgstr ""
#. help for parameter holediameter
#: boxes/generators/wallchiselholder.py
msgid "diameter of the hole for the tool (handle should not fit through)"
msgstr ""
#. parameter name
#: boxes/generators/wallchiselholder.py
msgid "slot_width"
msgstr ""
#. help for parameter slot_width
#: boxes/generators/wallchiselholder.py
msgid "width of slots"
msgstr ""
#. help for parameter radius
#: boxes/generators/wallchiselholder.py
msgid "radius at the slots"
msgstr ""
#. help for parameter number
#: boxes/generators/wallchiselholder.py
msgid "number of tools/slots"
msgstr ""
#. parameter name
#: boxes/generators/wallchiselholder.py
msgid "hooks"
msgstr ""
#. help for parameter hooks
#: boxes/generators/wallchiselholder.py
msgid "amount of hooks / braces"
msgstr ""
#. possible choice for hooks
#: boxes/generators/wallchiselholder.py
msgid "all"
msgstr ""
#. possible choice for hooks
#: boxes/generators/wallchiselholder.py
msgid "odds"
msgstr ""
#. possible choice for hooks
#: boxes/generators/wallchiselholder.py
msgid "everythird"
msgstr ""
#. name of box generator
#: boxes/generators/wallconsole.py
msgid "WallConsole"
msgstr ""
#. description of WallConsole
#: boxes/generators/wallconsole.py
msgid "Outset and angled plate to mount stuff to"
msgstr ""
msgid "WallConsole Settings"
msgstr ""
#. parameter name
#: boxes/generators/wallconsole.py
msgid "top_depth"
msgstr ""
#. help for parameter top_depth
#: boxes/generators/wallconsole.py
msgid "depth at the top"
msgstr ""
#. parameter name
#: boxes/generators/wallconsole.py
msgid "bottom_depth"
msgstr ""
#. help for parameter bottom_depth
#: boxes/generators/wallconsole.py
msgid "depth at the bottom"
msgstr ""
#. name of box generator
#: boxes/generators/walldrillbox.py
msgid "WallDrillBox"
msgstr ""
#. description of WallDrillBox
#: boxes/generators/walldrillbox.py
msgid "Box for drills with each compartment with a different height"
msgstr ""
msgid "WallDrillBox Settings"
msgstr ""
#. name of box generator
#: boxes/generators/walledges.py
msgid "WallEdges"
msgstr ""
#. description of WallEdges
#: boxes/generators/walledges.py
msgid "Shows the different edge types for wall systems"
msgstr ""
msgid "WallEdges Settings"
msgstr ""
#. name of box generator
#: boxes/generators/wallpinrow.py
msgid "WallPinRow"
msgstr ""
msgid "WallPinRow Settings"
msgstr ""
#. parameter name
#: boxes/generators/wallpinrow.py
msgid "pins"
msgstr ""
#. help for parameter pins
#: boxes/generators/wallpinrow.py
msgid "number of pins"
msgstr ""
#. parameter name
#: boxes/generators/wallpinrow.py
msgid "pinlength"
msgstr ""
#. help for parameter pinlength
#: boxes/generators/wallpinrow.py
msgid "length of pins (in mm)"
msgstr ""
#. help for parameter pinwidth
#: boxes/generators/wallpinrow.py
msgid "width of pins (in mm)"
msgstr ""
#. parameter name
#: boxes/generators/wallpinrow.py
msgid "pinspacing"
msgstr ""
#. help for parameter pinspacing
#: boxes/generators/wallpinrow.py
msgid "space from middle to middle of pins (in mm)"
msgstr ""
#. parameter name
#: boxes/generators/wallpinrow.py
msgid "pinspacing_increment"
msgstr ""
#. help for parameter pinspacing_increment
#: boxes/generators/wallpinrow.py
msgid "increase spacing from left to right (in mm)"
msgstr ""
#. help for parameter angle
#: boxes/generators/wallpinrow.py
msgid "angle of the pins pointing up (in degrees)"
msgstr ""
#. help for parameter hooks
#: boxes/generators/wallpinrow.py
msgid "number of hooks into the wall"
msgstr ""
#. help for parameter h
#: boxes/generators/wallpinrow.py
msgid ""
"height of the front plate (in mm) - needs to be at least 7 time the thickness"
msgstr ""
#. name of box generator
#: boxes/generators/wallplaneholder.py
msgid "WallPlaneHolder"
msgstr ""
#. description of WallPlaneHolder
#: boxes/generators/wallplaneholder.py
msgid "Hold a plane to a wall"
msgstr ""
msgid "WallPlaneHolder Settings"
msgstr ""
#. help for parameter width
#: boxes/generators/wallplaneholder.py
msgid "width of the plane"
msgstr ""
#. help for parameter length
#: boxes/generators/wallplaneholder.py
msgid "length of the plane"
msgstr ""
#. parameter name
#: boxes/generators/wallplaneholder.py
msgid "hold_length"
msgstr ""
#. help for parameter hold_length
#: boxes/generators/wallplaneholder.py
msgid "length of the part holding the plane over the front"
msgstr ""
#. help for parameter height
#: boxes/generators/wallplaneholder.py
msgid "height of the front of plane"
msgstr ""
#. name of box generator
#: boxes/generators/wallplate.py
msgid "WallPlate"
msgstr ""
#. description of WallPlate
#: boxes/generators/wallplate.py
msgid "Simple plate with slat wall hooks"
msgstr ""
msgid "WallPlate Settings"
msgstr ""
#. help for parameter depth
#: boxes/generators/wallplate.py
msgid "depth of walls with the hooks"
msgstr ""
#. parameter name
#: boxes/generators/wallplate.py
msgid "outset_sides"
msgstr ""
#. help for parameter outset_sides
#: boxes/generators/wallplate.py
msgid "make a bit wider to allow stronger joint"
msgstr ""
#. name of box generator
#: boxes/generators/wallpliersholder.py
msgid "WallPliersHolder"
msgstr ""
#. description of WallPliersHolder
#: boxes/generators/wallpliersholder.py
msgid "Bar to hang pliers on"
msgstr ""
msgid "WallPliersHolder Settings"
msgstr ""
#. help for parameter angle
#: boxes/generators/wallpliersholder.py
msgid "bracing angle - less for more bracing"
msgstr ""
#. name of box generator
#: boxes/generators/wallrollholder.py
msgid "WallRollHolder"
msgstr ""
msgid "WallRollHolder Settings"
msgstr ""
#. name of box generator
#: boxes/generators/wallslottedholder.py
msgid "WallSlottedHolder"
msgstr ""
#. description of WallSlottedHolder
#: boxes/generators/wallslottedholder.py
msgid "Wall tool holder with slots"
msgstr ""
msgid "WallSlottedHolder Settings"
msgstr ""
#. parameter name
#: boxes/generators/wallslottedholder.py
msgid "slot_depth"
msgstr ""
#. help for parameter slot_depth
#: boxes/generators/wallslottedholder.py
msgid "depth of slots from the front"
msgstr ""
#. parameter name
#: boxes/generators/wallslottedholder.py
msgid "additional_depth"
msgstr ""
#. help for parameter additional_depth
#: boxes/generators/wallslottedholder.py
msgid "depth behind the lots"
msgstr ""
#. parameter name
#: boxes/generators/wallslottedholder.py
msgid "tool_width"
msgstr ""
#. help for parameter tool_width
#: boxes/generators/wallslottedholder.py
msgid "overall width for the tools"
msgstr ""
#. help for parameter radius
#: boxes/generators/wallslottedholder.py
msgid "radius of the slots at the front"
msgstr ""
#. name of box generator
#: boxes/generators/wallstairs.py
msgid "WallStairs"
msgstr ""
#. description of WallStairs
#: boxes/generators/wallstairs.py
msgid "Platforms in different heights e.g. for screw drivers"
msgstr ""
#. long description of WallStairs in markdown
#: boxes/generators/wallstairs.py
msgid ""
"You are supposed to add holes or slots to the stair tops yourself using "
"Inkscape or another vector drawing or CAD program.\n"
"\n"
"sh gives height of the stairs from front to back. Note that the overall "
"width and height is bigger than the nominal values as walls and the "
"protrusions are not included in the measurements.\n"
msgstr ""
msgid "WallStairs Settings"
msgstr ""
#. parameter name
#: boxes/generators/wallstairs.py
msgid "braceheight"
msgstr ""
#. help for parameter braceheight
#: boxes/generators/wallstairs.py
msgid "height of the brace at the bottom back (in mm). Zero for none"
msgstr ""
#. name of box generator
#: boxes/generators/walltypetray.py
msgid "WallTypeTray"
msgstr ""
msgid "WallTypeTray Settings"
msgstr ""
#. help for parameter back_height
#: boxes/generators/walltypetray.py
msgid "additional height of the back wall"
msgstr ""
#. help for parameter radius
#: boxes/generators/walltypetray.py
msgid "radius for strengthening walls with the hooks"
msgstr ""
#. name of box generator
#: boxes/generators/wallwrenchholder.py
msgid "WallWrenchHolder"
msgstr ""
#. description of WallWrenchHolder
#: boxes/generators/wallwrenchholder.py
msgid "Hold a set of wrenches at a wall"
msgstr ""
msgid "WallWrenchHolder Settings"
msgstr ""
#. help for parameter depth
#: boxes/generators/wallwrenchholder.py
msgid "depth of the sides (in mm)"
msgstr ""
#. help for parameter number
#: boxes/generators/wallwrenchholder.py
msgid "number of wrenches (in mm)"
msgstr ""
#. parameter name
#: boxes/generators/wallwrenchholder.py
msgid "min_width"
msgstr ""
#. help for parameter min_width
#: boxes/generators/wallwrenchholder.py
msgid "width of smallest wrench (in mm)"
msgstr ""
#. parameter name
#: boxes/generators/wallwrenchholder.py
msgid "max_width"
msgstr ""
#. help for parameter max_width
#: boxes/generators/wallwrenchholder.py
msgid "width of largest wrench (in mm)"
msgstr ""
#. parameter name
#: boxes/generators/wallwrenchholder.py
msgid "min_strength"
msgstr ""
#. help for parameter min_strength
#: boxes/generators/wallwrenchholder.py
msgid "strength of smallest wrench (in mm)"
msgstr ""
#. parameter name
#: boxes/generators/wallwrenchholder.py
msgid "max_strength"
msgstr ""
#. help for parameter max_strength
#: boxes/generators/wallwrenchholder.py
msgid "strength of largest wrench (in mm)"
msgstr ""
#. parameter name
#: boxes/generators/wallwrenchholder.py
msgid "extra_distance"
msgstr ""
#. help for parameter extra_distance
#: boxes/generators/wallwrenchholder.py
msgid "additional distance between wrenches (in mm)"
msgstr ""
#. name of box generator
#: boxes/generators/winerack.py
msgid "WineRack"
msgstr ""
#. description of WineRack
#: boxes/generators/winerack.py
msgid "Honey Comb Style Wine Rack"
msgstr ""
msgid "WineRack Settings"
msgstr ""
#. help for parameter radius
#: boxes/generators/winerack.py
msgid "Radius of comb"
msgstr ""
#. parameter name
#: boxes/generators/winerack.py
msgid "walls"
msgstr ""
#. help for parameter walls
#: boxes/generators/winerack.py
msgid "which of the honey comb walls to add"
msgstr ""
#. possible choice for walls
#: boxes/generators/winerack.py
msgid "minimal"
msgstr ""
#. possible choice for walls
#: boxes/generators/winerack.py
msgid "no_verticals"
msgstr ""
#. name of box generator
#: boxes/generators/wirestraightener.py
msgid "WireStraightener"
msgstr ""
msgid "WireStraightener Settings"
msgstr ""
#. parameter name
#: boxes/generators/wirestraightener.py
msgid "roller_diameter"
msgstr ""
#. help for parameter roller_diameter
#: boxes/generators/wirestraightener.py
msgid "outer diameter of the rollers in mm"
msgstr ""
#. help for parameter axle_diameter
#: boxes/generators/wirestraightener.py
msgid "diameter of the bolts holding the rollers in mm"
msgstr ""
#. parameter name
#: boxes/generators/wirestraightener.py
msgid "roller_width"
msgstr ""
#. help for parameter roller_width
#: boxes/generators/wirestraightener.py
msgid "width of the rollers including washers in mm"
msgstr ""
#. parameter name
#: boxes/generators/wirestraightener.py
msgid "rollers"
msgstr ""
#. help for parameter rollers
#: boxes/generators/wirestraightener.py
msgid "number of rollers (top and bottom combined)"
msgstr ""
#: scripts/boxesserver:286
msgid ""
"There is no image yet. Please donate an image of your project on <a "
"href=\"https://github.com/florianfesti/boxes/issues/628\" "
"target=\"_blank\" rel=\"noopener\">GitHub</a>!"
msgstr ""
#: scripts/boxesserver:455
msgid "Help"
msgstr ""
#: scripts/boxesserver:456
msgid "Home Page"
msgstr ""
#: scripts/boxesserver:457
msgid "Documentation"
msgstr ""
#: scripts/boxesserver:458
msgid "Sources"
msgstr ""
| 185,890 | Python | .py | 6,974 | 25.411815 | 82 | 0.776021 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,709 | test_svg.py | florianfesti_boxes/tests/test_svg.py | from __future__ import annotations
import sys
from pathlib import Path
import pytest
from lxml import etree
try:
import boxes
except ImportError:
sys.path.append(Path(__file__).resolve().parent.parent.__str__())
import boxes
import boxes.generators
class TestSVG:
    """Test SVG creation of box generators.
    Just test generators which have a default output without an input requirement.
    Uses files from examples folder as reference.
    """
    # All generator classes registered in boxes.generators.
    all_generators = boxes.generators.getAllBoxGenerators().values()
    # Ignore multistep generators and generators which require input.
    notTestGenerators = ('GridfinityTrayLayout', 'TrayLayout', 'TrayLayoutFile', 'TypeTray', 'Edges',)
    # Generators known to fail; currently none.
    brokenGenerators = ()
    avoidGenerator = notTestGenerators + brokenGenerators
    def test_generators_available(self) -> None:
        # Sanity check: generator discovery must find at least one class.
        assert len(self.all_generators) != 0
    # svgcheck currently do not allow inkscape custom tags.
    # @staticmethod
    # def is_valid_svg(file_path: str) -> bool:
    #     result = subprocess.run(['svgcheck', file_path], capture_output=True, text=True)
    #     return "INFO: File conforms to SVG requirements." in result.stdout
    @staticmethod
    def is_valid_xml_by_lxml(xml_string: str) -> bool:
        """Return True if xml_string parses as well-formed XML via lxml."""
        try:
            etree.fromstring(xml_string)
            return True
        except etree.XMLSyntaxError:
            return False
    @staticmethod
    def idfunc(val) -> str:
        # Used as pytest parametrize id: show the generator class name.
        return f"{val.__name__}"
    @pytest.mark.parametrize(
        "generator",
        all_generators,
        ids=idfunc.__func__,
    )
    def test_generator(self, generator: type[boxes.Boxes], capsys) -> None:
        """Render one generator with defaults and compare against the reference SVG."""
        boxName = generator.__name__
        if boxName in self.avoidGenerator:
            pytest.skip("Skipped generator")
        box = generator()
        box.parseArgs("")
        # Make output deterministic (e.g. suppress timestamps) so byte comparison works.
        box.metadata["reproducible"] = True
        box.open()
        box.render()
        boxData = box.close()
        out, err = capsys.readouterr()
        # NOTE(review): __sizeof__ is the in-memory footprint of the buffer object,
        # not the SVG content length; len(boxData.getvalue()) would be the direct check.
        assert 100 < boxData.__sizeof__(), "No data generated."
        assert 0 == len(out), "Console output generated."
        assert 0 == len(err), "Console error generated."
        # Use external library lxml as cross-check.
        assert self.is_valid_xml_by_lxml(boxData.getvalue()) is True, "Invalid XML according to library lxml."
        # Keep the rendered output next to the tests for manual inspection/diffing.
        file = Path(__file__).resolve().parent / 'data' / (boxName + '.svg')
        file.write_bytes(boxData.getvalue())
        # Use example data from repository as reference data.
        referenceData = Path(__file__).resolve().parent.parent / 'examples' / (boxName + '.svg')
        assert referenceData.exists() is True, "Reference file for comparison does not exist."
        assert referenceData.is_file() is True, "Reference file for comparison does not exist."
        assert referenceData.read_bytes() == boxData.getvalue(), "SVG files are not equal. If change is intended, please update example files."
| 2,960 | Python | .py | 66 | 37.954545 | 143 | 0.668405 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,710 | conf.py | florianfesti_boxes/documentation/src/conf.py | # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import os
import sys
from datetime import datetime

# Make the repository root importable so autodoc can find the boxes package
# when building from documentation/src.
sys.path.append(os.path.abspath('../..'))

# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information

project = 'boxes.py'
# Copyright year tracks the build date automatically.
project_copyright = f"{datetime.now().year}, Florian Festi"
author = 'Florian Festi'

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
]

templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The suffix of source filenames.
source_suffix = '.rst'

# The root toctree document.
root_doc = 'index'

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output

html_theme = 'nature'
html_static_path = ['_static']

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../static/boxes-logo.svg"

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "../../static/favicon.ico"

# Output file base name for HTML help builder.
htmlhelp_basename = 'boxespydoc'

# If this is not None, a ‘Last updated on:’ timestamp is inserted at every page bottom.
html_last_updated_fmt = ''

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'boxespy.tex', 'boxes.py Documentation',
     'Florian Festi', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'boxespy', 'boxes.py Documentation',
     ['Florian Festi'], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'boxespy', 'boxes.py Documentation',
     'Florian Festi', 'boxespy', 'One line description of project.',
     'Miscellaneous'),
]
| 3,037 | Python | .py | 71 | 40.661972 | 87 | 0.660769 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,711 | boxes2rst.py | florianfesti_boxes/documentation/src/boxes2rst.py | #!/usr/bin/env python3
# Copyright (C) 2017 Florian Festi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os.path
import sys
try:
import boxes.generators
except ImportError:
sys.path.append(os.path.dirname(__file__) + "/../..")
import boxes.generators
class Boxes2rst:
    """Collect all web-enabled box generators and emit an rST listing.

    Generators are grouped by their ``ui_group``; each group becomes an
    rST section containing one ``autoclass`` directive per generator,
    plus a sample image if one exists in static/samples.
    """

    def __init__(self) -> None:
        # Map generator class name -> instance, restricted to generators
        # that are exposed in the web interface.
        self.boxes = {b.__name__: b() for b in boxes.generators.getAllBoxGenerators().values() if b.webinterface}
        self.groups = boxes.generators.ui_groups
        self.groups_by_name = boxes.generators.ui_groups_by_name
        # Sort every generator into its UI group; unknown group names
        # fall back to the "Misc" group.
        for name, box in self.boxes.items():
            self.groups_by_name.get(box.ui_group, self.groups_by_name["Misc"]).add(box)

    def write(self, targetFile: str) -> None:
        """Write the complete rST document for all groups to *targetFile*."""
        with open(targetFile, "w") as f:
            for name, group in self.groups_by_name.items():
                # Group heading with rST section underline.
                f.write(f"{name}\n----------------\n\n")
                for box in group.generators:
                    # Generator name as a sub-section heading.
                    f.write(box.__class__.__name__)
                    f.write("\n..........................................\n\n")
                    f.write(f"\n\n.. autoclass:: {box.__class__.__module__}.{box.__class__.__name__}")
                    f.write("\n\n")
                    # Embed a sample photo if present; the relative path
                    # assumes the script runs from documentation/src —
                    # TODO confirm against the build setup.
                    if os.path.exists(f"../../static/samples/{box.__class__.__name__}.jpg"):
                        f.write(f".. image:: ../../static/samples/{box.__class__.__name__}.jpg\n\n")
def main() -> None:
    """Command-line entry point: write the generator listing to TARGETFILE."""
    args = sys.argv
    if len(args) == 2:
        Boxes2rst().write(args[1])
    else:
        print("Usage: boxes2rst.py TARGETFILE")
if __name__ == "__main__":
main()
| 2,206 | Python | .py | 48 | 39.083333 | 113 | 0.601024 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,712 | boxes_proxy.py | florianfesti_boxes/scripts/boxes_proxy.py | #!/usr/bin/env python3
"""
Extension for InkScape 1.0+
boxes.py wrapper script to make it work on Windows and Linux systems without duplicating .inx files
Author: Mario Voigt / FabLab Chemnitz
Mail: mario.voigt@stadtfabrikanten.org
Date: 27.04.2021
Last patch: 27.04.2021
License: GNU GPL v3
"""
pass
import subprocess
import sys
pass
from shlex import quote
from inkex.extensions import GenerateExtension
from lxml import etree
import inkex
class boxesPyWrapper(GenerateExtension):
    """Inkscape extension wrapper that shells out to the ``boxes`` CLI.

    Mirrors whatever ``--key=value`` parameters the generated .inx file
    passes, runs ``boxes`` with them, and imports the resulting SVG into
    the current Inkscape document as a single group.
    """

    def add_arguments(self, pars):
        """Register every ``--key=value`` pair from sys.argv with the parser.

        The concrete parameter set depends on the generator's .inx file and
        is not known ahead of time, so the options are mirrored dynamically.
        """
        for arg in sys.argv[1:]:
            key = arg.split("=")[0]
            if key == "--id":
                continue
            if len(arg.split("=")) == 2:
                # NOTE(review): the key itself is used as the default; the
                # actual value is supplied by inkex's own command-line
                # parsing at run time — TODO confirm intended.
                pars.add_argument(key, default=key)

    def generate(self):
        """Run boxes.py with the collected options and return the SVG group."""
        import shlex  # module top level only imports shlex.quote

        cmd = "boxes"  # boxes.exe in this local dir (or if present in %PATH%), or boxes from $PATH in linux
        for arg in vars(self.options):
            if arg in (
                "output", "id", "ids", "selected_nodes",
                    "input_file", "tab"):
                continue
            # fix behaviour of "original" arg which does not correctly gets
            # interpreted if set to false
            if arg == "original" and str(getattr(self.options, arg)) == "false":
                continue
            cmd += f" --{arg} {quote(str(getattr(self.options, arg)))}"
        cmd += " --output -"
        cmd = cmd.replace("boxes --generator", "boxes")
        # Run boxes with the parameters provided. shlex.split() (rather than
        # str.split()) undoes the shlex.quote() escaping above, so option
        # values containing whitespace survive as single arguments.
        result = subprocess.run(shlex.split(cmd), capture_output=True)
        if result.returncode:
            inkex.utils.debug("Generating box svg failed. Cannot continue. Command was:")
            inkex.utils.debug(str(cmd))
            inkex.utils.debug(str(result.stderr))
            sys.exit(1)
        # write the generated SVG into Inkscape's canvas
        doc = etree.fromstring(result.stdout, parser=etree.XMLParser(huge_tree=True))
        group = inkex.Group(id="boxes.py")
        for element in doc:
            group.append(element)
        return group
def main() -> None:
    """Entry point: instantiate and run the wrapper extension."""
    extension = boxesPyWrapper()
    extension.run()
if __name__ == '__main__':
main()
| 2,264 | Python | .py | 60 | 29.583333 | 108 | 0.608139 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,713 | boxes_example.ipynb | florianfesti_boxes/scripts/boxes_example.ipynb | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Boxes.py example\n",
"\n",
"This notebook is an interactive example of a Boxes.py generator. Feel free to play around and see how the result changes.\n",
"\n",
"Check out http://florianfesti.github.io/boxes/html/index.html for documentation."
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import SVG, display\n",
"\n",
"import sys\n",
"# sys.path.append('..') # uncomments and adjust if your Boxes.py copy in not in the Python path\n",
"from boxes import *"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"scrolled": true
},
"outputs": [
{
"data": {
"image/svg+xml": [
"<svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:cc=\"http://creativecommons.org/ns#\" xmlns:dc=\"http://purl.org/dc/elements/1.1/\" xmlns:inkscape=\"http://www.inkscape.org/namespaces/inkscape\" xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\" xmlns:svg=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" height=\"208.80mm\" viewBox=\"0.0 0.0 182.20 208.80\" width=\"182.20mm\">\n",
"<!--\n",
"Example - Example: Single Shelve to screw to the wall\n",
"\n",
"Created with Boxes.py (https://festi.info/boxes.py)\n",
"Creation date: 2024-02-11 17:45:59\n",
"Command line (remove spaces between dashes): boxes Example - -reference=0\n",
"-->\n",
"<title>Example</title>\n",
"<metadata>\n",
"<rdf:RDF><cc:Work>\n",
"<dc:title>Shelves - Example</dc:title>\n",
"<dc:date>2024-02-11 17:45:59</dc:date>\n",
"<dc:source>boxes Example --reference=0 --debug=0</dc:source>\n",
"<dc:description>Example: Single Shelve to screw to the wall\n",
"\n",
"Created with Boxes.py (https://festi.info/boxes.py)\n",
"Command line: boxes Example --reference=0 --debug=0\n",
"Command line short: boxes Example --reference=0\n",
"</dc:description>\n",
"</cc:Work></rdf:RDF></metadata>\n",
"<g id=\"p-0\" style=\"fill:none;stroke-linecap:round;stroke-linejoin:round;\">\n",
" <path d=\"M 13.100 198.800 H 163.100 H 166.100 C 166.155 198.800 166.200 198.755 166.200 198.700 V 185.700 C 166.200 185.645 166.155 185.600 166.100 185.600 H 163.200 C 163.100 185.600 163.200 185.700 163.200 185.600 V 176.800 C 163.200 176.700 163.100 176.800 163.200 176.800 H 166.100 C 166.155 176.800 166.200 176.755 166.200 176.700 V 170.700 C 166.200 170.645 166.155 170.600 166.100 170.600 H 163.200 C 163.100 170.600 163.200 170.700 163.200 170.600 V 161.800 C 163.200 161.700 163.100 161.800 163.200 161.800 H 166.100 C 166.155 161.800 166.200 161.755 166.200 161.700 V 148.700 V 142.700 C 166.200 142.645 166.155 142.600 166.100 142.600 H 163.100 H 13.100 H 10.100 C 10.045 142.600 10.000 142.645 10.000 142.700 V 148.700 V 161.700 C 10.000 161.755 10.045 161.800 10.100 161.800 H 13.000 C 13.100 161.800 13.000 161.700 13.000 161.800 V 170.600 C 13.000 170.700 13.100 170.600 13.000 170.600 H 10.100 C 10.045 170.600 10.000 170.645 10.000 170.700 V 176.700 C 10.000 176.755 10.045 176.800 10.100 176.800 H 13.000 C 13.100 176.800 13.000 176.700 13.000 176.800 V 185.600 C 13.000 185.700 13.100 185.600 13.000 185.600 H 10.100 C 10.045 185.600 10.000 185.645 10.000 185.700 V 198.700 C 10.000 198.755 10.045 198.800 10.100 198.800 H 13.100 Z\" stroke=\"rgb(0,0,0)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 151.700 158.700 C 151.700 158.432 151.777 158.169 151.922 157.943 C 152.067 157.717 152.274 157.538 152.518 157.427 C 152.763 157.315 153.034 157.276 153.299 157.314 C 153.565 157.352 153.814 157.466 154.017 157.642 C 154.220 157.818 154.368 158.048 154.443 158.306 C 154.519 158.563 154.519 158.837 154.443 159.094 C 154.368 159.352 154.220 159.582 154.017 159.758 C 153.814 159.934 153.565 160.048 153.299 160.086 C 153.034 160.124 152.763 160.085 152.518 159.973 C 152.274 159.862 152.067 159.683 151.922 159.457 C 151.777 159.231 151.700 158.968 151.700 158.700 Z\" stroke=\"rgb(0,0,255)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 148.100 145.800 H 152.500 C 152.600 145.800 152.500 145.700 152.500 145.800 V 148.600 C 152.500 148.700 152.600 148.600 152.500 148.600 H 143.700 C 143.600 148.600 143.700 148.700 143.700 148.600 V 145.800 C 143.700 145.700 143.600 145.800 143.700 145.800 H 148.100 Z\" stroke=\"rgb(0,0,255)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 133.100 145.800 H 137.500 C 137.600 145.800 137.500 145.700 137.500 145.800 V 148.600 C 137.500 148.700 137.600 148.600 137.500 148.600 H 128.700 C 128.600 148.600 128.700 148.700 128.700 148.600 V 145.800 C 128.700 145.700 128.600 145.800 128.700 145.800 H 133.100 Z\" stroke=\"rgb(0,0,255)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 118.100 145.800 H 122.500 C 122.600 145.800 122.500 145.700 122.500 145.800 V 148.600 C 122.500 148.700 122.600 148.600 122.500 148.600 H 113.700 C 113.600 148.600 113.700 148.700 113.700 148.600 V 145.800 C 113.700 145.700 113.600 145.800 113.700 145.800 H 118.100 Z\" stroke=\"rgb(0,0,255)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 103.100 145.800 H 107.500 C 107.600 145.800 107.500 145.700 107.500 145.800 V 148.600 C 107.500 148.700 107.600 148.600 107.500 148.600 H 98.700 C 98.600 148.600 98.700 148.700 98.700 148.600 V 145.800 C 98.700 145.700 98.600 145.800 98.700 145.800 H 103.100 Z\" stroke=\"rgb(0,0,255)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 88.100 145.800 H 92.500 C 92.600 145.800 92.500 145.700 92.500 145.800 V 148.600 C 92.500 148.700 92.600 148.600 92.500 148.600 H 83.700 C 83.600 148.600 83.700 148.700 83.700 148.600 V 145.800 C 83.700 145.700 83.600 145.800 83.700 145.800 H 88.100 Z\" stroke=\"rgb(0,0,255)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 73.100 145.800 H 77.500 C 77.600 145.800 77.500 145.700 77.500 145.800 V 148.600 C 77.500 148.700 77.600 148.600 77.500 148.600 H 68.700 C 68.600 148.600 68.700 148.700 68.700 148.600 V 145.800 C 68.700 145.700 68.600 145.800 68.700 145.800 H 73.100 Z\" stroke=\"rgb(0,0,255)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 58.100 145.800 H 62.500 C 62.600 145.800 62.500 145.700 62.500 145.800 V 148.600 C 62.500 148.700 62.600 148.600 62.500 148.600 H 53.700 C 53.600 148.600 53.700 148.700 53.700 148.600 V 145.800 C 53.700 145.700 53.600 145.800 53.700 145.800 H 58.100 Z\" stroke=\"rgb(0,0,255)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 43.100 145.800 H 47.500 C 47.600 145.800 47.500 145.700 47.500 145.800 V 148.600 C 47.500 148.700 47.600 148.600 47.500 148.600 H 38.700 C 38.600 148.600 38.700 148.700 38.700 148.600 V 145.800 C 38.700 145.700 38.600 145.800 38.700 145.800 H 43.100 Z\" stroke=\"rgb(0,0,255)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 28.100 145.800 H 32.500 C 32.600 145.800 32.500 145.700 32.500 145.800 V 148.600 C 32.500 148.700 32.600 148.600 32.500 148.600 H 23.700 C 23.600 148.600 23.700 148.700 23.700 148.600 V 145.800 C 23.700 145.700 23.600 145.800 23.700 145.800 H 28.100 Z\" stroke=\"rgb(0,0,255)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 23.100 160.100 C 22.832 160.100 22.569 160.023 22.343 159.878 C 22.117 159.733 21.938 159.526 21.827 159.282 C 21.715 159.037 21.676 158.766 21.714 158.501 C 21.752 158.235 21.866 157.986 22.042 157.783 C 22.218 157.580 22.448 157.432 22.706 157.357 C 22.963 157.281 23.237 157.281 23.494 157.357 C 23.752 157.432 23.982 157.580 24.158 157.783 C 24.334 157.986 24.448 158.235 24.486 158.501 C 24.524 158.766 24.485 159.037 24.373 159.282 C 24.262 159.526 24.083 159.733 23.857 159.878 C 23.631 160.023 23.368 160.100 23.100 160.100 Z\" stroke=\"rgb(0,0,255)\" stroke-width=\"0.20\"/>\n",
"</g>\n",
"<g id=\"p-1\" style=\"fill:none;stroke-linecap:round;stroke-linejoin:round;\">\n",
" <path d=\"M 16.100 141.100 H 166.100 H 172.100 C 172.155 141.100 172.200 141.055 172.200 141.000 V 71.000 C 172.200 70.945 172.155 70.900 172.100 70.900 H 166.100 H 155.700 C 155.600 70.900 155.700 71.000 155.700 70.900 V 68.000 C 155.700 67.945 155.655 67.900 155.600 67.900 H 146.600 C 146.545 67.900 146.500 67.945 146.500 68.000 V 70.900 C 146.500 71.000 146.600 70.900 146.500 70.900 H 140.700 C 140.600 70.900 140.700 71.000 140.700 70.900 V 68.000 C 140.700 67.945 140.655 67.900 140.600 67.900 H 131.600 C 131.545 67.900 131.500 67.945 131.500 68.000 V 70.900 C 131.500 71.000 131.600 70.900 131.500 70.900 H 125.700 C 125.600 70.900 125.700 71.000 125.700 70.900 V 68.000 C 125.700 67.945 125.655 67.900 125.600 67.900 H 116.600 C 116.545 67.900 116.500 67.945 116.500 68.000 V 70.900 C 116.500 71.000 116.600 70.900 116.500 70.900 H 110.700 C 110.600 70.900 110.700 71.000 110.700 70.900 V 68.000 C 110.700 67.945 110.655 67.900 110.600 67.900 H 101.600 C 101.545 67.900 101.500 67.945 101.500 68.000 V 70.900 C 101.500 71.000 101.600 70.900 101.500 70.900 H 95.700 C 95.600 70.900 95.700 71.000 95.700 70.900 V 68.000 C 95.700 67.945 95.655 67.900 95.600 67.900 H 86.600 C 86.545 67.900 86.500 67.945 86.500 68.000 V 70.900 C 86.500 71.000 86.600 70.900 86.500 70.900 H 80.700 C 80.600 70.900 80.700 71.000 80.700 70.900 V 68.000 C 80.700 67.945 80.655 67.900 80.600 67.900 H 71.600 C 71.545 67.900 71.500 67.945 71.500 68.000 V 70.900 C 71.500 71.000 71.600 70.900 71.500 70.900 H 65.700 C 65.600 70.900 65.700 71.000 65.700 70.900 V 68.000 C 65.700 67.945 65.655 67.900 65.600 67.900 H 56.600 C 56.545 67.900 56.500 67.945 56.500 68.000 V 70.900 C 56.500 71.000 56.600 70.900 56.500 70.900 H 50.700 C 50.600 70.900 50.700 71.000 50.700 70.900 V 68.000 C 50.700 67.945 50.655 67.900 50.600 67.900 H 41.600 C 41.545 67.900 41.500 67.945 41.500 68.000 V 70.900 C 41.500 71.000 41.600 70.900 41.500 70.900 H 35.700 C 35.600 70.900 35.700 71.000 35.700 70.900 V 68.000 C 35.700 67.945 
35.655 67.900 35.600 67.900 H 26.600 C 26.545 67.900 26.500 67.945 26.500 68.000 V 70.900 C 26.500 71.000 26.600 70.900 26.500 70.900 H 16.100 H 10.100 C 10.045 70.900 10.000 70.945 10.000 71.000 V 141.000 C 10.000 141.055 10.045 141.100 10.100 141.100 H 16.100 Z\" stroke=\"rgb(0,0,0)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 169.000 128.500 V 132.900 C 169.000 133.000 169.100 132.900 169.000 132.900 H 166.200 C 166.100 132.900 166.200 133.000 166.200 132.900 V 124.100 C 166.200 124.000 166.100 124.100 166.200 124.100 H 169.000 C 169.100 124.100 169.000 124.000 169.000 124.100 V 128.500 Z\" stroke=\"rgb(0,0,255)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 169.000 113.500 V 117.900 C 169.000 118.000 169.100 117.900 169.000 117.900 H 166.200 C 166.100 117.900 166.200 118.000 166.200 117.900 V 109.100 C 166.200 109.000 166.100 109.100 166.200 109.100 H 169.000 C 169.100 109.100 169.000 109.000 169.000 109.100 V 113.500 Z\" stroke=\"rgb(0,0,255)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 169.000 98.500 V 102.900 C 169.000 103.000 169.100 102.900 169.000 102.900 H 166.200 C 166.100 102.900 166.200 103.000 166.200 102.900 V 94.100 C 166.200 94.000 166.100 94.100 166.200 94.100 H 169.000 C 169.100 94.100 169.000 94.000 169.000 94.100 V 98.500 Z\" stroke=\"rgb(0,0,255)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 169.000 83.500 V 87.900 C 169.000 88.000 169.100 87.900 169.000 87.900 H 166.200 C 166.100 87.900 166.200 88.000 166.200 87.900 V 79.100 C 166.200 79.000 166.100 79.100 166.200 79.100 H 169.000 C 169.100 79.100 169.000 79.000 169.000 79.100 V 83.500 Z\" stroke=\"rgb(0,0,255)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 13.200 83.500 V 79.100 C 13.200 79.000 13.100 79.100 13.200 79.100 H 16.000 C 16.100 79.100 16.000 79.000 16.000 79.100 V 87.900 C 16.000 88.000 16.100 87.900 16.000 87.900 H 13.200 C 13.100 87.900 13.200 88.000 13.200 87.900 V 83.500 Z\" stroke=\"rgb(0,0,255)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 13.200 98.500 V 94.100 C 13.200 94.000 13.100 94.100 13.200 94.100 H 16.000 C 16.100 94.100 16.000 94.000 16.000 94.100 V 102.900 C 16.000 103.000 16.100 102.900 16.000 102.900 H 13.200 C 13.100 102.900 13.200 103.000 13.200 102.900 V 98.500 Z\" stroke=\"rgb(0,0,255)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 13.200 113.500 V 109.100 C 13.200 109.000 13.100 109.100 13.200 109.100 H 16.000 C 16.100 109.100 16.000 109.000 16.000 109.100 V 117.900 C 16.000 118.000 16.100 117.900 16.000 117.900 H 13.200 C 13.100 117.900 13.200 118.000 13.200 117.900 V 113.500 Z\" stroke=\"rgb(0,0,255)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 13.200 128.500 V 124.100 C 13.200 124.000 13.100 124.100 13.200 124.100 H 16.000 C 16.100 124.100 16.000 124.000 16.000 124.100 V 132.900 C 16.000 133.000 16.100 132.900 16.000 132.900 H 13.200 C 13.100 132.900 13.200 133.000 13.200 132.900 V 128.500 Z\" stroke=\"rgb(0,0,255)\" stroke-width=\"0.20\"/>\n",
"</g>\n",
"<g id=\"p-2\" style=\"fill:none;stroke-linecap:round;stroke-linejoin:round;\">\n",
" <path d=\"M 18.950 63.400 H 26.850 C 26.950 63.400 26.850 63.300 26.850 63.400 V 66.300 C 26.850 66.355 26.894 66.400 26.950 66.400 H 35.950 C 36.005 66.400 36.050 66.355 36.050 66.300 V 63.400 C 36.050 63.300 35.950 63.400 36.050 63.400 H 41.850 C 41.950 63.400 41.850 63.300 41.850 63.400 V 66.300 C 41.850 66.355 41.894 66.400 41.950 66.400 H 50.950 C 51.005 66.400 51.050 66.355 51.050 66.300 V 63.400 C 51.050 63.300 50.950 63.400 51.050 63.400 H 56.850 C 56.950 63.400 56.850 63.300 56.850 63.400 V 66.300 C 56.850 66.355 56.894 66.400 56.950 66.400 H 65.950 C 66.005 66.400 66.050 66.355 66.050 66.300 V 63.400 C 66.050 63.300 65.950 63.400 66.050 63.400 H 71.850 C 71.950 63.400 71.850 63.300 71.850 63.400 V 66.300 C 71.850 66.355 71.894 66.400 71.950 66.400 H 80.950 C 81.005 66.400 81.050 66.355 81.050 66.300 V 63.400 C 81.050 63.300 80.950 63.400 81.050 63.400 H 88.950 C 89.005 63.400 89.050 63.355 89.050 63.300 V 50.400 C 89.050 50.300 88.950 50.400 89.050 50.400 H 91.950 C 92.005 50.400 92.050 50.355 92.050 50.300 V 41.300 C 92.050 41.245 92.005 41.200 91.950 41.200 H 89.050 C 88.950 41.200 89.050 41.300 89.050 41.200 V 35.400 C 89.050 35.300 88.950 35.400 89.050 35.400 H 91.950 C 92.005 35.400 92.050 35.355 92.050 35.300 V 26.300 C 92.050 26.245 92.005 26.200 91.950 26.200 H 89.050 C 88.950 26.200 89.050 26.300 89.050 26.200 V 13.300 C 89.050 13.245 89.005 13.200 88.950 13.200 C 88.929 13.200 88.908 13.207 88.891 13.219 L 18.891 63.219 C 18.865 63.237 18.850 63.268 18.850 63.300 C 18.850 63.355 18.894 63.400 18.950 63.400 Z\" stroke=\"rgb(0,0,0)\" stroke-width=\"0.20\"/>\n",
" <path d=\"M 83.100 13.000 H 75.200 C 75.100 13.000 75.200 13.100 75.200 13.000 V 10.100 C 75.200 10.045 75.155 10.000 75.100 10.000 H 66.100 C 66.045 10.000 66.000 10.045 66.000 10.100 V 13.000 C 66.000 13.100 66.100 13.000 66.000 13.000 H 60.200 C 60.100 13.000 60.200 13.100 60.200 13.000 V 10.100 C 60.200 10.045 60.155 10.000 60.100 10.000 H 51.100 C 51.045 10.000 51.000 10.045 51.000 10.100 V 13.000 C 51.000 13.100 51.100 13.000 51.000 13.000 H 45.200 C 45.100 13.000 45.200 13.100 45.200 13.000 V 10.100 C 45.200 10.045 45.155 10.000 45.100 10.000 H 36.100 C 36.045 10.000 36.000 10.045 36.000 10.100 V 13.000 C 36.000 13.100 36.100 13.000 36.000 13.000 H 30.200 C 30.100 13.000 30.200 13.100 30.200 13.000 V 10.100 C 30.200 10.045 30.155 10.000 30.100 10.000 H 21.100 C 21.045 10.000 21.000 10.045 21.000 10.100 V 13.000 C 21.000 13.100 21.100 13.000 21.000 13.000 H 13.100 C 13.045 13.000 13.000 13.045 13.000 13.100 V 26.000 C 13.000 26.100 13.100 26.000 13.000 26.000 H 10.100 C 10.045 26.000 10.000 26.045 10.000 26.100 V 35.100 C 10.000 35.155 10.045 35.200 10.100 35.200 H 13.000 C 13.100 35.200 13.000 35.100 13.000 35.200 V 41.000 C 13.000 41.100 13.100 41.000 13.000 41.000 H 10.100 C 10.045 41.000 10.000 41.045 10.000 41.100 V 50.100 C 10.000 50.155 10.045 50.200 10.100 50.200 H 13.000 C 13.100 50.200 13.000 50.100 13.000 50.200 V 63.100 C 13.000 63.155 13.045 63.200 13.100 63.200 C 13.121 63.200 13.141 63.193 13.158 63.181 L 83.158 13.181 C 83.184 13.163 83.200 13.132 83.200 13.100 C 83.200 13.045 83.155 13.000 83.100 13.000 Z\" stroke=\"rgb(0,0,0)\" stroke-width=\"0.20\"/>\n",
"</g>\n",
"</svg>"
],
"text/plain": [
"<IPython.core.display.SVG object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"class Example(Boxes): # Adjust class name and call below\n",
" \"\"\"Example: Single Shelve to screw to the wall\"\"\"\n",
"\n",
" ui_group = \"Shelves\" # change for generators belonging in another group\n",
" \n",
" def __init__(self):\n",
" Boxes.__init__(self)\n",
" # arguments\n",
" self.addSettingsArgs(edges.FingerJointSettings, finger=3.0) # arguments for finger joints\n",
" self.buildArgParser(x=150, y=70, h=50)\n",
" self.argparser.add_argument(\n",
" \"--hole_dist\", action=\"store\", type=float, default=10.,\n",
" help=\"distance of the mounting holes to the boards\")\n",
" self.argparser.add_argument(\n",
" \"--hole_dia\", action=\"store\", type=float, default=3., # can't use \"hole\" as param name as it is a method\n",
" help=\"diameter of the mounting holes\")\n",
" \n",
" def render(self):\n",
" x, y, h = self.x, self.y, self.h\n",
" t = self.thickness\n",
" \n",
" # render-magic goes here\n",
" \n",
" hole = lambda: self.hole(self.hole_dist, self.hole_dist, d=self.hole_dia) # use lambda as a callback\n",
" # holes are placed relative to the inner rectangle of the back wall. The top part with the finger holes and \n",
" # the finges at the sides do not count.\n",
" # Callbacks start in the bottom left corner. Place holes in the third and forth corners only. \n",
" self.rectangularWall(x, h, \"eFhF\", move=\"up\", callback=[None, None, hole, hole]) # back board\n",
"\n",
" self.rectangularWall(x, y, \"ehfh\", move=\"up\") # top board\n",
" self.rectangularTriangle(y, h, \"ff\", num=2) # braces \n",
" \n",
"\n",
"b = Example()\n",
"b.parseArgs(['--reference=0', '--debug=0'])\n",
"b.open()\n",
"b.render()\n",
"data = b.close()\n",
"\n",
"display(SVG(data=data.getvalue()))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.1"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
| 19,234 | Python | .py | 176 | 103.857955 | 2,318 | 0.645975 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,714 | stackablebin.py | florianfesti_boxes/boxes/generators/stackablebin.py | # Copyright (C) 2013-2016 Florian Festi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from boxes import *
class StackableBinEdge(edges.BaseEdge):
    """Edge drawing the slanted front of the bin.

    A plain slope covers the ``front`` fraction of the bin height,
    followed by a 45° finger-jointed segment whose length matches the
    retainer wall drawn in StackableBin.render().
    """
    char = "B"
    def __call__(self, length, **kw):
        f = self.settings.front
        # Slope angle from rise f over run (1-f); requires f < 1, which
        # StackableBin.render() guarantees by capping front at 0.999.
        a1 = math.degrees(math.atan(f/(1-f)))
        a2 = 45 + a1
        self.corner(-a1)
        # Plain edge along the slope's hypotenuse: sqrt(f^2 + (1-f)^2) * h.
        self.edges["e"](self.settings.h* (f**2+(1-f)**2)**0.5)
        self.corner(a2)
        # Finger joint for the 45° front lip; length f*h*sqrt(2) matches
        # the retainer wall's height in render().
        self.edges["f"](self.settings.h*f*2**0.5)
        self.corner(-45)
    def margin(self) -> float:
        # Extra space the slanted front extends beyond the nominal edge line.
        return self.settings.h * self.settings.front
class StackableBinSideEdge(StackableBinEdge):
    """Same profile as StackableBinEdge, registered under a second edge character."""
    char = 'b'
class StackableBin(Boxes):
    """Stackable bin base on bintray"""

    ui_group = "Shelf"

    def __init__(self) -> None:
        Boxes.__init__(self)
        self.addSettingsArgs(edges.StackableSettings, bottom_stabilizers=2.4)
        self.addSettingsArgs(edges.FingerJointSettings, surroundingspaces=0.5)
        self.buildArgParser("outside")
        self.buildArgParser(x=70, h=50)
        self.argparser.add_argument(
            "--d", action="store", type=float, default=100,
            help="bin (d)epth")
        self.argparser.add_argument(
            "--front", action="store", type=float, default=0.4,
            help="fraction of bin height covered with slope")

    def render(self):
        # Cap below 1.0: StackableBinEdge divides by (1 - front).
        self.front = min(self.front, 0.999)

        # Register the custom slanted-front edges under chars "B" and "b".
        self.addPart(StackableBinEdge(self, self))
        self.addPart(StackableBinSideEdge(self, self))

        # Derive 45°-angled finger joint edges (chars "g", "G", "H") from
        # the standard finger joint settings for the bottom/retainer seam.
        angledsettings = copy.deepcopy(self.edges["f"].settings)
        angledsettings.setValues(self.thickness, True, angle=45)
        angledsettings.edgeObjects(self, chars="gGH")

        if self.outside:
            # Convert outside measurements to the inner sizes used below.
            self.x = self.adjustSize(self.x)
            self.h = self.adjustSize(self.h, "s", "S")
            self.d = self.adjustSize(self.d, "h", "b")

        with self.saved_context():
            self.rectangularWall(self.x, self.d, "ffGf", label="bottom", move="up")
            self.rectangularWall(self.x, self.h, "hfef", label="back", move="up ")
            # Retainer height f*h*sqrt(2) matches the finger joint drawn by
            # StackableBinEdge.
            self.rectangularWall(self.x, self.h*self.front*2**0.5, "gFeF", label="retainer", move="up")
            self.rectangularWall(self.x, 3, "EEEE", label="for label (optional)")
        self.rectangularWall(self.x, 3, "EEEE", label="movement", move="right only")

        # Side walls use the "b" slanted edge on the front.
        self.rectangularWall(self.d, self.h, "shSb", label="left", move="up")
        self.rectangularWall(self.d, self.h, "shSb", label="right", move="mirror up")
| 3,112 | Python | .tac | 64 | 41.375 | 103 | 0.650925 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,715 | stachel.py | florianfesti_boxes/boxes/generators/stachel.py | # Copyright (C) 2013-2016 Florian Festi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from boxes import *
class Stachel(Boxes):
    """Bass Recorder Endpin"""

    ui_group = "Misc"

    def __init__(self) -> None:
        Boxes.__init__(self)
        self.argparser.add_argument(
            "--flutediameter", action="store", type=float, default=115.0,
            help="diameter of the flutes bottom in mm")
        self.argparser.add_argument(
            "--polediameter", action="store", type=float, default=25.,
            # fixed typo in user-facing help text: "if" -> "of"
            help="diameter of the pin in mm")
        self.argparser.add_argument(
            "--wall", action="store", type=float, default=7.,
            help="width of the surrounding wall in mm")

    def layer(self, ri, ro, rp, holes=False, move=""):
        """Draw one ring-shaped layer of the endpin holder.

        The outline is an almost closed ring that clamps around the
        flute's bottom and carries a second, smaller clamp for the pole.

        :param ri: inner radius (flute bottom) in mm
        :param ro: outer radius (inner radius plus wall) in mm
        :param rp: radius of the pole in mm
        :param holes: if True, cut screw slots into the bridges of both
            clamps so they can be tightened
        :param move: how to move the part before/after drawing
        """
        r = 2.5  # corner radius of the clamp jaws
        l = 25   # depth of clamp
        w = 20   # width of clamp
        wp = rp + 8  # width of the pole clamp
        tw = 2*ro + 2*rp
        th = 2*ro + l
        if self.move(tw, th, move, True):
            return
        self.moveTo(ro, r, 90)
        # opening angles of the two clamps as seen from the ring's center
        a1 = math.degrees(math.asin(w / ro))
        a2 = math.degrees(math.asin(wp / ro))
        l1 = ro*(1-math.cos(math.radians(a1)))
        a3 = math.degrees(math.asin(1./rp))
        self.polyline(ro-ri+l-r, 90, 0, (-355, ri), 0, 90, ro-ri+l-r, # inside
                      (90, r), w-2*r, (90, r))
        if holes: # right side main clamp
            poly1 = [(l+l1-2)/2-r, 90, w-2, -90, 2, -90, w-2, 90,
                     (l+l1-2)/2]
            self.polyline(*poly1)
        else:
            self.polyline(l+l1-r)
        self.polyline(0, -90+a1, 0, (90-a1-a2, ro), 0, -90+a2)
        if holes: # right side pole clamp
            poly2 = [2*rp+15, 90, wp-2, -90, 2, -90, wp-2, 90, 10-2-r]
            self.polyline(*poly2)
        else:
            self.polyline(25+2*rp-r)
        # pole clamp: U-shaped arm ending in an almost closed circle (2*a3
        # short of a full turn) gripping the pole
        self.polyline(0, (90, r), wp-1-r, 90, 20, 90-a3, 0, (-360+2*a3, rp), 0, 90-a3, 20, 90, wp-1-r, (90, r))
        if holes: # left side pole clamp
            self.polyline(*list(reversed(poly2)))
        else:
            self.polyline(25+2*rp-r)
        self.polyline(0, -90+a2, 0, (270-a2-a1-5, ro), 0, (-90+a1))
        if holes: # left side main clamp
            self.polyline(*list(reversed(poly1)))
        else:
            self.polyline(l+l1-r)
        self.polyline(0, (90, r), w-2*r, (90, r))
        self.move(tw, th, move)

    def render(self):
        ri = self.flutediameter / 2.0  # inner radius of the ring
        ro = ri + self.wall            # outer radius of the ring
        rp = self.polediameter / 2.0   # pole radius
        # removed unused local "w = self.wall"
        # one narrower support layer plus two full layers, the middle one
        # with screw slots
        self.layer(ri-20, ro, rp, move="up")
        self.layer(ri, ro, rp, True, move="up")
        self.layer(ri, ro, rp, move="up")
| 3,238 | Python | .tac | 77 | 33.441558 | 111 | 0.563156 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,716 | bottlestack.py | florianfesti_boxes/boxes/generators/bottlestack.py | # Copyright (C) 2013-2020 Florian Festi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from boxes import *
class BottleStack(Boxes):
    """Stack bottles in a fridge"""

    description = """When rendered with the "double" option the parts with the double slots get connected the shorter beams in the asymmetrical slots.
Without the "double" option the stand is a bit more narrow.
"""

    ui_group = "Misc"

    def __init__(self) -> None:
        Boxes.__init__(self)
        self.argparser.add_argument(
            "--diameter", action="store", type=float, default=80,
            help="diameter of the bottles in mm")
        self.argparser.add_argument(
            "--number", action="store", type=int, default=3,
            help="number of bottles to hold in the bottom row")
        self.argparser.add_argument(
            "--depth", action="store", type=float, default=80,
            help="depth of the stand along the base of the bottles")
        self.argparser.add_argument(
            "--double", action="store", type=boolarg, default=True,
            help="two pieces that can be combined to up to double the width")

    def front(self, h_sides, offset=0, move=None):
        """Draw one wave-shaped front/back piece cradling the bottles.

        :param h_sides: height of the connecting beams; the slot depth is
            derived from it
        :param offset: 0 draws the piece with plain slots; any other value
            draws the variant with additional, wider slots (with play) used
            to interlock two stands — TODO confirm against the class
            description
        :param move: how to move the part before/after drawing
        """
        t = self.thickness
        a = 60  # half opening angle of one bottle cradle, in degrees
        nr = self.number
        r1 = self.diameter / 2.0 # bottle
        r2 = r1 / math.cos(math.radians(90-a)) - r1 # in between
        if self.double:
            r3 = 1.5*t # upper corners
        else:
            r3 = .5*t
        # height of the wave from bottle resting point up to the ridge
        h = (r1+r2) * (1-math.cos(math.radians(a)))
        h_extra = 1*t
        h_s = h_sides - t  # slot depth in the bottom edge
        p = 0.05*t # play
        tw, th = nr * r1 * 2 + 2*r3, h + 2*t
        if self.move(tw, th, move, True):
            return
        # with only 0.5*t corners the outermost slots open to the sides
        open_sides = r3 <= 0.5*t
        if offset == 0:
            # bottom edge with one plain slot per beam position
            slot = [0, 90, h_s, -90, t, -90, h_s, 90]
            if open_sides:
                self.moveTo(0, h_s)
                self.polyline(r3-0.5*t)
                self.polyline(*slot[4:])
            else:
                self.polyline(r3-0.5*t)
                self.polyline(*slot)
            for i in range(nr-open_sides):
                self.polyline(2*r1-t)
                self.polyline(*slot)
            if open_sides:
                self.polyline(2*r1-t)
                self.polyline(*slot[:-3])
                self.polyline(r3-0.5*t)
            else:
                self.polyline(r3-0.5*t)
        else:
            # bottom edge with pairs of slots: a plain one plus a deeper,
            # wider one (extra play 2*p) per beam position
            slot = [0, 90, h_s, -90, t, -90, h_s, 90]
            h_s += t
            slot2 = [0, 90, h_s, -90, t+2*p, -90, h_s, 90]
            if open_sides:
                self.moveTo(0, h_s)
                self.polyline(t+p, -90, h_s, 90)
            else:
                self.polyline(r3-0.5*t-p)
                self.polyline(*slot2)
                self.polyline(t-p)
                self.polyline(*slot)
            self.polyline(2*r1-5*t)
            self.polyline(*slot)
            self.polyline(t-p)
            self.polyline(*slot2)
            for i in range(1, nr-open_sides):
                self.polyline(2*r1-3*t-p)
                self.polyline(*slot)
                self.polyline(t-p)
                self.polyline(*slot2)
            if open_sides:
                self.polyline(2*r1-3*t-p)
                self.polyline(*slot)
                self.polyline(t-p)
                self.polyline(0, 90, h_s, -90, t+p)
            else:
                self.polyline(r3-0.5*t-p)
        if open_sides:
            h_extra -= h_s
        # right side, wavy top (one cradle arc per bottle), then left side
        self.polyline(0, 90, h_extra+h-r3, (90, r3))
        for i in range(nr):
            self.polyline(0, (a, r2), 0, (-2*a, r1), 0, (a, r2))
        self.polyline(0, (90, r3), h_extra+h-r3, 90)
        self.move(tw, th, move)

    def side(self, l, h, short=False, move=None):
        """Draw one beam connecting a front and a back piece.

        :param l: length of the beam (depth of the stand) in mm
        :param h: height of the beam in mm
        :param short: if True, draw the shorter variant fitting the
            asymmetric double slots
        :param move: how to move the part before/after drawing
        """
        t = self.thickness
        short = bool(short)  # callers may pass 0/1 from range()
        tw, th = l + 2*t - 4*t*short, h
        if self.move(tw, th, move, True):
            return
        self.moveTo(t, 0)
        self.polyline(l-3*t*short)
        if short:
            end = [90, h-t, 90, t, -90, t, 90]
        else:
            end = [(90, t), h-2*t, (90, t), 0, 90, t, -90, t, -90, t, 90]
        self.polyline(0, *end)
        self.polyline(l-2*t- 3*t*short)
        self.polyline(0, *reversed(end))
        self.move(tw, th, move)

    def render(self):
        t = self.thickness
        d = self.depth
        nr = self.number
        h_sides = 2*t  # beam height; front() derives its slot depth from it
        # "double" adds a second, offset set of parts that can be combined
        # with the first one
        pieces = 2 if self.double else 1
        for offset in range(pieces):
            # two identical wave pieces (front and back) per set
            self.front(h_sides, offset, move="up")
            self.front(h_sides, offset, move="up")
        for short in range(pieces):
            # one beam per slot position (number of bottles plus one)
            for i in range(nr+1):
                self.side(d, h_sides, short, move="up")
| 5,283 | Python | .tac | 134 | 28.835821 | 150 | 0.530859 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,717 | BottleStack.svg | florianfesti_boxes/examples/BottleStack.svg | <?xml version='1.0' encoding='utf-8'?>
<svg height="214.98mm" viewBox="0.0 0.0 269.20 214.98" width="269.20mm" xmlns="http://www.w3.org/2000/svg" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:svg="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<!--
BottleStack - Stack bottles in a fridge
When rendered with the "double" option the parts with the double slots get connected the shorter beams in the asymmetrical slots.
Without the "double" option the stand is a bit more narrow.
Created with Boxes.py (https://boxes.hackerspace-bamberg.de/)
Command line (remove spaces between dashes): boxes BottleStack
-->
<title>BottleStack</title>
<metadata>
<rdf:RDF><cc:Work>
<dc:title>Misc - BottleStack</dc:title>
<dc:source>boxes BottleStack</dc:source>
<dc:description>Stack bottles in a fridge
When rendered with the "double" option the parts with the double slots get connected the shorter beams in the asymmetrical slots.
Without the "double" option the stand is a bit more narrow.
Created with Boxes.py (https://boxes.hackerspace-bamberg.de/)
Command line: boxes BottleStack
Command line short: boxes BottleStack
</dc:description>
</cc:Work></rdf:RDF></metadata>
<g id="p-0" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 10.100 204.976 H 110.100 V 194.976 H 10.100 V 204.976 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
<text dominant-baseline="hanging" font-size="6px" style="font-family: sans-serif ; font-weight: normal; font-style: normal; fill: rgb(255,0,0)" text-anchor="middle" transform="matrix( 1.000 0.000 0.000 1.000 60.100 196.976 )">100.0mm, burn:0.10mm</text>
</g>
<g id="p-1" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 10.100 193.276 H 13.100 C 13.155 193.276 13.200 193.231 13.200 193.176 V 190.276 C 13.200 190.176 13.100 190.276 13.200 190.276 H 16.000 C 16.100 190.276 16.000 190.176 16.000 190.276 V 193.176 C 16.000 193.231 16.045 193.276 16.100 193.276 H 93.100 C 93.155 193.276 93.200 193.231 93.200 193.176 V 190.276 C 93.200 190.176 93.100 190.276 93.200 190.276 H 96.000 C 96.100 190.276 96.000 190.176 96.000 190.276 V 193.176 C 96.000 193.231 96.045 193.276 96.100 193.276 H 173.100 C 173.155 193.276 173.200 193.231 173.200 193.176 V 190.276 C 173.200 190.176 173.100 190.276 173.200 190.276 H 176.000 C 176.100 190.276 176.000 190.176 176.000 190.276 V 193.176 C 176.000 193.231 176.045 193.276 176.100 193.276 H 253.100 C 253.155 193.276 253.200 193.231 253.200 193.176 V 190.276 C 253.200 190.176 253.100 190.276 253.200 190.276 H 256.000 C 256.100 190.276 256.000 190.176 256.000 190.276 V 193.176 C 256.000 193.231 256.045 193.276 256.100 193.276 H 259.100 C 259.155 193.276 259.200 193.231 259.200 193.176 V 171.582 C 259.200 170.775 258.987 169.981 258.584 169.282 C 258.180 168.583 257.599 168.002 256.900 167.598 C 256.201 167.195 255.407 166.982 254.600 166.982 C 253.496 166.982 252.412 167.273 251.456 167.824 C 250.500 168.376 249.706 169.170 249.154 170.126 C 245.652 176.192 240.616 181.229 234.550 184.730 C 228.484 188.232 221.604 190.076 214.600 190.076 C 207.596 190.076 200.716 188.232 194.650 184.730 C 188.584 181.229 183.548 176.192 180.046 170.126 C 179.494 169.170 178.700 168.376 177.744 167.824 C 176.788 167.273 175.704 166.982 174.600 166.982 C 173.496 166.982 172.412 167.273 171.456 167.824 C 170.500 168.376 169.706 169.170 169.154 170.126 C 165.652 176.192 160.616 181.229 154.550 184.730 C 148.484 188.232 141.604 190.076 134.600 190.076 C 127.596 190.076 120.716 188.232 114.650 184.730 C 108.584 181.229 103.548 176.192 100.046 170.126 C 99.494 169.170 98.700 168.376 97.744 167.824 C 96.788 167.273 95.704 166.982 94.600 166.982 C 93.496 166.982 92.412 
167.273 91.456 167.824 C 90.500 168.376 89.706 169.170 89.154 170.126 C 85.652 176.192 80.616 181.229 74.550 184.730 C 68.484 188.232 61.604 190.076 54.600 190.076 C 47.596 190.076 40.716 188.232 34.650 184.730 C 28.584 181.229 23.548 176.192 20.046 170.126 C 19.494 169.170 18.700 168.376 17.744 167.824 C 16.788 167.273 15.704 166.982 14.600 166.982 C 13.793 166.982 12.999 167.195 12.300 167.598 C 11.601 168.002 11.020 168.583 10.616 169.282 C 10.213 169.981 10.000 170.775 10.000 171.582 V 193.176 C 10.000 193.231 10.045 193.276 10.100 193.276 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
<g id="p-2" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 10.100 162.482 H 13.100 C 13.155 162.482 13.200 162.437 13.200 162.382 V 159.482 C 13.200 159.382 13.100 159.482 13.200 159.482 H 16.000 C 16.100 159.482 16.000 159.382 16.000 159.482 V 162.382 C 16.000 162.437 16.045 162.482 16.100 162.482 H 93.100 C 93.155 162.482 93.200 162.437 93.200 162.382 V 159.482 C 93.200 159.382 93.100 159.482 93.200 159.482 H 96.000 C 96.100 159.482 96.000 159.382 96.000 159.482 V 162.382 C 96.000 162.437 96.045 162.482 96.100 162.482 H 173.100 C 173.155 162.482 173.200 162.437 173.200 162.382 V 159.482 C 173.200 159.382 173.100 159.482 173.200 159.482 H 176.000 C 176.100 159.482 176.000 159.382 176.000 159.482 V 162.382 C 176.000 162.437 176.045 162.482 176.100 162.482 H 253.100 C 253.155 162.482 253.200 162.437 253.200 162.382 V 159.482 C 253.200 159.382 253.100 159.482 253.200 159.482 H 256.000 C 256.100 159.482 256.000 159.382 256.000 159.482 V 162.382 C 256.000 162.437 256.045 162.482 256.100 162.482 H 259.100 C 259.155 162.482 259.200 162.437 259.200 162.382 V 140.788 C 259.200 139.981 258.987 139.187 258.584 138.488 C 258.180 137.789 257.599 137.208 256.900 136.804 C 256.201 136.401 255.407 136.188 254.600 136.188 C 253.496 136.188 252.412 136.479 251.456 137.030 C 250.500 137.582 249.706 138.376 249.154 139.332 C 245.652 145.398 240.616 150.434 234.550 153.936 C 228.484 157.438 221.604 159.282 214.600 159.282 C 207.596 159.282 200.716 157.438 194.650 153.936 C 188.584 150.434 183.548 145.398 180.046 139.332 C 179.494 138.376 178.700 137.582 177.744 137.030 C 176.788 136.479 175.704 136.188 174.600 136.188 C 173.496 136.188 172.412 136.479 171.456 137.030 C 170.500 137.582 169.706 138.376 169.154 139.332 C 165.652 145.398 160.616 150.434 154.550 153.936 C 148.484 157.438 141.604 159.282 134.600 159.282 C 127.596 159.282 120.716 157.438 114.650 153.936 C 108.584 150.434 103.548 145.398 100.046 139.332 C 99.494 138.376 98.700 137.582 97.744 137.030 C 96.788 136.479 95.704 136.188 94.600 136.188 C 93.496 136.188 92.412 
136.479 91.456 137.030 C 90.500 137.582 89.706 138.376 89.154 139.332 C 85.652 145.398 80.616 150.434 74.550 153.936 C 68.484 157.438 61.604 159.282 54.600 159.282 C 47.596 159.282 40.716 157.438 34.650 153.936 C 28.584 150.434 23.548 145.398 20.046 139.332 C 19.494 138.376 18.700 137.582 17.744 137.030 C 16.788 136.479 15.704 136.188 14.600 136.188 C 13.793 136.188 12.999 136.401 12.300 136.804 C 11.601 137.208 11.020 137.789 10.616 138.488 C 10.213 139.187 10.000 139.981 10.000 140.788 V 162.382 C 10.000 162.437 10.045 162.482 10.100 162.482 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
<g id="p-3" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 10.100 131.688 H 12.950 C 13.005 131.688 13.050 131.643 13.050 131.588 V 125.688 C 13.050 125.588 12.950 125.688 13.050 125.688 H 16.150 C 16.250 125.688 16.150 125.588 16.150 125.688 V 131.588 C 16.150 131.643 16.195 131.688 16.250 131.688 H 19.100 C 19.155 131.688 19.200 131.643 19.200 131.588 V 128.688 C 19.200 128.588 19.100 128.688 19.200 128.688 H 22.000 C 22.100 128.688 22.000 128.588 22.000 128.688 V 131.588 C 22.000 131.643 22.045 131.688 22.100 131.688 H 87.100 C 87.155 131.688 87.200 131.643 87.200 131.588 V 128.688 C 87.200 128.588 87.100 128.688 87.200 128.688 H 90.000 C 90.100 128.688 90.000 128.588 90.000 128.688 V 131.588 C 90.000 131.643 90.045 131.688 90.100 131.688 H 92.950 C 93.005 131.688 93.050 131.643 93.050 131.588 V 125.688 C 93.050 125.588 92.950 125.688 93.050 125.688 H 96.150 C 96.250 125.688 96.150 125.588 96.150 125.688 V 131.588 C 96.150 131.643 96.195 131.688 96.250 131.688 H 167.100 C 167.155 131.688 167.200 131.643 167.200 131.588 V 128.688 C 167.200 128.588 167.100 128.688 167.200 128.688 H 170.000 C 170.100 128.688 170.000 128.588 170.000 128.688 V 131.588 C 170.000 131.643 170.045 131.688 170.100 131.688 H 172.950 C 173.005 131.688 173.050 131.643 173.050 131.588 V 125.688 C 173.050 125.588 172.950 125.688 173.050 125.688 H 176.150 C 176.250 125.688 176.150 125.588 176.150 125.688 V 131.588 C 176.150 131.643 176.195 131.688 176.250 131.688 H 247.100 C 247.155 131.688 247.200 131.643 247.200 131.588 V 128.688 C 247.200 128.588 247.100 128.688 247.200 128.688 H 250.000 C 250.100 128.688 250.000 128.588 250.000 128.688 V 131.588 C 250.000 131.643 250.045 131.688 250.100 131.688 H 252.950 C 253.005 131.688 253.050 131.643 253.050 131.588 V 125.688 C 253.050 125.588 252.950 125.688 253.050 125.688 H 256.150 C 256.250 125.688 256.150 125.588 256.150 125.688 V 131.588 C 256.150 131.643 256.195 131.688 256.250 131.688 H 259.100 C 259.155 131.688 259.200 131.643 259.200 131.588 V 109.994 C 259.200 109.187 258.987 108.393 
258.584 107.694 C 258.180 106.995 257.599 106.414 256.900 106.010 C 256.201 105.607 255.407 105.394 254.600 105.394 C 253.496 105.394 252.412 105.685 251.456 106.236 C 250.500 106.788 249.706 107.582 249.154 108.538 C 245.652 114.604 240.616 119.640 234.550 123.142 C 228.484 126.644 221.604 128.488 214.600 128.488 C 207.596 128.488 200.716 126.644 194.650 123.142 C 188.584 119.640 183.548 114.604 180.046 108.538 C 179.494 107.582 178.700 106.788 177.744 106.236 C 176.788 105.685 175.704 105.394 174.600 105.394 C 173.496 105.394 172.412 105.685 171.456 106.236 C 170.500 106.788 169.706 107.582 169.154 108.538 C 165.652 114.604 160.616 119.640 154.550 123.142 C 148.484 126.644 141.604 128.488 134.600 128.488 C 127.596 128.488 120.716 126.644 114.650 123.142 C 108.584 119.640 103.548 114.604 100.046 108.538 C 99.494 107.582 98.700 106.788 97.744 106.236 C 96.788 105.685 95.704 105.394 94.600 105.394 C 93.496 105.394 92.412 105.685 91.456 106.236 C 90.500 106.788 89.706 107.582 89.154 108.538 C 85.652 114.604 80.616 119.640 74.550 123.142 C 68.484 126.644 61.604 128.488 54.600 128.488 C 47.596 128.488 40.716 126.644 34.650 123.142 C 28.584 119.640 23.548 114.604 20.046 108.538 C 19.494 107.582 18.700 106.788 17.744 106.236 C 16.788 105.685 15.704 105.394 14.600 105.394 C 13.793 105.394 12.999 105.607 12.300 106.010 C 11.601 106.414 11.020 106.995 10.616 107.694 C 10.213 108.393 10.000 109.187 10.000 109.994 V 131.588 C 10.000 131.643 10.045 131.688 10.100 131.688 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
<g id="p-4" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 10.100 100.894 H 12.950 C 13.005 100.894 13.050 100.849 13.050 100.794 V 94.894 C 13.050 94.794 12.950 94.894 13.050 94.894 H 16.150 C 16.250 94.894 16.150 94.794 16.150 94.894 V 100.794 C 16.150 100.849 16.195 100.894 16.250 100.894 H 19.100 C 19.155 100.894 19.200 100.849 19.200 100.794 V 97.894 C 19.200 97.794 19.100 97.894 19.200 97.894 H 22.000 C 22.100 97.894 22.000 97.794 22.000 97.894 V 100.794 C 22.000 100.849 22.045 100.894 22.100 100.894 H 87.100 C 87.155 100.894 87.200 100.849 87.200 100.794 V 97.894 C 87.200 97.794 87.100 97.894 87.200 97.894 H 90.000 C 90.100 97.894 90.000 97.794 90.000 97.894 V 100.794 C 90.000 100.849 90.045 100.894 90.100 100.894 H 92.950 C 93.005 100.894 93.050 100.849 93.050 100.794 V 94.894 C 93.050 94.794 92.950 94.894 93.050 94.894 H 96.150 C 96.250 94.894 96.150 94.794 96.150 94.894 V 100.794 C 96.150 100.849 96.195 100.894 96.250 100.894 H 167.100 C 167.155 100.894 167.200 100.849 167.200 100.794 V 97.894 C 167.200 97.794 167.100 97.894 167.200 97.894 H 170.000 C 170.100 97.894 170.000 97.794 170.000 97.894 V 100.794 C 170.000 100.849 170.045 100.894 170.100 100.894 H 172.950 C 173.005 100.894 173.050 100.849 173.050 100.794 V 94.894 C 173.050 94.794 172.950 94.894 173.050 94.894 H 176.150 C 176.250 94.894 176.150 94.794 176.150 94.894 V 100.794 C 176.150 100.849 176.195 100.894 176.250 100.894 H 247.100 C 247.155 100.894 247.200 100.849 247.200 100.794 V 97.894 C 247.200 97.794 247.100 97.894 247.200 97.894 H 250.000 C 250.100 97.894 250.000 97.794 250.000 97.894 V 100.794 C 250.000 100.849 250.045 100.894 250.100 100.894 H 252.950 C 253.005 100.894 253.050 100.849 253.050 100.794 V 94.894 C 253.050 94.794 252.950 94.894 253.050 94.894 H 256.150 C 256.250 94.894 256.150 94.794 256.150 94.894 V 100.794 C 256.150 100.849 256.195 100.894 256.250 100.894 H 259.100 C 259.155 100.894 259.200 100.849 259.200 100.794 V 79.200 C 259.200 78.393 258.987 77.599 258.584 76.900 C 258.180 76.201 257.599 75.620 256.900 75.216 C 
256.201 74.813 255.407 74.600 254.600 74.600 C 253.496 74.600 252.412 74.891 251.456 75.442 C 250.500 75.994 249.706 76.788 249.154 77.744 C 245.652 83.810 240.616 88.846 234.550 92.348 C 228.484 95.850 221.604 97.694 214.600 97.694 C 207.596 97.694 200.716 95.850 194.650 92.348 C 188.584 88.846 183.548 83.810 180.046 77.744 C 179.494 76.788 178.700 75.994 177.744 75.442 C 176.788 74.891 175.704 74.600 174.600 74.600 C 173.496 74.600 172.412 74.891 171.456 75.442 C 170.500 75.994 169.706 76.788 169.154 77.744 C 165.652 83.810 160.616 88.846 154.550 92.348 C 148.484 95.850 141.604 97.694 134.600 97.694 C 127.596 97.694 120.716 95.850 114.650 92.348 C 108.584 88.846 103.548 83.810 100.046 77.744 C 99.494 76.788 98.700 75.994 97.744 75.442 C 96.788 74.891 95.704 74.600 94.600 74.600 C 93.496 74.600 92.412 74.891 91.456 75.442 C 90.500 75.994 89.706 76.788 89.154 77.744 C 85.652 83.810 80.616 88.846 74.550 92.348 C 68.484 95.850 61.604 97.694 54.600 97.694 C 47.596 97.694 40.716 95.850 34.650 92.348 C 28.584 88.846 23.548 83.810 20.046 77.744 C 19.494 76.788 18.700 75.994 17.744 75.442 C 16.788 74.891 15.704 74.600 14.600 74.600 C 13.793 74.600 12.999 74.813 12.300 75.216 C 11.601 75.620 11.020 76.201 10.616 76.900 C 10.213 77.599 10.000 78.393 10.000 79.200 V 100.794 C 10.000 100.849 10.045 100.894 10.100 100.894 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
<g id="p-5" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 13.100 70.100 H 93.100 C 93.644 70.100 94.179 69.957 94.650 69.685 C 95.121 69.413 95.513 69.021 95.785 68.550 C 96.057 68.079 96.200 67.544 96.200 67.000 C 96.200 66.456 96.057 65.921 95.785 65.450 C 95.513 64.979 95.121 64.587 94.650 64.315 C 94.179 64.043 93.644 63.900 93.100 63.900 C 93.045 63.900 93.000 63.945 93.000 64.000 V 66.900 C 93.000 67.000 93.100 66.900 93.000 66.900 H 90.200 C 90.100 66.900 90.200 67.000 90.200 66.900 V 64.000 C 90.200 63.945 90.155 63.900 90.100 63.900 H 16.100 C 16.045 63.900 16.000 63.945 16.000 64.000 V 66.900 C 16.000 67.000 16.100 66.900 16.000 66.900 H 13.200 C 13.100 66.900 13.200 67.000 13.200 66.900 V 64.000 C 13.200 63.945 13.155 63.900 13.100 63.900 C 12.556 63.900 12.021 64.043 11.550 64.315 C 11.079 64.587 10.687 64.979 10.415 65.450 C 10.143 65.921 10.000 66.456 10.000 67.000 C 10.000 67.544 10.143 68.079 10.415 68.550 C 10.687 69.021 11.079 69.413 11.550 69.685 C 12.021 69.957 12.556 70.100 13.100 70.100 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
<g id="p-6" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 13.100 62.400 H 93.100 C 93.644 62.400 94.179 62.257 94.650 61.985 C 95.121 61.713 95.513 61.321 95.785 60.850 C 96.057 60.379 96.200 59.844 96.200 59.300 C 96.200 58.756 96.057 58.221 95.785 57.750 C 95.513 57.279 95.121 56.887 94.650 56.615 C 94.179 56.343 93.644 56.200 93.100 56.200 C 93.045 56.200 93.000 56.245 93.000 56.300 V 59.200 C 93.000 59.300 93.100 59.200 93.000 59.200 H 90.200 C 90.100 59.200 90.200 59.300 90.200 59.200 V 56.300 C 90.200 56.245 90.155 56.200 90.100 56.200 H 16.100 C 16.045 56.200 16.000 56.245 16.000 56.300 V 59.200 C 16.000 59.300 16.100 59.200 16.000 59.200 H 13.200 C 13.100 59.200 13.200 59.300 13.200 59.200 V 56.300 C 13.200 56.245 13.155 56.200 13.100 56.200 C 12.556 56.200 12.021 56.343 11.550 56.615 C 11.079 56.887 10.687 57.279 10.415 57.750 C 10.143 58.221 10.000 58.756 10.000 59.300 C 10.000 59.844 10.143 60.379 10.415 60.850 C 10.687 61.321 11.079 61.713 11.550 61.985 C 12.021 62.257 12.556 62.400 13.100 62.400 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
<g id="p-7" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 13.100 54.700 H 93.100 C 93.644 54.700 94.179 54.557 94.650 54.285 C 95.121 54.013 95.513 53.621 95.785 53.150 C 96.057 52.679 96.200 52.144 96.200 51.600 C 96.200 51.056 96.057 50.521 95.785 50.050 C 95.513 49.579 95.121 49.187 94.650 48.915 C 94.179 48.643 93.644 48.500 93.100 48.500 C 93.045 48.500 93.000 48.545 93.000 48.600 V 51.500 C 93.000 51.600 93.100 51.500 93.000 51.500 H 90.200 C 90.100 51.500 90.200 51.600 90.200 51.500 V 48.600 C 90.200 48.545 90.155 48.500 90.100 48.500 H 16.100 C 16.045 48.500 16.000 48.545 16.000 48.600 V 51.500 C 16.000 51.600 16.100 51.500 16.000 51.500 H 13.200 C 13.100 51.500 13.200 51.600 13.200 51.500 V 48.600 C 13.200 48.545 13.155 48.500 13.100 48.500 C 12.556 48.500 12.021 48.643 11.550 48.915 C 11.079 49.187 10.687 49.579 10.415 50.050 C 10.143 50.521 10.000 51.056 10.000 51.600 C 10.000 52.144 10.143 52.679 10.415 53.150 C 10.687 53.621 11.079 54.013 11.550 54.285 C 12.021 54.557 12.556 54.700 13.100 54.700 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
<g id="p-8" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 13.100 47.000 H 93.100 C 93.644 47.000 94.179 46.857 94.650 46.585 C 95.121 46.313 95.513 45.921 95.785 45.450 C 96.057 44.979 96.200 44.444 96.200 43.900 C 96.200 43.356 96.057 42.821 95.785 42.350 C 95.513 41.879 95.121 41.487 94.650 41.215 C 94.179 40.943 93.644 40.800 93.100 40.800 C 93.045 40.800 93.000 40.845 93.000 40.900 V 43.800 C 93.000 43.900 93.100 43.800 93.000 43.800 H 90.200 C 90.100 43.800 90.200 43.900 90.200 43.800 V 40.900 C 90.200 40.845 90.155 40.800 90.100 40.800 H 16.100 C 16.045 40.800 16.000 40.845 16.000 40.900 V 43.800 C 16.000 43.900 16.100 43.800 16.000 43.800 H 13.200 C 13.100 43.800 13.200 43.900 13.200 43.800 V 40.900 C 13.200 40.845 13.155 40.800 13.100 40.800 C 12.556 40.800 12.021 40.943 11.550 41.215 C 11.079 41.487 10.687 41.879 10.415 42.350 C 10.143 42.821 10.000 43.356 10.000 43.900 C 10.000 44.444 10.143 44.979 10.415 45.450 C 10.687 45.921 11.079 46.313 11.550 46.585 C 12.021 46.857 12.556 47.000 13.100 47.000 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
<g id="p-9" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 13.100 39.300 H 84.100 C 84.155 39.300 84.200 39.255 84.200 39.200 V 36.200 C 84.200 36.145 84.155 36.100 84.100 36.100 H 81.200 C 81.100 36.100 81.200 36.200 81.200 36.100 V 33.200 C 81.200 33.145 81.155 33.100 81.100 33.100 H 16.100 C 16.045 33.100 16.000 33.145 16.000 33.200 V 36.100 C 16.000 36.200 16.100 36.100 16.000 36.100 H 13.100 C 13.045 36.100 13.000 36.145 13.000 36.200 V 39.200 C 13.000 39.255 13.045 39.300 13.100 39.300 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
<g id="p-10" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 13.100 31.600 H 84.100 C 84.155 31.600 84.200 31.555 84.200 31.500 V 28.500 C 84.200 28.445 84.155 28.400 84.100 28.400 H 81.200 C 81.100 28.400 81.200 28.500 81.200 28.400 V 25.500 C 81.200 25.445 81.155 25.400 81.100 25.400 H 16.100 C 16.045 25.400 16.000 25.445 16.000 25.500 V 28.400 C 16.000 28.500 16.100 28.400 16.000 28.400 H 13.100 C 13.045 28.400 13.000 28.445 13.000 28.500 V 31.500 C 13.000 31.555 13.045 31.600 13.100 31.600 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
<g id="p-11" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 13.100 23.900 H 84.100 C 84.155 23.900 84.200 23.855 84.200 23.800 V 20.800 C 84.200 20.745 84.155 20.700 84.100 20.700 H 81.200 C 81.100 20.700 81.200 20.800 81.200 20.700 V 17.800 C 81.200 17.745 81.155 17.700 81.100 17.700 H 16.100 C 16.045 17.700 16.000 17.745 16.000 17.800 V 20.700 C 16.000 20.800 16.100 20.700 16.000 20.700 H 13.100 C 13.045 20.700 13.000 20.745 13.000 20.800 V 23.800 C 13.000 23.855 13.045 23.900 13.100 23.900 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
<g id="p-12" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 13.100 16.200 H 84.100 C 84.155 16.200 84.200 16.155 84.200 16.100 V 13.100 C 84.200 13.045 84.155 13.000 84.100 13.000 H 81.200 C 81.100 13.000 81.200 13.100 81.200 13.000 V 10.100 C 81.200 10.045 81.155 10.000 81.100 10.000 H 16.100 C 16.045 10.000 16.000 10.045 16.000 10.100 V 13.000 C 16.000 13.100 16.100 13.000 16.000 13.000 H 13.100 C 13.045 13.000 13.000 13.045 13.000 13.100 V 16.100 C 13.000 16.155 13.045 16.200 13.100 16.200 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
</svg> | 20,957 | Python | .tac | 63 | 331.063492 | 3,525 | 0.715346 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,718 | StackableBin.svg | florianfesti_boxes/examples/StackableBin.svg | <?xml version='1.0' encoding='utf-8'?>
<svg height="178.41mm" viewBox="0.0 0.0 191.90 178.41" width="191.90mm" xmlns="http://www.w3.org/2000/svg" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:svg="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<!--
StackableBin - Stackable bin base on bintray
Created with Boxes.py (https://boxes.hackerspace-bamberg.de/)
Command line (remove spaces between dashes): boxes StackableBin
-->
<title>StackableBin</title>
<metadata>
<rdf:RDF><cc:Work>
<dc:title>Shelf - StackableBin</dc:title>
<dc:source>boxes StackableBin</dc:source>
<dc:description>Stackable bin base on bintray
Created with Boxes.py (https://boxes.hackerspace-bamberg.de/)
Command line: boxes StackableBin
Command line short: boxes StackableBin
</dc:description>
</cc:Work></rdf:RDF></metadata>
<g id="p-0" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 10.100 168.413 H 110.100 V 158.413 H 10.100 V 168.413 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
<text dominant-baseline="hanging" font-size="6px" style="font-family: sans-serif ; font-weight: normal; font-style: normal; fill: rgb(255,0,0)" text-anchor="middle" transform="matrix( 1.000 0.000 0.000 1.000 60.100 160.413 )">100.0mm, burn:0.10mm</text>
</g>
<g id="p-1" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<text dominant-baseline="hanging" font-size="4px" style="font-family: sans-serif ; font-weight: normal; font-style: normal; fill: rgb(255,0,0)" text-anchor="middle" transform="matrix( 1.000 0.000 0.000 1.000 45.100 110.211 )">bottom</text><path d="M 13.100 153.713 H 18.000 C 18.100 153.713 18.000 153.613 18.000 153.713 V 156.613 C 18.000 156.668 18.045 156.713 18.100 156.713 H 24.100 C 24.155 156.713 24.200 156.668 24.200 156.613 V 153.713 C 24.200 153.613 24.100 153.713 24.200 153.713 H 30.000 C 30.100 153.713 30.000 153.613 30.000 153.713 V 156.613 C 30.000 156.668 30.045 156.713 30.100 156.713 H 36.100 C 36.155 156.713 36.200 156.668 36.200 156.613 V 153.713 C 36.200 153.613 36.100 153.713 36.200 153.713 H 42.000 C 42.100 153.713 42.000 153.613 42.000 153.713 V 156.613 C 42.000 156.668 42.045 156.713 42.100 156.713 H 48.100 C 48.155 156.713 48.200 156.668 48.200 156.613 V 153.713 C 48.200 153.613 48.100 153.713 48.200 153.713 H 54.000 C 54.100 153.713 54.000 153.613 54.000 153.713 V 156.613 C 54.000 156.668 54.045 156.713 54.100 156.713 H 60.100 C 60.155 156.713 60.200 156.668 60.200 156.613 V 153.713 C 60.200 153.613 60.100 153.713 60.200 153.713 H 66.000 C 66.100 153.713 66.000 153.613 66.000 153.713 V 156.613 C 66.000 156.668 66.045 156.713 66.100 156.713 H 72.100 C 72.155 156.713 72.200 156.668 72.200 156.613 V 153.713 C 72.200 153.613 72.100 153.713 72.200 153.713 H 77.100 C 77.155 153.713 77.200 153.668 77.200 153.613 V 150.333 C 77.200 150.233 77.100 150.333 77.200 150.333 H 80.100 C 80.155 150.333 80.200 150.288 80.200 150.233 V 144.233 C 80.200 144.177 80.155 144.133 80.100 144.133 H 77.200 C 77.100 144.133 77.200 144.233 77.200 144.133 V 138.333 C 77.200 138.233 77.100 138.333 77.200 138.333 H 80.100 C 80.155 138.333 80.200 138.288 80.200 138.233 V 132.233 C 80.200 132.177 80.155 132.133 80.100 132.133 H 77.200 C 77.100 132.133 77.200 132.233 77.200 132.133 V 126.333 C 77.200 126.233 77.100 126.333 77.200 126.333 H 80.100 C 80.155 126.333 80.200 
126.288 80.200 126.233 V 120.233 C 80.200 120.177 80.155 120.133 80.100 120.133 H 77.200 C 77.100 120.133 77.200 120.233 77.200 120.133 V 114.333 C 77.200 114.233 77.100 114.333 77.200 114.333 H 80.100 C 80.155 114.333 80.200 114.288 80.200 114.233 V 108.233 C 80.200 108.177 80.155 108.133 80.100 108.133 H 77.200 C 77.100 108.133 77.200 108.233 77.200 108.133 V 102.333 C 77.200 102.233 77.100 102.333 77.200 102.333 H 80.100 C 80.155 102.333 80.200 102.288 80.200 102.233 V 96.233 C 80.200 96.177 80.155 96.133 80.100 96.133 H 77.200 C 77.100 96.133 77.200 96.233 77.200 96.133 V 90.333 C 77.200 90.233 77.100 90.333 77.200 90.333 H 80.100 C 80.155 90.333 80.200 90.288 80.200 90.233 V 84.233 C 80.200 84.177 80.155 84.133 80.100 84.133 H 77.200 C 77.100 84.133 77.200 84.233 77.200 84.133 V 78.333 C 77.200 78.233 77.100 78.333 77.200 78.333 H 80.100 C 80.155 78.333 80.200 78.288 80.200 78.233 V 72.233 C 80.200 72.177 80.155 72.133 80.100 72.133 H 77.200 C 77.100 72.133 77.200 72.233 77.200 72.133 V 68.853 V 67.610 C 77.200 67.555 77.155 67.510 77.100 67.510 H 72.100 C 72.045 67.510 72.000 67.555 72.000 67.610 V 69.631 C 72.000 69.731 72.100 69.631 72.000 69.631 H 66.200 C 66.100 69.631 66.200 69.731 66.200 69.631 V 67.610 C 66.200 67.555 66.155 67.510 66.100 67.510 H 60.100 C 60.045 67.510 60.000 67.555 60.000 67.610 V 69.631 C 60.000 69.731 60.100 69.631 60.000 69.631 H 54.200 C 54.100 69.631 54.200 69.731 54.200 69.631 V 67.610 C 54.200 67.555 54.155 67.510 54.100 67.510 H 48.100 C 48.045 67.510 48.000 67.555 48.000 67.610 V 69.631 C 48.000 69.731 48.100 69.631 48.000 69.631 H 42.200 C 42.100 69.631 42.200 69.731 42.200 69.631 V 67.610 C 42.200 67.555 42.155 67.510 42.100 67.510 H 36.100 C 36.045 67.510 36.000 67.555 36.000 67.610 V 69.631 C 36.000 69.731 36.100 69.631 36.000 69.631 H 30.200 C 30.100 69.631 30.200 69.731 30.200 69.631 V 67.610 C 30.200 67.555 30.155 67.510 30.100 67.510 H 24.100 C 24.045 67.510 24.000 67.555 24.000 67.610 V 69.631 C 24.000 69.731 24.100 
69.631 24.000 69.631 H 18.200 C 18.100 69.631 18.200 69.731 18.200 69.631 V 67.610 C 18.200 67.555 18.155 67.510 18.100 67.510 H 13.100 C 13.045 67.510 13.000 67.555 13.000 67.610 V 68.853 V 72.133 C 13.000 72.233 13.100 72.133 13.000 72.133 H 10.100 C 10.045 72.133 10.000 72.177 10.000 72.233 V 78.233 C 10.000 78.288 10.045 78.333 10.100 78.333 H 13.000 C 13.100 78.333 13.000 78.233 13.000 78.333 V 84.133 C 13.000 84.233 13.100 84.133 13.000 84.133 H 10.100 C 10.045 84.133 10.000 84.177 10.000 84.233 V 90.233 C 10.000 90.288 10.045 90.333 10.100 90.333 H 13.000 C 13.100 90.333 13.000 90.233 13.000 90.333 V 96.133 C 13.000 96.233 13.100 96.133 13.000 96.133 H 10.100 C 10.045 96.133 10.000 96.177 10.000 96.233 V 102.233 C 10.000 102.288 10.045 102.333 10.100 102.333 H 13.000 C 13.100 102.333 13.000 102.233 13.000 102.333 V 108.133 C 13.000 108.233 13.100 108.133 13.000 108.133 H 10.100 C 10.045 108.133 10.000 108.177 10.000 108.233 V 114.233 C 10.000 114.288 10.045 114.333 10.100 114.333 H 13.000 C 13.100 114.333 13.000 114.233 13.000 114.333 V 120.133 C 13.000 120.233 13.100 120.133 13.000 120.133 H 10.100 C 10.045 120.133 10.000 120.177 10.000 120.233 V 126.233 C 10.000 126.288 10.045 126.333 10.100 126.333 H 13.000 C 13.100 126.333 13.000 126.233 13.000 126.333 V 132.133 C 13.000 132.233 13.100 132.133 13.000 132.133 H 10.100 C 10.045 132.133 10.000 132.177 10.000 132.233 V 138.233 C 10.000 138.288 10.045 138.333 10.100 138.333 H 13.000 C 13.100 138.333 13.000 138.233 13.000 138.333 V 144.133 C 13.000 144.233 13.100 144.133 13.000 144.133 H 10.100 C 10.045 144.133 10.000 144.177 10.000 144.233 V 150.233 C 10.000 150.288 10.045 150.333 10.100 150.333 H 13.000 C 13.100 150.333 13.000 150.233 13.000 150.333 V 153.613 C 13.000 153.668 13.045 153.713 13.100 153.713" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
<g id="p-2" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 21.100 62.810 H 18.200 C 18.100 62.810 18.200 62.910 18.200 62.810 V 60.010 C 18.200 59.910 18.100 60.010 18.200 60.010 H 24.000 C 24.100 60.010 24.000 59.910 24.000 60.010 V 62.810 C 24.000 62.910 24.100 62.810 24.000 62.810 H 21.100 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
<path d="M 33.100 62.810 H 30.200 C 30.100 62.810 30.200 62.910 30.200 62.810 V 60.010 C 30.200 59.910 30.100 60.010 30.200 60.010 H 36.000 C 36.100 60.010 36.000 59.910 36.000 60.010 V 62.810 C 36.000 62.910 36.100 62.810 36.000 62.810 H 33.100 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
<path d="M 45.100 62.810 H 42.200 C 42.100 62.810 42.200 62.910 42.200 62.810 V 60.010 C 42.200 59.910 42.100 60.010 42.200 60.010 H 48.000 C 48.100 60.010 48.000 59.910 48.000 60.010 V 62.810 C 48.000 62.910 48.100 62.810 48.000 62.810 H 45.100 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
<path d="M 57.100 62.810 H 54.200 C 54.100 62.810 54.200 62.910 54.200 62.810 V 60.010 C 54.200 59.910 54.100 60.010 54.200 60.010 H 60.000 C 60.100 60.010 60.000 59.910 60.000 60.010 V 62.810 C 60.000 62.910 60.100 62.810 60.000 62.810 H 57.100 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
<path d="M 69.100 62.810 H 66.200 C 66.100 62.810 66.200 62.910 66.200 62.810 V 60.010 C 66.200 59.910 66.100 60.010 66.200 60.010 H 72.000 C 72.100 60.010 72.000 59.910 72.000 60.010 V 62.810 C 72.000 62.910 72.100 62.810 72.000 62.810 H 69.100 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
<text dominant-baseline="hanging" font-size="4px" style="font-family: sans-serif ; font-weight: normal; font-style: normal; fill: rgb(255,0,0)" text-anchor="middle" transform="matrix( 1.000 0.000 0.000 1.000 45.100 49.460 )">back</text><path d="M 13.100 66.010 H 77.100 C 77.155 66.010 77.200 65.965 77.200 65.910 V 59.910 V 57.460 C 77.200 57.360 77.100 57.460 77.200 57.460 H 80.100 C 80.155 57.460 80.200 57.415 80.200 57.360 V 51.360 C 80.200 51.305 80.155 51.260 80.100 51.260 H 77.200 C 77.100 51.260 77.200 51.360 77.200 51.260 V 45.460 C 77.200 45.360 77.100 45.460 77.200 45.460 H 80.100 C 80.155 45.460 80.200 45.415 80.200 45.360 V 39.360 C 80.200 39.305 80.155 39.260 80.100 39.260 H 77.200 C 77.100 39.260 77.200 39.360 77.200 39.260 V 36.810 C 77.200 36.755 77.155 36.710 77.100 36.710 H 13.100 C 13.045 36.710 13.000 36.755 13.000 36.810 V 39.260 C 13.000 39.360 13.100 39.260 13.000 39.260 H 10.100 C 10.045 39.260 10.000 39.305 10.000 39.360 V 45.360 C 10.000 45.415 10.045 45.460 10.100 45.460 H 13.000 C 13.100 45.460 13.000 45.360 13.000 45.460 V 51.260 C 13.000 51.360 13.100 51.260 13.000 51.260 H 10.100 C 10.045 51.260 10.000 51.305 10.000 51.360 V 57.360 C 10.000 57.415 10.045 57.460 10.100 57.460 H 13.000 C 13.100 57.460 13.000 57.360 13.000 57.460 V 59.910 V 65.910 C 13.000 65.965 13.045 66.010 13.100 66.010" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
<g id="p-3" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<text dominant-baseline="hanging" font-size="4px" style="font-family: sans-serif ; font-weight: normal; font-style: normal; fill: rgb(255,0,0)" text-anchor="middle" transform="matrix( 1.000 0.000 0.000 1.000 45.100 26.055 )">retainer</text><path d="M 13.100 33.089 H 18.000 C 18.100 33.089 18.000 32.989 18.000 33.089 V 35.110 C 18.000 35.165 18.045 35.210 18.100 35.210 H 24.100 C 24.155 35.210 24.200 35.165 24.200 35.110 V 33.089 C 24.200 32.989 24.100 33.089 24.200 33.089 H 30.000 C 30.100 33.089 30.000 32.989 30.000 33.089 V 35.110 C 30.000 35.165 30.045 35.210 30.100 35.210 H 36.100 C 36.155 35.210 36.200 35.165 36.200 35.110 V 33.089 C 36.200 32.989 36.100 33.089 36.200 33.089 H 42.000 C 42.100 33.089 42.000 32.989 42.000 33.089 V 35.110 C 42.000 35.165 42.045 35.210 42.100 35.210 H 48.100 C 48.155 35.210 48.200 35.165 48.200 35.110 V 33.089 C 48.200 32.989 48.100 33.089 48.200 33.089 H 54.000 C 54.100 33.089 54.000 32.989 54.000 33.089 V 35.110 C 54.000 35.165 54.045 35.210 54.100 35.210 H 60.100 C 60.155 35.210 60.200 35.165 60.200 35.110 V 33.089 C 60.200 32.989 60.100 33.089 60.200 33.089 H 66.000 C 66.100 33.089 66.000 32.989 66.000 33.089 V 35.110 C 66.000 35.165 66.045 35.210 66.100 35.210 H 72.100 C 72.155 35.210 72.200 35.165 72.200 35.110 V 33.089 C 72.200 32.989 72.100 33.089 72.200 33.089 H 77.100 H 80.100 C 80.155 33.089 80.200 33.044 80.200 32.989 V 33.867 V 30.334 C 80.200 30.278 80.155 30.234 80.100 30.234 H 77.200 C 77.100 30.234 77.200 30.334 77.200 30.234 V 24.434 C 77.200 24.334 77.100 24.434 77.200 24.434 H 80.100 C 80.155 24.434 80.200 24.389 80.200 24.334 V 20.800 C 80.200 20.745 80.155 20.700 80.100 20.700 H 77.100 H 13.100 H 10.100 C 10.045 20.700 10.000 20.745 10.000 20.800 V 24.334 C 10.000 24.389 10.045 24.434 10.100 24.434 H 13.000 C 13.100 24.434 13.000 24.334 13.000 24.434 V 30.234 C 13.000 30.334 13.100 30.234 13.000 30.234 H 10.100 C 10.045 30.234 10.000 30.278 10.000 30.334 V 33.867 V 32.989 C 10.000 33.044 10.045 33.089 10.100 
33.089 H 13.100" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
<g id="p-4" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<text dominant-baseline="hanging" font-size="4px" style="font-family: sans-serif ; font-weight: normal; font-style: normal; fill: rgb(255,0,0)" text-anchor="middle" transform="matrix( 1.000 0.000 0.000 1.000 45.100 12.700 )">for label (optional)</text><path d="M 13.100 19.200 H 77.100 H 80.100 C 80.155 19.200 80.200 19.155 80.200 19.100 V 16.100 V 13.100 V 10.100 C 80.200 10.045 80.155 10.000 80.100 10.000 H 77.100 H 13.100 H 10.100 C 10.045 10.000 10.000 10.045 10.000 10.100 V 13.100 V 16.100 V 19.100 C 10.000 19.155 10.045 19.200 10.100 19.200 H 13.100" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
<g id="p-5" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 97.420 138.713 H 94.520 C 94.420 138.713 94.520 138.813 94.520 138.713 V 135.913 C 94.520 135.813 94.420 135.913 94.520 135.913 H 100.320 C 100.420 135.913 100.320 135.813 100.320 135.913 V 138.713 C 100.320 138.813 100.420 138.713 100.320 138.713 H 97.420 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
<path d="M 109.420 138.713 H 106.520 C 106.420 138.713 106.520 138.813 106.520 138.713 V 135.913 C 106.520 135.813 106.420 135.913 106.520 135.913 H 112.320 C 112.420 135.913 112.320 135.813 112.320 135.913 V 138.713 C 112.320 138.813 112.420 138.713 112.320 138.713 H 109.420 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
<path d="M 121.420 138.713 H 118.520 C 118.420 138.713 118.520 138.813 118.520 138.713 V 135.913 C 118.520 135.813 118.420 135.913 118.520 135.913 H 124.320 C 124.420 135.913 124.320 135.813 124.320 135.913 V 138.713 C 124.320 138.813 124.420 138.713 124.320 138.713 H 121.420 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
<path d="M 133.420 138.713 H 130.520 C 130.420 138.713 130.520 138.813 130.520 138.713 V 135.913 C 130.520 135.813 130.420 135.913 130.520 135.913 H 136.320 C 136.420 135.913 136.320 135.813 136.320 135.913 V 138.713 C 136.320 138.813 136.420 138.713 136.320 138.713 H 133.420 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
<path d="M 145.420 138.713 H 142.520 C 142.420 138.713 142.520 138.813 142.520 138.713 V 135.913 C 142.520 135.813 142.420 135.913 142.520 135.913 H 148.320 C 148.420 135.913 148.320 135.813 148.320 135.913 V 138.713 C 148.320 138.813 148.420 138.713 148.320 138.713 H 145.420 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
<path d="M 157.420 138.713 H 154.520 C 154.420 138.713 154.520 138.813 154.520 138.713 V 135.913 C 154.520 135.813 154.420 135.913 154.520 135.913 H 160.320 C 160.420 135.913 160.320 135.813 160.320 135.913 V 138.713 C 160.320 138.813 160.420 138.713 160.320 138.713 H 157.420 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
<path d="M 169.420 138.713 H 166.520 C 166.420 138.713 166.520 138.813 166.520 138.713 V 135.913 C 166.520 135.813 166.420 135.913 166.520 135.913 H 172.320 C 172.420 135.913 172.320 135.813 172.320 135.913 V 138.713 C 172.320 138.813 172.420 138.713 172.320 138.713 H 169.420 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
</g>
<g id="p-6" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 91.040 155.863 H 172.650 C 172.705 155.863 172.750 155.818 172.750 155.763 V 148.563 C 172.750 148.507 172.705 148.463 172.650 148.463 H 91.040 C 90.985 148.463 90.940 148.507 90.940 148.563 V 155.763 C 90.940 155.818 90.985 155.863 91.040 155.863 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
<g id="p-7" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<text dominant-baseline="hanging" font-size="4px" style="font-family: sans-serif ; font-weight: normal; font-style: normal; fill: rgb(255,0,0)" text-anchor="middle" transform="matrix( 1.000 0.000 0.000 1.000 131.800 129.713 )">left</text><path d="M 91.040 147.813 H 103.040 C 104.111 147.813 105.163 147.531 106.090 146.995 C 107.017 146.460 107.787 145.690 108.323 144.763 C 108.841 143.866 109.585 143.121 110.482 142.603 C 111.379 142.085 112.397 141.813 113.432 141.813 H 153.408 C 154.443 141.813 155.461 142.085 156.358 142.603 C 157.255 143.121 157.999 143.866 158.517 144.763 C 159.053 145.690 159.823 146.460 160.750 146.995 C 161.677 147.531 162.729 147.813 163.800 147.813 H 175.800 H 181.800 C 181.855 147.813 181.900 147.768 181.900 147.713 V 135.713 V 112.613 C 181.900 112.557 181.855 112.513 181.800 112.513 H 175.800 H 163.800 C 162.764 112.513 161.747 112.240 160.850 111.722 C 159.953 111.204 159.208 110.460 158.690 109.563 C 158.155 108.635 157.385 107.865 156.458 107.330 C 155.530 106.794 154.478 106.513 153.408 106.513 H 113.432 C 112.362 106.513 111.310 106.794 110.382 107.330 C 109.455 107.865 108.685 108.635 108.150 109.563 C 107.632 110.460 106.887 111.204 105.990 111.722 C 105.093 112.240 104.076 112.513 103.040 112.513 H 91.040 C 90.985 112.513 90.940 112.557 90.940 112.613 C 90.940 112.593 90.946 112.574 90.957 112.557 L 81.717 126.417 C 81.690 126.457 81.696 126.510 81.729 126.543 L 84.157 128.971 C 84.228 129.042 84.228 128.901 84.157 128.971 L 82.107 131.022 C 82.068 131.061 82.068 131.124 82.107 131.163 L 86.349 135.406 C 86.388 135.445 86.452 135.445 86.491 135.406 L 88.541 133.355 C 88.612 133.285 88.471 133.285 88.541 133.355 L 90.940 135.754 C 90.969 135.783 90.940 135.713 90.940 135.754 V 147.713 C 90.940 147.768 90.985 147.813 91.040 147.813" stroke="rgb(0,0,0)" stroke-width="0.20" />
<path d="M 178.700 130.163 V 133.063 C 178.700 133.163 178.800 133.063 178.700 133.063 H 175.900 C 175.800 133.063 175.900 133.163 175.900 133.063 V 127.263 C 175.900 127.163 175.800 127.263 175.900 127.263 H 178.700 C 178.800 127.263 178.700 127.163 178.700 127.263 V 130.163 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
<path d="M 178.700 118.163 V 121.063 C 178.700 121.163 178.800 121.063 178.700 121.063 H 175.900 C 175.800 121.063 175.900 121.163 175.900 121.063 V 115.263 C 175.900 115.163 175.800 115.263 175.900 115.263 H 178.700 C 178.800 115.263 178.700 115.163 178.700 115.263 V 118.163 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
</g>
<g id="p-8" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 166.180 87.013 H 169.080 C 169.180 87.013 169.080 87.113 169.080 87.013 V 84.213 C 169.080 84.113 169.180 84.213 169.080 84.213 H 163.280 C 163.180 84.213 163.280 84.113 163.280 84.213 V 87.013 C 163.280 87.113 163.180 87.013 163.280 87.013 H 166.180 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
<path d="M 154.180 87.013 H 157.080 C 157.180 87.013 157.080 87.113 157.080 87.013 V 84.213 C 157.080 84.113 157.180 84.213 157.080 84.213 H 151.280 C 151.180 84.213 151.280 84.113 151.280 84.213 V 87.013 C 151.280 87.113 151.180 87.013 151.280 87.013 H 154.180 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
<path d="M 142.180 87.013 H 145.080 C 145.180 87.013 145.080 87.113 145.080 87.013 V 84.213 C 145.080 84.113 145.180 84.213 145.080 84.213 H 139.280 C 139.180 84.213 139.280 84.113 139.280 84.213 V 87.013 C 139.280 87.113 139.180 87.013 139.280 87.013 H 142.180 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
<path d="M 130.180 87.013 H 133.080 C 133.180 87.013 133.080 87.113 133.080 87.013 V 84.213 C 133.080 84.113 133.180 84.213 133.080 84.213 H 127.280 C 127.180 84.213 127.280 84.113 127.280 84.213 V 87.013 C 127.280 87.113 127.180 87.013 127.280 87.013 H 130.180 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
<path d="M 118.180 87.013 H 121.080 C 121.180 87.013 121.080 87.113 121.080 87.013 V 84.213 C 121.080 84.113 121.180 84.213 121.080 84.213 H 115.280 C 115.180 84.213 115.280 84.113 115.280 84.213 V 87.013 C 115.280 87.113 115.180 87.013 115.280 87.013 H 118.180 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
<path d="M 106.180 87.013 H 109.080 C 109.180 87.013 109.080 87.113 109.080 87.013 V 84.213 C 109.080 84.113 109.180 84.213 109.080 84.213 H 103.280 C 103.180 84.213 103.280 84.113 103.280 84.213 V 87.013 C 103.280 87.113 103.180 87.013 103.280 87.013 H 106.180 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
<path d="M 94.180 87.013 H 97.080 C 97.180 87.013 97.080 87.113 97.080 87.013 V 84.213 C 97.080 84.113 97.180 84.213 97.080 84.213 H 91.280 C 91.180 84.213 91.280 84.113 91.280 84.213 V 87.013 C 91.280 87.113 91.180 87.013 91.280 87.013 H 94.180 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
</g>
<g id="p-9" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 172.560 104.163 H 90.950 C 90.895 104.163 90.850 104.118 90.850 104.063 V 96.863 C 90.850 96.807 90.895 96.763 90.950 96.763 H 172.560 C 172.615 96.763 172.660 96.807 172.660 96.863 V 104.063 C 172.660 104.118 172.615 104.163 172.560 104.163 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
<g id="p-10" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<text dominant-baseline="hanging" font-size="4px" style="font-family: sans-serif ; font-weight: normal; font-style: normal; fill: rgb(255,0,0)" text-anchor="middle" transform="matrix( 1.000 0.000 0.000 1.000 131.800 78.013 )">right</text><path d="M 172.560 96.113 H 160.560 C 159.489 96.113 158.437 95.831 157.510 95.295 C 156.583 94.760 155.813 93.990 155.277 93.063 C 154.759 92.166 154.015 91.421 153.118 90.903 C 152.221 90.385 151.203 90.113 150.168 90.113 H 110.192 C 109.157 90.113 108.139 90.385 107.242 90.903 C 106.345 91.421 105.601 92.166 105.083 93.063 C 104.547 93.990 103.777 94.760 102.850 95.295 C 101.923 95.831 100.871 96.113 99.800 96.113 H 87.800 H 81.800 C 81.745 96.113 81.700 96.068 81.700 96.013 V 84.013 V 60.913 C 81.700 60.857 81.745 60.813 81.800 60.813 H 87.800 H 99.800 C 100.836 60.813 101.853 60.540 102.750 60.022 C 103.647 59.504 104.392 58.760 104.910 57.863 C 105.445 56.935 106.215 56.165 107.142 55.630 C 108.070 55.094 109.122 54.813 110.192 54.813 H 150.168 C 151.238 54.813 152.290 55.094 153.218 55.630 C 154.145 56.165 154.915 56.935 155.450 57.863 C 155.968 58.760 156.713 59.504 157.610 60.022 C 158.507 60.540 159.524 60.813 160.560 60.813 H 172.560 C 172.615 60.813 172.660 60.857 172.660 60.913 C 172.660 60.893 172.654 60.874 172.643 60.857 L 181.883 74.717 C 181.910 74.757 181.904 74.810 181.871 74.843 L 179.443 77.271 C 179.372 77.342 179.372 77.201 179.443 77.271 L 181.493 79.322 C 181.532 79.361 181.532 79.424 181.493 79.463 L 177.251 83.706 C 177.212 83.745 177.148 83.745 177.109 83.706 L 175.059 81.655 C 174.988 81.585 175.129 81.585 175.059 81.655 L 172.660 84.054 C 172.631 84.083 172.660 84.013 172.660 84.054 V 96.013 C 172.660 96.068 172.615 96.113 172.560 96.113" stroke="rgb(0,0,0)" stroke-width="0.20" />
<path d="M 84.900 78.463 V 81.363 C 84.900 81.463 84.800 81.363 84.900 81.363 H 87.700 C 87.800 81.363 87.700 81.463 87.700 81.363 V 75.563 C 87.700 75.463 87.800 75.563 87.700 75.563 H 84.900 C 84.800 75.563 84.900 75.463 84.900 75.563 V 78.463 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
<path d="M 84.900 66.463 V 69.363 C 84.900 69.463 84.800 69.363 84.900 69.363 H 87.700 C 87.800 69.363 87.700 69.463 87.700 69.363 V 63.563 C 87.700 63.463 87.800 63.563 87.700 63.563 H 84.900 C 84.800 63.563 84.900 63.463 84.900 63.563 V 66.463 Z" stroke="rgb(0,0,255)" stroke-width="0.20" />
</g>
</svg> | 23,530 | Python | .tac | 74 | 316.067568 | 5,831 | 0.704242 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,719 | Stachel.svg | florianfesti_boxes/examples/Stachel.svg | <?xml version='1.0' encoding='utf-8'?>
<svg height="497.19mm" viewBox="0.0 0.0 196.80 497.19" width="196.80mm" xmlns="http://www.w3.org/2000/svg" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:svg="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<!--
Stachel - Bass Recorder Endpin
Created with Boxes.py (https://boxes.hackerspace-bamberg.de/)
Command line (remove spaces between dashes): boxes Stachel
-->
<title>Stachel</title>
<metadata>
<rdf:RDF><cc:Work>
<dc:title>Misc - Stachel</dc:title>
<dc:source>boxes Stachel</dc:source>
<dc:description>Bass Recorder Endpin
Created with Boxes.py (https://boxes.hackerspace-bamberg.de/)
Command line: boxes Stachel
Command line short: boxes Stachel
</dc:description>
</cc:Work></rdf:RDF></metadata>
<g id="p-0" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 10.000 487.186 H 110.000 V 477.186 H 10.000 V 487.186 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
<text dominant-baseline="hanging" font-size="6px" style="font-family: sans-serif ; font-weight: normal; font-style: normal; fill: rgb(255,0,0)" text-anchor="middle" transform="matrix( 1.000 0.000 0.000 1.000 60.000 479.186 )">100.0mm, burn:0.10mm</text>
</g>
<g id="p-1" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 74.500 472.986 V 423.486 C 74.500 423.430 74.455 423.386 74.400 423.386 C 66.613 423.386 59.021 420.955 52.682 416.434 C 46.343 411.912 41.573 405.524 39.038 398.162 C 36.503 390.799 36.329 382.829 38.540 375.363 C 40.752 367.897 45.238 361.308 51.374 356.514 C 57.510 351.720 64.989 348.961 72.769 348.621 C 80.548 348.282 88.239 350.378 94.769 354.619 C 101.300 358.860 106.344 365.034 109.198 372.278 C 112.051 379.523 112.573 387.478 110.689 395.033 C 108.805 402.589 104.610 409.368 98.689 414.425 C 92.768 419.482 85.417 422.565 77.660 423.243 C 77.605 423.248 77.564 423.297 77.569 423.352 L 81.883 472.663 C 81.923 473.118 82.081 473.554 82.343 473.928 C 82.605 474.302 82.961 474.600 83.374 474.793 C 83.788 474.986 84.245 475.067 84.700 475.027 L 99.643 473.719 C 100.097 473.680 100.533 473.521 100.907 473.259 C 101.281 472.997 101.579 472.642 101.772 472.228 C 101.965 471.814 102.046 471.357 102.006 470.903 L 99.768 445.321 C 99.772 445.364 99.747 445.405 99.708 445.422 C 108.983 441.473 117.205 435.407 123.717 427.711 C 130.228 420.015 134.848 410.902 137.207 401.101 C 137.197 401.143 137.161 401.173 137.118 401.177 L 184.437 397.037 C 184.892 396.997 185.328 396.839 185.702 396.577 C 186.076 396.315 186.374 395.960 186.567 395.546 C 186.760 395.132 186.841 394.675 186.801 394.221 L 185.319 377.285 C 185.315 377.230 185.266 377.190 185.211 377.194 L 165.287 378.937 C 165.235 378.942 165.195 378.986 165.196 379.038 C 165.214 381.589 164.445 384.083 162.994 386.181 C 161.543 388.279 159.481 389.878 157.087 390.761 C 154.694 391.643 152.087 391.767 149.621 391.113 C 147.155 390.460 144.951 389.063 143.309 387.111 C 141.666 385.159 140.666 382.749 140.443 380.208 C 140.221 377.666 140.788 375.119 142.066 372.911 C 143.345 370.704 145.273 368.945 147.588 367.874 C 149.903 366.802 152.492 366.471 155.002 366.925 C 157.512 367.378 159.821 368.595 161.614 370.409 C 163.408 372.223 164.598 374.546 165.023 377.061 C 165.032 377.113 165.078 377.149 165.130 
377.144 L 185.054 375.401 C 185.109 375.396 185.150 375.348 185.145 375.293 L 183.663 358.358 C 183.624 357.903 183.465 357.467 183.203 357.093 C 182.941 356.719 182.586 356.421 182.172 356.228 C 181.758 356.035 181.301 355.954 180.847 355.994 L 133.527 360.134 C 133.570 360.130 133.611 360.154 133.628 360.194 C 128.698 348.874 120.628 339.204 110.373 332.329 C 100.118 325.453 88.108 321.661 75.764 321.400 C 63.420 321.139 51.260 324.421 40.724 330.858 C 30.188 337.294 21.717 346.614 16.314 357.716 C 10.911 368.818 8.803 381.235 10.238 393.498 C 11.674 405.760 16.594 417.355 24.415 426.908 C 32.236 436.461 42.631 443.573 54.369 447.402 C 54.328 447.388 54.300 447.350 54.300 447.306 V 472.986 C 54.300 473.442 54.420 473.890 54.648 474.286 C 54.877 474.681 55.205 475.009 55.600 475.237 C 55.995 475.465 56.444 475.586 56.900 475.586 H 71.900 C 72.356 475.586 72.805 475.465 73.200 475.237 C 73.595 475.009 73.923 474.681 74.152 474.286 C 74.380 473.890 74.500 473.442 74.500 472.986 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
<g id="p-2" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 74.500 317.286 V 287.786 C 74.500 287.730 74.455 287.686 74.400 287.686 C 62.449 287.686 50.797 283.956 41.068 277.016 C 31.338 270.076 24.018 260.273 20.127 248.973 C 16.237 237.674 15.970 225.442 19.364 213.983 C 22.758 202.525 29.644 192.411 39.061 185.054 C 48.478 177.696 59.957 173.462 71.896 172.940 C 83.835 172.419 95.640 175.637 105.662 182.146 C 115.685 188.655 123.426 198.129 127.806 209.248 C 132.186 220.367 132.986 232.576 130.095 244.172 C 127.204 255.768 120.766 266.172 111.678 273.933 C 102.591 281.694 91.308 286.426 79.403 287.467 C 79.348 287.472 79.307 287.520 79.312 287.576 L 81.883 316.963 C 81.923 317.418 82.081 317.854 82.343 318.228 C 82.605 318.602 82.961 318.900 83.374 319.093 C 83.788 319.286 84.245 319.367 84.700 319.327 L 99.643 318.019 C 100.097 317.980 100.533 317.821 100.907 317.559 C 101.281 317.297 101.579 316.942 101.772 316.528 C 101.965 316.114 102.046 315.657 102.006 315.203 L 101.083 304.653 C 101.078 304.598 101.030 304.558 100.975 304.563 L 83.143 306.123 C 83.043 306.131 83.152 306.222 83.143 306.123 L 82.986 304.329 C 82.977 304.230 82.886 304.338 82.986 304.329 L 100.818 302.769 C 100.873 302.765 100.914 302.716 100.909 302.661 L 99.768 289.621 C 99.772 289.664 99.747 289.705 99.708 289.722 C 108.983 285.773 117.205 279.707 123.717 272.011 C 130.228 264.315 134.848 255.202 137.207 245.401 C 137.197 245.443 137.161 245.473 137.118 245.477 L 176.966 241.991 C 177.021 241.986 177.062 241.938 177.057 241.883 L 175.453 223.553 C 175.445 223.453 175.354 223.561 175.453 223.553 L 177.246 223.396 C 177.346 223.387 177.238 223.296 177.246 223.396 L 178.850 241.726 C 178.855 241.781 178.903 241.821 178.958 241.817 L 184.437 241.337 C 184.892 241.297 185.328 241.139 185.702 240.877 C 186.076 240.615 186.374 240.260 186.567 239.846 C 186.760 239.432 186.841 238.975 186.801 238.521 L 185.319 221.585 C 185.315 221.530 185.266 221.490 185.211 221.494 L 165.287 223.237 C 165.235 223.242 165.195 223.286 165.196 223.338 C 165.214 
225.889 164.445 228.383 162.994 230.481 C 161.543 232.579 159.481 234.178 157.087 235.061 C 154.694 235.943 152.087 236.067 149.621 235.413 C 147.155 234.760 144.951 233.363 143.309 231.411 C 141.666 229.459 140.666 227.049 140.443 224.508 C 140.221 221.966 140.788 219.419 142.066 217.211 C 143.345 215.004 145.273 213.245 147.588 212.174 C 149.903 211.102 152.492 210.771 155.002 211.225 C 157.512 211.678 159.821 212.895 161.614 214.709 C 163.408 216.523 164.598 218.846 165.023 221.361 C 165.032 221.413 165.078 221.449 165.130 221.444 L 185.054 219.701 C 185.109 219.696 185.150 219.648 185.145 219.593 L 183.663 202.658 C 183.624 202.203 183.465 201.767 183.203 201.393 C 182.941 201.019 182.586 200.721 182.172 200.528 C 181.758 200.335 181.301 200.254 180.847 200.294 L 175.368 200.773 C 175.313 200.778 175.272 200.827 175.277 200.882 L 176.880 219.212 C 176.889 219.311 176.980 219.203 176.880 219.212 L 175.087 219.369 C 174.988 219.377 175.096 219.468 175.087 219.369 L 173.484 201.039 C 173.479 200.984 173.430 200.943 173.375 200.948 L 133.527 204.434 C 133.570 204.430 133.611 204.454 133.628 204.494 C 128.698 193.174 120.628 183.504 110.373 176.629 C 100.118 169.753 88.108 165.961 75.764 165.700 C 63.420 165.439 51.260 168.721 40.724 175.158 C 30.188 181.594 21.717 190.914 16.314 202.016 C 10.911 213.118 8.803 225.535 10.238 237.798 C 11.674 250.060 16.594 261.655 24.415 271.208 C 32.236 280.761 42.631 287.873 54.369 291.702 C 54.328 291.688 54.300 291.650 54.300 291.606 V 304.696 C 54.300 304.751 54.345 304.796 54.400 304.796 H 72.300 C 72.400 304.796 72.300 304.696 72.300 304.796 V 306.596 C 72.300 306.696 72.400 306.596 72.300 306.596 H 54.400 C 54.345 306.596 54.300 306.641 54.300 306.696 V 317.286 C 54.300 317.742 54.420 318.190 54.648 318.586 C 54.877 318.981 55.205 319.309 55.600 319.537 C 55.995 319.765 56.444 319.886 56.900 319.886 H 71.900 C 72.356 319.886 72.805 319.765 73.200 319.537 C 73.595 319.309 73.923 318.981 74.152 318.586 C 74.380 318.190 74.500 
317.742 74.500 317.286 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
<g id="p-3" style="fill:none;stroke-linecap:round;stroke-linejoin:round;">
<path d="M 74.500 161.586 V 132.086 C 74.500 132.030 74.455 131.986 74.400 131.986 C 62.449 131.986 50.797 128.256 41.068 121.316 C 31.338 114.376 24.018 104.573 20.127 93.273 C 16.237 81.974 15.970 69.742 19.364 58.283 C 22.758 46.825 29.644 36.711 39.061 29.354 C 48.478 21.996 59.957 17.762 71.896 17.240 C 83.835 16.719 95.640 19.937 105.662 26.446 C 115.685 32.955 123.426 42.429 127.806 53.548 C 132.186 64.667 132.986 76.876 130.095 88.472 C 127.204 100.068 120.766 110.472 111.678 118.233 C 102.591 125.994 91.308 130.726 79.403 131.767 C 79.348 131.772 79.307 131.820 79.312 131.876 L 81.883 161.263 C 81.923 161.718 82.081 162.154 82.343 162.528 C 82.605 162.902 82.961 163.200 83.374 163.393 C 83.788 163.586 84.245 163.667 84.700 163.627 L 99.643 162.319 C 100.097 162.280 100.533 162.121 100.907 161.859 C 101.281 161.597 101.579 161.242 101.772 160.828 C 101.965 160.414 102.046 159.957 102.006 159.503 L 99.768 133.921 C 99.772 133.964 99.747 134.005 99.708 134.022 C 108.983 130.073 117.205 124.007 123.717 116.311 C 130.228 108.615 134.848 99.502 137.207 89.701 C 137.197 89.743 137.161 89.773 137.118 89.777 L 184.437 85.637 C 184.892 85.597 185.328 85.439 185.702 85.177 C 186.076 84.915 186.374 84.560 186.567 84.146 C 186.760 83.732 186.841 83.275 186.801 82.821 L 185.319 65.885 C 185.315 65.830 185.266 65.790 185.211 65.794 L 165.287 67.537 C 165.235 67.542 165.195 67.586 165.196 67.638 C 165.214 70.189 164.445 72.683 162.994 74.781 C 161.543 76.879 159.481 78.478 157.087 79.361 C 154.694 80.243 152.087 80.367 149.621 79.713 C 147.155 79.060 144.951 77.663 143.309 75.711 C 141.666 73.759 140.666 71.349 140.443 68.808 C 140.221 66.266 140.788 63.719 142.066 61.511 C 143.345 59.304 145.273 57.545 147.588 56.474 C 149.903 55.402 152.492 55.071 155.002 55.525 C 157.512 55.978 159.821 57.195 161.614 59.009 C 163.408 60.823 164.598 63.146 165.023 65.661 C 165.032 65.713 165.078 65.749 165.130 65.744 L 185.054 64.001 C 185.109 63.996 185.150 63.948 185.145 63.893 L 
183.663 46.958 C 183.624 46.503 183.465 46.067 183.203 45.693 C 182.941 45.319 182.586 45.021 182.172 44.828 C 181.758 44.635 181.301 44.554 180.847 44.594 L 133.527 48.734 C 133.570 48.730 133.611 48.754 133.628 48.794 C 128.698 37.474 120.628 27.804 110.373 20.929 C 100.118 14.053 88.108 10.261 75.764 10.000 C 63.420 9.739 51.260 13.021 40.724 19.458 C 30.188 25.894 21.717 35.214 16.314 46.316 C 10.911 57.418 8.803 69.835 10.238 82.098 C 11.674 94.360 16.594 105.955 24.415 115.508 C 32.236 125.061 42.631 132.173 54.369 136.002 C 54.328 135.988 54.300 135.950 54.300 135.906 V 161.586 C 54.300 162.042 54.420 162.490 54.648 162.886 C 54.877 163.281 55.205 163.609 55.600 163.837 C 55.995 164.065 56.444 164.186 56.900 164.186 H 71.900 C 72.356 164.186 72.805 164.065 73.200 163.837 C 73.595 163.609 73.923 163.281 74.152 162.886 C 74.380 162.490 74.500 162.042 74.500 161.586 Z" stroke="rgb(0,0,0)" stroke-width="0.20" />
</g>
</svg> | 11,656 | Python | .tac | 32 | 362.90625 | 4,069 | 0.725458 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,720 | config.py | lra_mackup/mackup/config.py | """Package used to manage the .mackup.cfg config file."""
import os
import os.path
from .constants import (
CUSTOM_APPS_DIR,
MACKUP_BACKUP_PATH,
MACKUP_CONFIG_FILE,
ENGINE_DROPBOX,
ENGINE_GDRIVE,
ENGINE_ICLOUD,
ENGINE_FS,
)
from .utils import (
error,
get_dropbox_folder_location,
get_google_drive_folder_location,
get_icloud_folder_location,
)
try:
import configparser
except ImportError:
import ConfigParser as configparser
class Config(object):
    """The Mackup Config class.

    Parses the .mackup.cfg file in the user's home directory and exposes
    the storage engine, the storage path/directory and the sets of
    applications to sync or ignore.
    """

    def __init__(self, filename=None):
        """
        Create a Config instance.

        Args:
            filename (str): Optional filename of the config file. If empty,
                            defaults to MACKUP_CONFIG_FILE
        """
        assert isinstance(filename, str) or filename is None

        # Initialize the parser
        self._parser = self._setup_parser(filename)

        # Abort early if an old, incompatible config format is detected
        self._warn_on_old_config()

        # Get the storage engine
        self._engine = self._parse_engine()

        # Get the path where the Mackup folder is
        self._path = self._parse_path()

        # Get the directory replacing 'Mackup', if any
        self._directory = self._parse_directory()

        # Get the list of apps to ignore
        self._apps_to_ignore = self._parse_apps_to_ignore()

        # Get the list of apps to allow
        self._apps_to_sync = self._parse_apps_to_sync()

    @property
    def engine(self):
        """
        The engine used by the storage.

        ENGINE_DROPBOX, ENGINE_GDRIVE, ENGINE_ICLOUD or ENGINE_FS.

        Returns:
            str
        """
        return str(self._engine)

    @property
    def path(self):
        """
        Path to the Mackup configuration files.

        The path to the directory where Mackup is gonna create and store his
        directory.

        Returns:
            str
        """
        return str(self._path)

    @property
    def directory(self):
        """
        The name of the Mackup directory, named Mackup by default.

        Returns:
            str
        """
        return str(self._directory)

    @property
    def fullpath(self):
        """
        Full path to the Mackup configuration files.

        The full path to the directory where Mackup is storing the
        configuration files: path + directory.

        Returns:
            str
        """
        return str(os.path.join(self.path, self.directory))

    @property
    def apps_to_ignore(self):
        """
        Get the list of applications ignored in the config file.

        Returns:
            set. Set of application names to ignore, lowercase
        """
        return set(self._apps_to_ignore)

    @property
    def apps_to_sync(self):
        """
        Get the list of applications allowed in the config file.

        Returns:
            set. Set of application names to allow, lowercase
        """
        return set(self._apps_to_sync)

    def _setup_parser(self, filename=None):
        """
        Configure the ConfigParser instance the way we want it.

        Args:
            filename (str) or None

        Returns:
            ConfigParser
        """
        assert isinstance(filename, str) or filename is None

        # If we are not overriding the config filename
        if not filename:
            filename = MACKUP_CONFIG_FILE

        parser = configparser.ConfigParser(
            allow_no_value=True, inline_comment_prefixes=(";", "#")
        )
        # The config file lives in the user's home directory. A missing file
        # is fine: read() simply leaves the parser empty.
        # (The previous version wrapped this in a redundant extra
        # os.path.join() call with a single argument.)
        parser.read(os.path.join(os.environ["HOME"], filename))

        return parser

    def _warn_on_old_config(self):
        """Warn the user if an old config format is detected and abort."""
        # Is an old section in the config file?
        old_sections = ["Allowed Applications", "Ignored Applications"]
        for old_section in old_sections:
            if self._parser.has_section(old_section):
                # error() exits the program
                error(
                    "Old config file detected. Aborting.\n"
                    "\n"
                    "An old section (e.g. [Allowed Applications]"
                    " or [Ignored Applications]) has been detected"
                    " in your {} file.\n"
                    "I'd rather do nothing than do something you"
                    " do not want me to do.\n"
                    "\n"
                    "Please read the up to date documentation on"
                    " <https://github.com/lra/mackup> and migrate"
                    " your configuration file.".format(MACKUP_CONFIG_FILE)
                )

    def _parse_engine(self):
        """
        Parse the storage engine in the config.

        Falls back to ENGINE_DROPBOX when no engine is configured.

        Returns:
            str

        Raises:
            ConfigError: if the configured engine is unknown.
        """
        if self._parser.has_option("storage", "engine"):
            engine = str(self._parser.get("storage", "engine"))
        else:
            engine = ENGINE_DROPBOX

        assert isinstance(engine, str)

        if engine not in [
            ENGINE_DROPBOX,
            ENGINE_GDRIVE,
            ENGINE_ICLOUD,
            ENGINE_FS,
        ]:
            raise ConfigError("Unknown storage engine: {}".format(engine))

        return str(engine)

    def _parse_path(self):
        """
        Parse the storage path in the config.

        The path is auto-detected for the cloud engines; for the
        file_system engine it must be configured explicitly.

        Returns:
            str

        Raises:
            ConfigError: if the file_system engine is used without a 'path'.
        """
        # _parse_engine() has already validated the engine, so exactly one
        # of these branches is taken and `path` is always bound.
        if self.engine == ENGINE_DROPBOX:
            path = get_dropbox_folder_location()
        elif self.engine == ENGINE_GDRIVE:
            path = get_google_drive_folder_location()
        elif self.engine == ENGINE_ICLOUD:
            path = get_icloud_folder_location()
        elif self.engine == ENGINE_FS:
            if self._parser.has_option("storage", "path"):
                cfg_path = self._parser.get("storage", "path")
                path = os.path.join(os.environ["HOME"], cfg_path)
            else:
                raise ConfigError(
                    "The required 'path' can't be found while"
                    " the 'file_system' engine is used."
                )

        return str(path)

    def _parse_directory(self):
        """
        Parse the storage directory in the config.

        Returns:
            str

        Raises:
            ConfigError: if the directory clashes with CUSTOM_APPS_DIR.
        """
        if self._parser.has_option("storage", "directory"):
            directory = self._parser.get("storage", "directory")
            # Don't allow CUSTOM_APPS_DIR as a storage directory
            if directory == CUSTOM_APPS_DIR:
                raise ConfigError(
                    "{} cannot be used as a storage directory.".format(CUSTOM_APPS_DIR)
                )
        else:
            directory = MACKUP_BACKUP_PATH

        return str(directory)

    def _parse_apps_to_ignore(self):
        """
        Parse the applications to ignore in the config.

        Returns:
            set
        """
        # We ignore nothing by default
        apps_to_ignore = set()

        # Is the "[applications_to_ignore]" section in the cfg file?
        section_title = "applications_to_ignore"
        if self._parser.has_section(section_title):
            apps_to_ignore = set(self._parser.options(section_title))

        return apps_to_ignore

    def _parse_apps_to_sync(self):
        """
        Parse the applications to backup in the config.

        Returns:
            set
        """
        # We allow nothing by default
        apps_to_sync = set()

        # Is the "[applications_to_sync]" section in the cfg file?
        section_title = "applications_to_sync"
        if self._parser.has_section(section_title):
            apps_to_sync = set(self._parser.options(section_title))

        return apps_to_sync
class ConfigError(Exception):
    """Raised when the Mackup configuration file contains invalid settings."""

    pass
| 7,731 | Python | .py | 221 | 24.99095 | 87 | 0.570662 | lra/mackup | 14,505 | 929 | 359 | GPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,721 | constants.py | lra_mackup/mackup/constants.py | """Constants used in Mackup."""
# Current version
VERSION = "0.8.40"

# Supported platforms (values returned by platform.system())
PLATFORM_DARWIN = "Darwin"
PLATFORM_LINUX = "Linux"

# Directory containing the application configs
APPS_DIR = "applications"

# Mackup application name
MACKUP_APP_NAME = "mackup"

# Default Mackup backup path where it stores its files in Dropbox
MACKUP_BACKUP_PATH = "Mackup"

# Mackup config file
MACKUP_CONFIG_FILE = ".mackup.cfg"

# Directory that can contain user defined app configs
CUSTOM_APPS_DIR = ".mackup"

# Supported engines
ENGINE_DROPBOX = "dropbox"
ENGINE_FS = "file_system"
ENGINE_GDRIVE = "google_drive"
ENGINE_ICLOUD = "icloud"

DOCUMENTATION_URL = "https://github.com/lra/mackup/blob/master/doc/README.md"

# Error message displayed when mackup can't find the storage specified
# in the config (or the default one).
# NOTE: the %s placeholders are filled in here at import time; the
# {provider} placeholder is left for callers to fill via str.format().
ERROR_UNABLE_TO_FIND_STORAGE = (
    "Unable to find your {provider} =(\n"
    "If this is the first time you use %s, you may want "
    "to use another provider.\n"
    "Take a look at the documentation [1] to know more about "
    "how to configure mackup.\n\n"
    "[1]: %s" % (MACKUP_APP_NAME, DOCUMENTATION_URL)
)
| 1,147 | Python | .py | 32 | 33.78125 | 77 | 0.742986 | lra/mackup | 14,505 | 929 | 359 | GPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,722 | utils.py | lra_mackup/mackup/utils.py | """System static utilities being used by the modules."""
import base64
import os
import platform
import shutil
import stat
import subprocess
import sys
import sqlite3
from . import constants
# Flag that controls how user confirmation works.
# If True, confirm() answers "yes" to every question without prompting
# (set from main() when the --force command line flag is given).
FORCE_YES = False

# Flag that controls whether Mackup may be run as the superuser
# (set from main() when the --root command line flag is given).
CAN_RUN_AS_ROOT = False
def confirm(question):
    """
    Ask the user if he really wants something to happen.

    Args:
        question(str): What can happen

    Returns:
        (boolean): Confirmed or not
    """
    # In forced mode (--force) every question is answered with "yes".
    if FORCE_YES:
        return True

    # Keep prompting until the user gives an unambiguous answer.
    while True:
        answer = input(question + " <Yes|No> ").lower()
        if answer in ("yes", "y"):
            return True
        if answer in ("no", "n"):
            return False
def delete(filepath):
    """
    Delete the given file, directory or link.

    It Should support undelete later on.

    Args:
        filepath (str): Absolute full path to a file. e.g. /path/to/file
    """
    # ACLs and immutable attributes would make the removal fail,
    # so strip them from the whole tree first.
    remove_acl(filepath)
    remove_immutable_attribute(filepath)

    # Regular files and symlinks (even broken ones) are removed directly,
    # real directories are removed recursively.
    if os.path.isfile(filepath) or os.path.islink(filepath):
        os.remove(filepath)
    elif os.path.isdir(filepath):
        shutil.rmtree(filepath)
def copy(src, dst):
    """
    Copy a file or a folder (recursively) from src to dst.

    For the sake of simplicity, both src and dst must be absolute path and must
    include the filename of the file or folder.
    Also do not include any trailing slash.

    e.g. copy('/path/to/src_file', '/path/to/dst_file')
    or copy('/path/to/src_folder', '/path/to/dst_folder')

    But not: copy('/path/to/src_file', 'path/to/')
    or copy('/path/to/src_folder/', '/path/to/dst_folder')

    Args:
        src (str): Source file or folder
        dst (str): Destination file or folder
    """
    assert isinstance(src, str)
    assert os.path.exists(src)
    assert isinstance(dst, str)

    # Make sure the parent directory of the destination exists
    parent = os.path.dirname(os.path.abspath(dst))
    if not os.path.isdir(parent):
        os.makedirs(parent)

    if os.path.isfile(src):
        # Single file: plain copy
        shutil.copy(src, dst)
    elif os.path.isdir(src):
        # Whole folder: recursive copy
        shutil.copytree(src, dst)
    else:
        # Neither a file nor a folder: refuse to guess
        raise ValueError("Unsupported file: {}".format(src))

    # Normalize the permissions of the copy, recursively
    chmod(dst)
def link(target, link_to):
    """
    Create a link to a target file or a folder.

    For the sake of simplicity, both target and link_to must be absolute path and must
    include the filename of the file or folder.
    Also do not include any trailing slash.

    e.g. link('/path/to/file', '/path/to/link')

    But not: link('/path/to/file', 'path/to/')
    or link('/path/to/folder/', '/path/to/link')

    Args:
        target (str): file or folder the link will point to
        link_to (str): Link to create
    """
    assert isinstance(target, str)
    assert os.path.exists(target)
    assert isinstance(link_to, str)

    # Make sure the parent directory of the link exists
    parent = os.path.dirname(os.path.abspath(link_to))
    if not os.path.isdir(parent):
        os.makedirs(parent)

    # Normalize the permissions of the target before linking to it
    chmod(target)

    # Create the symlink itself
    os.symlink(target, link_to)
def chmod(target):
    """
    Recursively set the chmod for files to 0600 and 0700 for folders.

    It's ok unless we need something more specific.

    Args:
        target (str): Root file or folder
    """
    assert isinstance(target, str)
    assert os.path.exists(target)

    file_mode = stat.S_IRUSR | stat.S_IWUSR
    folder_mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR

    # An immutable attribute would make chmod fail, so remove it first
    remove_immutable_attribute(target)

    if os.path.isfile(target):
        os.chmod(target, file_mode)
    elif os.path.isdir(target):
        # chmod the folder itself, then everything below it
        os.chmod(target, folder_mode)
        for root, dirs, files in os.walk(target):
            for name in dirs:
                os.chmod(os.path.join(root, name), folder_mode)
            for name in files:
                os.chmod(os.path.join(root, name), file_mode)
    else:
        raise ValueError("Unsupported file type: {}".format(target))
def error(message):
    """
    Print the given message in red and abort the program.

    Args:
        message(str): The message to display.
    """
    red = "\033[91m"
    reset = "\033[0m"
    # sys.exit() with a string prints it to stderr and exits with status 1
    sys.exit("{}Error: {}{}".format(red, message, reset))
def get_dropbox_folder_location():
    """
    Try to locate the Dropbox folder.

    Returns:
        (str) Full path to the current Dropbox folder
    """
    db_path = os.path.join(os.environ["HOME"], ".dropbox/host.db")
    try:
        with open(db_path, "r") as hostdb:
            fields = hostdb.read().split()
    except IOError:
        # error() exits the program, so we never reach the decode below
        # with `fields` unbound.
        error(constants.ERROR_UNABLE_TO_FIND_STORAGE.format(provider="Dropbox install"))

    # The second field of host.db is the base64 encoded Dropbox folder path
    return base64.b64decode(fields[1]).decode()
def get_google_drive_folder_location():
    """
    Try to locate the Google Drive folder.

    Returns:
        (str) Full path to the current Google Drive folder
    """
    # Pre-Yosemite location of the sync config database (relative to HOME)
    gdrive_db_path = "Library/Application Support/Google/Drive/sync_config.db"
    yosemite_gdrive_db = os.path.join(
        os.environ["HOME"],
        "Library/Application Support/Google/Drive/user_default/sync_config.db",
    )
    # Prefer the Yosemite location when it exists. It is already absolute,
    # so the os.path.join() with HOME below leaves it unchanged.
    if os.path.isfile(yosemite_gdrive_db):
        gdrive_db_path = yosemite_gdrive_db

    googledrive_home = None

    gdrive_db = os.path.join(os.environ["HOME"], gdrive_db_path)
    if os.path.isfile(gdrive_db):
        con = sqlite3.connect(gdrive_db)
        if con:
            cur = con.cursor()
            cur.execute(
                "SELECT data_value "
                "FROM data "
                "WHERE entry_key = 'local_sync_root_path';"
            )
            row = cur.fetchone()
            googledrive_home = str(row[0])
            con.close()

    if not googledrive_home:
        # error() exits the program
        error(
            constants.ERROR_UNABLE_TO_FIND_STORAGE.format(
                provider="Google Drive install"
            )
        )

    return googledrive_home
def get_icloud_folder_location():
    """
    Try to locate the iCloud Drive folder.

    Returns:
        (str) Full path to the iCloud Drive folder.
    """
    icloud_home = os.path.expanduser(
        "~/Library/Mobile Documents/com~apple~CloudDocs/"
    )

    if not os.path.isdir(icloud_home):
        # error() exits the program
        error(constants.ERROR_UNABLE_TO_FIND_STORAGE.format(provider="iCloud Drive"))

    return str(icloud_home)
def is_process_running(process_name):
    """
    Check if a process with the given name is running.

    Args:
        (str): Process name, e.g. "Sublime Text"

    Returns:
        (bool): True if the process is running
    """
    is_running = False

    # On systems with pgrep, check if the given process is running.
    # subprocess.DEVNULL avoids the previous version's never-closed
    # open(os.devnull) file handle.
    if os.path.isfile("/usr/bin/pgrep"):
        returncode = subprocess.call(
            ["/usr/bin/pgrep", process_name], stdout=subprocess.DEVNULL
        )
        # pgrep exits 0 when at least one process matched
        is_running = returncode == 0

    return is_running
def remove_acl(path):
    """
    Remove the ACL of the file or folder located on the given path.

    Also remove the ACL of any file and folder below the given one,
    recursively.

    Args:
        path (str): Path to the file or folder to remove the ACL for,
                    recursively.
    """
    system = platform.system()
    # macOS: `chmod -N` drops the ACL; Linux: `setfacl -b` removes all entries
    if system == constants.PLATFORM_DARWIN and os.path.isfile("/bin/chmod"):
        subprocess.call(["/bin/chmod", "-R", "-N", path])
    elif system == constants.PLATFORM_LINUX and os.path.isfile("/bin/setfacl"):
        subprocess.call(["/bin/setfacl", "-R", "-b", path])
def remove_immutable_attribute(path):
    """
    Remove the immutable attribute of the given path.

    Remove the immutable attribute of the file or folder located on the given
    path. Also remove the immutable attribute of any file and folder below the
    given one, recursively.

    Args:
        path (str): Path to the file or folder to remove the immutable
                    attribute for, recursively.
    """
    system = platform.system()
    # macOS: `chflags nouchg`; Linux: `chattr -i` (-f silences errors)
    if system == constants.PLATFORM_DARWIN and os.path.isfile("/usr/bin/chflags"):
        subprocess.call(["/usr/bin/chflags", "-R", "nouchg", path])
    elif system == constants.PLATFORM_LINUX and os.path.isfile("/usr/bin/chattr"):
        subprocess.call(["/usr/bin/chattr", "-R", "-f", "-i", path])
def can_file_be_synced_on_current_platform(path):
    """
    Check if the given path can be synced locally.

    Check if it makes sense to sync the file at the given path on the current
    platform.
    For now we don't sync any file in the ~/Library folder on GNU/Linux.
    There might be other exceptions in the future.

    Args:
        (str): Path to the file or folder to check. If relative, prepend it
               with the home folder.
               'abc' becomes '~/abc'
               '/def' stays '/def'

    Returns:
        (bool): True if given file can be synced
    """
    # If the given path is relative, prepend home
    # (os.path.join leaves absolute paths untouched).
    fullpath = os.path.join(os.environ["HOME"], path)

    # The trailing slash makes sure we only match content of the Library
    # folder itself, not siblings like "LibrarySomething".
    library_path = os.path.join(os.environ["HOME"], "Library/")

    # On GNU/Linux, anything under ~/Library is not synced
    if platform.system() == constants.PLATFORM_LINUX and fullpath.startswith(
        library_path
    ):
        return False

    return True
| 10,488 | Python | .py | 278 | 31.014388 | 88 | 0.648187 | lra/mackup | 14,505 | 929 | 359 | GPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,723 | appsdb.py | lra_mackup/mackup/appsdb.py | """
The applications database.
The Applications Database provides an easy to use interface to load application
data from the Mackup Database (files).
"""
import os
try:
import configparser
except ImportError:
import ConfigParser as configparser
from .constants import APPS_DIR
from .constants import CUSTOM_APPS_DIR
class ApplicationsDatabase(object):
    """Database containing all the configured applications."""

    def __init__(self):
        """Create a ApplicationsDatabase instance."""
        # Maps an application name to its properties: its pretty name and
        # the set of configuration files it owns.
        self.apps = dict()

        for config_file in ApplicationsDatabase.get_config_files():
            config = configparser.ConfigParser(allow_no_value=True)

            # Needed to not lowercase the configuration_files in the ini files
            config.optionxform = str

            if not config.read(config_file):
                # Unreadable/empty config file: skip it
                continue

            # The app name is the config file name without the .cfg extension
            app_name = os.path.basename(config_file)[: -len(".cfg")]

            app_properties = dict()
            self.apps[app_name] = app_properties

            # Fancy name of the app, for display purposes
            app_properties["name"] = config.get("application", "name")

            # Configuration files to sync
            conf_files = set()
            app_properties["configuration_files"] = conf_files
            if config.has_section("configuration_files"):
                for path in config.options("configuration_files"):
                    if path.startswith("/"):
                        raise ValueError(
                            "Unsupported absolute path: {}".format(path)
                        )
                    conf_files.add(path)

            # XDG configuration files to sync, stored relative to home
            home = os.path.expanduser("~/")
            failobj = "{}.config".format(home)
            xdg_config_home = os.environ.get("XDG_CONFIG_HOME", failobj)
            if not xdg_config_home.startswith(home):
                raise ValueError(
                    "$XDG_CONFIG_HOME: {} must be "
                    "somewhere within your home "
                    "directory: {}".format(xdg_config_home, home)
                )
            if config.has_section("xdg_configuration_files"):
                for path in config.options("xdg_configuration_files"):
                    if path.startswith("/"):
                        raise ValueError(
                            "Unsupported absolute path: {}".format(path)
                        )
                    # Anchor under $XDG_CONFIG_HOME, then strip the home
                    # prefix so the stored path is home-relative
                    conf_files.add(
                        os.path.join(xdg_config_home, path).replace(home, "")
                    )

    @staticmethod
    def get_config_files():
        """
        Return the application configuration files.

        Return a list of configuration files describing the apps supported by
        Mackup. The files returned are absolute full path to those files.
        e.g. /usr/lib/mackup/applications/bash.cfg

        Only one config file per application should be returned, custom config
        having a priority over stock config.

        Returns:
            set of strings.
        """
        stock_apps_dir = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), APPS_DIR
        )
        custom_apps_dir = os.path.join(os.environ["HOME"], CUSTOM_APPS_DIR)

        config_files = set()
        # File names of the user-provided configs, so the stock config for
        # the same app does not override them
        custom_files = set()

        if os.path.isdir(custom_apps_dir):
            for filename in os.listdir(custom_apps_dir):
                if filename.endswith(".cfg"):
                    config_files.add(os.path.join(custom_apps_dir, filename))
                    custom_files.add(filename)

        # Stock configs, skipping any app the user customized
        for filename in os.listdir(stock_apps_dir):
            if filename.endswith(".cfg") and filename not in custom_files:
                config_files.add(os.path.join(stock_apps_dir, filename))

        return config_files

    def get_name(self, name):
        """
        Return the fancy name of an application.

        Args:
            name (str)

        Returns:
            str
        """
        return self.apps[name]["name"]

    def get_files(self, name):
        """
        Return the list of config files of an application.

        Args:
            name (str)

        Returns:
            set of str.
        """
        return self.apps[name]["configuration_files"]

    def get_app_names(self):
        """
        Return application names.

        Return the list of application names that are available in the
        database.

        Returns:
            set of str.
        """
        return set(self.apps)

    def get_pretty_app_names(self):
        """
        Return the list of pretty app names that are available in the database.

        Returns:
            set of str.
        """
        return {self.get_name(app_name) for app_name in self.get_app_names()}
| 5,925 | Python | .py | 133 | 31.81203 | 86 | 0.571156 | lra/mackup | 14,505 | 929 | 359 | GPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,724 | application.py | lra_mackup/mackup/application.py | """
Application Profile.
An Application Profile contains all the information about an application in
Mackup. Name, files, ...
"""
import os
from .mackup import Mackup
from . import utils
class ApplicationProfile(object):
    """Instantiate this class with application specific data."""

    def __init__(self, mackup, files, dry_run, verbose):
        """
        Create an ApplicationProfile instance.

        Args:
            mackup (Mackup)
            files (list)
            dry_run (bool): when True, only print what would be done,
                            without touching the file system
            verbose (bool): when True, print detailed per-file messages
        """
        assert isinstance(mackup, Mackup)
        assert isinstance(files, set)

        self.mackup = mackup
        self.files = list(files)
        self.dry_run = dry_run
        self.verbose = verbose

    def getFilepaths(self, filename):
        """
        Get home and mackup filepaths for given file

        Args:
            filename (str)

        Returns:
            home_filepath, mackup_filepath (str, str)
        """
        return (
            os.path.join(os.environ["HOME"], filename),
            os.path.join(self.mackup.mackup_folder, filename),
        )

    def backup(self):
        """
        Backup the application config files.

        Algorithm:
            if exists home/file
                if home/file is a real file
                    if exists mackup/file
                        are you sure?
                        if sure
                            rm mackup/file
                            mv home/file mackup/file
                            link mackup/file home/file
                    else
                        mv home/file mackup/file
                        link mackup/file home/file
        """
        # For each file used by the application
        for filename in self.files:
            (home_filepath, mackup_filepath) = self.getFilepaths(filename)

            # If the file exists and is not already a link pointing to Mackup
            # (i.e. a symlink whose target is the existing mackup copy)
            if (os.path.isfile(home_filepath) or os.path.isdir(home_filepath)) and not (
                os.path.islink(home_filepath)
                and (os.path.isfile(mackup_filepath) or os.path.isdir(mackup_filepath))
                and os.path.samefile(home_filepath, mackup_filepath)
            ):
                if self.verbose:
                    print(
                        "Backing up\n {}\n to\n {} ...".format(
                            home_filepath, mackup_filepath
                        )
                    )
                else:
                    print("Backing up {} ...".format(filename))

                # In dry-run mode, stop before mutating anything
                if self.dry_run:
                    continue

                # Check if we already have a backup
                if os.path.exists(mackup_filepath):

                    # Name it right
                    if os.path.isfile(mackup_filepath):
                        file_type = "file"
                    elif os.path.isdir(mackup_filepath):
                        file_type = "folder"
                    elif os.path.islink(mackup_filepath):
                        file_type = "link"
                    else:
                        raise ValueError("Unsupported file: {}".format(mackup_filepath))

                    # Ask the user if he really wants to replace it
                    if utils.confirm(
                        "A {} named {} already exists in the"
                        " backup.\nAre you sure that you want to"
                        " replace it?".format(file_type, mackup_filepath)
                    ):
                        # Delete the file in Mackup
                        utils.delete(mackup_filepath)
                        # Copy the file
                        utils.copy(home_filepath, mackup_filepath)
                        # Delete the file in the home
                        utils.delete(home_filepath)
                        # Link the backuped file to its original place
                        utils.link(mackup_filepath, home_filepath)
                else:
                    # Copy the file
                    utils.copy(home_filepath, mackup_filepath)
                    # Delete the file in the home
                    utils.delete(home_filepath)
                    # Link the backuped file to its original place
                    utils.link(mackup_filepath, home_filepath)
            elif self.verbose:
                if os.path.exists(home_filepath):
                    print(
                        "Doing nothing\n {}\n "
                        "is already backed up to\n {}".format(
                            home_filepath, mackup_filepath
                        )
                    )
                # os.path.exists() is False for a broken symlink, while
                # os.path.islink() is still True, so this branch catches
                # dangling links in the home folder
                elif os.path.islink(home_filepath):
                    print(
                        "Doing nothing\n {}\n "
                        "is a broken link, you might want to fix it.".format(
                            home_filepath
                        )
                    )
                else:
                    print("Doing nothing\n {}\n does not exist".format(home_filepath))

    def restore(self):
        """
        Restore the application config files.

        Algorithm:
            if exists mackup/file
                if exists home/file
                    are you sure?
                    if sure
                        rm home/file
                        link mackup/file home/file
                else
                    link mackup/file home/file
        """
        # For each file used by the application
        for filename in self.files:
            (home_filepath, mackup_filepath) = self.getFilepaths(filename)

            # If the file exists and is not already pointing to the mackup file
            # and the folder makes sense on the current platform (Don't sync
            # any subfolder of ~/Library on GNU/Linux)
            file_or_dir_exists = os.path.isfile(mackup_filepath) or os.path.isdir(
                mackup_filepath
            )
            pointing_to_mackup = (
                os.path.islink(home_filepath)
                and os.path.exists(mackup_filepath)
                and os.path.samefile(mackup_filepath, home_filepath)
            )
            supported = utils.can_file_be_synced_on_current_platform(filename)

            if file_or_dir_exists and not pointing_to_mackup and supported:
                if self.verbose:
                    print(
                        "Restoring\n linking {}\n to {} ...".format(
                            home_filepath, mackup_filepath
                        )
                    )
                else:
                    print("Restoring {} ...".format(filename))

                # In dry-run mode, stop before mutating anything
                if self.dry_run:
                    continue

                # Check if there is already a file in the home folder
                if os.path.exists(home_filepath):
                    # Name it right
                    if os.path.isfile(home_filepath):
                        file_type = "file"
                    elif os.path.isdir(home_filepath):
                        file_type = "folder"
                    elif os.path.islink(home_filepath):
                        file_type = "link"
                    else:
                        raise ValueError("Unsupported file: {}".format(mackup_filepath))

                    if utils.confirm(
                        "You already have a {} named {} in your"
                        " home.\nDo you want to replace it with"
                        " your backup?".format(file_type, filename)
                    ):
                        utils.delete(home_filepath)
                        utils.link(mackup_filepath, home_filepath)
                else:
                    utils.link(mackup_filepath, home_filepath)
            elif self.verbose:
                if os.path.exists(home_filepath):
                    print(
                        "Doing nothing\n {}\n already linked by\n {}".format(
                            mackup_filepath, home_filepath
                        )
                    )
                # Broken symlink: exists() is False but islink() is True
                elif os.path.islink(home_filepath):
                    print(
                        "Doing nothing\n {}\n "
                        "is a broken link, you might want to fix it.".format(
                            home_filepath
                        )
                    )
                else:
                    print(
                        "Doing nothing\n {}\n does not exist".format(
                            mackup_filepath
                        )
                    )

    def uninstall(self):
        """
        Uninstall Mackup.

        Restore any file where it was before the 1st Mackup backup.

        Algorithm:
            for each file in config
                if mackup/file exists
                    if home/file exists
                        delete home/file
                    copy mackup/file home/file
            delete the mackup folder
            print how to delete mackup
        """
        # For each file used by the application
        for filename in self.files:
            (home_filepath, mackup_filepath) = self.getFilepaths(filename)

            # If the mackup file exists
            if os.path.isfile(mackup_filepath) or os.path.isdir(mackup_filepath):
                # Check if there is a corresponding file in the home folder
                if os.path.exists(home_filepath):
                    if self.verbose:
                        print(
                            "Reverting {}\n at {} ...".format(
                                mackup_filepath, home_filepath
                            )
                        )
                    else:
                        print("Reverting {} ...".format(filename))

                    # In dry-run mode, stop before mutating anything
                    if self.dry_run:
                        continue

                    # If there is, delete it as we are gonna copy the Dropbox
                    # one there
                    utils.delete(home_filepath)

                    # Copy the Dropbox file to the home folder
                    utils.copy(mackup_filepath, home_filepath)
            elif self.verbose:
                print("Doing nothing, {} does not exist".format(mackup_filepath))
| 10,035 | Python | .py | 233 | 25.652361 | 88 | 0.474261 | lra/mackup | 14,505 | 929 | 359 | GPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,725 | main.py | lra_mackup/mackup/main.py | """Mackup.
Keep your application settings in sync.
Copyright (C) 2013-2021 Laurent Raufaste <http://glop.org/>
Usage:
mackup list
mackup [options] backup
mackup [options] restore
mackup show <application>
mackup [options] uninstall
mackup (-h | --help)
mackup --version
Options:
-h --help Show this screen.
-f --force Force every question asked to be answered with "Yes".
-r --root Allow mackup to be run as superuser.
-n --dry-run Show steps without executing.
-v --verbose Show additional details.
--version Show version.
Modes of action:
1. list: display a list of all supported applications.
2. backup: sync your conf files to your synced storage, use this the 1st time
you use Mackup.
3. restore: link the conf files already in your synced storage on your system,
use it on any new system you use.
4. uninstall: reset everything as it was before using Mackup.
By default, Mackup syncs all application data via
Dropbox, but may be configured to exclude applications or use a different
backend with a .mackup.cfg file.
See https://github.com/lra/mackup/tree/master/doc for more information.
"""
from docopt import docopt
from .appsdb import ApplicationsDatabase
from .application import ApplicationProfile
from .constants import MACKUP_APP_NAME, VERSION
from .mackup import Mackup
from . import utils
import sys
class ColorFormatCodes:
    """ANSI escape sequences used to colorize the terminal output."""

    BLUE = "\033[34m"
    BOLD = "\033[1m"
    NORMAL = "\033[0m"


def header(str):
    """Return the given text wrapped in the blue ANSI color code."""
    # NOTE: the parameter shadows the builtin `str`; kept for compatibility
    return "{}{}{}".format(ColorFormatCodes.BLUE, str, ColorFormatCodes.NORMAL)


def bold(str):
    """Return the given text wrapped in the bold ANSI format code."""
    return "{}{}{}".format(ColorFormatCodes.BOLD, str, ColorFormatCodes.NORMAL)
def main():
    """Parse the command line arguments and run the requested Mackup mode."""
    # Get the command line arg
    args = docopt(__doc__, version="Mackup {}".format(VERSION))

    mckp = Mackup()
    app_db = ApplicationsDatabase()

    def printAppHeader(app_name):
        # `verbose` is captured from the enclosing scope; it is assigned
        # below, before any call to this helper can happen.
        if verbose:
            print(("\n{0} {1} {0}").format(header("---"), bold(app_name)))

    # If we want to answer mackup with "yes" for each question
    if args["--force"]:
        utils.FORCE_YES = True

    # Allow mackup to be run as root
    if args["--root"]:
        utils.CAN_RUN_AS_ROOT = True

    dry_run = args["--dry-run"]

    verbose = args["--verbose"]

    if args["backup"]:
        # Check the env where the command is being run
        mckp.check_for_usable_backup_env()

        # Backup each application
        for app_name in sorted(mckp.get_apps_to_backup()):
            app = ApplicationProfile(mckp, app_db.get_files(app_name), dry_run, verbose)
            printAppHeader(app_name)
            app.backup()

    elif args["restore"]:
        # Check the env where the command is being run
        mckp.check_for_usable_restore_env()

        # Restore the Mackup config before any other config, as we might need
        # it to know about custom settings
        mackup_app = ApplicationProfile(
            mckp, app_db.get_files(MACKUP_APP_NAME), dry_run, verbose
        )
        printAppHeader(MACKUP_APP_NAME)
        mackup_app.restore()

        # Initialize again the apps db, as the Mackup config might have changed
        # it
        mckp = Mackup()
        app_db = ApplicationsDatabase()

        # Restore the rest of the app configs, using the restored Mackup config
        app_names = mckp.get_apps_to_backup()
        # Mackup has already been done
        app_names.discard(MACKUP_APP_NAME)

        for app_name in sorted(app_names):
            app = ApplicationProfile(mckp, app_db.get_files(app_name), dry_run, verbose)
            printAppHeader(app_name)
            app.restore()

    elif args["uninstall"]:
        # Check the env where the command is being run
        mckp.check_for_usable_restore_env()

        # In dry-run mode, skip the confirmation prompt entirely
        if dry_run or (
            utils.confirm(
                "You are going to uninstall Mackup.\n"
                "Every configuration file, setting and dotfile"
                " managed by Mackup will be unlinked and copied back"
                " to their original place, in your home folder.\n"
                "Are you sure?"
            )
        ):
            # Uninstall the apps except Mackup, which we'll uninstall last, to
            # keep the settings as long as possible
            app_names = mckp.get_apps_to_backup()
            app_names.discard(MACKUP_APP_NAME)

            for app_name in sorted(app_names):
                app = ApplicationProfile(
                    mckp, app_db.get_files(app_name), dry_run, verbose
                )
                printAppHeader(app_name)
                app.uninstall()

            # Restore the Mackup config before any other config, as we might
            # need it to know about custom settings
            mackup_app = ApplicationProfile(
                mckp, app_db.get_files(MACKUP_APP_NAME), dry_run, verbose
            )
            mackup_app.uninstall()

            # Delete the Mackup folder in Dropbox
            # Don't delete this as there might be other Macs that aren't
            # uninstalled yet
            # delete(mckp.mackup_folder)
            print(
                "\n"
                "All your files have been put back into place. You can now"
                " safely uninstall Mackup.\n"
                "\n"
                "Thanks for using Mackup!"
            )

    elif args["list"]:
        # Display the list of supported applications
        mckp.check_for_usable_environment()
        output = "Supported applications:\n"
        for app_name in sorted(app_db.get_app_names()):
            output += " - {}\n".format(app_name)
        output += "\n"
        output += "{} applications supported in Mackup v{}".format(
            len(app_db.get_app_names()), VERSION
        )
        print(output)

    elif args["show"]:
        mckp.check_for_usable_environment()
        app_name = args["<application>"]

        # Make sure the app exists
        if app_name not in app_db.get_app_names():
            sys.exit("Unsupported application: {}".format(app_name))
        print("Name: {}".format(app_db.get_name(app_name)))
        print("Configuration files:")
        for file in app_db.get_files(app_name):
            print(" - {}".format(file))

    # Delete the tmp folder
    mckp.clean_temp_folder()
| 6,267 | Python | .py | 154 | 32.311688 | 88 | 0.626441 | lra/mackup | 14,505 | 929 | 359 | GPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,726 | mackup.py | lra_mackup/mackup/mackup.py | """
The Mackup Class.
The Mackup class is keeping all the state that Mackup needs to keep during its
runtime. It also provides easy to use interface that is used by the Mackup UI.
The only UI for now is the command line.
"""
import os
import os.path
import shutil
import tempfile
from . import utils
from . import config
from . import appsdb
class Mackup(object):
    """Main Mackup class."""

    def __init__(self):
        """Mackup Constructor."""
        self._config = config.Config()

        self.mackup_folder = self._config.fullpath
        self.temp_folder = tempfile.mkdtemp(prefix="mackup_tmp_")

    def check_for_usable_environment(self):
        """Check if the current env is usable and has everything's required."""
        # Running as the superuser is refused unless explicitly allowed
        # via the --root flag
        if os.geteuid() == 0 and not utils.CAN_RUN_AS_ROOT:
            utils.error(
                "Running Mackup as superuser can be dangerous."
                " Don't do it unless you know what you're doing!"
                " Run mackup --help for guidance."
            )

        # The storage folder (Dropbox, Google Drive, ...) must exist
        if not os.path.isdir(self._config.path):
            utils.error(
                "Unable to find the storage folder: {}".format(self._config.path)
            )

    def check_for_usable_backup_env(self):
        """Check if the current env can be used to back up files."""
        self.check_for_usable_environment()
        self.create_mackup_home()

    def check_for_usable_restore_env(self):
        """Check if the current env can be used to restore files."""
        self.check_for_usable_environment()

        if not os.path.isdir(self.mackup_folder):
            utils.error(
                "Unable to find the Mackup folder: {}\n"
                "You might want to back up some files or get your"
                " storage directory synced first.".format(self.mackup_folder)
            )

    def clean_temp_folder(self):
        """Delete the temp folder and files created while running."""
        shutil.rmtree(self.temp_folder)

    def create_mackup_home(self):
        """If the Mackup home folder does not exist, create it."""
        if os.path.isdir(self.mackup_folder):
            return

        if utils.confirm(
            "Mackup needs a directory to store your"
            " configuration files\n"
            "Do you want to create it now? <{}>".format(self.mackup_folder)
        ):
            os.makedirs(self.mackup_folder)
        else:
            utils.error("Mackup can't do anything without a home =(")

    def get_apps_to_backup(self):
        """
        Get the list of applications that should be backed up by Mackup.

        It's the list of allowed apps minus the list of ignored apps.

        Returns:
            (set) List of application names to back up
        """
        app_db = appsdb.ApplicationsDatabase()

        # If a list of apps to sync is specified we only allow those,
        # otherwise every supported app is allowed by default
        apps_to_backup = self._config.apps_to_sync or app_db.get_app_names()

        # Remove the apps the user explicitly ignores
        for app_name in self._config.apps_to_ignore:
            apps_to_backup.discard(app_name)

        return apps_to_backup
| 3,682 | Python | .py | 83 | 35.156627 | 81 | 0.617943 | lra/mackup | 14,505 | 929 | 359 | GPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,727 | test_utils.py | lra_mackup/tests/test_utils.py | import os
import tempfile
import unittest
import stat
# from unittest.mock import patch
from mackup import utils
def convert_to_octal(file_name):
"""
Using os.stat, returns file permissions (read, write, execute) as an octal.
"""
return oct(os.stat(file_name)[stat.ST_MODE])[-3:]
class TestMackup(unittest.TestCase):
def test_confirm_yes(self):
# Override the input used in utils
def custom_input(_):
return "Yes"
utils.input = custom_input
assert utils.confirm("Answer Yes to this question")
def test_confirm_no(self):
# Override the input used in utils
def custom_input(_):
return "No"
utils.input = custom_input
assert not utils.confirm("Answer No to this question")
def test_confirm_typo(self):
# Override the input used in utils
def custom_input(_):
return "No"
utils.input = custom_input
assert not utils.confirm("Answer garbage to this question")
def test_delete_file(self):
# Create a tmp file
tfile = tempfile.NamedTemporaryFile(delete=False)
tfpath = tfile.name
tfile.close()
# Make sure the created file exists
assert os.path.isfile(tfpath)
# Check if mackup can really delete it
utils.delete(tfpath)
assert not os.path.exists(tfpath)
def test_delete_folder_recursively(self):
# Create a tmp folder
tfpath = tempfile.mkdtemp()
# Let's put a file in it just for fun
tfile = tempfile.NamedTemporaryFile(dir=tfpath, delete=False)
filepath = tfile.name
tfile.close()
# Let's put another folder in it
subfolder_path = tempfile.mkdtemp(dir=tfpath)
# And a file in the subfolder
tfile = tempfile.NamedTemporaryFile(dir=subfolder_path, delete=False)
subfilepath = tfile.name
tfile.close()
# Make sure the created files and folders exists
assert os.path.isdir(tfpath)
assert os.path.isfile(filepath)
assert os.path.isdir(subfolder_path)
assert os.path.isfile(subfilepath)
# Check if mackup can really delete it
utils.delete(tfpath)
assert not os.path.exists(tfpath)
assert not os.path.exists(filepath)
assert not os.path.exists(subfolder_path)
assert not os.path.exists(subfilepath)
def test_copy_file(self):
# Create a tmp file
tfile = tempfile.NamedTemporaryFile(delete=False)
srcfile = tfile.name
tfile.close()
# Create a tmp folder
dstpath = tempfile.mkdtemp()
# Set the destination filename
dstfile = os.path.join(dstpath, "subfolder", os.path.basename(srcfile))
# Make sure the source file and destination folder exist and the
# destination file doesn't yet exist
assert os.path.isfile(srcfile)
assert os.path.isdir(dstpath)
assert not os.path.exists(dstfile)
# Check if mackup can copy it
utils.copy(srcfile, dstfile)
assert os.path.isfile(srcfile)
assert os.path.isdir(dstpath)
assert os.path.exists(dstfile)
# Let's clean up
utils.delete(dstpath)
def test_copy_fail(self):
# Create a tmp FIFO file
tfile = tempfile.NamedTemporaryFile()
srcfile = tfile.name
tfile.close()
os.mkfifo(srcfile)
# Create a tmp folder
dstpath = tempfile.mkdtemp()
# Set the destination filename
dstfile = os.path.join(dstpath, "subfolder", os.path.basename(srcfile))
# Make sure the source file and destination folder exist and the
# destination file doesn't yet exist
assert not os.path.isfile(srcfile)
assert stat.S_ISFIFO(os.stat(srcfile).st_mode)
assert os.path.isdir(dstpath)
assert not os.path.exists(dstfile)
# Check if mackup can copy it
self.assertRaises(ValueError, utils.copy, srcfile, dstfile)
assert not os.path.isfile(srcfile)
assert stat.S_ISFIFO(os.stat(srcfile).st_mode)
assert os.path.isdir(dstpath)
assert not os.path.exists(dstfile)
# Let's clean up
utils.delete(srcfile)
utils.delete(dstpath)
def test_copy_file_to_dir(self):
"""Copies a file to a destination folder that already exists."""
# Create a tmp folder
srcpath = tempfile.mkdtemp()
# Create a tmp file
tfile = tempfile.NamedTemporaryFile(delete=False, dir=srcpath)
srcfile = tfile.name
tfile.close()
# Create a tmp folder
dstpath = tempfile.mkdtemp()
# Set the destination filename
srcpath_basename = os.path.basename(srcpath)
dstfile = os.path.join(
dstpath, "subfolder", srcpath_basename, os.path.basename(srcfile)
)
# Make sure the source file and destination folder exist and the
# destination file doesn't yet exist
assert os.path.isdir(srcpath)
assert os.path.isfile(srcfile)
assert os.path.isdir(dstpath)
assert not os.path.exists(dstfile)
# Check if mackup can copy it
utils.copy(srcfile, dstfile)
assert os.path.isdir(srcpath)
assert os.path.isfile(srcfile)
assert os.path.isdir(dstpath)
assert os.path.exists(dstfile)
# Let's clean up
utils.delete(srcpath)
utils.delete(dstpath)
def test_copy_dir(self):
"""Copies a directory recursively to the destination path."""
# Create a tmp folder
srcpath = tempfile.mkdtemp()
# Create a tmp file
tfile = tempfile.NamedTemporaryFile(delete=False, dir=srcpath)
srcfile = tfile.name
tfile.close()
# Create a tmp folder
dstpath = tempfile.mkdtemp()
# Set the destination filename
srcpath_basename = os.path.basename(srcpath)
dstfile = os.path.join(dstpath, srcpath_basename, os.path.basename(srcfile))
# Make sure the source file and destination folder exist and the
# destination file doesn't yet exist
assert os.path.isdir(srcpath)
assert os.path.isfile(srcfile)
assert os.path.isdir(dstpath)
assert not os.path.exists(dstfile)
# Check if mackup can copy it
utils.copy(srcpath, dstfile)
assert os.path.isdir(srcpath)
assert os.path.isfile(srcfile)
assert os.path.isdir(dstpath)
assert os.path.exists(dstfile)
# Let's clean up
utils.delete(srcpath)
utils.delete(dstpath)
def test_link_file(self):
# Create a tmp file
tfile = tempfile.NamedTemporaryFile(delete=False)
srcfile = tfile.name
tfile.close()
# Create a tmp folder
dstpath = tempfile.mkdtemp()
# Set the destination filename
dstfile = os.path.join(dstpath, "subfolder", os.path.basename(srcfile))
# Make sure the source file and destination folder exist and the
# destination file doesn't yet exist
assert os.path.isfile(srcfile)
assert os.path.isdir(dstpath)
assert not os.path.exists(dstfile)
# Check if mackup can link it and the link points to the correct place
utils.link(srcfile, dstfile)
assert os.path.isfile(srcfile)
assert os.path.isdir(dstpath)
assert os.path.exists(dstfile)
assert os.readlink(dstfile) == srcfile
# Let's clean up
utils.delete(dstpath)
def test_chmod_file(self):
# Create a tmp file
tfile = tempfile.NamedTemporaryFile(delete=False)
file_name = tfile.name
# Create a tmp directory with a sub folder
dir_name = tempfile.mkdtemp()
nested_dir = tempfile.mkdtemp(dir=dir_name)
# # File Tests
# Change the tmp file stats to S_IWRITE (200), write access only
os.chmod(file_name, stat.S_IWRITE)
assert convert_to_octal(file_name) == "200"
# Check to make sure that utils.chmod changes the bits to 600,
# which is read and write access for the owner
utils.chmod(file_name)
assert convert_to_octal(file_name) == "600"
# # Directory Tests
# Change the tmp folder stats to S_IREAD (400), read access only
os.chmod(dir_name, stat.S_IREAD)
assert convert_to_octal(dir_name) == "400"
# Check to make sure that utils.chmod changes the bits of all
# directories to 700, which is read, write, and execute access for the
# owner
utils.chmod(dir_name)
assert convert_to_octal(dir_name) == "700"
assert convert_to_octal(nested_dir) == "700"
# Use an "unsupported file type". In this case, /dev/null
self.assertRaises(ValueError, utils.chmod, os.devnull)
def test_error(self):
test_string = "Hello World"
self.assertRaises(SystemExit, utils.error, test_string)
def test_failed_backup_location(self):
"""
Tests for the error that should occur if the backup folder cannot be
found for Dropbox and Google
"""
# Hack to make our home folder some temporary folder
temp_home = tempfile.mkdtemp()
utils.os.environ["HOME"] = temp_home
# Check for the missing Dropbox folder
assert not os.path.exists(os.path.join(temp_home, ".dropbox/host.db"))
self.assertRaises(SystemExit, utils.get_dropbox_folder_location)
# Check for the missing Google Drive folder
assert not os.path.exists(
os.path.join(
temp_home, "Library/Application Support/Google/Drive/sync_config.db"
)
)
self.assertRaises(SystemExit, utils.get_google_drive_folder_location)
def test_is_process_running(self):
# A pgrep that has one letter and a wildcard will always return id 1
assert utils.is_process_running("a*")
assert not utils.is_process_running("some imaginary process")
def test_can_file_be_synced_on_current_platform(self):
# Any file path will do, even if it doesn't exist
path = "some/file"
# Force the Mac OSX Test using lambda magic
utils.platform.system = lambda *args: utils.constants.PLATFORM_DARWIN
assert utils.can_file_be_synced_on_current_platform(path)
# Force the Linux Test using lambda magic
utils.platform.system = lambda *args: utils.constants.PLATFORM_LINUX
assert utils.can_file_be_synced_on_current_platform(path)
# Try to use the library path on Linux, which shouldn't work
path = os.path.join(os.environ["HOME"], "Library/")
assert not utils.can_file_be_synced_on_current_platform(path)
| 10,847 | Python | .py | 254 | 33.874016 | 84 | 0.650361 | lra/mackup | 14,505 | 929 | 359 | GPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,728 | test_main.py | lra_mackup/tests/test_main.py | import unittest
from mackup import main
class TestMain(unittest.TestCase):
def test_main_header(self):
assert main.header("blah") == "\033[34mblah\033[0m"
def test_main_bold(self):
assert main.bold("blah") == "\033[1mblah\033[0m"
| 257 | Python | .py | 7 | 31.857143 | 59 | 0.688259 | lra/mackup | 14,505 | 929 | 359 | GPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,729 | test_config.py | lra_mackup/tests/test_config.py | import unittest
import os.path
from mackup.constants import (
ENGINE_DROPBOX,
ENGINE_GDRIVE,
ENGINE_ICLOUD,
ENGINE_FS,
)
from mackup.config import Config, ConfigError
class TestConfig(unittest.TestCase):
def setUp(self):
realpath = os.path.dirname(os.path.realpath(__file__))
os.environ["HOME"] = os.path.join(realpath, "fixtures")
def test_config_no_config(self):
cfg = Config()
# Should should do the same as the default, empty configuration
assert isinstance(cfg.engine, str)
assert cfg.engine == ENGINE_DROPBOX
assert isinstance(cfg.path, str)
print(cfg.path)
assert cfg.path == "/home/some_user/Dropbox"
assert isinstance(cfg.directory, str)
assert cfg.directory == "Mackup"
assert isinstance(cfg.fullpath, str)
assert cfg.fullpath == "/home/some_user/Dropbox/Mackup"
assert cfg.apps_to_ignore == set()
assert cfg.apps_to_sync == set()
def test_config_empty(self):
cfg = Config("mackup-empty.cfg")
assert isinstance(cfg.engine, str)
assert cfg.engine == ENGINE_DROPBOX
assert isinstance(cfg.path, str)
assert cfg.path == "/home/some_user/Dropbox"
assert isinstance(cfg.directory, str)
assert cfg.directory == "Mackup"
assert isinstance(cfg.fullpath, str)
assert cfg.fullpath == "/home/some_user/Dropbox/Mackup"
assert cfg.apps_to_ignore == set()
assert cfg.apps_to_sync == set()
def test_config_engine_dropbox(self):
cfg = Config("mackup-engine-dropbox.cfg")
assert isinstance(cfg.engine, str)
assert cfg.engine == ENGINE_DROPBOX
assert isinstance(cfg.path, str)
assert cfg.path == "/home/some_user/Dropbox"
assert isinstance(cfg.directory, str)
assert cfg.directory == "some_weirld_name"
assert isinstance(cfg.fullpath, str)
assert cfg.fullpath == "/home/some_user/Dropbox/some_weirld_name"
assert cfg.apps_to_ignore == set()
assert cfg.apps_to_sync == set()
def test_config_engine_filesystem_absolute(self):
cfg = Config("mackup-engine-file_system-absolute.cfg")
assert isinstance(cfg.engine, str)
assert cfg.engine == ENGINE_FS
assert isinstance(cfg.path, str)
assert cfg.path == "/some/absolute/folder"
assert isinstance(cfg.directory, str)
assert cfg.directory == "custom_folder"
assert isinstance(cfg.fullpath, str)
assert cfg.fullpath == "/some/absolute/folder/custom_folder"
assert cfg.apps_to_ignore == set(["subversion", "sequel-pro"])
assert cfg.apps_to_sync == set()
def test_config_engine_filesystem(self):
cfg = Config("mackup-engine-file_system.cfg")
assert isinstance(cfg.engine, str)
assert cfg.engine == ENGINE_FS
assert isinstance(cfg.path, str)
assert cfg.path.endswith(
os.path.join(os.environ["HOME"], "some/relative/folder")
)
assert isinstance(cfg.directory, str)
assert cfg.directory == "Mackup"
assert isinstance(cfg.fullpath, str)
assert cfg.fullpath == os.path.join(
os.environ["HOME"], "some/relative/folder", "Mackup"
)
assert cfg.apps_to_ignore == set()
assert cfg.apps_to_sync == set(["sabnzbd", "sublime-text-3", "x11"])
def test_config_engine_google_drive(self):
cfg = Config("mackup-engine-google_drive.cfg")
assert isinstance(cfg.engine, str)
assert cfg.engine == ENGINE_GDRIVE
assert isinstance(cfg.path, str)
assert cfg.path == "/Users/whatever/Google Drive"
assert isinstance(cfg.directory, str)
assert cfg.directory == "Mackup"
assert isinstance(cfg.fullpath, str)
assert cfg.fullpath.endswith("/Google Drive/Mackup")
assert cfg.apps_to_ignore == set(["subversion", "sequel-pro", "sabnzbd"])
assert cfg.apps_to_sync == set(["sublime-text-3", "x11", "sabnzbd"])
def test_config_engine_icloud(self):
cfg = Config("mackup-engine-icloud.cfg")
assert isinstance(cfg.engine, str)
assert cfg.engine == ENGINE_ICLOUD
assert isinstance(cfg.path, str)
assert cfg.path == os.path.expanduser(
"~/Library/Mobile Documents/com~apple~CloudDocs/"
)
assert isinstance(cfg.directory, str)
assert cfg.directory == "Mackup"
assert isinstance(cfg.fullpath, str)
assert cfg.fullpath.endswith("/com~apple~CloudDocs/Mackup")
assert cfg.apps_to_ignore == set(["subversion", "sequel-pro", "sabnzbd"])
assert cfg.apps_to_sync == set(["sublime-text-3", "x11", "sabnzbd"])
def test_config_engine_filesystem_no_path(self):
with self.assertRaises(ConfigError):
Config("mackup-engine-file_system-no_path.cfg")
def test_config_engine_unknown(self):
with self.assertRaises(ConfigError):
Config("mackup-engine-unknown.cfg")
def test_config_apps_to_ignore(self):
cfg = Config("mackup-apps_to_ignore.cfg")
assert isinstance(cfg.engine, str)
assert cfg.engine == ENGINE_DROPBOX
assert isinstance(cfg.path, str)
assert cfg.path == "/home/some_user/Dropbox"
assert isinstance(cfg.directory, str)
assert cfg.directory == "Mackup"
assert isinstance(cfg.fullpath, str)
assert cfg.fullpath == "/home/some_user/Dropbox/Mackup"
assert cfg.apps_to_ignore == set(["subversion", "sequel-pro", "sabnzbd"])
assert cfg.apps_to_sync == set()
def test_config_apps_to_sync(self):
cfg = Config("mackup-apps_to_sync.cfg")
assert isinstance(cfg.engine, str)
assert cfg.engine == ENGINE_DROPBOX
assert isinstance(cfg.path, str)
assert cfg.path == "/home/some_user/Dropbox"
assert isinstance(cfg.directory, str)
assert cfg.directory == "Mackup"
assert isinstance(cfg.fullpath, str)
assert cfg.fullpath == "/home/some_user/Dropbox/Mackup"
assert cfg.apps_to_ignore == set()
assert cfg.apps_to_sync == set(["sabnzbd", "sublime-text-3", "x11"])
def test_config_apps_to_ignore_and_sync(self):
cfg = Config("mackup-apps_to_ignore_and_sync.cfg")
assert isinstance(cfg.engine, str)
assert cfg.engine == ENGINE_DROPBOX
assert isinstance(cfg.path, str)
assert cfg.path == "/home/some_user/Dropbox"
assert isinstance(cfg.directory, str)
assert cfg.directory == "Mackup"
assert isinstance(cfg.fullpath, str)
assert cfg.fullpath == "/home/some_user/Dropbox/Mackup"
assert cfg.apps_to_ignore == set(["subversion", "sequel-pro", "sabnzbd"])
assert cfg.apps_to_sync == set(["sabnzbd", "sublime-text-3", "x11", "vim"])
def test_config_old_config(self):
self.assertRaises(SystemExit, Config, "mackup-old-config.cfg")
| 7,043 | Python | .py | 149 | 38.496644 | 83 | 0.646602 | lra/mackup | 14,505 | 929 | 359 | GPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,730 | setup.py | numenta_nupic-legacy/setup.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Installation script for Python nupic package."""
import os
import pkg_resources
import sys
from setuptools import setup, find_packages, Extension
from setuptools.command.test import test as BaseTestCommand
REPO_DIR = os.path.dirname(os.path.realpath(__file__))
def getVersion():
"""
Get version from local file.
"""
with open(os.path.join(REPO_DIR, "VERSION"), "r") as versionFile:
return versionFile.read().strip()
def nupicBindingsPrereleaseInstalled():
"""
Make an attempt to determine if a pre-release version of nupic.bindings is
installed already.
@return: boolean
"""
try:
nupicDistribution = pkg_resources.get_distribution("nupic.bindings")
if pkg_resources.parse_version(nupicDistribution.version).is_prerelease:
# A pre-release dev version of nupic.bindings is installed.
return True
except pkg_resources.DistributionNotFound:
pass # Silently ignore. The absence of nupic.bindings will be handled by
# setuptools by default
return False
def parse_file(requirementFile):
try:
return [
line.strip()
for line in open(requirementFile).readlines()
if not line.startswith("#")
]
except IOError:
return []
class TestCommand(BaseTestCommand):
user_options = [("pytest-args=", "a", "Arguments to pass to py.test")]
def initialize_options(self):
BaseTestCommand.initialize_options(self)
self.pytest_args = ["unit"] # pylint: disable=W0201
def finalize_options(self):
BaseTestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
cwd = os.getcwd()
try:
os.chdir("tests")
errno = pytest.main(self.pytest_args)
finally:
os.chdir(cwd)
sys.exit(errno)
def findRequirements():
"""
Read the requirements.txt file and parse into requirements for setup's
install_requirements option.
"""
requirementsPath = os.path.join(REPO_DIR, "requirements.txt")
requirements = parse_file(requirementsPath)
if nupicBindingsPrereleaseInstalled():
# User has a pre-release version of nupic.bindings installed, which is only
# possible if the user installed and built nupic.bindings from source and
# it is up to the user to decide when to update nupic.bindings. We'll
# quietly remove the entry in requirements.txt so as to not conflate the
# two.
requirements = [req for req in requirements if "nupic.bindings" not in req]
return requirements
if __name__ == "__main__":
requirements = findRequirements()
setup(
name="nupic",
version=getVersion(),
install_requires=requirements,
package_dir = {"": "src"},
packages=find_packages("src"),
namespace_packages = ["nupic"],
package_data={
"nupic.support": ["nupic-default.xml",
"nupic-logging.conf"],
"nupic": ["README.md", "LICENSE.txt"],
"nupic.data": ["*.json"],
"nupic.frameworks.opf.exp_generator": ["*.json", "*.tpl"],
"nupic.frameworks.opf.jsonschema": ["*.json"],
"nupic.swarming.exp_generator": ["*.json", "*.tpl"],
"nupic.swarming.jsonschema": ["*.json"],
"nupic.datafiles": ["*.csv", "*.txt"],
},
cmdclass = {"test": TestCommand},
include_package_data=True,
zip_safe=False,
extras_require = {
# Default requirement based on system type
":platform_system=='Linux' or platform_system=='Darwin'":
["pycapnp==0.6.3"],
# Superseded by platform_system-conditional requirement, but keeping
# empty extra for compatibility as recommended by setuptools doc.
"capnp": [],
"viz": ["networkx", "matplotlib", "pygraphviz"]
},
description="Numenta Platform for Intelligent Computing",
author="Numenta",
author_email="help@numenta.org",
url="https://github.com/numenta/nupic",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
# It has to be "5 - Production/Stable" or else pypi rejects it!
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence"
],
long_description=(
"Numenta Platform for Intelligent Computing: a machine intelligence "
"platform that implements the HTM learning algorithms. HTM is a "
"detailed computational theory of the neocortex. At the core of HTM "
"are time-based continuous learning algorithms that store and recall "
"spatial and temporal patterns. NuPIC is suited to a variety of "
"problems, particularly anomaly detection and prediction of streaming "
"data sources.\n\n"
"For more information, see http://numenta.org or the NuPIC wiki at "
"https://github.com/numenta/nupic/wiki.")
)
| 6,107 | Python | .py | 150 | 35.9 | 91 | 0.682317 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,731 | NuPIC Walkthrough.ipynb | numenta_nupic-legacy/examples/NuPIC Walkthrough.ipynb | {
"cells": [
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"# Encoders\n",
"\n",
"* Scalar\n",
"* Date/time\n",
"* Category\n",
"* Multi"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"import numpy"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"from nupic.encoders import ScalarEncoder\n",
"\n",
"ScalarEncoder?"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"3 = [1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n",
"4 = [1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n",
"5 = [0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n"
]
}
],
"source": [
"# 22 bits with 3 active representing values 0 to 100\n",
"# clipInput=True makes values >100 encode the same as 100 (instead of throwing a ValueError)\n",
"# forced=True allows small values for `n` and `w`\n",
"enc = ScalarEncoder(n=22, w=3, minval=2.5, maxval=97.5, clipInput=True, forced=True)\n",
"print \"3 =\", enc.encode(3)\n",
"print \"4 =\", enc.encode(4)\n",
"print \"5 =\", enc.encode(5)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"100 = [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1]\n",
"1000 = [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1]\n"
]
}
],
"source": [
"# Encode maxval\n",
"print \"100 =\", enc.encode(100)\n",
"# See that any larger number gets the same encoding\n",
"print \"1000 =\", enc.encode(1000)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder\n",
"\n",
"RandomDistributedScalarEncoder?"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"3 = [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1]\n",
"4 = [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1]\n",
"5 = [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 1]\n",
"\n",
"100 = [0 1 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n",
"1000 = [0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0]\n"
]
}
],
"source": [
"# 21 bits with 3 active with buckets of size 5\n",
"rdse = RandomDistributedScalarEncoder(n=21, w=3, resolution=5, offset=2.5)\n",
"\n",
"print \"3 = \", rdse.encode(3)\n",
"print \"4 = \", rdse.encode(4)\n",
"print \"5 = \", rdse.encode(5)\n",
"print\n",
"print \"100 = \", rdse.encode(100)\n",
"print \"1000 =\", rdse.encode(1000)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"import datetime\n",
"from nupic.encoders.date import DateEncoder\n",
"\n",
"DateEncoder?"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"now = [0 0 0 0 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0]\n",
"next month = [0 0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 0 0 0 0]\n",
"xmas = [1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1]\n"
]
}
],
"source": [
"de = DateEncoder(season=5)\n",
"\n",
"now = datetime.datetime.strptime(\"2014-05-02 13:08:58\", \"%Y-%m-%d %H:%M:%S\")\n",
"print \"now = \", de.encode(now)\n",
"nextMonth = datetime.datetime.strptime(\"2014-06-02 13:08:58\", \"%Y-%m-%d %H:%M:%S\")\n",
"print \"next month =\", de.encode(nextMonth)\n",
"xmas = datetime.datetime.strptime(\"2014-12-25 13:08:58\", \"%Y-%m-%d %H:%M:%S\")\n",
"print \"xmas = \", de.encode(xmas)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"cat = [0 0 0 1 1 1 0 0 0 0 0 0 0 0 0]\n",
"dog = [0 0 0 0 0 0 1 1 1 0 0 0 0 0 0]\n",
"monkey = [0 0 0 0 0 0 0 0 0 1 1 1 0 0 0]\n",
"slow loris = [0 0 0 0 0 0 0 0 0 0 0 0 1 1 1]\n"
]
}
],
"source": [
"from nupic.encoders.category import CategoryEncoder\n",
"\n",
"categories = (\"cat\", \"dog\", \"monkey\", \"slow loris\")\n",
"encoder = CategoryEncoder(w=3, categoryList=categories, forced=True)\n",
"cat = encoder.encode(\"cat\")\n",
"dog = encoder.encode(\"dog\")\n",
"monkey = encoder.encode(\"monkey\")\n",
"loris = encoder.encode(\"slow loris\")\n",
"print \"cat = \", cat\n",
"print \"dog = \", dog\n",
"print \"monkey = \", monkey\n",
"print \"slow loris =\", loris"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n"
]
}
],
"source": [
"print encoder.encode(None)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[1 1 1 0 0 0 0 0 0 0 0 0 0 0 0]\n"
]
}
],
"source": [
"print encoder.encode(\"unknown\")"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"({'category': ([(1, 1)], 'cat')}, ['category'])\n"
]
}
],
"source": [
"print encoder.decode(cat)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"({'category': ([(1, 2)], 'cat, dog')}, ['category'])\n"
]
}
],
"source": [
"catdog = numpy.array([0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0])\n",
"print encoder.decode(catdog)"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"# Spatial Pooler"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"from nupic.research.spatial_pooler import SpatialPooler\n",
"\n",
    "SpatialPooler?"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"print SpatialPooler"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nupic.research.spatial_pooler.SpatialPooler'>\n"
]
}
],
"source": [
"print SpatialPooler"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nupic.research.spatial_pooler.SpatialPooler'>\n"
]
}
],
"source": [
"print SpatialPooler"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nupic.research.spatial_pooler.SpatialPooler'>\n"
]
}
],
"source": [
"print SpatialPooler"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nupic.research.spatial_pooler.SpatialPooler'>\n"
]
}
],
"source": [
"print SpatialPooler"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nupic.research.spatial_pooler.SpatialPooler'>\n"
]
}
],
"source": [
"print SpatialPooler"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nupic.research.spatial_pooler.SpatialPooler'>\n"
]
}
],
"source": [
"print SpatialPooler"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nupic.research.spatial_pooler.SpatialPooler'>\n"
]
}
],
"source": [
"print SpatialPooler"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nupic.research.spatial_pooler.SpatialPooler'>\n"
]
}
],
"source": [
"print SpatialPooler"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"15\n",
"[0 0 0 1 1 1 0 0 0 0 0 0 0 0 0]\n"
]
}
],
"source": [
"print len(cat)\n",
"print cat"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0 0 1 1 1 1 0 0 0 0 1 1 1 1 0]\n",
"[1 0 0 0 1 1 1 1 0 1 0 0 0 1 1]\n",
"[1 1 0 0 0 0 0 1 1 1 1 1 1 0 0]\n",
"[1 1 0 1 1 0 0 1 1 0 1 0 0 1 1]\n"
]
}
],
"source": [
"sp = SpatialPooler(inputDimensions=(15,),\n",
" columnDimensions=(4,),\n",
" potentialRadius=15,\n",
" numActiveColumnsPerInhArea=1,\n",
" globalInhibition=True,\n",
" synPermActiveInc=0.03,\n",
" potentialPct=1.0)\n",
"import numpy\n",
"for column in xrange(4):\n",
" connected = numpy.zeros((15,), dtype=\"int\")\n",
" sp.getConnectedSynapses(column, connected)\n",
" print connected"
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[1 0 0 0]\n"
]
}
],
"source": [
"output = numpy.zeros((4,), dtype=\"int\")\n",
"sp.compute(cat, learn=True, activeArray=output)\n",
"print output"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"for _ in xrange(20):\n",
" sp.compute(cat, learn=True, activeArray=output)"
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0 0 1 1 1 1 0 0 0 0 1 1 1 1 0]\n",
"[1 0 0 0 1 1 1 1 0 1 0 0 0 1 1]\n",
"[1 1 0 0 0 0 0 1 1 1 1 1 1 0 0]\n",
"[1 1 0 1 1 0 0 1 1 0 1 0 0 1 1]\n"
]
}
],
"source": [
"for column in xrange(4):\n",
" connected = numpy.zeros((15,), dtype=\"int\")\n",
" sp.getConnectedSynapses(column, connected)\n",
" print connected"
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"for _ in xrange(200):\n",
" sp.compute(cat, learn=True, activeArray=output)\n",
" sp.compute(dog, learn=True, activeArray=output)\n",
" sp.compute(monkey, learn=True, activeArray=output)\n",
" sp.compute(loris, learn=True, activeArray=output)"
]
},
{
"cell_type": "code",
"execution_count": 29,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0 0 0 1 1 1 0 0 0 0 0 0 0 0 0]\n",
"[1 0 0 0 1 1 1 1 0 1 0 0 0 1 1]\n",
"[0 0 0 0 0 0 0 0 0 1 1 1 0 0 0]\n",
"[0 0 0 0 0 0 1 1 1 0 0 0 1 1 1]\n"
]
}
],
"source": [
"for column in xrange(4):\n",
" connected = numpy.zeros((15,), dtype=\"int\")\n",
" sp.getConnectedSynapses(column, connected)\n",
" print connected"
]
},
{
"cell_type": "code",
"execution_count": 30,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0 0 0 1 1 0 1 0 0 0 0 0 0 0 0]\n"
]
}
],
"source": [
"noisyCat = numpy.zeros((15,), dtype=\"uint32\")\n",
"noisyCat[3] = 1\n",
"noisyCat[4] = 1\n",
"# This is part of dog!\n",
"noisyCat[6] = 1\n",
"print noisyCat"
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0 1 0 0]\n"
]
}
],
"source": [
"sp.compute(noisyCat, learn=False, activeArray=output)\n",
"print output # matches cat!"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"# Temporal Memory (a.k.a. Sequence Memory, Temporal Pooler)\n",
"\n",
"From: `examples/tm/hello_tm.py`"
]
},
{
"cell_type": "code",
"execution_count": 32,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"from nupic.research.BacktrackingTM import BacktrackingTM\n",
"\n",
"BacktrackingTM?"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"# Step 1: create Temporal Pooler instance with appropriate parameters\n",
"tm = BacktrackingTM(numberOfCols=50, cellsPerColumn=2,\n",
" initialPerm=0.5, connectedPerm=0.5,\n",
" minThreshold=10, newSynapseCount=10,\n",
" permanenceInc=0.1, permanenceDec=0.0,\n",
" activationThreshold=8,\n",
" globalDecay=0, burnIn=1,\n",
" checkSynapseConsistency=False,\n",
" pamLength=10)"
]
},
{
"cell_type": "code",
"execution_count": 34,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"# Step 2: create input vectors to feed to the temporal memory. Each input vector\n",
"# must be numberOfCols wide. Here we create a simple sequence of 5 vectors\n",
"# representing the sequence A -> B -> C -> D -> E\n",
"x = numpy.zeros((5, tm.numberOfCols), dtype=\"uint32\")\n",
"x[0,0:10] = 1 # Input SDR representing \"A\", corresponding to columns 0-9\n",
"x[1,10:20] = 1 # Input SDR representing \"B\", corresponding to columns 10-19\n",
"x[2,20:30] = 1 # Input SDR representing \"C\", corresponding to columns 20-29\n",
"x[3,30:40] = 1 # Input SDR representing \"D\", corresponding to columns 30-39\n",
"x[4,40:50] = 1 # Input SDR representing \"E\", corresponding to columns 40-49"
]
},
{
"cell_type": "code",
"execution_count": 35,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"# Step 3: send this simple sequence to the temporal memory for learning\n",
"# We repeat the sequence 10 times\n",
"for i in range(10):\n",
"\n",
" # Send each letter in the sequence in order\n",
" for j in range(5):\n",
"\n",
" # The compute method performs one step of learning and/or inference. Note:\n",
" # here we just perform learning but you can perform prediction/inference and\n",
" # learning in the same step if you want (online learning).\n",
" tm.compute(x[j], enableLearn = True, enableInference = False)\n",
"\n",
    " # This function prints the segments associated with every cell.\n",
" # If you really want to understand the TP, uncomment this line. By following\n",
" # every step you can get an excellent understanding for exactly how the TP\n",
" # learns.\n",
" #tm.printCells()\n",
"\n",
" # The reset command tells the TM that a sequence just ended and essentially\n",
" # zeros out all the states. It is not strictly necessary but it's a bit\n",
" # messier without resets, and the TM learns quicker with resets.\n",
" tm.reset()"
]
},
{
"cell_type": "code",
"execution_count": 36,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"-------- A -----------\n",
"Raw input vector\n",
"1111111111 0000000000 0000000000 0000000000 0000000000 \n",
"\n",
"All the active and predicted cells:\n",
"\n",
"Inference Active state\n",
"1111111111 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"Inference Predicted state\n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 1111111111 0000000000 0000000000 0000000000 \n",
"\n",
"\n",
"The following columns are predicted by the temporal memory. This\n",
"should correspond to columns in the *next* item in the sequence.\n",
"[10 11 12 13 14 15 16 17 18 19] \n",
"\n",
"\n",
"-------- B -----------\n",
"Raw input vector\n",
"0000000000 1111111111 0000000000 0000000000 0000000000 \n",
"\n",
"All the active and predicted cells:\n",
"\n",
"Inference Active state\n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 1111111111 0000000000 0000000000 0000000000 \n",
"Inference Predicted state\n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 0000000000 1111111111 0000000000 0000000000 \n",
"\n",
"\n",
"The following columns are predicted by the temporal memory. This\n",
"should correspond to columns in the *next* item in the sequence.\n",
"[20 21 22 23 24 25 26 27 28 29] \n",
"\n",
"\n",
"-------- C -----------\n",
"Raw input vector\n",
"0000000000 0000000000 1111111111 0000000000 0000000000 \n",
"\n",
"All the active and predicted cells:\n",
"\n",
"Inference Active state\n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 0000000000 1111111111 0000000000 0000000000 \n",
"Inference Predicted state\n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 0000000000 0000000000 1111111111 0000000000 \n",
"\n",
"\n",
"The following columns are predicted by the temporal memory. This\n",
"should correspond to columns in the *next* item in the sequence.\n",
"[30 31 32 33 34 35 36 37 38 39] \n",
"\n",
"\n",
"-------- D -----------\n",
"Raw input vector\n",
"0000000000 0000000000 0000000000 1111111111 0000000000 \n",
"\n",
"All the active and predicted cells:\n",
"\n",
"Inference Active state\n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 0000000000 0000000000 1111111111 0000000000 \n",
"Inference Predicted state\n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 0000000000 0000000000 0000000000 1111111111 \n",
"\n",
"\n",
"The following columns are predicted by the temporal memory. This\n",
"should correspond to columns in the *next* item in the sequence.\n",
"[40 41 42 43 44 45 46 47 48 49] \n",
"\n",
"\n",
"-------- E -----------\n",
"Raw input vector\n",
"0000000000 0000000000 0000000000 0000000000 1111111111 \n",
"\n",
"All the active and predicted cells:\n",
"\n",
"Inference Active state\n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 0000000000 0000000000 0000000000 1111111111 \n",
"Inference Predicted state\n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"\n",
"\n",
"The following columns are predicted by the temporal memory. This\n",
"should correspond to columns in the *next* item in the sequence.\n",
"[] \n"
]
}
],
"source": [
"# Step 4: send the same sequence of vectors and look at predictions made by\n",
"# temporal memory\n",
"\n",
"# Utility routine for printing the input vector\n",
"def formatRow(x):\n",
" s = ''\n",
" for c in range(len(x)):\n",
" if c > 0 and c % 10 == 0:\n",
" s += ' '\n",
" s += str(x[c])\n",
" s += ' '\n",
" return s\n",
"\n",
"for j in range(5):\n",
" print \"\\n\\n--------\",\"ABCDE\"[j],\"-----------\"\n",
" print \"Raw input vector\\n\",formatRow(x[j])\n",
"\n",
" # Send each vector to the TP, with learning turned off\n",
" tm.compute(x[j], enableLearn=False, enableInference=True)\n",
"\n",
" # This method prints out the active state of each cell followed by the\n",
" # predicted state of each cell. For convenience the cells are grouped\n",
" # 10 at a time. When there are multiple cells per column the printout\n",
" # is arranged so the cells in a column are stacked together\n",
" #\n",
" # What you should notice is that the columns where active state is 1\n",
" # represent the SDR for the current input pattern and the columns where\n",
" # predicted state is 1 represent the SDR for the next expected pattern\n",
" print \"\\nAll the active and predicted cells:\"\n",
" tm.printStates(printPrevious=False, printLearnState=False)\n",
"\n",
" # tm.getPredictedState() gets the predicted cells.\n",
" # predictedCells[c][i] represents the state of the i'th cell in the c'th\n",
" # column. To see if a column is predicted, we can simply take the OR\n",
" # across all the cells in that column. In numpy we can do this by taking\n",
" # the max along axis 1.\n",
" print \"\\n\\nThe following columns are predicted by the temporal memory. This\"\n",
" print \"should correspond to columns in the *next* item in the sequence.\"\n",
" predictedCells = tm.getPredictedState()\n",
" print formatRow(predictedCells.max(axis=1).nonzero())"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"# Networks and Regions\n",
"\n",
"See slides."
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"# Online Prediction Framework\n",
"\n",
"* CLAModel\n",
"* OPF Client\n",
"* Swarming\n",
"\n",
"# CLAModel\n",
"\n",
"From `examples/opf/clients/hotgym/simple/hotgym.py`"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"# Model Parameters\n",
"\n",
"`MODEL_PARAMS` have all of the parameters for the CLA model and subcomponents"
]
},
{
"cell_type": "code",
"execution_count": 37,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"# Model Params!\n",
"MODEL_PARAMS = {\n",
" # Type of model that the rest of these parameters apply to.\n",
" 'model': \"HTMPrediction\",\n",
"\n",
" # Version that specifies the format of the config.\n",
" 'version': 1,\n",
"\n",
" # Intermediate variables used to compute fields in modelParams and also\n",
" # referenced from the control section.\n",
" 'aggregationInfo': { 'days': 0,\n",
" 'fields': [('consumption', 'sum')],\n",
" 'hours': 1,\n",
" 'microseconds': 0,\n",
" 'milliseconds': 0,\n",
" 'minutes': 0,\n",
" 'months': 0,\n",
" 'seconds': 0,\n",
" 'weeks': 0,\n",
" 'years': 0},\n",
"\n",
" 'predictAheadTime': None,\n",
"\n",
" # Model parameter dictionary.\n",
" 'modelParams': {\n",
" # The type of inference that this model will perform\n",
" 'inferenceType': 'TemporalMultiStep',\n",
"\n",
" 'sensorParams': {\n",
" # Sensor diagnostic output verbosity control;\n",
" # if > 0: sensor region will print out on screen what it's sensing\n",
" # at each step 0: silent; >=1: some info; >=2: more info;\n",
" # >=3: even more info (see compute() in py/regions/RecordSensor.py)\n",
" 'verbosity' : 0,\n",
"\n",
" # Include the encoders we use\n",
" 'encoders': {\n",
" u'timestamp_timeOfDay': {\n",
" 'fieldname': u'timestamp',\n",
" 'name': u'timestamp_timeOfDay',\n",
" 'timeOfDay': (21, 0.5),\n",
" 'type': 'DateEncoder'\n",
" },\n",
" u'timestamp_dayOfWeek': None,\n",
" u'timestamp_weekend': None,\n",
" u'consumption': {\n",
" 'clipInput': True,\n",
" 'fieldname': u'consumption',\n",
" 'maxval': 100.0,\n",
" 'minval': 0.0,\n",
" 'n': 50,\n",
" 'name': u'c1',\n",
" 'type': 'ScalarEncoder',\n",
" 'w': 21\n",
" },\n",
" },\n",
"\n",
" # A dictionary specifying the period for automatically-generated\n",
" # resets from a RecordSensor;\n",
" #\n",
" # None = disable automatically-generated resets (also disabled if\n",
" # all of the specified values evaluate to 0).\n",
    "        # Valid keys are the desired combination of the following:\n",
" # days, hours, minutes, seconds, milliseconds, microseconds, weeks\n",
" #\n",
" # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),\n",
" #\n",
" # (value generated from SENSOR_AUTO_RESET)\n",
" 'sensorAutoReset' : None,\n",
" },\n",
"\n",
" 'spEnable': True,\n",
"\n",
" 'spParams': {\n",
" # SP diagnostic output verbosity control;\n",
" # 0: silent; >=1: some info; >=2: more info;\n",
" 'spVerbosity' : 0,\n",
"\n",
" # Spatial Pooler implementation selector, see getSPClass\n",
" # in py/regions/SPRegion.py for details\n",
" # 'py' (default), 'cpp' (speed optimized, new)\n",
" 'spatialImp' : 'cpp',\n",
"\n",
" 'globalInhibition': 1,\n",
"\n",
" # Number of cell columns in the cortical region (same number for\n",
" # SP and TM)\n",
" # (see also tpNCellsPerCol)\n",
" 'columnCount': 2048,\n",
"\n",
" 'inputWidth': 0,\n",
"\n",
" # SP inhibition control (absolute value);\n",
" # Maximum number of active columns in the SP region's output (when\n",
" # there are more, the weaker ones are suppressed)\n",
" 'numActiveColumnsPerInhArea': 40,\n",
"\n",
" 'seed': 1956,\n",
"\n",
" # potentialPct\n",
" # What percent of the columns's receptive field is available\n",
" # for potential synapses. At initialization time, we will\n",
" # choose potentialPct * (2*potentialRadius+1)^2\n",
" 'potentialPct': 0.5,\n",
"\n",
" # The default connected threshold. Any synapse whose\n",
" # permanence value is above the connected threshold is\n",
" # a \"connected synapse\", meaning it can contribute to the\n",
" # cell's firing. Typical value is 0.10. Cells whose activity\n",
" # level before inhibition falls below minDutyCycleBeforeInh\n",
" # will have their own internal synPermConnectedCell\n",
" # threshold set below this default value.\n",
" # (This concept applies to both SP and TM and so 'cells'\n",
" # is correct here as opposed to 'columns')\n",
" 'synPermConnected': 0.1,\n",
"\n",
" 'synPermActiveInc': 0.1,\n",
"\n",
" 'synPermInactiveDec': 0.005,\n",
" },\n",
"\n",
" # Controls whether TM is enabled or disabled;\n",
" # TM is necessary for making temporal predictions, such as predicting\n",
" # the next inputs. Without TP, the model is only capable of\n",
" # reconstructing missing sensor inputs (via SP).\n",
" 'tmEnable' : True,\n",
"\n",
" 'tmParams': {\n",
" # TM diagnostic output verbosity control;\n",
" # 0: silent; [1..6]: increasing levels of verbosity\n",
" # (see verbosity in nupic/trunk/py/nupic/research/TP.py and BacktrackingTMCPP.py)\n",
" 'verbosity': 0,\n",
"\n",
" # Number of cell columns in the cortical region (same number for\n",
" # SP and TM)\n",
" # (see also tpNCellsPerCol)\n",
" 'columnCount': 2048,\n",
"\n",
" # The number of cells (i.e., states), allocated per column.\n",
" 'cellsPerColumn': 32,\n",
"\n",
" 'inputWidth': 2048,\n",
"\n",
" 'seed': 1960,\n",
"\n",
" # Temporal Pooler implementation selector (see _getTPClass in\n",
" # CLARegion.py).\n",
" 'temporalImp': 'cpp',\n",
"\n",
" # New Synapse formation count\n",
" # NOTE: If None, use spNumActivePerInhArea\n",
" #\n",
" # TODO: need better explanation\n",
" 'newSynapseCount': 20,\n",
"\n",
" # Maximum number of synapses per segment\n",
" # > 0 for fixed-size CLA\n",
" # -1 for non-fixed-size CLA\n",
" #\n",
" # TODO: for Ron: once the appropriate value is placed in TP\n",
" # constructor, see if we should eliminate this parameter from\n",
" # description.py.\n",
" 'maxSynapsesPerSegment': 32,\n",
"\n",
" # Maximum number of segments per cell\n",
" # > 0 for fixed-size CLA\n",
" # -1 for non-fixed-size CLA\n",
" #\n",
" # TODO: for Ron: once the appropriate value is placed in TP\n",
" # constructor, see if we should eliminate this parameter from\n",
" # description.py.\n",
" 'maxSegmentsPerCell': 128,\n",
"\n",
" # Initial Permanence\n",
" # TODO: need better explanation\n",
" 'initialPerm': 0.21,\n",
"\n",
" # Permanence Increment\n",
" 'permanenceInc': 0.1,\n",
"\n",
" # Permanence Decrement\n",
" # If set to None, will automatically default to tpPermanenceInc\n",
" # value.\n",
" 'permanenceDec' : 0.1,\n",
"\n",
" 'globalDecay': 0.0,\n",
"\n",
" 'maxAge': 0,\n",
"\n",
" # Minimum number of active synapses for a segment to be considered\n",
" # during search for the best-matching segments.\n",
" # None=use default\n",
" # Replaces: tpMinThreshold\n",
" 'minThreshold': 9,\n",
"\n",
" # Segment activation threshold.\n",
" # A segment is active if it has >= tpSegmentActivationThreshold\n",
" # connected synapses that are active due to infActiveState\n",
" # None=use default\n",
" # Replaces: tpActivationThreshold\n",
" 'activationThreshold': 12,\n",
"\n",
" 'outputType': 'normal',\n",
"\n",
" # \"Pay Attention Mode\" length. This tells the TM how many new\n",
" # elements to append to the end of a learned sequence at a time.\n",
" # Smaller values are better for datasets with short sequences,\n",
" # higher values are better for datasets with long sequences.\n",
" 'pamLength': 1,\n",
" },\n",
"\n",
" 'clParams': {\n",
" 'regionName' : 'SDRClassifierRegion',\n",
"\n",
" # Classifier diagnostic output verbosity control;\n",
" # 0: silent; [1..6]: increasing levels of verbosity\n",
" 'verbosity' : 0,\n",
"\n",
" # This controls how fast the classifier learns/forgets. Higher values\n",
" # make it adapt faster and forget older patterns faster.\n",
" 'alpha': 0.005,\n",
"\n",
" # This is set after the call to updateConfigFromSubConfig and is\n",
" # computed from the aggregationInfo and predictAheadTime.\n",
" 'steps': '1,5',\n",
"\n",
" 'implementation': 'cpp',\n",
" },\n",
"\n",
" 'trainSPNetOnlyIfRequested': False,\n",
" },\n",
"}"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"# Dataset Helpers"
]
},
{
"cell_type": "code",
"execution_count": 38,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"/Users/mleborgne/_git/nupic/src/nupic/datafiles/extra/hotgym/hotgym.csv\n",
"\n",
"gym,address,timestamp,consumption\n",
"string,string,datetime,float\n",
"S,,T,\n",
"Balgowlah Platinum,Shop 67 197-215 Condamine Street Balgowlah 2093,2010-07-02 00:00:00.0,5.3\n",
"Balgowlah Platinum,Shop 67 197-215 Condamine Street Balgowlah 2093,2010-07-02 00:15:00.0,5.5\n",
"Balgowlah Platinum,Shop 67 197-215 Condamine Street Balgowlah 2093,2010-07-02 00:30:00.0,5.1\n",
"Balgowlah Platinum,Shop 67 197-215 Condamine Street Balgowlah 2093,2010-07-02 00:45:00.0,5.3\n",
"Balgowlah Platinum,Shop 67 197-215 Condamine Street Balgowlah 2093,2010-07-02 01:00:00.0,5.2\n"
]
}
],
"source": [
"from pkg_resources import resource_filename\n",
"\n",
"datasetPath = resource_filename(\"nupic.datafiles\", \"extra/hotgym/hotgym.csv\")\n",
"print datasetPath\n",
"\n",
"with open(datasetPath) as inputFile:\n",
" print\n",
" for _ in xrange(8):\n",
" print inputFile.next().strip()"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"# Loading Data\n",
"\n",
"`FileRecordStream` - file reader for the NuPIC file format (CSV with three header rows, understands datetimes)"
]
},
{
"cell_type": "code",
"execution_count": 39,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['Balgowlah Platinum', 'Shop 67 197-215 Condamine Street Balgowlah 2093', datetime.datetime(2010, 7, 2, 0, 0), 5.3]\n",
"['Balgowlah Platinum', 'Shop 67 197-215 Condamine Street Balgowlah 2093', datetime.datetime(2010, 7, 2, 0, 15), 5.5]\n",
"['Balgowlah Platinum', 'Shop 67 197-215 Condamine Street Balgowlah 2093', datetime.datetime(2010, 7, 2, 0, 30), 5.1]\n",
"['Balgowlah Platinum', 'Shop 67 197-215 Condamine Street Balgowlah 2093', datetime.datetime(2010, 7, 2, 0, 45), 5.3]\n",
"['Balgowlah Platinum', 'Shop 67 197-215 Condamine Street Balgowlah 2093', datetime.datetime(2010, 7, 2, 1, 0), 5.2]\n"
]
}
],
"source": [
"from nupic.data.file_record_stream import FileRecordStream\n",
"\n",
"def getData():\n",
" return FileRecordStream(datasetPath)\n",
"\n",
"data = getData()\n",
"for _ in xrange(5):\n",
" print data.next()"
]
},
{
"cell_type": "code",
"execution_count": 40,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"from nupic.frameworks.opf.model_factory import ModelFactory\n",
"model = ModelFactory.create(MODEL_PARAMS)\n",
"model.enableInference({'predictedField': 'consumption'})"
]
},
{
"cell_type": "code",
"execution_count": 41,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"input: 5.3\n",
"prediction: 5.3\n",
"input: 5.5\n",
"prediction: 5.5\n",
"input: 5.1\n",
"prediction: 5.36\n",
"input: 5.3\n",
"prediction: 5.1\n",
"input: 5.2\n",
"prediction: 5.342\n",
"input: 5.5\n",
"prediction: 5.2994\n",
"input: 4.5\n",
"prediction: 5.35958\n",
"input: 1.2\n",
"prediction: 4.92\n",
"input: 1.1\n",
"prediction: 1.2\n",
"input: 1.2\n",
"prediction: 1.17\n",
"input: 1.2\n",
"prediction: 1.179\n",
"input: 1.2\n",
"prediction: 1.1853\n",
"input: 1.2\n",
"prediction: 1.18971\n",
"input: 1.2\n",
"prediction: 1.192797\n",
"input: 1.1\n",
"prediction: 1.1949579\n",
"input: 1.2\n",
"prediction: 1.16647053\n",
"input: 1.1\n",
"prediction: 1.176529371\n",
"input: 1.2\n",
"prediction: 1.1535705597\n",
"input: 1.2\n",
"prediction: 1.16749939179\n",
"input: 1.1\n",
"prediction: 1.17724957425\n",
"input: 1.2\n",
"prediction: 1.15407470198\n",
"input: 6.0\n",
"prediction: 1.16785229138\n",
"input: 7.9\n",
"prediction: 5.551706\n",
"input: 8.4\n",
"prediction: 6.2561942\n",
"input: 10.6\n",
"prediction: 6.89933594\n",
"input: 12.4\n",
"prediction: 10.6\n",
"input: 12.1\n",
"prediction: 12.4\n",
"input: 12.4\n",
"prediction: 12.31\n",
"input: 11.4\n",
"prediction: 12.337\n",
"input: 11.2\n",
"prediction: 10.84\n",
"input: 10.8\n",
"prediction: 10.948\n",
"input: 12.0\n",
"prediction: 10.9036\n",
"input: 11.8\n",
"prediction: 11.23252\n",
"input: 11.9\n",
"prediction: 11.402764\n",
"input: 11.4\n",
"prediction: 11.5519348\n",
"input: 11.0\n",
"prediction: 11.50635436\n",
"input: 9.8\n",
"prediction: 11.354448052\n",
"input: 9.8\n",
"prediction: 10.8881136364\n",
"input: 10.8\n",
"prediction: 10.5616795455\n",
"input: 11.1\n",
"prediction: 10.6331756818\n",
"input: 11.1\n",
"prediction: 10.7732229773\n",
"input: 11.0\n",
"prediction: 10.8712560841\n",
"input: 10.7\n",
"prediction: 10.9098792589\n",
"input: 10.6\n",
"prediction: 10.8469154812\n",
"input: 10.3\n",
"prediction: 10.7728408368\n",
"input: 10.1\n",
"prediction: 10.6309885858\n",
"input: 12.9\n",
"prediction: 10.4716920101\n",
"input: 10.5\n",
"prediction: 10.4716920101\n",
"input: 9.7\n",
"prediction: 10.480184407\n",
"input: 9.7\n",
"prediction: 10.2461290849\n",
"input: 9.2\n",
"prediction: 10.0822903594\n",
"input: 9.2\n",
"prediction: 9.81760325161\n",
"input: 9.2\n",
"prediction: 9.63232227613\n",
"input: 9.3\n",
"prediction: 9.50262559329\n",
"input: 9.1\n",
"prediction: 9.4418379153\n",
"input: 9.0\n",
"prediction: 9.33928654071\n",
"input: 8.9\n",
"prediction: 9.2375005785\n",
"input: 9.0\n",
"prediction: 9.13625040495\n",
"input: 8.9\n",
"prediction: 9.09537528346\n",
"input: 8.9\n",
"prediction: 9.03676269843\n",
"input: 9.0\n",
"prediction: 8.9957338889\n",
"input: 9.2\n",
"prediction: 8.99701372223\n",
"input: 10.0\n",
"prediction: 9.05790960556\n",
"input: 10.7\n",
"prediction: 9.34053672389\n",
"input: 8.9\n",
"prediction: 9.74837570672\n",
"input: 9.0\n",
"prediction: 9.49386299471\n",
"input: 9.0\n",
"prediction: 9.34570409629\n",
"input: 9.3\n",
"prediction: 9.24199286741\n",
"input: 9.3\n",
"prediction: 9.25939500718\n",
"input: 9.1\n",
"prediction: 9.27157650503\n",
"input: 9.1\n",
"prediction: 9.22010355352\n",
"input: 9.1\n",
"prediction: 9.18407248746\n",
"input: 9.2\n",
"prediction: 9.15885074122\n",
"input: 9.4\n",
"prediction: 9.17119551886\n",
"input: 9.3\n",
"prediction: 9.2398368632\n",
"input: 9.3\n",
"prediction: 9.25788580424\n",
"input: 9.1\n",
"prediction: 9.27052006297\n",
"input: 9.1\n",
"prediction: 9.21936404408\n",
"input: 11.0\n",
"prediction: 9.18355483085\n",
"input: 9.0\n",
"prediction: 9.7284883816\n",
"input: 8.6\n",
"prediction: 9.50994186712\n",
"input: 3.0\n",
"prediction: 9.50994186712\n",
"input: 1.3\n",
"prediction: 4.344\n",
"input: 1.2\n",
"prediction: 1.20749660397\n",
"input: 1.3\n",
"prediction: 1.20524762278\n",
"input: 1.3\n",
"prediction: 1.23367333594\n",
"input: 1.3\n",
"prediction: 1.25357133516\n",
"input: 1.2\n",
"prediction: 1.26749993461\n",
"input: 1.3\n",
"prediction: 1.24724995423\n",
"input: 1.2\n",
"prediction: 1.26307496796\n",
"input: 1.3\n",
"prediction: 1.24415247757\n",
"input: 1.2\n",
"prediction: 1.2609067343\n",
"input: 1.3\n",
"prediction: 1.24263471401\n",
"input: 1.2\n",
"prediction: 1.25984429981\n",
"input: 1.1\n",
"prediction: 1.24189100987\n",
"input: 2.3\n",
"prediction: 1.19932370691\n",
"input: 5.5\n",
"prediction: 3.7308\n",
"input: 5.5\n",
"prediction: 6.8366746106\n",
"input: 5.8\n",
"prediction: 6.43567222742\n",
"input: 5.7\n",
"prediction: 6.24497055919\n"
]
}
],
"source": [
"data = getData()\n",
"for _ in xrange(100):\n",
" record = dict(zip(data.getFieldNames(), data.next()))\n",
" print \"input: \", record[\"consumption\"]\n",
" result = model.run(record)\n",
" print \"prediction: \", result.inferences[\"multiStepBestPredictions\"][1]"
]
},
{
"cell_type": "code",
"execution_count": 42,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"5-step prediction: 1.19932370691\n"
]
}
],
"source": [
"print \"5-step prediction: \", result.inferences[\"multiStepBestPredictions\"][5]"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"# Anomaly Score"
]
},
{
"cell_type": "code",
"execution_count": 43,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"# Model Params!\n",
"MODEL_PARAMS = {\n",
" # Type of model that the rest of these parameters apply to.\n",
" 'model': \"HTMPrediction\",\n",
"\n",
" # Version that specifies the format of the config.\n",
" 'version': 1,\n",
"\n",
" # Intermediate variables used to compute fields in modelParams and also\n",
" # referenced from the control section.\n",
" 'aggregationInfo': { 'days': 0,\n",
" 'fields': [('consumption', 'sum')],\n",
" 'hours': 1,\n",
" 'microseconds': 0,\n",
" 'milliseconds': 0,\n",
" 'minutes': 0,\n",
" 'months': 0,\n",
" 'seconds': 0,\n",
" 'weeks': 0,\n",
" 'years': 0},\n",
"\n",
" 'predictAheadTime': None,\n",
"\n",
" # Model parameter dictionary.\n",
" 'modelParams': {\n",
" # The type of inference that this model will perform\n",
" 'inferenceType': 'TemporalAnomaly',\n",
"\n",
" 'sensorParams': {\n",
" # Sensor diagnostic output verbosity control;\n",
" # if > 0: sensor region will print out on screen what it's sensing\n",
" # at each step 0: silent; >=1: some info; >=2: more info;\n",
" # >=3: even more info (see compute() in py/regions/RecordSensor.py)\n",
" 'verbosity' : 0,\n",
"\n",
" # Include the encoders we use\n",
" 'encoders': {\n",
" u'timestamp_timeOfDay': {\n",
" 'fieldname': u'timestamp',\n",
" 'name': u'timestamp_timeOfDay',\n",
" 'timeOfDay': (21, 0.5),\n",
" 'type': 'DateEncoder'},\n",
" u'timestamp_dayOfWeek': None,\n",
" u'timestamp_weekend': None,\n",
" u'consumption': {\n",
" 'clipInput': True,\n",
" 'fieldname': u'consumption',\n",
" 'maxval': 100.0,\n",
" 'minval': 0.0,\n",
" 'n': 50,\n",
" 'name': u'c1',\n",
" 'type': 'ScalarEncoder',\n",
" 'w': 21},},\n",
"\n",
" # A dictionary specifying the period for automatically-generated\n",
" # resets from a RecordSensor;\n",
" #\n",
" # None = disable automatically-generated resets (also disabled if\n",
" # all of the specified values evaluate to 0).\n",
    "        # Valid keys are the desired combination of the following:\n",
" # days, hours, minutes, seconds, milliseconds, microseconds, weeks\n",
" #\n",
" # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),\n",
" #\n",
" # (value generated from SENSOR_AUTO_RESET)\n",
" 'sensorAutoReset' : None,\n",
" },\n",
"\n",
" 'spEnable': True,\n",
"\n",
" 'spParams': {\n",
" # SP diagnostic output verbosity control;\n",
" # 0: silent; >=1: some info; >=2: more info;\n",
" 'spVerbosity' : 0,\n",
"\n",
" # Spatial Pooler implementation selector, see getSPClass\n",
" # in py/regions/SPRegion.py for details\n",
" # 'py' (default), 'cpp' (speed optimized, new)\n",
" 'spatialImp' : 'cpp',\n",
"\n",
" 'globalInhibition': 1,\n",
"\n",
" # Number of cell columns in the cortical region (same number for\n",
" # SP and TM)\n",
" # (see also tpNCellsPerCol)\n",
" 'columnCount': 2048,\n",
"\n",
" 'inputWidth': 0,\n",
"\n",
" # SP inhibition control (absolute value);\n",
" # Maximum number of active columns in the SP region's output (when\n",
" # there are more, the weaker ones are suppressed)\n",
" 'numActiveColumnsPerInhArea': 40,\n",
"\n",
" 'seed': 1956,\n",
"\n",
" # potentialPct\n",
" # What percent of the columns's receptive field is available\n",
" # for potential synapses. At initialization time, we will\n",
" # choose potentialPct * (2*potentialRadius+1)^2\n",
" 'potentialPct': 0.5,\n",
"\n",
" # The default connected threshold. Any synapse whose\n",
" # permanence value is above the connected threshold is\n",
" # a \"connected synapse\", meaning it can contribute to the\n",
" # cell's firing. Typical value is 0.10. Cells whose activity\n",
" # level before inhibition falls below minDutyCycleBeforeInh\n",
" # will have their own internal synPermConnectedCell\n",
" # threshold set below this default value.\n",
" # (This concept applies to both SP and TM and so 'cells'\n",
" # is correct here as opposed to 'columns')\n",
" 'synPermConnected': 0.1,\n",
"\n",
" 'synPermActiveInc': 0.1,\n",
"\n",
" 'synPermInactiveDec': 0.005,\n",
" },\n",
"\n",
" # Controls whether TM is enabled or disabled;\n",
" # TM is necessary for making temporal predictions, such as predicting\n",
    "        # the next inputs. Without TM, the model is only capable of\n",
" # reconstructing missing sensor inputs (via SP).\n",
" 'tmEnable' : True,\n",
"\n",
" 'tmParams': {\n",
" # TM diagnostic output verbosity control;\n",
" # 0: silent; [1..6]: increasing levels of verbosity\n",
" # (see verbosity in nupic/trunk/py/nupic/research/TP.py and BacktrackingTMCPP.py)\n",
" 'verbosity': 0,\n",
"\n",
" # Number of cell columns in the cortical region (same number for\n",
" # SP and TM)\n",
" # (see also tpNCellsPerCol)\n",
" 'columnCount': 2048,\n",
"\n",
" # The number of cells (i.e., states), allocated per column.\n",
" 'cellsPerColumn': 32,\n",
"\n",
" 'inputWidth': 2048,\n",
"\n",
" 'seed': 1960,\n",
"\n",
" # Temporal Pooler implementation selector (see _getTPClass in\n",
" # CLARegion.py).\n",
" 'temporalImp': 'cpp',\n",
"\n",
" # New Synapse formation count\n",
" # NOTE: If None, use spNumActivePerInhArea\n",
" #\n",
" # TODO: need better explanation\n",
" 'newSynapseCount': 20,\n",
"\n",
" # Maximum number of synapses per segment\n",
" # > 0 for fixed-size CLA\n",
" # -1 for non-fixed-size CLA\n",
" #\n",
" # TODO: for Ron: once the appropriate value is placed in TP\n",
" # constructor, see if we should eliminate this parameter from\n",
" # description.py.\n",
" 'maxSynapsesPerSegment': 32,\n",
"\n",
" # Maximum number of segments per cell\n",
" # > 0 for fixed-size CLA\n",
" # -1 for non-fixed-size CLA\n",
" #\n",
" # TODO: for Ron: once the appropriate value is placed in TP\n",
" # constructor, see if we should eliminate this parameter from\n",
" # description.py.\n",
" 'maxSegmentsPerCell': 128,\n",
"\n",
" # Initial Permanence\n",
" # TODO: need better explanation\n",
" 'initialPerm': 0.21,\n",
"\n",
" # Permanence Increment\n",
" 'permanenceInc': 0.1,\n",
"\n",
" # Permanence Decrement\n",
" # If set to None, will automatically default to tpPermanenceInc\n",
" # value.\n",
" 'permanenceDec' : 0.1,\n",
"\n",
" 'globalDecay': 0.0,\n",
"\n",
" 'maxAge': 0,\n",
"\n",
" # Minimum number of active synapses for a segment to be considered\n",
" # during search for the best-matching segments.\n",
" # None=use default\n",
" # Replaces: tpMinThreshold\n",
" 'minThreshold': 9,\n",
"\n",
" # Segment activation threshold.\n",
" # A segment is active if it has >= tpSegmentActivationThreshold\n",
" # connected synapses that are active due to infActiveState\n",
" # None=use default\n",
" # Replaces: tpActivationThreshold\n",
" 'activationThreshold': 12,\n",
"\n",
" 'outputType': 'normal',\n",
"\n",
" # \"Pay Attention Mode\" length. This tells the TM how many new\n",
" # elements to append to the end of a learned sequence at a time.\n",
" # Smaller values are better for datasets with short sequences,\n",
" # higher values are better for datasets with long sequences.\n",
" 'pamLength': 1,\n",
" },\n",
"\n",
" 'clParams': {\n",
" 'regionName' : 'SDRClassifierRegion',\n",
"\n",
" # Classifier diagnostic output verbosity control;\n",
" # 0: silent; [1..6]: increasing levels of verbosity\n",
" 'verbosity' : 0,\n",
"\n",
" # This controls how fast the classifier learns/forgets. Higher values\n",
" # make it adapt faster and forget older patterns faster.\n",
" 'alpha': 0.005,\n",
"\n",
" # This is set after the call to updateConfigFromSubConfig and is\n",
" # computed from the aggregationInfo and predictAheadTime.\n",
" 'steps': '1',\n",
"\n",
" 'implementation': 'cpp',\n",
" },\n",
"\n",
" 'anomalyParams': {\n",
" u'anomalyCacheRecords': None,\n",
" u'autoDetectThreshold': None,\n",
" u'autoDetectWaitRecords': 2184\n",
" },\n",
"\n",
" 'trainSPNetOnlyIfRequested': False,\n",
" },\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": 44,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"from nupic.frameworks.opf.model_factory import ModelFactory\n",
"model = ModelFactory.create(MODEL_PARAMS)\n",
"model.enableInference({'predictedField': 'consumption'})"
]
},
{
"cell_type": "code",
"execution_count": 45,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"input: 5.3\n",
"prediction: 5.3\n",
"input: 5.5\n",
"prediction: 5.5\n",
"input: 5.1\n",
"prediction: 5.36\n",
"input: 5.3\n",
"prediction: 5.1\n",
"input: 5.2\n",
"prediction: 5.342\n"
]
}
],
"source": [
"data = getData()\n",
"for _ in xrange(5):\n",
" record = dict(zip(data.getFieldNames(), data.next()))\n",
" print \"input: \", record[\"consumption\"]\n",
" result = model.run(record)\n",
" print \"prediction: \", result.inferences[\"multiStepBestPredictions\"][1]"
]
},
{
"cell_type": "code",
"execution_count": 46,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"ModelResult(\tpredictionNumber=4\n",
"\trawInput={'timestamp': datetime.datetime(2010, 7, 2, 1, 0), 'gym': 'Balgowlah Platinum', 'consumption': 5.2, 'address': 'Shop 67 197-215 Condamine Street Balgowlah 2093'}\n",
"\tsensorInput=SensorInput(\tdataRow=(5.2, 1.0)\n",
"\tdataDict={'timestamp': datetime.datetime(2010, 7, 2, 1, 0), 'gym': 'Balgowlah Platinum', 'consumption': 5.2, 'address': 'Shop 67 197-215 Condamine Street Balgowlah 2093'}\n",
"\tdataEncodings=[array([ 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32), array([ 0., 0., 0., ..., 0., 0., 0.], dtype=float32)]\n",
"\tsequenceReset=0.0\n",
"\tcategory=-1\n",
")\n",
"\tinferences={'multiStepPredictions': {1: {5.1: 0.0088801263517415546, 5.2: 0.010775254623541418, 5.341999999999999: 0.98034461902471692}}, 'multiStepBucketLikelihoods': {1: {1: 0.0088801263517415546, 2: 0.98034461902471692}}, 'multiStepBestPredictions': {1: 5.341999999999999}, 'anomalyLabel': '[]', 'anomalyScore': 0.40000001}\n",
"\tmetrics=None\n",
"\tpredictedFieldIdx=0\n",
"\tpredictedFieldName=consumption\n",
"\tclassifierInput=ClassifierInput(\tdataRow=5.2\n",
"\tbucketIndex=2\n",
")\n",
")\n"
]
}
],
"source": [
"print result"
]
},
{
"cell_type": "code",
"execution_count": 47,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"anomaly score: 0.4\n"
]
}
],
"source": [
"print \"anomaly score: \", result.inferences[\"anomalyScore\"]"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"__See Subutai's talk for more info on anomaly detection!__\n",
"\n",
"# Built-in OPF Clients\n",
"\n",
"`python examples/opf/bin/OpfRunExperiment.py examples/opf/experiments/multistep/hotgym/`\n",
"\n",
"Outputs `examples/opf/experiments/multistep/hotgym/inference/DefaultTask.TemporalMultiStep.predictionLog.csv`\n",
"\n",
"`python bin/run_swarm.py examples/opf/experiments/multistep/hotgym/permutations.py`\n",
"\n",
"Outputs `examples/opf/experiments/multistep/hotgym/model_0/description.py`"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
""
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2.0
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.10"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
| 62,683 | Python | .py | 2,051 | 25.465627 | 339 | 0.516526 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,732 | tm_overlapping_sequences.py | numenta_nupic-legacy/examples/tm/tm_overlapping_sequences.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
import pprint
import random
import sys
import unittest2 as unittest
from optparse import OptionParser
from nupic.algorithms import fdrutilities as fdrutils
from nupic.algorithms.backtracking_tm import BacktrackingTM
from nupic.algorithms.backtracking_tm_cpp import BacktrackingTMCPP
from nupic.support.unittesthelpers import testcasebase
"""
Overlapping sequences test
===========================
Test learning of sequences with shared (overlapping) subsequences.
Test 1 - Test with fast learning, make sure PAM allows us to train with fewer
repeats of the training data.
Test 2 - Test with slow learning, make sure PAM allows us to train with fewer
repeats of the training data.
Test 3 - Test with slow learning, some overlap in the patterns, and TM
thresholds of 80% of newSynapseCount
Test 4 - Test with "Forbes-like" data. A bunch of sequences of lengths between 2
and 10 elements long.
"""
VERBOSITY = 0 # how chatty the unit tests should be
SEED = 35 # the random seed used throughout
# Whether to only run the short tests.
SHORT = True
# If set to 0 the CPP TM will not be tested
INCLUDE_CPP_TM = 1 # Also test with CPP TM
def printOneTrainingVector(x):
"Print a single vector succinctly."
print ''.join('1' if k != 0 else '.' for k in x)
def printAllTrainingSequences(trainingSequences, upTo = 99999):
for i,trainingSequence in enumerate(trainingSequences):
print "============= Sequence",i,"================="
for j,pattern in enumerate(trainingSequence):
printOneTrainingVector(pattern)
def getSimplePatterns(numOnes, numPatterns, patternOverlap=0):
  """Return a list of very simple patterns.

  Each pattern is a 1-D float32 numpy array with numOnes consecutive bits
  set to 1.  Consecutive patterns overlap by patternOverlap bits, so every
  pattern is (numOnes - patternOverlap) * numPatterns + patternOverlap
  columns wide.

  Parameters:
  -----------------------------------------------------------------------
  numOnes:        Number of bits ON in each pattern
  numPatterns:    Number of unique patterns to generate
  patternOverlap: Number of bits of overlap between each successive pattern;
                  must be smaller than numOnes
  retval:         list of patterns

  Raises:
    ValueError: if patternOverlap >= numOnes.  (Fix: this was a bare
                assert, which disappears under python -O.)
  """
  if patternOverlap >= numOnes:
    raise ValueError("patternOverlap (%d) must be < numOnes (%d)"
                     % (patternOverlap, numOnes))
  # How many new bits are introduced in each successive pattern?
  numNewBitsInEachPattern = numOnes - patternOverlap
  numCols = numNewBitsInEachPattern * numPatterns + patternOverlap
  p = []
  for i in range(numPatterns):
    x = numpy.zeros(numCols, dtype='float32')
    startBit = i * numNewBitsInEachPattern
    x[startBit:startBit + numOnes] = 1
    p.append(x)
  return p
def buildOverlappedSequences( numSequences = 2,
seqLen = 5,
sharedElements = [3,4],
numOnBitsPerPattern = 3,
patternOverlap = 0,
seqOverlap = 0,
**kwargs
):
""" Create training sequences that share some elements in the middle.
Parameters:
-----------------------------------------------------
numSequences: Number of unique training sequences to generate
seqLen: Overall length of each sequence
sharedElements: Which element indices of each sequence are shared. These
will be in the range between 0 and seqLen-1
numOnBitsPerPattern: Number of ON bits in each TM input pattern
patternOverlap: Max number of bits of overlap between any 2 patterns
retval: (numCols, trainingSequences)
numCols - width of the patterns
trainingSequences - a list of training sequences
"""
# Total number of patterns used to build the sequences
numSharedElements = len(sharedElements)
numUniqueElements = seqLen - numSharedElements
numPatterns = numSharedElements + numUniqueElements * numSequences
# Create the table of patterns
patterns = getSimplePatterns(numOnBitsPerPattern, numPatterns, patternOverlap)
# Total number of columns required
numCols = len(patterns[0])
# -----------------------------------------------------------------------
# Create the training sequences
trainingSequences = []
uniquePatternIndices = range(numSharedElements, numPatterns)
for _ in xrange(numSequences):
sequence = []
# pattern indices [0 ... numSharedElements-1] are reserved for the shared
# middle
sharedPatternIndices = range(numSharedElements)
# Build up the sequence
for j in xrange(seqLen):
if j in sharedElements:
patIdx = sharedPatternIndices.pop(0)
else:
patIdx = uniquePatternIndices.pop(0)
sequence.append(patterns[patIdx])
trainingSequences.append(sequence)
if VERBOSITY >= 3:
print "\nTraining sequences"
printAllTrainingSequences(trainingSequences)
return (numCols, trainingSequences)
def buildSequencePool(numSequences = 10,
                      seqLen = [2,3,4],
                      numPatterns = 5,
                      numOnBitsPerPattern = 3,
                      patternOverlap = 0,
                      **kwargs
                      ):
  """ Create a bunch of sequences of various lengths, all built from
  a fixed set of patterns.

  Both the length of each sequence and every element in it are drawn
  independently at random, so patterns may repeat within and across
  sequences.

  Parameters:
  -----------------------------------------------------
  numSequences:        Number of training sequences to generate
  seqLen:              List of possible sequence lengths; each sequence's
                       length is chosen at random from this list
  numPatterns:         How many possible patterns there are to use within
                       sequences
  numOnBitsPerPattern: Number of ON bits in each TM input pattern
  patternOverlap:      Max number of bits of overlap between any 2 patterns
  retval:              (numCols, trainingSequences)
                       numCols - width of the patterns
                       trainingSequences - a list of training sequences
  """
  # Create the table of patterns
  patterns = getSimplePatterns(numOnBitsPerPattern, numPatterns, patternOverlap)
  # Total number of columns required
  numCols = len(patterns[0])
  # -----------------------------------------------------------------------
  # Create the training sequences
  # NOTE(review): uses the module-level RNG; results are reproducible only
  # if the caller seeds `random` beforehand — confirm the harness does so.
  trainingSequences = []
  for _ in xrange(numSequences):
    # Build it up from patterns
    sequence = []
    length = random.choice(seqLen)
    for _ in xrange(length):
      patIdx = random.choice(xrange(numPatterns))
      sequence.append(patterns[patIdx])
    # Put it in
    trainingSequences.append(sequence)
  if VERBOSITY >= 3:
    print "\nTraining sequences"
    printAllTrainingSequences(trainingSequences)
  return (numCols, trainingSequences)
def createTMs(includeCPP = True,
includePy = True,
numCols = 100,
cellsPerCol = 4,
activationThreshold = 3,
minThreshold = 3,
newSynapseCount = 3,
initialPerm = 0.6,
permanenceInc = 0.1,
permanenceDec = 0.0,
globalDecay = 0.0,
pamLength = 0,
checkSynapseConsistency = True,
maxInfBacktrack = 0,
maxLrnBacktrack = 0,
**kwargs
):
"""Create one or more TM instances, placing each into a dict keyed by
name.
Parameters:
------------------------------------------------------------------
retval: tms - dict of TM instances
"""
# Keep these fixed:
connectedPerm = 0.5
tms = dict()
if includeCPP:
if VERBOSITY >= 2:
print "Creating BacktrackingTMCPP instance"
cpp_tm = BacktrackingTMCPP(numberOfCols = numCols, cellsPerColumn = cellsPerCol,
initialPerm = initialPerm, connectedPerm = connectedPerm,
minThreshold = minThreshold, newSynapseCount = newSynapseCount,
permanenceInc = permanenceInc, permanenceDec = permanenceDec,
activationThreshold = activationThreshold,
globalDecay = globalDecay, burnIn = 1,
seed=SEED, verbosity=VERBOSITY,
checkSynapseConsistency = checkSynapseConsistency,
collectStats = True,
pamLength = pamLength,
maxInfBacktrack = maxInfBacktrack,
maxLrnBacktrack = maxLrnBacktrack,
)
# Ensure we are copying over learning states for TMDiff
cpp_tm.retrieveLearningStates = True
tms['CPP'] = cpp_tm
if includePy:
if VERBOSITY >= 2:
print "Creating PY TM instance"
py_tm = BacktrackingTM(numberOfCols = numCols,
cellsPerColumn = cellsPerCol,
initialPerm = initialPerm,
connectedPerm = connectedPerm,
minThreshold = minThreshold,
newSynapseCount = newSynapseCount,
permanenceInc = permanenceInc,
permanenceDec = permanenceDec,
activationThreshold = activationThreshold,
globalDecay = globalDecay, burnIn = 1,
seed=SEED, verbosity=VERBOSITY,
collectStats = True,
pamLength = pamLength,
maxInfBacktrack = maxInfBacktrack,
maxLrnBacktrack = maxLrnBacktrack,
)
tms['PY '] = py_tm
return tms
def assertNoTMDiffs(tms):
  """
  Check for diffs among the TM instances in the passed in tms dict and
  raise an assert if any are detected

  Parameters:
  ---------------------------------------------------------------------
  tms:                  dict of TM instances

  Raises:
    NotImplementedError: if more than 2 TM instances are passed in.
                         (Fix: the original raised a string literal, which
                         is itself a TypeError on Python >= 2.6 — string
                         exceptions were removed.)
  """
  if len(tms) == 1:
    return
  if len(tms) > 2:
    raise NotImplementedError("Not implemented for more than 2 TMs")
  same = fdrutils.tmDiff2(tms.values(), verbosity=VERBOSITY)
  assert same
  return
def evalSequences(tms,
trainingSequences,
testSequences = None,
nTrainRepetitions = 1,
doResets = True,
**kwargs):
"""Train the TMs on the entire training set for nTrainRepetitions in a row.
Then run the test set through inference once and return the inference stats.
Parameters:
---------------------------------------------------------------------
tms: dict of TM instances
trainingSequences: list of training sequences. Each sequence is a list
of TM input patterns
testSequences: list of test sequences. If None, we will test against
the trainingSequences
nTrainRepetitions: Number of times to run the training set through the TM
doResets: If true, send a reset to the TM between each sequence
"""
# If no test sequence is specified, use the first training sequence
if testSequences == None:
testSequences = trainingSequences
# First TM instance is used by default for verbose printing of input values,
# etc.
firstTM = tms.values()[0]
assertNoTMDiffs(tms)
# =====================================================================
# Loop through the training set nTrainRepetitions times
# ==========================================================================
for trainingNum in xrange(nTrainRepetitions):
if VERBOSITY >= 2:
print "\n##############################################################"
print "################# Training round #%d of %d #################" \
% (trainingNum, nTrainRepetitions)
for (name,tm) in tms.iteritems():
print "TM parameters for %s: " % (name)
print "---------------------"
tm.printParameters()
print
# ======================================================================
# Loop through the sequences in the training set
numSequences = len(testSequences)
for sequenceNum, trainingSequence in enumerate(trainingSequences):
numTimeSteps = len(trainingSequence)
if VERBOSITY >= 2:
print "\n================= Sequence #%d of %d ================" \
% (sequenceNum, numSequences)
if doResets:
for tm in tms.itervalues():
tm.reset()
# --------------------------------------------------------------------
# Train each element of the sequence
for t, x in enumerate(trainingSequence):
# Print Verbose info about this element
if VERBOSITY >= 2:
print
if VERBOSITY >= 3:
print "------------------------------------------------------------"
print "--------- sequence: #%d of %d, timeStep: #%d of %d -----------" \
% (sequenceNum, numSequences, t, numTimeSteps)
firstTM.printInput(x)
print "input nzs:", x.nonzero()
# Train in this element
x = numpy.array(x).astype('float32')
for tm in tms.itervalues():
tm.learn(x, enableInference=True)
# Print the input and output states
if VERBOSITY >= 3:
for (name,tm) in tms.iteritems():
print "I/O states of %s TM:" % (name)
print "-------------------------------------",
tm.printStates(printPrevious = (VERBOSITY >= 5))
print
assertNoTMDiffs(tms)
# Print out number of columns that weren't predicted
if VERBOSITY >= 2:
for (name,tm) in tms.iteritems():
stats = tm.getStats()
print "# of unpredicted columns for %s TM: %d of %d" \
% (name, stats['curMissing'], x.sum())
numBurstingCols = tm.infActiveState['t'].min(axis=1).sum()
print "# of bursting columns for %s TM: %d of %d" \
% (name, numBurstingCols, x.sum())
# Print the trained cells
if VERBOSITY >= 4:
print "Sequence %d finished." % (sequenceNum)
for (name,tm) in tms.iteritems():
print "All cells of %s TM:" % (name)
print "-------------------------------------",
tm.printCells()
print
# --------------------------------------------------------------------
# Done training all sequences in this round, print the total number of
# missing, extra columns and make sure it's the same among the TMs
if VERBOSITY >= 2:
print
prevResult = None
for (name,tm) in tms.iteritems():
stats = tm.getStats()
if VERBOSITY >= 1:
print "Stats for %s TM over all sequences for training round #%d of %d:" \
% (name, trainingNum, nTrainRepetitions)
print " total missing:", stats['totalMissing']
print " total extra:", stats['totalExtra']
if prevResult is None:
prevResult = (stats['totalMissing'], stats['totalExtra'])
else:
assert (stats['totalMissing'] == prevResult[0])
assert (stats['totalExtra'] == prevResult[1])
tm.resetStats()
# =====================================================================
# Finish up learning
if VERBOSITY >= 3:
print "Calling trim segments"
prevResult = None
for tm in tms.itervalues():
nSegsRemoved, nSynsRemoved = tm.trimSegments()
if prevResult is None:
prevResult = (nSegsRemoved, nSynsRemoved)
else:
assert (nSegsRemoved == prevResult[0])
assert (nSynsRemoved == prevResult[1])
assertNoTMDiffs(tms)
if VERBOSITY >= 4:
print "Training completed. Complete state:"
for (name,tm) in tms.iteritems():
print "%s:" % (name)
tm.printCells()
print
# ==========================================================================
# Infer
# ==========================================================================
if VERBOSITY >= 2:
print "\n##############################################################"
print "########################## Inference #########################"
# Reset stats in all TMs
for tm in tms.itervalues():
tm.resetStats()
# -------------------------------------------------------------------
# Loop through the test sequences
numSequences = len(testSequences)
for sequenceNum, testSequence in enumerate(testSequences):
numTimeSteps = len(testSequence)
# Identify this sequence
if VERBOSITY >= 2:
print "\n================= Sequence %d of %d ================" \
% (sequenceNum, numSequences)
# Send in the rest
if doResets:
for tm in tms.itervalues():
tm.reset()
# -------------------------------------------------------------------
# Loop through the elements of this sequence
for t,x in enumerate(testSequence):
# Print verbose info about this element
if VERBOSITY >= 2:
print
if VERBOSITY >= 3:
print "------------------------------------------------------------"
print "--------- sequence: #%d of %d, timeStep: #%d of %d -----------" \
% (sequenceNum, numSequences, t, numTimeSteps)
firstTM.printInput(x)
print "input nzs:", x.nonzero()
# Infer on this element
for tm in tms.itervalues():
tm.infer(x)
assertNoTMDiffs(tms)
# Print out number of columns that weren't predicted
if VERBOSITY >= 2:
for (name,tm) in tms.iteritems():
stats = tm.getStats()
print "# of unpredicted columns for %s TM: %d of %d" \
% (name, stats['curMissing'], x.sum())
# Debug print of internal state
if VERBOSITY >= 3:
for (name,tm) in tms.iteritems():
print "I/O states of %s TM:" % (name)
print "-------------------------------------",
tm.printStates(printPrevious = (VERBOSITY >= 5),
printLearnState = False)
print
# Done with this sequence
# Debug print of all stats of the TMs
if VERBOSITY >= 4:
print
for (name,tm) in tms.iteritems():
print "Interim internal stats for %s TM:" % (name)
print "---------------------------------"
pprint.pprint(tm.getStats())
print
if VERBOSITY >= 2:
print "\n##############################################################"
print "####################### Inference Done #######################"
# Get the overall stats for each TM and return them
tmStats = dict()
for (name,tm) in tms.iteritems():
tmStats[name] = stats = tm.getStats()
if VERBOSITY >= 2:
print "Stats for %s TM over all sequences:" % (name)
print " total missing:", stats['totalMissing']
print " total extra:", stats['totalExtra']
for (name,tm) in tms.iteritems():
if VERBOSITY >= 3:
print "\nAll internal stats for %s TM:" % (name)
print "-------------------------------------",
pprint.pprint(tmStats[name])
print
return tmStats
def testConfig(baseParams, expMissingMin=0, expMissingMax=0, **mods):
"""
Build up a set of sequences, create the TM(s), train them, test them,
and check that we got the expected number of missing predictions during
inference.
Parameters:
-----------------------------------------------------------------------
baseParams: dict of all of the parameters for building sequences,
creating the TMs, and training and testing them. This
gets updated from 'mods' before we use it.
expMissingMin: Minimum number of expected missing predictions during testing.
expMissingMax: Maximum number of expected missing predictions during testing.
mods: dict of modifications to make to the baseParams.
"""
# Update the base with the modifications
params = dict(baseParams)
params.update(mods)
# --------------------------------------------------------------------
# Create the sequences
func = params['seqFunction']
(numCols, trainingSequences) = func(**params)
# --------------------------------------------------------------------
# Create the TMs
if params['numCols'] is None:
params['numCols'] = numCols
tms = createTMs(**params)
# --------------------------------------------------------------------
# Train and get test results
tmStats = evalSequences(tms= tms,
trainingSequences=trainingSequences,
testSequences=None,
**params)
# -----------------------------------------------------------------------
# Make sure there are the expected number of missing predictions
for (name, stats) in tmStats.iteritems():
print "Detected %d missing predictions overall during inference" \
% (stats['totalMissing'])
if expMissingMin is not None and stats['totalMissing'] < expMissingMin:
print "FAILURE: Expected at least %d total missing but got %d" \
% (expMissingMin, stats['totalMissing'])
assert False
if expMissingMax is not None and stats['totalMissing'] > expMissingMax:
print "FAILURE: Expected at most %d total missing but got %d" \
% (expMissingMax, stats['totalMissing'])
assert False
return True
class TMOverlappingSeqsTest(testcasebase.TestCaseBase):
  def testFastLearning(self):
    """
    Test with fast learning, make sure PAM allows us to train with fewer
    repeats of the training data.

    Runs testConfig twice on the same overlapped-sequence fixture: without
    PAM (pamLength=1) 3 repetitions must leave at least 20 missing
    predictions; with PAM (pamLength=5) the same 3 repetitions must yield
    zero missing predictions.
    """
    numOnBitsPerPattern = 3
    # ================================================================
    # Base params shared by both runs; individual runs override pamLength
    # and nTrainRepetitions via testConfig's **mods.
    baseParams = dict(
      # Sequence generation
      seqFunction = buildOverlappedSequences,
      numSequences = 2,
      seqLen = 10,
      sharedElements = [2,3],
      numOnBitsPerPattern = numOnBitsPerPattern,
      # TM construction
      includeCPP = INCLUDE_CPP_TM,
      numCols = None,             # filled in based on generated sequences
      activationThreshold = numOnBitsPerPattern,
      minThreshold = numOnBitsPerPattern,
      newSynapseCount = numOnBitsPerPattern,
      initialPerm = 0.6,          # high initialPerm = "fast learning"
      permanenceInc = 0.1,
      permanenceDec = 0.0,
      globalDecay = 0.0,
      pamLength = 0,
      # Training/testing
      nTrainRepetitions = 8,
      doResets = True,
      )
    # ================================================================
    # Run various configs
    # No PAM, with 3 repetitions, still missing predictions
    print "\nRunning without PAM, 3 repetitions of the training data..."
    self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=20,
                               expMissingMax=None, pamLength=1,
                               nTrainRepetitions=3))
    # With PAM, with only 3 repetitions, 0 missing predictions
    print "\nRunning with PAM, 3 repetitions of the training data..."
    self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=0,
                               expMissingMax=0, pamLength=5,
                               nTrainRepetitions=3))
  def testSlowLearning(self):
    """
    Test with slow learning, make sure PAM allows us to train with fewer
    repeats of the training data.

    Identical fixture to testFastLearning except initialPerm is 0.11
    (synapses start unconnected, so learning is slow).  Without PAM
    (pamLength=1), 10 repetitions must leave at least 10 missing
    predictions; with PAM (pamLength=6), the same 10 repetitions must
    yield zero missing predictions.
    """
    numOnBitsPerPattern = 3
    # ================================================================
    # Base params shared by both runs
    baseParams = dict(
      # Sequence generation
      seqFunction = buildOverlappedSequences,
      numSequences = 2,
      seqLen = 10,
      sharedElements = [2,3],
      numOnBitsPerPattern = numOnBitsPerPattern,
      # TM construction
      includeCPP = INCLUDE_CPP_TM,
      numCols = None,             # filled in based on generated sequences
      activationThreshold = numOnBitsPerPattern,
      minThreshold = numOnBitsPerPattern,
      newSynapseCount = numOnBitsPerPattern,
      initialPerm = 0.11,         # below connectedPerm = "slow learning"
      permanenceInc = 0.1,
      permanenceDec = 0.0,
      globalDecay = 0.0,
      pamLength = 0,
      # Training/testing
      nTrainRepetitions = 8,
      doResets = True,
      )
    # ================================================================
    # Run various configs
    # No PAM, requires 40 repetitions
    # No PAM, with 10 repetitions, still missing predictions
    print "\nRunning without PAM, 10 repetitions of the training data..."
    self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=10,
                               expMissingMax=None, pamLength=1,
                               nTrainRepetitions=10))
    # With PAM, with only 10 repetitions, 0 missing predictions
    print "\nRunning with PAM, 10 repetitions of the training data..."
    self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=0,
                               expMissingMax=0, pamLength=6,
                               nTrainRepetitions=10))
  def testSlowLearningWithOverlap(self):
    """
    Test with slow learning, some overlap in the patterns, and TM thresholds
    of 80% of newSynapseCount

    Make sure PAM allows us to train with fewer repeats of the training data.

    Skipped unless run with --long (see SHORT module flag).
    """
    # Cannot use skipIf decorator because it reads SHORT before it is set.
    if SHORT:
      self.skipTest("Test skipped by default. Enable with --long.")
    numOnBitsPerPattern = 5
    # ================================================================
    # Base params: like testSlowLearning but with wider patterns that
    # overlap by 2 bits, and activation/min thresholds relaxed to 80%
    # of newSynapseCount.
    baseParams = dict(
      # Sequence generation
      seqFunction = buildOverlappedSequences,
      numSequences = 2,
      seqLen = 10,
      sharedElements = [2,3],
      numOnBitsPerPattern = numOnBitsPerPattern,
      patternOverlap = 2,
      # TM construction
      includeCPP = INCLUDE_CPP_TM,
      numCols = None,             # filled in based on generated sequences
      activationThreshold = int(0.8 * numOnBitsPerPattern),
      minThreshold = int(0.8 * numOnBitsPerPattern),
      newSynapseCount = numOnBitsPerPattern,
      initialPerm = 0.11,         # below connectedPerm = "slow learning"
      permanenceInc = 0.1,
      permanenceDec = 0.0,
      globalDecay = 0.0,
      pamLength = 0,
      # Training/testing
      nTrainRepetitions = 8,
      doResets = True,
      )
    # ================================================================
    # Run various configs
    # No PAM, with 10 repetitions, still missing predictions
    print "\nRunning without PAM, 10 repetitions of the training data..."
    self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=10,
                               expMissingMax=None, pamLength=1,
                               nTrainRepetitions=10))
    # With PAM, with only 10 repetitions, 0 missing predictions
    print "\nRunning with PAM, 10 repetitions of the training data..."
    self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=0,
                               expMissingMax=0, pamLength=6,
                               nTrainRepetitions=10))
  def testForbesLikeData(self):
    """
    Test with "Forbes-like" data. A bunch of sequences of lengths between 2
    and 10 elements long.

    We will test with both fast and slow learning.
    Make sure PAM allows us to train with fewer repeats of the training data.
    """
    # Cannot use skipIf decorator because it reads SHORT before it is set.
    if SHORT:
      self.skipTest("Test skipped by default. Enable with --long.")
    numOnBitsPerPattern = 3
    # ================================================================
    # Base params
    # Shared keyword arguments for testConfig(); individual runs below
    # override initialPerm, pamLength and nTrainRepetitions.
    baseParams = dict(
      # Sequence generation
      seqFunction = buildSequencePool,
      numSequences = 20,
      seqLen = [3,10],
      numPatterns = 10,
      numOnBitsPerPattern = numOnBitsPerPattern,
      patternOverlap = 1,
      # TM construction
      includeCPP = INCLUDE_CPP_TM,
      numCols = None,              # filled in based on generated sequences
      activationThreshold = int(0.8 * numOnBitsPerPattern),
      minThreshold = int(0.8 * numOnBitsPerPattern),
      newSynapseCount = numOnBitsPerPattern,
      initialPerm = 0.51,          # "fast learning": starts above connected perm
      permanenceInc = 0.1,
      permanenceDec = 0.0,
      globalDecay = 0.0,
      pamLength = 0,
      checkSynapseConsistency = False,
      # Training/testing
      nTrainRepetitions = 8,
      doResets = True,
      )
    # ================================================================
    # Run various configs
    # Fast mode, no PAM
    # Fast mode, with PAM
    print "\nRunning without PAM, fast learning, 2 repetitions of the " \
          "training data..."
    self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=50,
                               expMissingMax=None, pamLength=1,
                               nTrainRepetitions=2))
    # Fast mode, with PAM
    print "\nRunning with PAM, fast learning, 2 repetitions of the " \
          "training data..."
    self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=0,
                               expMissingMax=0, pamLength=5,
                               nTrainRepetitions=2))
    # Slow mode, no PAM
    print "\nRunning without PAM, slow learning, 8 repetitions of the " \
          "training data..."
    self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=1,
                               expMissingMax=None, initialPerm=0.31,
                               pamLength=1, nTrainRepetitions=8))
    # Slow mode, with PAM
    print "\nRunning with PAM, slow learning, 8 repetitions of the " \
          "training data..."
    self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=0,
                               expMissingMax=0, initialPerm=0.31, pamLength=5,
                               nTrainRepetitions=8))
if __name__=="__main__":
  # Process command line arguments
  parser = OptionParser()
  parser.add_option(
    "--verbosity", default=VERBOSITY, type="int",
    help="Verbosity level, either 0, 1, 2, or 3 [default: %default].")
  parser.add_option("--seed", default=SEED, type="int",
                    help="Random seed to use [default: %default].")
  parser.add_option("--short", action="store_true", default=True,
                    help="Run short version of the tests [default: %default].")
  parser.add_option("--long", action="store_true", default=False,
                    help="Run long version of the tests [default: %default].")
  (options, args) = parser.parse_args()
  # Propagate parsed options into the module-level globals the tests read.
  # Note the long tests only run when --long is given; --short is the default.
  SEED = options.seed
  VERBOSITY = options.verbosity
  SHORT = not options.long
  # Seed the random number generators
  rgen = numpy.random.RandomState(SEED)
  random.seed(SEED)
  if not INCLUDE_CPP_TM:
    print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    print "!! WARNING: C++ TM testing is DISABLED until it can be updated."
    print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
  # Form the command line for the unit test framework.
  # Leftover positional args are forwarded to unittest.main().
  args = [sys.argv[0]] + args
  unittest.main(argv=args, verbosity=VERBOSITY)
| 32,320 | Python | .py | 728 | 35.427198 | 94 | 0.563704 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,733 | tm_high_order.py | numenta_nupic-legacy/examples/tm/tm_high_order.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A simple tutorial that shows some features of the Temporal Memory.
The following program has the purpose of presenting some
basic properties of the Temporal Memory, in particular when it comes
to how it handles high-order sequences.
"""
import numpy as np
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from nupic.bindings.algorithms import TemporalMemory as TM
def accuracy(current, predicted):
  """
  Computes the accuracy of the TM at time-step t based on the prediction
  at time-step t-1 and the current active columns at time-step t.

  The score is the fraction of predicted columns that actually became active.
  When nothing was predicted (e.g. right after a reset) the accuracy is 0.

  @param current (array) binary vector containing current active columns
  @param predicted (array) binary vector containing predicted active columns
  @return (float) prediction accuracy of the TM at time-step t
  """
  # Guard clause avoids a division by zero when no columns were predicted.
  numPredicted = np.count_nonzero(predicted)
  if numPredicted == 0:
    return 0
  # Dot product of two binary vectors counts the overlapping (correctly
  # predicted) columns. Local renamed from `accuracy`, which shadowed this
  # function's own name.
  return float(np.dot(current, predicted)) / float(numPredicted)
def corruptVector(v1, noiseLevel, numActiveCols):
  """
  Corrupts a copy of a binary vector by inverting noiseLevel percent of its bits.

  @param v1 (array) binary vector whose copy will be corrupted
  @param noiseLevel (float) amount of noise to be applied on the new vector
  @param numActiveCols (int) number of sparse columns that represent an input
  @return v2 (array) corrupted binary vector (the input is left untouched)
  """
  size = len(v1)
  # Vectorized copy instead of the element-wise Python loop; semantics are
  # identical (an independent uint32 copy of v1).
  v2 = np.array(v1, dtype="uint32")
  bitsToSwap = int(noiseLevel * numActiveCols)
  # NOTE: indices may repeat, so flipping the same bit twice cancels out and
  # the effective noise can be slightly below the requested level (this
  # matches the original behavior).
  for _ in range(bitsToSwap):
    i = random.randrange(size)
    v2[i] = 1 - v2[i]  # toggle the bit (values are always 0 or 1)
  return v2
def showPredictions():
  """
  Shows predictions of the TM when presented with the characters A, B, C, D, X, and
  Y without any contextual information, that is, not embedded within a sequence.

  Reads the module-level globals `tm` (the TemporalMemory) and `seqT` (one SDR
  row per character). Runs with learn=False so the TM state is not modified,
  and resets before each character so no sequence context carries over.
  """
  for k in range(6):
    tm.reset()
    print "--- " + "ABCDXY"[k] + " ---"
    # Present the k-th character's SDR, then read out which columns are
    # active now and which are predicted for the next time step.
    tm.compute(set(seqT[k][:].nonzero()[0].tolist()), learn=False)
    activeColumnsIndices = [tm.columnForCell(i) for i in tm.getActiveCells()]
    predictedColumnIndices = [tm.columnForCell(i) for i in tm.getPredictiveCells()]
    currentColumns = [1 if i in activeColumnsIndices else 0 for i in range(tm.numberOfColumns())]
    predictedColumns = [1 if i in predictedColumnIndices else 0 for i in range(tm.numberOfColumns())]
    print("Active cols: " + str(np.nonzero(currentColumns)[0]))
    print("Predicted cols: " + str(np.nonzero(predictedColumns)[0]))
    print ""
def trainTM(sequence, timeSteps, noiseLevel):
  """
  Trains the TM with given sequence for a given number of time steps and level of input
  corruption.

  Appends one (timestep, accuracy) pair per presented character to the
  module-level lists `x` and `y` for later plotting.

  @param sequence (array) array whose rows are the input characters
  @param timeSteps (int) number of time steps in which the TM will be presented with sequence
  @param noiseLevel (float) amount of noise to be applied on the characters in the sequence
  """
  numCols = tm.numberOfColumns()
  currentColumns = np.zeros(numCols, dtype="uint32")
  predictedColumns = np.zeros(numCols, dtype="uint32")
  step = 0
  for _ in range(timeSteps):
    tm.reset()
    for charIdx in range(4):
      # Present a (possibly corrupted) copy of the character and learn.
      noisyInput = corruptVector(sequence[charIdx][:], noiseLevel, sparseCols)
      tm.compute(set(noisyInput[:].nonzero()[0].tolist()), learn=True)
      activeIdx = [tm.columnForCell(cell) for cell in tm.getActiveCells()]
      predictedIdx = [tm.columnForCell(cell) for cell in tm.getPredictiveCells()]
      currentColumns = [1 if col in activeIdx else 0 for col in range(numCols)]
      # Score this step's activity against the previous step's prediction.
      x.append(step)
      y.append(accuracy(currentColumns, predictedColumns))
      step += 1
      # Remember this step's prediction for scoring the next step.
      predictedColumns = [1 if col in predictedIdx else 0 for col in range(numCols)]
# ---------------------------------------------------------------------------
# Global setup: one TemporalMemory instance plus hand-built SDRs for the six
# tutorial characters. All following "Parts" of the tutorial reuse these.
# ---------------------------------------------------------------------------
uintType = "uint32"
random.seed(1)
tm = TM(columnDimensions = (2048,),
        cellsPerColumn=8,
        initialPermanence=0.21,
        connectedPermanence=0.3,
        minThreshold=15,
        maxNewSynapseCount=40,
        permanenceIncrement=0.1,
        permanenceDecrement=0.1,
        activationThreshold=15,
        predictedSegmentDecrement=0.01,
        )
sparsity = 0.02
sparseCols = int(tm.numberOfColumns() * sparsity)
# We will create a sparse representation of characters A, B, C, D, X, and Y.
# In this particular example we manually construct them, but usually you would
# use the spatial pooler to build these.
# Each character occupies its own disjoint run of `sparseCols` columns.
seq1 = np.zeros((4, tm.numberOfColumns()), dtype="uint32")
seq1[0, 0:sparseCols] = 1  # Input SDR representing "A"
seq1[1, sparseCols:2*sparseCols] = 1  # Input SDR representing "B"
seq1[2, 2*sparseCols:3*sparseCols] = 1  # Input SDR representing "C"
seq1[3, 3*sparseCols:4*sparseCols] = 1  # Input SDR representing "D"
seq2 = np.zeros((4, tm.numberOfColumns()), dtype="uint32")
seq2[0, 4*sparseCols:5*sparseCols] = 1  # Input SDR representing "X"
seq2[1, sparseCols:2*sparseCols] = 1  # Input SDR representing "B"
seq2[2, 2*sparseCols:3*sparseCols] = 1  # Input SDR representing "C"
seq2[3, 5*sparseCols:6*sparseCols] = 1  # Input SDR representing "Y"
# seqT holds all six characters individually, used by showPredictions().
seqT = np.zeros((6, tm.numberOfColumns()), dtype="uint32")
seqT[0, 0:sparseCols] = 1  # Input SDR representing "A"
seqT[1, sparseCols:2*sparseCols] = 1  # Input SDR representing "B"
seqT[2, 2*sparseCols:3*sparseCols] = 1  # Input SDR representing "C"
seqT[3, 3*sparseCols:4*sparseCols] = 1  # Input SDR representing "D"
seqT[4, 4*sparseCols:5*sparseCols] = 1  # Input SDR representing "X"
seqT[5, 5*sparseCols:6*sparseCols] = 1  # Input SDR representing "Y"
# PART 1. Feed the TM with sequence "ABCD". The TM will eventually learn
# the pattern and its prediction accuracy will go to 1.0 (except in-between sequences
# where the TM doesn't output any prediction)
print ""
print "-"*50
print "Part 1. We present the sequence ABCD to the TM. The TM will eventually"
print "will learn the sequence and predict the upcoming characters. This can be"
print "measured by the prediction accuracy in Fig 1."
print "N.B. In-between sequences the accuracy is 0.0 as the TM does not output"
print "any prediction."
print "-"*50
print ""
# x/y accumulate (timestep, accuracy) pairs; trainTM() appends to them.
x = []
y = []
trainTM(seq1, timeSteps=10, noiseLevel=0.0)
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 1: TM learns sequence ABCD")
plt.savefig("figure_1")
plt.close()
print ""
print "-"*50
print "Once the TM has learned the sequence ABCD, we will present the individual"
print "characters to the TM to know its prediction. The TM outputs the columns"
print "that become active upon the presentation of a particular character as well"
print "as the columns predicted in the next time step. Here, you should see that"
print "A predicts B, B predicts C, C predicts D, and D does not output any"
print "prediction."
print "N.B. Here, we are presenting individual characters, that is, a character"
print "deprived of context in a sequence. There is no prediction for characters"
print "X and Y as we have not presented them to the TM in any sequence."
print "-"*50
print ""
showPredictions()
print ""
print "-"*50
print "Part 2. We now present the sequence XBCY to the TM. As expected, the accuracy will"
print "drop until the TM learns the new sequence (Fig 2). What will be the prediction of"
print "the TM if presented with the sequence BC? This would depend on what character"
print "anteceding B. This is an important feature of high-order sequences."
print "-"*50
print ""
x = []
y = []
trainTM(seq2, timeSteps=10, noiseLevel=0.0)
# In this figure you can see how the TM starts making good predictions for particular
# characters (spikes in the plot). Then, it will get half of its predictions right, which
# correspond to the times in which is presented with character C. After some time, it
# will learn correctly the sequence XBCY, and predict its characters accordingly.
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 2: TM learns new sequence XBCY")
plt.savefig("figure_2")
plt.close()
print ""
print "-"*50
print "We will present again each of the characters individually to the TM, that is,"
print "not within any of the two sequences. When presented with character A the TM"
print "predicts B, B predicts C, but this time C outputs a simultaneous prediction of"
print "both D and Y. In order to disambiguate, the TM would require to know if the"
print "preceding characters were AB or XB. When presented with character X the TM"
print "predicts B, whereas Y and D yield no prediction."
print "-"*50
print ""
showPredictions()
# PART 3. Now we will present noisy inputs to the TM. We will add noise to the sequence XBCY
# by corrupting 30% of its bits. We would like to see how the TM responds in the presence of
# noise and how it recovers from it.
print ""
print "-"*50
print "Part 3. We will add noise to the sequence XBCY by corrupting 30% of the bits in the vectors"
print "encoding each character. We would expect to see a decrease in prediction accuracy as the"
print "TM is unable to learn the random noise in the input (Fig 3). However, this decrease is not"
print "significant."
print "-"*50
print ""
# Train on XBCY with 30% of each character's active bits corrupted.
x = []
y = []
trainTM(seq2, timeSteps=50, noiseLevel=0.3)
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 3: Accuracy in TM with 30% noise in input")
plt.savefig("figure_3")
plt.close()
print ""
print "-"*50
print "Let's have a look again at the output of the TM when presented with noisy"
print "input (30%). Here, the noise is low that the TM is not affected by it,"
print "which would be the case if we saw 'noisy' columns being predicted when"
print "presented with individual characters. Thus, we could say that the TM exhibits"
print "resilience to noise in its input."
print "-"*50
print ""
showPredictions()
# Let's corrupt the sequence more by adding 50% of noise to each of its characters.
# Here, we would expect to see some 'noisy' columns being predicted when the TM is
# presented with the individual characters.
print ""
print "-"*50
print "Now, we will set noise to be 50% of the bits in the characters X, B, C, and Y."
print "As expected, the accuracy will decrease (Fig 5) and 'noisy' columns will be"
print "predicted by the TM."
print "-"*50
print ""
x = []
y = []
trainTM(seq2, timeSteps=50, noiseLevel=0.5)
print ""
print "-"*50
print "Let's have a look again at the output of the TM when presented with noisy"
print "input. The prediction of some characters (eg. X) now includes columns that"
print "are not related to any other character. This is because the TM tried to learn"
print "the noise in the input patterns."
print "-"*50
print ""
showPredictions()
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 4: Accuracy in TM with 50% noise in input")
plt.savefig("figure_4")
plt.close()
# Will the TM be able to forget the 'noisy' columns learned in the previous step?
# We will present the TM with the original sequence XBCY so it forgets the 'noisy'.
# columns.
x = []
y = []
trainTM(seq2, timeSteps=10, noiseLevel=0.0)
print ""
print "-"*50
print "After presenting the original sequence XBCY to the TM, we would expect to see"
print "the predicted noisy columns from the previous step disappear. We will verify that"
print "by presenting the individual characters to the TM."
print "-"*50
print ""
showPredictions()
# We can see how the prediction accuracy goes back to 1.0 (as before, not in-between sequences)
# when the TM 'forgets' the noisy columns.
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 5: TM forgets noise in sequence XBCY when noise is over")
plt.savefig("figure_5")
plt.close()
# Let's corrupt the sequence even more and add 90% of noise to each of its characters.
# Here, we would expect to see even more of a decrease in accuracy along with more 'noisy'
# columns being predicted.
print ""
print "-"*50
print "We will add more noise to the characters in the sequence XBCY. This time we will"
print "corrupt 90% of its contents. As expected, the accuracy will decrease (Fig 6) and"
print "'noisy' columns will be predicted by the TM."
print "-"*50
print ""
# Train on XBCY with 90% of each character's active bits corrupted.
x = []
y = []
trainTM(seq2, timeSteps=50, noiseLevel=0.9)
print ""
print "-"*50
print "Next, we will have a look at the output of the TM when presented with the"
print "individual characters of the sequence. As before, we see 'noisy' predicted"
print "columns emerging as a result of the TM trying to learn the noise."
print "-"*50
print ""
showPredictions()
# In this figure we can observe how the prediction accuracy is affected by the presence
# of noise in the input. However, the accuracy does not drop dramatically even with 90%
# of noise which implies that the TM exhibits some resilience to noise in its input
# which means that it does not forget easily a well-learned, real pattern.
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 6: Accuracy with 90% noise in input")
plt.savefig("figure_6")
plt.close()
# Let's present the original sequence to the TM in order to make it forget the noisy columns.
# After this, the TM will predict accurately the sequence again, and its predictions will
# not include 'noisy' columns anymore.
x = []
y = []
trainTM(seq2, timeSteps=25, noiseLevel=0.0)
# We will observe how the prediction accuracy gets back to 1.0 (not in-between sequences)
# as the TM is presented with the original sequence.
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 7: When noise is suspended, accuracy is restored")
plt.savefig("figure_7")
plt.close()
# The TM restores its prediction accuracy and it can be seen when presented with the individual characters.
# There's no noisy columns being predicted.
print ""
print "-"*50
print "After presenting noisy input to the TM, we present the original sequence in"
print "order to make it re-learn XBCY. We verify that this was achieved by presenting"
print "the TM with the individual characters and observing its output. Again, we can"
print "see that the 'noisy' columns are not being predicted anymore, and that the"
print "prediction accuracy goes back to 1.0 when the sequence is presented (Fig 7)."
print "-"*50
print ""
showPredictions()
# PART 4. Now, we will present both sequences ABCD and XBCY randomly to the TM.
# For this purpose we will start with a new TM.
# What would be the output of the TM when presented with character D if it has
# been exposed to sequences ABCD and XBCY occurring randomly one after the other?
# If one quarter of the time the TM sees the sequence ABCDABCD, another quarter the
# TM sees ABCDXBCY, another quarter it sees XBCYXBCY, and the last quarter it saw
# XBCYABCD, then the TM would exhibit simultaneous predictions for characters D, Y
# and C.
print ""
print "-"*50
print "Part 4. We will present both sequences ABCD and XBCY randomly to the TM."
print "Here, we might observe simultaneous predictions occurring when the TM is"
print "presented with characters D, Y, and C. For this purpose we will use a"
print "blank TM"
print "NB. Here we will not reset the TM after presenting each sequence with the"
print "purpose of making the TM learn different predictions for D and Y."
print "-"*50
print ""
# Fresh TM with the same parameters as the one created at the top of the file.
tm = TM(columnDimensions = (2048,),
        cellsPerColumn=8,
        initialPermanence=0.21,
        connectedPermanence=0.3,
        minThreshold=15,
        maxNewSynapseCount=40,
        permanenceIncrement=0.1,
        permanenceDecrement=0.1,
        activationThreshold=15,
        predictedSegmentDecrement=0.01,
        )
# Interleave the two sequences at random; note there is deliberately no
# tm.reset() between sequences (see the printed NB above).
for t in range(75):
  rnd = random.randrange(2)
  for k in range(4):
    if rnd == 0:
      tm.compute(set(seq1[k][:].nonzero()[0].tolist()), learn=True)
    else:
      tm.compute(set(seq2[k][:].nonzero()[0].tolist()), learn=True)
print ""
print "-"*50
print "We now have a look at the output of the TM when presented with the individual"
print "characters A, B, C, D, X, and Y. We might observe simultaneous predictions when"
print "presented with character D (predicting A and X), character Y (predicting A and X),"
print "and when presented with character C (predicting D and Y)."
print "N.B. Due to the stochasticity of this script, we might not observe simultaneous"
print "predictions in *all* the aforementioned characters."
print "-"*50
print ""
showPredictions()
print ""
print "-*"*25
print "Scroll up to see the development of this simple"
print "tutorial. Also open the source file to see more"
print "comments regarding each part of the script."
print "All images generated by this script will be saved"
print "in your current working directory."
print "-*"*25
print ""
| 17,726 | Python | .py | 407 | 41.638821 | 107 | 0.743664 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,734 | tm_segment_learning.py | numenta_nupic-legacy/examples/tm/tm_segment_learning.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Segment Learning Tests
======================
Multi-attribute sequence tests.
SL1) Train the TM repeatedly using a single sequence plus noise. The sequence
can be relatively short, say 5 patterns. Add random noise each time a pattern is
presented. The noise should be different for each presentation and can be equal
to the number of on bits in the pattern.
Simplified patterns will be used, where each pattern consists of consecutive
bits and no two patterns share columns. The patterns that belong to the sequence
will be in the left half of the input vector. The noise bits will be in the
right half of the input vector.
After several iterations of each sequence, the TM should should achieve perfect
inference on the true sequence. There should be resets between each presentation
of the sequence. Check predictions in the sequence part only (it's ok to predict
random bits in the right half of the column space), and test with clean
sequences.
SL2) As above but train with 3 different inter-leaved sequences.
SL3) Vary percentage of bits that are signal vs noise.
SL4) Noise can be a fixed alphabet instead of being randomly generated.
SL5) Have two independent sequences, one in the left half, and one in the
right half. Both should be learned well.
"""
import numpy
import unittest2 as unittest
from nupic.algorithms import fdrutilities as fdrutils
from nupic.algorithms.backtracking_tm import BacktrackingTM
from nupic.algorithms.backtracking_tm_cpp import BacktrackingTMCPP
from nupic.support.unittesthelpers import testcasebase
g_testCPPTM = True
class ExperimentTestBaseClass(testcasebase.TestCaseBase):
""" The base class for all of our tests in this module"""
  def __init__(self, testMethodName, *args, **kwargs):
    """Initialize the base test case and the test's private RNG.

    The RNG is seeded from the command-line --seed option (g_options) so the
    noise bits generated by the training-set builders are reproducible.
    """
    # Construct the base-class instance
    super(ExperimentTestBaseClass, self).__init__(testMethodName, *args,
                                                  **kwargs)
    # Module specific instance variables
    self._rgen = numpy.random.RandomState(g_options.seed)
  def _printOneTrainingVector(self, x):
    """Print a single vector succinctly."""
    # Render non-zero bits as '1' and zero bits as '.' on one line.
    print ''.join('1' if k != 0 else '.' for k in x)
  def _printAllTrainingSequences(self, trainingSequences):
    """Print all vectors"""
    # One banner per sequence, then each pattern as a compact 1/. string.
    for i, trainingSequence in enumerate(trainingSequences):
      print "============= Sequence", i, "================="
      for pattern in trainingSequence:
        self._printOneTrainingVector(pattern)
def _setVerbosity(self, verbosity, tm, tmPy):
"""Set verbosity level on the TM"""
tm.cells4.setVerbosity(verbosity)
tm.verbosity = verbosity
tmPy.verbosity = verbosity
  def _createTMs(self, numCols, fixedResources=False,
                 checkSynapseConsistency = True):
    """Create an instance of the appropriate temporal memory. We isolate
    all parameters as constants specified here.

    Returns a (cppTM, pyTM) pair; cppTM is None when the module-level
    g_testCPPTM flag is False. When fixedResources is True the TMs cap
    segments/synapses per cell and disable global decay.
    """
    # Keep these fixed:
    minThreshold = 4
    activationThreshold = 8
    newSynapseCount = 15
    initialPerm = 0.3
    connectedPerm = 0.5
    permanenceInc = 0.1
    permanenceDec = 0.05  # NOTE: re-assigned in both branches below
    if fixedResources:
      permanenceDec = 0.1
      maxSegmentsPerCell = 5
      maxSynapsesPerSegment = 15
      globalDecay = 0
      maxAge = 0
    else:
      permanenceDec = 0.05
      maxSegmentsPerCell = -1
      maxSynapsesPerSegment = -1
      globalDecay = 0.0001
      maxAge = 1
    if g_testCPPTM:
      if g_options.verbosity > 1:
        print "Creating BacktrackingTMCPP instance"
      cppTM = BacktrackingTMCPP(numberOfCols = numCols, cellsPerColumn = 4,
                                initialPerm = initialPerm, connectedPerm = connectedPerm,
                                minThreshold = minThreshold,
                                newSynapseCount = newSynapseCount,
                                permanenceInc = permanenceInc,
                                permanenceDec = permanenceDec,
                                activationThreshold = activationThreshold,
                                globalDecay = globalDecay, maxAge=maxAge, burnIn = 1,
                                seed=g_options.seed, verbosity=g_options.verbosity,
                                checkSynapseConsistency = checkSynapseConsistency,
                                pamLength = 1000,
                                maxSegmentsPerCell = maxSegmentsPerCell,
                                maxSynapsesPerSegment = maxSynapsesPerSegment,
                                )
      # Ensure we are copying over learning states for TMDiff
      cppTM.retrieveLearningStates = True
    else:
      cppTM = None
    if g_options.verbosity > 1:
      print "Creating PY TM instance"
    pyTM = BacktrackingTM(numberOfCols = numCols,
                          cellsPerColumn = 4,
                          initialPerm = initialPerm,
                          connectedPerm = connectedPerm,
                          minThreshold = minThreshold,
                          newSynapseCount = newSynapseCount,
                          permanenceInc = permanenceInc,
                          permanenceDec = permanenceDec,
                          activationThreshold = activationThreshold,
                          globalDecay = globalDecay, maxAge=maxAge, burnIn = 1,
                          seed=g_options.seed, verbosity=g_options.verbosity,
                          pamLength = 1000,
                          maxSegmentsPerCell = maxSegmentsPerCell,
                          maxSynapsesPerSegment = maxSynapsesPerSegment,
                          )
    return cppTM, pyTM
def _getSimplePatterns(self, numOnes, numPatterns):
"""Very simple patterns. Each pattern has numOnes consecutive
bits on. There are numPatterns*numOnes bits in the vector. These patterns
are used as elements of sequences when building up a training set."""
numCols = numOnes * numPatterns
p = []
for i in xrange(numPatterns):
x = numpy.zeros(numCols, dtype='float32')
x[i*numOnes:(i+1)*numOnes] = 1
p.append(x)
return p
  def _buildSegmentLearningTrainingSet(self, numOnes=10, numRepetitions= 10):
    """A simple sequence of 5 patterns. The left half of the vector contains
    the pattern elements, each with numOnes consecutive bits. The right half
    contains numOnes random bits. The function returns a pair:

    trainingSequences:    A list containing numRepetitions instances of the
                          above sequence
    testSequence:         A single clean test sequence containing the 5 patterns
                          but with no noise on the right half
    """
    numPatterns = 5
    numCols = 2 * numPatterns * numOnes
    halfCols = numPatterns * numOnes
    numNoiseBits = numOnes
    p = self._getSimplePatterns(numOnes, numPatterns)
    # Create noisy training sequence
    trainingSequences = []
    for _ in xrange(numRepetitions):
      sequence = []
      for j in xrange(numPatterns):
        # Make left half
        v = numpy.zeros(numCols)
        v[0:halfCols] = p[j]
        # Select numOnes noise bits
        # (a fresh random choice for every presentation, via self._rgen)
        noiseIndices = (self._rgen.permutation(halfCols)
                        + halfCols)[0:numNoiseBits]
        v[noiseIndices] = 1
        sequence.append(v)
      trainingSequences.append(sequence)
    # Create a single clean test sequence
    testSequence = []
    for j in xrange(numPatterns):
      # Make only left half
      v = numpy.zeros(numCols, dtype='float32')
      v[0:halfCols] = p[j]
      testSequence.append(v)
    if g_options.verbosity > 1:
      print "\nTraining sequences"
      self._printAllTrainingSequences(trainingSequences)
      print "\nTest sequence"
      self._printAllTrainingSequences([testSequence])
    return (trainingSequences, [testSequence])
  def _buildSL2TrainingSet(self, numOnes=10, numRepetitions= 10):
    """Three simple sequences, composed of the same 5 static patterns. The left
    half of the vector contains the pattern elements, each with numOnes
    consecutive bits. The right half contains numOnes random bits.

    Sequence 1 is:  p0, p1, p2, p3, p4
    Sequence 2 is:  p4, p3, p2, p1, p0
    Sequence 3 is:  p2, p0, p4, p1, p3

    The function returns a pair:

    trainingSequences:    A list containing numRepetitions instances of the
                          above sequences
    testSequence:         Clean test sequences with no noise on the right half
    """
    numPatterns = 5
    numCols = 2 * numPatterns * numOnes
    halfCols = numPatterns * numOnes
    numNoiseBits = numOnes
    p = self._getSimplePatterns(numOnes, numPatterns)
    # Indices of the patterns in the underlying sequences
    numSequences = 3
    indices = [
      [0, 1, 2, 3, 4],
      [4, 3, 2, 1, 0],
      [2, 0, 4, 1, 3],
    ]
    # Create the noisy training sequence
    # Sequences are inter-leaved: repetition i uses indices[i % numSequences].
    trainingSequences = []
    for i in xrange(numRepetitions*numSequences):
      sequence = []
      for j in xrange(numPatterns):
        # Make left half
        v = numpy.zeros(numCols, dtype='float32')
        v[0:halfCols] = p[indices[i % numSequences][j]]
        # Select numOnes noise bits
        noiseIndices = (self._rgen.permutation(halfCols)
                        + halfCols)[0:numNoiseBits]
        v[noiseIndices] = 1
        sequence.append(v)
      trainingSequences.append(sequence)
    # Create the clean test sequences
    testSequences = []
    for i in xrange(numSequences):
      sequence = []
      for j in xrange(numPatterns):
        # Make only left half
        v = numpy.zeros(numCols, dtype='float32')
        v[0:halfCols] = p[indices[i % numSequences][j]]
        sequence.append(v)
      testSequences.append(sequence)
    if g_options.verbosity > 1:
      print "\nTraining sequences"
      self._printAllTrainingSequences(trainingSequences)
      print "\nTest sequences"
      self._printAllTrainingSequences(testSequences)
    return (trainingSequences, testSequences)
def _testSegmentLearningSequence(self, tms,
                                 trainingSequences,
                                 testSequences,
                                 doResets = True):
  """Train the given TM once on the entire training set, then test on a
  single set of sequences once and check that individual predictions reflect
  the true relative frequencies. Return a success code. Success code is 1
  for pass, 0 for fail.

  Parameters:
  --------------------------------------------
  tms:               (cppTM, pyTM) pair; cppTM may be None, in which case
                     only the Python implementation is exercised.
  trainingSequences: list of sequences (each a list of input vectors).
  testSequences:     list of sequences for inference, or None to reuse
                     trainingSequences.
  doResets:          if True, reset the TMs between sequences.
  """

  # If no test sequence is specified, use the first training sequence
  if testSequences == None:
    testSequences = trainingSequences

  cppTM, pyTM = tms[0], tms[1]

  # Both implementations must start out identical.
  if cppTM is not None:
    assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True

  #--------------------------------------------------------------------------
  # Learn
  if g_options.verbosity > 0:
    print "============= Training ================="
    print "TM parameters:"
    print "CPP"
    if cppTM is not None:
      print cppTM.printParameters()
    print "\nPY"
    print pyTM.printParameters()

  for sequenceNum, trainingSequence in enumerate(trainingSequences):

    if g_options.verbosity > 1:
      print "============= New sequence ================="

    if doResets:
      if cppTM is not None:
        cppTM.reset()
      pyTM.reset()

    for t, x in enumerate(trainingSequence):

      if g_options.verbosity > 1:
        print "Time step", t, "sequence number", sequenceNum
        print "Input: ", pyTM.printInput(x)
        print "NNZ:", x.nonzero()

      x = numpy.array(x).astype('float32')
      if cppTM is not None:
        cppTM.learn(x)
      pyTM.learn(x)

      # After every learn step, the two implementations must stay in sync.
      if cppTM is not None:
        assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity,
                                relaxSegmentTests = False) == True

      if g_options.verbosity > 2:
        if cppTM is not None:
          print "CPP"
          cppTM.printStates(printPrevious = (g_options.verbosity > 4))
        print "\nPY"
        pyTM.printStates(printPrevious = (g_options.verbosity > 4))
        print

    if g_options.verbosity > 4:
      print "Sequence finished. Complete state after sequence"
      if cppTM is not None:
        print "CPP"
        cppTM.printCells()
      print "\nPY"
      pyTM.printCells()
      print

  if g_options.verbosity > 2:
    print "Calling trim segments"

  # Trim both TMs and verify they removed the same amount.
  if cppTM is not None:
    nSegsRemovedCPP, nSynsRemovedCPP = cppTM.trimSegments()
  nSegsRemoved, nSynsRemoved = pyTM.trimSegments()
  if cppTM is not None:
    assert nSegsRemovedCPP == nSegsRemoved
    assert nSynsRemovedCPP == nSynsRemoved

  if cppTM is not None:
    assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True

  print "Training completed. Stats:"
  info = pyTM.getSegmentInfo()
  print "  nSegments:", info[0]
  print "  nSynapses:", info[1]
  if g_options.verbosity > 3:
    print "Complete state:"
    if cppTM is not None:
      print "CPP"
      cppTM.printCells()
    print "\nPY"
    pyTM.printCells()

  #---------------------------------------------------------------------------
  # Infer
  if g_options.verbosity > 1:
    print "============= Inference ================="

  if cppTM is not None:
    cppTM.collectStats = True
  pyTM.collectStats = True

  nPredictions = 0
  cppNumCorrect, pyNumCorrect = 0, 0

  for sequenceNum, testSequence in enumerate(testSequences):

    if g_options.verbosity > 1:
      print "============= New sequence ================="

    slen = len(testSequence)

    if doResets:
      if cppTM is not None:
        cppTM.reset()
      pyTM.reset()

    for t, x in enumerate(testSequence):

      if g_options.verbosity >= 2:
        print "Time step", t, '\nInput:'
        pyTM.printInput(x)

      if cppTM is not None:
        cppTM.infer(x)
      pyTM.infer(x)

      if cppTM is not None:
        assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True

      if g_options.verbosity > 2:
        if cppTM is not None:
          print "CPP"
          cppTM.printStates(printPrevious = (g_options.verbosity > 4),
                            printLearnState = False)
        print "\nPY"
        pyTM.printStates(printPrevious = (g_options.verbosity > 4),
                         printLearnState = False)

      if cppTM is not None:
        cppScores = cppTM.getStats()
      pyScores = pyTM.getStats()

      if g_options.verbosity >= 2:
        if cppTM is not None:
          print "CPP"
          print cppScores
        print "\nPY"
        print pyScores

      # Count a prediction only past the burn-in period and before the
      # last element (there is nothing to predict after the last one).
      if t < slen-1 and t > pyTM.burnIn:
        nPredictions += 1
        if cppTM is not None:
          if cppScores['curPredictionScore2'] > 0.3:
            cppNumCorrect += 1
        if pyScores['curPredictionScore2'] > 0.3:
          pyNumCorrect += 1

  # Check that every inference was correct, excluding the very last inference
  if cppTM is not None:
    cppScores = cppTM.getStats()
  pyScores = pyTM.getStats()

  passTest = False
  if cppTM is not None:
    if cppNumCorrect == nPredictions and pyNumCorrect == nPredictions:
      passTest = True
  else:
    if pyNumCorrect == nPredictions:
      passTest = True

  if not passTest:
    print "CPP correct predictions:", cppNumCorrect
    print "PY correct predictions:", pyNumCorrect
    print "Total predictions:", nPredictions

  return passTest
def _testSL1(self, numOnes = 10, numRepetitions = 6, fixedResources = False,
checkSynapseConsistency = True):
"""Test segment learning"""
if fixedResources:
testName = "TestSL1_FS"
else:
testName = "TestSL1"
print "\nRunning %s..." % testName
trainingSet, testSet = self._buildSegmentLearningTrainingSet(numOnes,
numRepetitions)
numCols = len(trainingSet[0][0])
tms = self._createTMs(numCols = numCols, fixedResources=fixedResources,
checkSynapseConsistency = checkSynapseConsistency)
testResult = self._testSegmentLearningSequence(tms, trainingSet, testSet)
if testResult:
print "%s PASS" % testName
return 1
else:
print "%s FAILED" % testName
return 0
def _testSL2(self, numOnes = 10, numRepetitions = 10, fixedResources = False,
checkSynapseConsistency = True):
"""Test segment learning"""
if fixedResources:
testName = "TestSL2_FS"
else:
testName = "TestSL2"
print "\nRunning %s..." % testName
trainingSet, testSet = self._buildSL2TrainingSet(numOnes, numRepetitions)
numCols = len(trainingSet[0][0])
tms = self._createTMs(numCols = numCols, fixedResources=fixedResources,
checkSynapseConsistency = checkSynapseConsistency)
testResult = self._testSegmentLearningSequence(tms, trainingSet, testSet)
if testResult:
print "%s PASS" % testName
return 1
else:
print "%s FAILED" % testName
return 0
class TMSegmentLearningTests(ExperimentTestBaseClass):
"""Our high level tests"""
def test_SL1NoFixedResources(self):
"""Test segment learning without fixed resources"""
self._testSL1(fixedResources=False,
checkSynapseConsistency=g_options.long)
def test_SL1WithFixedResources(self):
"""Test segment learning with fixed resources"""
if not g_options.long:
print "Test %s only enabled with the --long option" % \
(self._testMethodName)
return
self._testSL1(fixedResources=True,
checkSynapseConsistency=g_options.long)
def test_SL2NoFixedResources(self):
"""Test segment learning without fixed resources"""
if not g_options.long:
print "Test %s only enabled with the --long option" % \
(self._testMethodName)
return
self._testSL2(fixedResources=False,
checkSynapseConsistency=g_options.long)
def test_SL2WithFixedResources(self):
"""Test segment learning with fixed resources"""
if not g_options.long:
print "Test %s only enabled with the --long option" % \
(self._testMethodName)
return
self._testSL2(fixedResources=True,
checkSynapseConsistency=g_options.long)
if __name__ == "__main__":

  # Process command line arguments
  parser = testcasebase.TestOptionParser()

  # Make the default value of the random seed 35
  parser.remove_option('--seed')
  parser.add_option('--seed', default=35, type='int',
                    help='Seed to use for random number generators '
                         '[default: %default].')

  # g_options is a module-level global read by the test methods above.
  g_options, _ = parser.parse_args()

  # Run the tests
  unittest.main(verbosity=g_options.verbosity)
| 19,895 | Python | .py | 466 | 33.785408 | 89 | 0.631715 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,735 | tm_constant_test.py | numenta_nupic-legacy/examples/tm/tm_constant_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file tests that we can learn and predict the particularly vexing case of a
single constant signal!
"""
import numpy as np
import unittest2 as unittest
from nupic.algorithms import fdrutilities as fdrutils
from nupic.algorithms.backtracking_tm import BacktrackingTM
from nupic.algorithms.backtracking_tm_cpp import BacktrackingTMCPP
from nupic.support.unittesthelpers.testcasebase import (TestCaseBase,
TestOptionParser)
def _printOneTrainingVector(x):
"Print a single vector succinctly."
print ''.join('1' if k != 0 else '.' for k in x)
def _getSimplePatterns(numOnes, numPatterns):
  """Very simple patterns. Each pattern has numOnes consecutive
  bits on. There are numPatterns*numOnes bits in the vector. These patterns
  are used as elements of sequences when building up a training set."""
  width = numOnes * numPatterns
  patterns = []
  for patternIdx in xrange(numPatterns):
    # Each pattern occupies its own disjoint run of numOnes bits.
    start = patternIdx * numOnes
    vec = np.zeros(width, dtype='float32')
    vec[start:start + numOnes] = 1
    patterns.append(vec)
  return patterns
def _createTms(numCols):
  """Create two instances of temporal poolers (backtracking_tm.py
  and backtracking_tm_cpp.py) with identical parameter settings."""

  # Single shared parameter set guarantees both implementations are
  # configured identically (these values are fixed for the tests).
  commonParams = dict(numberOfCols=numCols,
                      cellsPerColumn=1,
                      initialPerm=0.3,
                      connectedPerm=0.5,
                      minThreshold=4,
                      newSynapseCount=7,
                      permanenceInc=0.1,
                      permanenceDec=0.05,
                      activationThreshold=5,
                      globalDecay=0,
                      burnIn=1,
                      seed=SEED,
                      verbosity=VERBOSITY,
                      pamLength=1000)

  cppTm = BacktrackingTMCPP(checkSynapseConsistency=True, **commonParams)

  # Ensure we are copying over learning states for TPDiff
  cppTm.retrieveLearningStates = True

  pyTm = BacktrackingTM(**commonParams)

  return cppTm, pyTm
class TMConstantTest(TestCaseBase):
  """Verify that both TM implementations can learn and predict a single
  constant (repeating) pattern, and that they stay identical."""

  def setUp(self):
    # Fresh C++/Python TM pair with identical parameters for every test.
    self.cppTm, self.pyTm = _createTms(100)

  def _basicTest(self, tm=None):
    """Test creation, pickling, and basic run of learning and inference."""

    trainingSet = _getSimplePatterns(10, 10)

    # Learn on several constant sequences, with a reset in between
    for _ in range(2):
      for seq in trainingSet[0:5]:
        for _ in range(10):
          tm.learn(seq)
        tm.reset()

    print "Learning completed"

    # Infer
    print "Running inference"

    tm.collectStats = True
    for seq in trainingSet[0:5]:
      tm.reset()
      tm.resetStats()
      for _ in range(10):
        tm.infer(seq)
        if VERBOSITY > 1 :
          print
          _printOneTrainingVector(seq)
          tm.printStates(False, False)
          print
          print
      if VERBOSITY > 1:
        print tm.getStats()

      # Ensure our predictions are accurate for each sequence
      self.assertGreater(tm.getStats()['predictionScoreAvg2'], 0.8)
      # NOTE: prints a tuple under Python 2 (parenthesized print arguments).
      print ("tm.getStats()['predictionScoreAvg2'] = ",
             tm.getStats()['predictionScoreAvg2'])

    print "TMConstantTest ok"

  def testCppTmBasic(self):
    self._basicTest(self.cppTm)

  def testPyTmBasic(self):
    self._basicTest(self.pyTm)

  def testIdenticalTms(self):
    # The two implementations must remain equivalent after construction.
    self.assertTrue(fdrutils.tmDiff2(self.cppTm, self.pyTm))
if __name__=="__main__":
  # Parse the standard test options (seed, verbosity) from the command line.
  parser = TestOptionParser()
  options, _ = parser.parse_args()

  # Module-level globals read by _createTms() and _basicTest().
  SEED = options.seed
  VERBOSITY = options.verbosity

  # Seed numpy's global RNG so runs are reproducible.
  np.random.seed(SEED)

  unittest.main()
| 5,459 | Python | .py | 133 | 32.601504 | 79 | 0.642871 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,736 | tm_test.py | numenta_nupic-legacy/examples/tm/tm_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file performs a variety of tests on the reference temporal memory code.
basic_test
==========
Tests creation and serialization of the TM class. Sets parameters and ensures
they are the same after a serialization and de-serialization step. Runs learning
and inference on a small number of random patterns and ensures it doesn't crash.
===============================================================================
Basic First Order Sequences
===============================================================================
These tests ensure the most basic (first order) sequence learning mechanism is
working.
Parameters: Use a "fast learning mode": turn off global decay, temporal pooling
and hilo (make minThreshold really high). initPerm should be greater than
connectedPerm and permanenceDec should be zero. With these settings sequences
should be learned in one pass:
minThreshold = newSynapseCount
globalDecay = 0
temporalPooling = False
initialPerm = 0.8
connectedPerm = 0.7
permanenceDec = 0
permanenceInc = 0.4
Other Parameters:
numCols = 100
cellsPerCol = 1
newSynapseCount=11
activationThreshold = 8
permanenceMax = 1
Note: this is not a high order sequence, so one cell per column is fine.
Input Sequence: We train with M input sequences, each consisting of N random
patterns. Each pattern consists of a random number of bits on. The number of 1's
in each pattern should be between 21 and 25 columns. The sequences are
constructed so that consecutive patterns within a sequence don't share any
columns.
Training: The TM is trained with P passes of the M sequences. There
should be a reset between sequences. The total number of iterations during
training is P*N*M.
Testing: Run inference through the same set of sequences, with a reset before
each sequence. For each sequence the system should accurately predict the
pattern at the next time step up to and including the N-1'st pattern. A perfect
prediction consists of getting every column correct in the prediction, with no
extra columns. We report the number of columns that are incorrect and report a
failure if more than 2 columns are incorrectly predicted.
We can also calculate the number of segments and synapses that should be
learned. We raise an error if too many or too few were learned.
B1) Basic sequence learner. M=1, N=100, P=1.
B2) Same as above, except P=2. Test that permanences go up and that no
additional synapses or segments are learned.
B3) N=300, M=1, P=1. (See how high we can go with M)
B4) N=100, M=3, P=1 (See how high we can go with N*M)
B5) Like B1) but only have newSynapseCount columns ON in each pattern (instead
of between 21 and 25), and set activationThreshold to newSynapseCount.
B6) Like B1 but with cellsPerCol = 4. First order sequences should still work
just fine.
B7) Like B1 but with slower learning. Set the following parameters differently:
activationThreshold = newSynapseCount
minThreshold = activationThreshold
initialPerm = 0.2
connectedPerm = 0.7
permanenceInc = 0.2
Now we train the TM with the B1 sequence 4 times (P=4). This will increment
the permanences to be above 0.8 and at that point the inference will be correct.
This test will ensure the basic match function and segment activation rules are
working correctly.
B8) Like B7 but with 4 cells per column. Should still work.
B9) Like B7 but present the sequence less than 4 times: the inference should be
incorrect.
B10) Like B2, except that cells per column = 4. Should still add zero additional
synapses.
===============================================================================
High Order Sequences
===============================================================================
These tests ensure that high order sequences can be learned in a multiple cells
per column instantiation.
Parameters: Same as Basic First Order Tests above, but with varying cells per
column.
Input Sequence: We train with M input sequences, each consisting of N random
patterns. Each pattern consists of a random number of bits on. The number of 1's
in each pattern should be between 21 and 25 columns (except for H0). The
sequences are constructed so that consecutive patterns within a sequence don't
share any columns. The sequences are constructed to contain shared subsequences,
such as:
A B C D E F G H I J
K L M D E F N O P Q
The position and length of shared subsequences are parameters in the tests.
Training: Identical to basic first order tests above.
Testing: Identical to basic first order tests above unless noted.
We can also calculate the number of segments and synapses that should be
learned. We raise an error if too many or too few were learned.
H0) Two simple high order sequences, each of length 7, with a shared
subsequence in positions 2-4. Each pattern has a consecutive set of 5 bits on.
No pattern shares any columns with the others. These sequences are easy to
visualize and is very useful for debugging.
H1) Learn two sequences with a short shared pattern. Parameters
should be the same as B1. This test will FAIL since cellsPerCol == 1. No
consecutive patterns share any column.
H2) As above but with cellsPerCol == 4. This test should PASS. No consecutive
patterns share any column.
H2a) Same as above, except P=2. Test that permanences go up and that no
additional synapses or segments are learned.
H3) Same parameters as H.2 except sequences are created such that they share a
single significant sub-sequence. Subsequences should be reasonably long and in
the middle of sequences. No consecutive patterns share any column.
H4) Like H.3, except the shared subsequence is in the beginning. (e.g.
"ABCDEF" and "ABCGHIJ". At the point where the shared subsequence ends, all
possible next patterns should be predicted. As soon as you see the first unique
pattern, the predictions should collapse to be a perfect prediction.
H5) Shared patterns. Similar to H3 except that patterns are shared between
sequences. All sequences are different shufflings of the same set of N
patterns (there is no shared subsequence). Care should be taken such that the
same three patterns never follow one another in two sequences.
H6) Combination of H5) and H3). Shared patterns in different sequences, with a
shared subsequence.
H7) Stress test: every other pattern is shared. [Unimplemented]
H8) Start predicting in the middle of a sequence. [Unimplemented]
H9) Hub capacity. How many patterns can use that hub?
[Implemented, but does not run by default.]
H10) Sensitivity to small amounts of noise during inference. [Unimplemented]
H11) Higher order patterns with alternating elements.
Create the following 4 sequences:
A B A B A C
A B A B D E
A B F G H I
A J K L M N
After training we should verify that the expected transitions are in the
model. Prediction accuracy should be perfect. In addition, during inference,
after the first element is presented, the columns should not burst any more.
Need to verify, for the first sequence, that the high order representation
when presented with the second A and B is different from the representation
in the first presentation.
===============================================================================
Temporal Pooling Tests [UNIMPLEMENTED]
===============================================================================
Parameters: Use a "fast learning mode": With these settings sequences should be
learned in one pass:
minThreshold = newSynapseCount
globalDecay = 0
initialPerm = 0.8
connectedPerm = 0.7
permanenceDec = 0
permanenceInc = 0.4
Other Parameters:
cellsPerCol = 4
newSynapseCount=11
activationThreshold = 11
permanenceMax = 1
doPooling = True
Input Sequence: We train with M input sequences, each consisting of N random
patterns. Each pattern consists of a random number of bits on. The number of 1's
in each pattern should be between 17 and 21 columns. The sequences are
constructed so that consecutive patterns within a sequence don't share any
columns.
Note: for pooling tests the density of input patterns should be pretty low
since each pooling step increases the output density. At the same time, we need
enough bits on in the input for the temporal memory to find enough synapses. So,
for the tests, constraints should be something like:
(Input Density) * (Number of pooling steps) < 25 %.
AND
sum(Input) > newSynapseCount*1.5
Training: The TM is trained with P passes of the M sequences. There
should be a reset between sequences. The total number of iterations during
training is P*N*M.
Testing: Run inference through the same set of sequences, with a reset before
each sequence. For each sequence the system should accurately predict the
pattern at the next P time steps, up to and including the N-P'th pattern. A
perfect prediction consists of getting every column correct in the prediction,
with no extra columns. We report the number of columns that are incorrect and
report a failure if more than 2 columns are incorrectly predicted.
P1) Train the TM two times (P=2) on a single long sequence consisting of random
patterns (N=20, M=1). There should be no overlapping columns between successive
patterns. During inference, the TM should be able reliably predict the pattern
two time steps in advance. numCols should be about 350 to meet the above
constraints and also to maintain consistency with test P2.
P2) Increase TM rate to 3 time steps in advance (P=3). At each step during
inference, the TM should be able to reliably predict the pattern coming up at
t+1, t+2, and t+3..
P3) Set segUpdateValidDuration to 2 and set P=3. This should behave almost
identically to P1. It should only predict the next time step correctly and not
two time steps in advance. (Check off by one error in this logic.)
P4) As above, but with multiple sequences.
P5) Same as P3 but with shared subsequences.
Continuous mode tests
=====================
Slow changing inputs.
Orphan Decay Tests
==================
HiLo Tests
==========
A high order sequence memory like the TM can memorize very long sequences. In
many applications though you don't want to memorize. You see a long sequence of
patterns but there are actually lower order repeating sequences embedded within
it. A simplistic example is words in a sentence. You'd like the
TM to learn those sequences.
Tests should capture number of synapses learned and compare against
theoretically optimal numbers to pass/fail.
HL0a) For debugging, similar to H0. We want to learn a 3 pattern long sequence
presented with noise before and after, with no resets. Two steps of noise will
be presented.
The noise will be 20 patterns, presented in random order. Every pattern has a
consecutive set of 5 bits on, so the vector will be 115 bits long. No pattern
shares any columns with the others. These sequences are easy to visualize and is
very useful for debugging.
TM parameters should be the same as B7 except that permanenceDec should be 0.05:
activationThreshold = newSynapseCount
minThreshold = activationThreshold
initialPerm = 0.2
connectedPerm = 0.7
permanenceInc = 0.2
permanenceDec = 0.05
So, this means it should learn a sequence after 4 repetitions. It will take
4 orphan decay steps to get an incorrect synapse to go away completely.
HL0b) Like HL0a, but after the 3-sequence is learned, try to learn a 4-sequence
that builds on the 3-sequence. For example, if learning A-B-C we train also on
D-A-B-C. It should learn that ABC is separate from DABC. Note: currently this
test is disabled in the code. It is a bit tricky to test this. When you present
DAB, you should predict the same columns as when you present AB (i.e. in both
cases C should be predicted). However, the representation for C in DABC should
be different than the representation for C in ABC. Furthermore, when you present
AB, the representation for C should be an OR of the representation in DABC and
ABC since you could also be starting in the middle of the DABC sequence. All
this is actually happening in the code, but verified by visual inspection only.
HL1) Noise + sequence + noise + sequence repeatedly without resets until it has
learned that sequence. Train the TM repeatedly with N random sequences that all
share a single subsequence. Each random sequence can be 10 patterns long,
sharing a subsequence that is 5 patterns long. There should be no resets
between presentations. Inference should then be on that 5 long shared
subsequence.
Example (3-long shared subsequence):
A B C D E F G H I J
K L M D E F N O P Q
R S T D E F U V W X
Y Z 1 D E F 2 3 4 5
TM parameters should be the same as HL0.
HL2) Like HL1, but after A B C has learned, try to learn D A B C . It should
learn ABC is separate from DABC.
HL3) Like HL2, but test with resets.
HL4) Like HL1 but with minThreshold high. This should FAIL and learn a ton
of synapses.
HiLo but with true high order sequences embedded in noise
Present 25 sequences in random order with no resets but noise between
sequences (1-20 samples). Learn all 25 sequences. Test global decay vs non-zero
permanenceDec .
Pooling + HiLo Tests [UNIMPLEMENTED]
====================
Needs to be defined.
Global Decay Tests [UNIMPLEMENTED]
==================
Simple tests to ensure global decay is actually working.
Sequence Likelihood Tests
=========================
These tests are in the file TMLikelihood.py
Segment Learning Tests [UNIMPLEMENTED]
======================
Multi-attribute sequence tests.
SL1) Train the TM repeatedly using a single (multiple) sequence plus noise. The
sequence can be relatively short, say 20 patterns. No two consecutive patterns
in the sequence should share columns. Add random noise each time a pattern is
presented. The noise should be different for each presentation and can be equal
to the number of on bits in the pattern. After N iterations of the noisy
sequences, the TM should achieve perfect inference on the true sequence.
There should be resets between each presentation of the sequence.
Check predictions in the sequence only. And test with clean sequences.
Vary percentage of bits that are signal vs noise.
Noise can be a fixed alphabet instead of being randomly generated.
HL2) As above, but with no resets.
Shared Column Tests [UNIMPLEMENTED]
===================
Carefully test what happens when consecutive patterns in a sequence share
columns.
Sequence Noise Tests [UNIMPLEMENTED]
====================
Note: I don't think these will work with the current logic. Need to discuss
whether we want to accommodate sequence noise like this.
SN1) Learn sequence with pooling up to T timesteps. Run inference on a sequence
and occasionally drop elements of a sequence. Inference should still work.
SN2) As above, but occasionally add a random pattern into a sequence.
SN3) A combination of the above two.
Capacity Tests [UNIMPLEMENTED]
==============
These are stress tests that verify that the temporal memory can learn a large
number of sequences and can predict a large number of possible next steps. Some
research needs to be done first to understand the capacity of the system as it
relates to the number of columns, cells per column, etc.
Token Prediction Tests: Test how many predictions of individual tokens we can
superimpose and still recover.
Online Learning Tests [UNIMPLEMENTED]
=====================
These tests will verify that the temporal memory continues to work even if
sequence statistics (and the actual sequences) change slowly over time. The TM
should adapt to the changes and learn to recognize newer sequences (and forget
the older sequences?).
"""
import numpy
import pprint
import random
import sys
from numpy import *
from nupic.algorithms import fdrutilities as fdrutils
from nupic.algorithms.backtracking_tm import BacktrackingTM
from nupic.algorithms.backtracking_tm_cpp import BacktrackingTMCPP
#-------------------------------------------------------------------------------
TEST_CPP_TM = 1   # temporarily disabled until it can be updated
VERBOSITY = 0     # how chatty the unit tests should be
SEED = 33         # the random seed used throughout
TMClass = BacktrackingTM          # default TM implementation used below
checkSynapseConsistency = False   # expensive consistency checks off by default
rgen = numpy.random.RandomState(SEED) # always call this rgen, NOT random
#-------------------------------------------------------------------------------
# Helper routines
#-------------------------------------------------------------------------------
def printOneTrainingVector(x):
print ''.join('1' if k != 0 else '.' for k in x)
def printAllTrainingSequences(trainingSequences, upTo = 99999):
for t in xrange(min(len(trainingSequences[0]), upTo)):
print 't=',t,
for i,trainingSequence in enumerate(trainingSequences):
print "\tseq#",i,'\t',
printOneTrainingVector(trainingSequences[i][t])
def generatePattern(numCols = 100,
                    minOnes =21,
                    maxOnes =25,
                    colSet = None,
                    prevPattern =None):
  """Generate a single test pattern with given parameters.

  Parameters:
  --------------------------------------------
  numCols:              Number of columns in each pattern.
  minOnes:              The minimum number of 1's in each pattern.
  maxOnes:              The maximum number of 1's in each pattern (exclusive,
                        per numpy's randint convention).
  colSet:               The set of column indices to draw from. Defaults to
                        all columns. (The previous default of [] was broken:
                        lists have no .difference() method.)
  prevPattern:          Pattern to avoid (null intersection). Defaults to an
                        empty pattern.
  """
  # None sentinels replace the old mutable defaults: [] crashed when used,
  # and a numpy-array default object would be shared across all calls.
  if colSet is None:
    colSet = set(range(numCols))
  if prevPattern is None:
    prevPattern = numpy.array([])

  assert minOnes < maxOnes
  assert maxOnes < numCols

  nOnes = rgen.randint(minOnes, maxOnes)

  # Candidate columns: members of colSet that are off in prevPattern, so the
  # generated pattern has a null intersection with prevPattern.
  candidates = list(colSet.difference(set(prevPattern.nonzero()[0])))
  rgen.shuffle(candidates)
  ind = candidates[:nOnes]
  x = numpy.zeros(numCols, dtype='float32')
  x[ind] = 1

  return x
def buildTrainingSet(numSequences = 2,
                     sequenceLength = 100,
                     pctShared = 0.2,
                     seqGenMode = 'shared sequence',
                     subsequenceStartPos = 10,
                     numCols = 100,
                     minOnes=21,
                     maxOnes = 25,
                     disjointConsecutive =True):
  """Build random high order test sequences.
  Parameters:
  --------------------------------------------
  numSequences:        The number of sequences created.
  sequenceLength:      The length of each sequence.
  pctShared:           The percentage of sequenceLength that is shared across
                       every sequence. If sequenceLength is 100 and pctShared
                       is 0.2, then a subsequence consisting of 20 patterns
                       will be in every sequence. Can also be the keyword
                       'one pattern', in which case a single time step is
                       shared.
  seqGenMode:          What kind of sequence to generate. If contains 'shared'
                       generates shared subsequence. If contains 'no shared',
                       does not generate any shared subsequence. If contains
                       'shuffle', will use common patterns shuffle among the
                       different sequences. If contains 'beginning', will
                       place shared subsequence at the beginning.
  subsequenceStartPos: The position where the shared subsequence starts
  numCols:             Number of columns in each pattern.
  minOnes:             The minimum number of 1's in each pattern.
  maxOnes:             The maximum number of 1's in each pattern.
  disjointConsecutive: Whether to generate disjoint consecutive patterns or not.

  Returns a 2-tuple (trainingSequences, hubEnd) where hubEnd is the time step
  just after the shared subsequence ends, or -1 when nothing is shared.
  """
  # Calculate the set of column indexes once to be used in each call to
  # generatePattern()
  colSet = set(range(numCols))
  if 'beginning' in seqGenMode:
    assert 'shared' in seqGenMode and 'no shared' not in seqGenMode
  # A single sequence cannot share anything with itself.
  if 'no shared' in seqGenMode or numSequences == 1:
    pctShared = 0.0
  #-----------------------------------------------------------------------------
  # Build shared subsequence
  if 'no shared' not in seqGenMode and 'one pattern' not in seqGenMode:
    sharedSequenceLength = int(pctShared*sequenceLength)
  elif 'one pattern' in seqGenMode:
    sharedSequenceLength = 1
  else:
    sharedSequenceLength = 0
  assert sharedSequenceLength + subsequenceStartPos < sequenceLength
  sharedSequence = []
  for i in xrange(sharedSequenceLength):
    if disjointConsecutive and i > 0:
      x = generatePattern(numCols, minOnes, maxOnes, colSet,
                          sharedSequence[i-1])
    else:
      x = generatePattern(numCols, minOnes, maxOnes, colSet)
    sharedSequence.append(x)
  #-----------------------------------------------------------------------------
  # Build random training set, splicing in the shared subsequence
  trainingSequences = []
  if 'beginning' not in seqGenMode:
    trailingLength = sequenceLength - sharedSequenceLength - subsequenceStartPos
  else:
    trailingLength = sequenceLength - sharedSequenceLength
  for k,s in enumerate(xrange(numSequences)):
    # TODO: implement no repetitions
    if len(trainingSequences) > 0 and 'shuffle' in seqGenMode:
      # Reuse the previous sequence's patterns, shuffled everywhere except
      # the shared subsequence, which is kept in place.
      r = range(subsequenceStartPos) \
          + range(subsequenceStartPos + sharedSequenceLength, sequenceLength)
      rgen.shuffle(r)
      r = r[:subsequenceStartPos] \
          + range(subsequenceStartPos, subsequenceStartPos + sharedSequenceLength) \
          + r[subsequenceStartPos:]
      sequence = [trainingSequences[k-1][j] for j in r]
    else:
      sequence = []
      # Random patterns before the shared subsequence (unless it goes first).
      if 'beginning' not in seqGenMode:
        for i in xrange(subsequenceStartPos):
          if disjointConsecutive and i > 0:
            x = generatePattern(numCols, minOnes, maxOnes, colSet, sequence[i-1])
          else:
            x = generatePattern(numCols, minOnes, maxOnes, colSet)
          sequence.append(x)
      if 'shared' in seqGenMode and 'no shared' not in seqGenMode:
        sequence.extend(sharedSequence)
      # Random patterns for the rest of the sequence.
      for i in xrange(trailingLength):
        if disjointConsecutive and i > 0:
          x = generatePattern(numCols, minOnes, maxOnes, colSet, sequence[i-1])
        else:
          x = generatePattern(numCols, minOnes, maxOnes, colSet)
        sequence.append(x)
    assert len(sequence) == sequenceLength
    trainingSequences.append(sequence)
  assert len(trainingSequences) == numSequences
  if VERBOSITY >= 2:
    print "Training Sequences"
    pprint.pprint(trainingSequences)
  if sharedSequenceLength > 0:
    return (trainingSequences, subsequenceStartPos + sharedSequenceLength)
  else:
    return (trainingSequences, -1)
def getSimplePatterns(numOnes, numPatterns):
  """Return numPatterns simple patterns, each with numOnes consecutive bits on.

  Pattern i has ones in columns [i*numOnes, (i+1)*numOnes); the patterns are
  mutually disjoint and together span numPatterns*numOnes columns.
  """
  numCols = numOnes * numPatterns
  patterns = []
  for i in xrange(numPatterns):
    start = i * numOnes
    vec = numpy.zeros(numCols, dtype='float32')
    vec[start:start + numOnes] = 1
    patterns.append(vec)
  return patterns
def buildSimpleTrainingSet(numOnes=5):
  """Two very simple high order sequences for debugging. Each pattern in the
  sequence has a series of 1's in a specific set of columns.

  The two sequences share the middle patterns p2 p3 p4; 5 is the time step
  just after the shared subsequence ends.
  """
  numPatterns = 11
  p = getSimplePatterns(numOnes, numPatterns)
  seqA = [p[i] for i in (0, 1, 2, 3, 4, 5, 6)]
  seqB = [p[i] for i in (7, 8, 2, 3, 4, 9, 10)]
  return ([seqA, seqB], 5)
def buildAlternatingTrainingSet(numOnes=5):
  """High order sequences that alternate elements. Pattern i has one's in
  i*numOnes to (i+1)*numOnes.
  The sequences are:
  A B A B A C
  A B A B D E
  A B F G H I
  A J K L M N
  """
  numPatterns = 14
  p = getSimplePatterns(numOnes, numPatterns)
  # Index rows correspond one-to-one to the letter diagram above.
  indexRows = [
      (0, 1, 0, 1, 0, 2),
      (0, 1, 0, 1, 3, 4),
      (0, 1, 5, 6, 7, 8),
      (0, 9, 10, 11, 12, 13),
  ]
  trainingSequences = [[p[i] for i in row] for row in indexRows]
  return (trainingSequences, 5)
def buildHL0aTrainingSet(numOnes=5):
  """Simple sequences for HL0. Each pattern in the sequence has a series of 1's
  in a specific set of columns.
  There are 23 patterns, p0 to p22.
  The sequence we want to learn is p0->p1->p2
  We create a very long sequence consisting of N N p0 p1 p2 N N p0 p1 p2
  N is randomly chosen from p3 to p22
  """
  numPatterns = 23
  p = getSimplePatterns(numOnes, numPatterns)

  # Leading noise pattern; rgen.randint's upper bound is exclusive, so this
  # draws from p3..p22.
  seq = [p[rgen.randint(3, 23)]]

  # Twenty blocks of: noise, target subsequence p0 p1 p2, noise. The list
  # literal evaluates left to right, so the rgen calls happen in exactly the
  # original order and the generated sequence is unchanged.
  for _ in xrange(20):
    seq.extend([p[rgen.randint(3, 23)],
                p[0], p[1], p[2],
                p[rgen.randint(3, 23)]])

  return ([seq], [[p[0], p[1], p[2]]])
def buildHL0bTrainingSet(numOnes=5):
  """Simple sequences for HL0b. Each pattern in the sequence has a series of 1's
  in a specific set of columns.
  There are 23 patterns, p0 to p22.
  The sequences we want to learn are p1->p2->p3 and p0->p1->p2->p4.
  We create a very long sequence consisting of these two sub-sequences
  intermixed with noise, such as:
  N N p0 p1 p2 p4 N N p1 p2 p3 N N p1 p2 p3
  N is randomly chosen from p5 to p22
  """
  numPatterns = 23
  p = getSimplePatterns(numOnes, numPatterns)
  s = []
  # Leading noise pattern; randint's upper bound is exclusive, so p5..p22.
  s.append(p[rgen.randint(5,numPatterns)])
  for _ in xrange(50):
    # Noise pattern before the sub-sequence (index echoed for debugging).
    r = rgen.randint(5,numPatterns)
    print r,
    s.append(p[r])
    # Fair coin flip: splice in one of the two target sub-sequences.
    if rgen.binomial(1, 0.5) > 0:
      print "S1",
      s.append(p[0])
      s.append(p[1])
      s.append(p[2])
      s.append(p[4])
    else:
      print "S2",
      s.append(p[1])
      s.append(p[2])
      s.append(p[3])
    # Trailing noise pattern after the sub-sequence.
    r = rgen.randint(5,numPatterns)
    s.append(p[r])
    print r,
  print
  return ([s], [ [p[0], p[1], p[2], p[4]], [p[1], p[2], p[3]] ])
# Basic test (creation, pickling, basic run of learning and inference)
def basicTest():
  """Smoke test: create a TM, serialize/deserialize it via the capnp schema,
  then run a handful of learn() and infer() steps on random inputs. Prints
  progress markers; raises AssertionError on any mismatch."""
  global TMClass, SEED, VERBOSITY, checkSynapseConsistency
  #--------------------------------------------------------------------------------
  # Create TM object
  numberOfCols =10
  cellsPerColumn =3
  initialPerm =.2
  connectedPerm =.8
  minThreshold =2
  newSynapseCount =5
  permanenceInc =.1
  permanenceDec =.05
  permanenceMax =1
  globalDecay =.05
  activationThreshold =4 # low for those basic tests on purpose
  doPooling =True
  segUpdateValidDuration =5
  seed =SEED
  verbosity =VERBOSITY
  tm = TMClass(numberOfCols, cellsPerColumn,
               initialPerm, connectedPerm,
               minThreshold, newSynapseCount,
               permanenceInc, permanenceDec, permanenceMax,
               globalDecay, activationThreshold,
               doPooling, segUpdateValidDuration,
               seed=seed, verbosity=verbosity,
               pamLength = 1000,
               checkSynapseConsistency=checkSynapseConsistency)
  print "Creation ok"
  #--------------------------------------------------------------------------------
  # Save and reload through the capnp schema, then verify every constructor
  # parameter survived the round trip.
  schema = TMClass.getSchema()
  with open("test_tm.bin", "w+b") as f:
    # Save
    proto = schema.new_message()
    tm.write(proto)
    proto.write(f)
    # Load
    f.seek(0)
    proto2 = schema.read(f)
    tm2 = TMClass.read(proto2)
  assert tm2.numberOfCols == numberOfCols
  assert tm2.cellsPerColumn == cellsPerColumn
  print tm2.initialPerm
  # Permanences are stored as float32, so compare against float32 literals.
  assert tm2.initialPerm == numpy.float32(.2)
  assert tm2.connectedPerm == numpy.float32(.8)
  assert tm2.minThreshold == minThreshold
  assert tm2.newSynapseCount == newSynapseCount
  assert tm2.permanenceInc == numpy.float32(.1)
  assert tm2.permanenceDec == numpy.float32(.05)
  assert tm2.permanenceMax == 1
  assert tm2.globalDecay == numpy.float32(.05)
  assert tm2.activationThreshold == activationThreshold
  assert tm2.doPooling == doPooling
  assert tm2.segUpdateValidDuration == segUpdateValidDuration
  assert tm2.seed == SEED
  assert tm2.verbosity == verbosity
  print "Save/load ok"
  #--------------------------------------------------------------------------------
  # Learn on a few random binary vectors
  for i in xrange(5):
    xi = rgen.randint(0,2,(numberOfCols))
    x = numpy.array(xi, dtype="uint32")
    y = tm.learn(x)
  #--------------------------------------------------------------------------------
  # Infer on random vectors and exercise _checkPrediction
  patterns = rgen.randint(0,2,(4,numberOfCols))
  for i in xrange(10):
    xi = rgen.randint(0,2,(numberOfCols))
    x = numpy.array(xi, dtype="uint32")
    y = tm.infer(x)
    if i > 0:
      p = tm._checkPrediction([pattern.nonzero()[0] for pattern in patterns])
  print "basicTest ok"
#---------------------------------------------------------------------------------
# Figure out acceptable patterns if none were passed to us.
def findAcceptablePatterns(tm, t, whichSequence, trainingSequences, nAcceptable = 1):
  """
  Tries to infer the set of acceptable patterns for prediction at the given
  time step and for the give sequence. Acceptable patterns are: the current one,
  plus a certain number of patterns after timeStep, in the sequence that the TM
  is currently tracking. Any other pattern is not acceptable.
  TODO:
  ====
  - Doesn't work for noise cases.
  - Might run in trouble if shared subsequence at the beginning.
  Parameters:
  ==========
  tm                the whole TM, so that we can look at its parameters
  t                 the current time step
  whichSequence     the sequence we are currently tracking
  trainingSequences all the training sequences
  nAcceptable       the number of steps forward from the current timeStep
                    we are willing to consider acceptable. In the case of
                    pooling, it is less than or equal to the min of the
                    number of training reps and the segUpdateValidDuration
                    parameter of the TM, depending on the test case.
                    The default value is 1, because by default, the pattern
                    after the current one should always be predictable.
  Return value:
  ============
  acceptablePatterns A list of acceptable patterns for prediction.
  """
  # Determine how many steps forward we want to see in the prediction
  upTo = t + 2 # always predict current and next
  # If the TM is pooling, more steps can be predicted
  if tm.doPooling:
    upTo += min(tm.segUpdateValidDuration, nAcceptable)
  assert upTo <= len(trainingSequences[whichSequence])
  acceptablePatterns = []
  # Check whether we were in a shared subsequence at the beginning.
  # If so, at the point of exiting the shared subsequence (t), we should
  # be predicting multiple patterns for 1 time step, then collapse back
  # to a single sequence.
  if len(trainingSequences) == 2 and \
    (trainingSequences[0][0] == trainingSequences[1][0]).all():
    if (trainingSequences[0][t] == trainingSequences[1][t]).all() \
      and (trainingSequences[0][t+1] != trainingSequences[1][t+1]).any():
      acceptablePatterns.append(trainingSequences[0][t+1])
      acceptablePatterns.append(trainingSequences[1][t+1])
  # Add patterns going forward. Fix: the comprehension used to reuse the
  # name 't', which in Python 2 leaks out of the comprehension and clobbers
  # the parameter; use a distinct variable instead.
  acceptablePatterns += [trainingSequences[whichSequence][futureT] \
                         for futureT in xrange(t, upTo)]
  return acceptablePatterns
def testSequence(trainingSequences,
                 nTrainingReps = 1,
                 numberOfCols = 40,
                 cellsPerColumn =5,
                 initialPerm =.8,
                 connectedPerm =.7,
                 minThreshold = 11,
                 newSynapseCount =5,
                 permanenceInc =.4,
                 permanenceDec =0.0,
                 permanenceMax =1,
                 globalDecay =0.0,
                 pamLength = 1000,
                 activationThreshold =5,
                 acceptablePatterns = [], # if empty, try to infer what they are
                 doPooling = False,
                 nAcceptable = -1, # if doPooling, number of acceptable steps
                 noiseModel = None,
                 noiseLevel = 0,
                 doResets = True,
                 shouldFail = False,
                 testSequences = None,
                 predJustAfterHubOnly = None,
                 compareToPy = False,
                 nMultiStepPrediction = 0,
                 highOrder = False):
  """Test a single set of sequences once and return the number of
  prediction failures, the number of errors, and the number of perfect
  predictions.

  trainingSequences is the (sequences, hubEnd) tuple produced by the
  build*TrainingSet helpers; only the sequences element is used here.
  Returns (numFailures, numStrictErrors, numPerfect, tm), plus
  numPerfectAtHub before tm when predJustAfterHubOnly is not None.
  """
  global BacktrackingTM, SEED, checkSynapseConsistency, VERBOSITY
  numPerfect = 0 # When every column is correct in the prediction
  numStrictErrors = 0 # When at least one column is incorrect
  numFailures = 0 # When > 2 columns are incorrect
  sequenceLength = len(trainingSequences[0])
  segUpdateValidDuration =5
  verbosity = VERBOSITY
  # override default maxSeqLEngth value for high-order sequences
  if highOrder:
    tm = TMClass(numberOfCols, cellsPerColumn,
                 initialPerm, connectedPerm,
                 minThreshold, newSynapseCount,
                 permanenceInc, permanenceDec, permanenceMax,
                 globalDecay, activationThreshold,
                 doPooling, segUpdateValidDuration,
                 seed=SEED, verbosity=verbosity,
                 checkSynapseConsistency=checkSynapseConsistency,
                 pamLength=pamLength,
                 maxSeqLength=0
                 )
  else:
    tm = TMClass(numberOfCols, cellsPerColumn,
                 initialPerm, connectedPerm,
                 minThreshold, newSynapseCount,
                 permanenceInc, permanenceDec, permanenceMax,
                 globalDecay, activationThreshold,
                 doPooling, segUpdateValidDuration,
                 seed=SEED, verbosity=verbosity,
                 checkSynapseConsistency=checkSynapseConsistency,
                 pamLength=pamLength
                 )
  # Optionally build a reference Python TM and diff it against the TM under
  # test at every checkpoint (used to validate the C++ implementation).
  if compareToPy:
    # override default maxSeqLEngth value for high-order sequences
    if highOrder:
      py_tm = BacktrackingTM(numberOfCols, cellsPerColumn,
                             initialPerm, connectedPerm,
                             minThreshold, newSynapseCount,
                             permanenceInc, permanenceDec, permanenceMax,
                             globalDecay, activationThreshold,
                             doPooling, segUpdateValidDuration,
                             seed=SEED, verbosity=verbosity,
                             pamLength=pamLength,
                             maxSeqLength=0
                             )
    else:
      py_tm = BacktrackingTM(numberOfCols, cellsPerColumn,
                             initialPerm, connectedPerm,
                             minThreshold, newSynapseCount,
                             permanenceInc, permanenceDec, permanenceMax,
                             globalDecay, activationThreshold,
                             doPooling, segUpdateValidDuration,
                             seed=SEED, verbosity=verbosity,
                             pamLength=pamLength,
                             )
  # Keep only the list of sequences from the (sequences, hubEnd) tuple.
  trainingSequences = trainingSequences[0]
  if testSequences == None: testSequences = trainingSequences
  inferAcceptablePatterns = acceptablePatterns == []
  #--------------------------------------------------------------------------------
  # Learn
  for r in xrange(nTrainingReps):
    if VERBOSITY > 1:
      print "============= Learning round",r,"================="
    for sequenceNum, trainingSequence in enumerate(trainingSequences):
      if VERBOSITY > 1:
        print "============= New sequence ================="
      if doResets:
        tm.reset()
        if compareToPy:
          py_tm.reset()
      for t,x in enumerate(trainingSequence):
        # Optionally corrupt the training input with binomial xor noise.
        if noiseModel is not None and \
          'xor' in noiseModel and 'binomial' in noiseModel \
          and 'training' in noiseModel:
          noise_vector = rgen.binomial(len(x), noiseLevel, (len(x)))
          x = logical_xor(x, noise_vector)
        if VERBOSITY > 2:
          print "Time step",t, "learning round",r, "sequence number", sequenceNum
          print "Input: ",tm.printInput(x)
          print "NNZ:", x.nonzero()
        x = numpy.array(x).astype('float32')
        y = tm.learn(x)
        if compareToPy:
          py_y = py_tm.learn(x)
          if t % 25 == 0: # To track bugs, do that every iteration, but very slow
            assert fdrutils.tmDiff(tm, py_tm, VERBOSITY) == True
        if VERBOSITY > 3:
          tm.printStates(printPrevious = (VERBOSITY > 4))
          print
      if VERBOSITY > 3:
        print "Sequence finished. Complete state after sequence"
        tm.printCells()
        print
  numPerfectAtHub = 0
  if compareToPy:
    print "End of training"
    assert fdrutils.tmDiff(tm, py_tm, VERBOSITY) == True
  #--------------------------------------------------------------------------------
  # Infer
  if VERBOSITY > 1: print "============= Inference ================="
  for s,testSequence in enumerate(testSequences):
    if VERBOSITY > 1: print "============= New sequence ================="
    if doResets:
      tm.reset()
      if compareToPy:
        py_tm.reset()
    slen = len(testSequence)
    for t,x in enumerate(testSequence):
      # Generate noise (optional)
      if noiseModel is not None and \
        'xor' in noiseModel and 'binomial' in noiseModel \
        and 'inference' in noiseModel:
        noise_vector = rgen.binomial(len(x), noiseLevel, (len(x)))
        x = logical_xor(x, noise_vector)
      if VERBOSITY > 2: print "Time step",t, '\nInput:', tm.printInput(x)
      x = numpy.array(x).astype('float32')
      y = tm.infer(x)
      if compareToPy:
        py_y = py_tm.infer(x)
        assert fdrutils.tmDiff(tm, py_tm, VERBOSITY) == True
      # if t == predJustAfterHubOnly:
      #   z = sum(y, axis = 1)
      #   print '\t\t',
      #   print ''.join('.' if z[i] == 0 else '1' for i in xrange(len(z)))
      if VERBOSITY > 3: tm.printStates(printPrevious = (VERBOSITY > 4),
                                       printLearnState = False); print
      # Score every requested multi-step prediction against the actual
      # future inputs of the test sequence.
      if nMultiStepPrediction > 0:
        y_ms = tm.predict(nSteps=nMultiStepPrediction)
        if VERBOSITY > 3:
          print "Multi step prediction at Time step", t
          for i in range(nMultiStepPrediction):
            print "Prediction at t+", i+1
            tm.printColConfidence(y_ms[i])
        # Error Checking
        for i in range(nMultiStepPrediction):
          predictedTimeStep = t+i+1
          if predictedTimeStep < slen:
            input = testSequence[predictedTimeStep].nonzero()[0]
            prediction = y_ms[i].nonzero()[0]
            foundInInput, totalActiveInInput, \
              missingFromInput, totalActiveInPrediction = \
                              fdrutils.checkMatch(input, prediction, sparse=True)
            falseNegatives = totalActiveInInput - foundInInput
            falsePositives = missingFromInput
            if VERBOSITY > 2:
              print "Predition from %d to %d" % (t, t+i+1)
              print "\t\tFalse Negatives:", falseNegatives
              print "\t\tFalse Positivies:", falsePositives
            if falseNegatives > 0 or falsePositives > 0:
              numStrictErrors += 1
              if falseNegatives > 0 and VERBOSITY > 1:
                print "Multi step prediction from t=", t, "to t=", t+i+1,\
                      "false negative with error=",falseNegatives,
                print "out of", totalActiveInInput,"ones"
              if falsePositives > 0 and VERBOSITY > 1:
                print "Multi step prediction from t=", t, "to t=", t+i+1,\
                      "false positive with error=",falsePositives,
                print "out of",totalActiveInInput,"ones"
              if falsePositives > 3 or falseNegatives > 3:
                numFailures += 1
                # Analyze the failure if we care about it
                if VERBOSITY > 1 and not shouldFail:
                  print 'Input at t=', t
                  print '\t\t',; printOneTrainingVector(testSequence[t])
                  print 'Prediction for t=', t+i+1
                  print '\t\t',; printOneTrainingVector(y_ms[i])
                  print 'Actual input at t=', t+i+1
                  print '\t\t',; printOneTrainingVector(testSequence[t+i+1])
      # Score the one-step prediction against the set of acceptable patterns.
      if t < slen-1:
        # If no acceptable patterns were passed to us, we need to infer them
        # for the current sequence and time step by looking at the testSequences.
        # nAcceptable is used to reduce the number of automatically determined
        # acceptable patterns.
        if inferAcceptablePatterns:
          acceptablePatterns = findAcceptablePatterns(tm, t, s, testSequences,
                                                      nAcceptable)
        scores = tm._checkPrediction([pattern.nonzero()[0] \
                                     for pattern in acceptablePatterns])
        falsePositives, falseNegatives = scores[0], scores[1]
        # We report an error if FN or FP is > 0.
        # We report a failure if number of FN or number of FP is > 2 for any
        # pattern. We also count the number of perfect predictions.
        if falseNegatives > 0 or falsePositives > 0:
          numStrictErrors += 1
          if falseNegatives > 0 and VERBOSITY > 1:
            print "Pattern",s,"time",t,\
                  "prediction false negative with error=",falseNegatives,
            print "out of",int(testSequence[t+1].sum()),"ones"
          if falsePositives > 0 and VERBOSITY > 1:
            print "Pattern",s,"time",t,\
                  "prediction false positive with error=",falsePositives,
            print "out of",int(testSequence[t+1].sum()),"ones"
          if falseNegatives > 3 or falsePositives > 3:
            numFailures += 1
            # Analyze the failure if we care about it
            if VERBOSITY > 1 and not shouldFail:
              print 'Test sequences'
              if len(testSequences) > 1:
                printAllTrainingSequences(testSequences, t+1)
              else:
                print '\t\t',; printOneTrainingVector(testSequence[t])
                print '\t\t',; printOneTrainingVector(testSequence[t+1])
              print 'Acceptable'
              for p in acceptablePatterns:
                print '\t\t',; printOneTrainingVector(p)
              print 'Output'
              diagnostic = ''
              output = sum(tm.currentOutput,axis=1)
              print '\t\t',; printOneTrainingVector(output)
        else:
          numPerfect += 1
          if predJustAfterHubOnly is not None and predJustAfterHubOnly == t:
            numPerfectAtHub += 1
  if predJustAfterHubOnly is None:
    return numFailures, numStrictErrors, numPerfect, tm
  else:
    return numFailures, numStrictErrors, numPerfect, numPerfectAtHub, tm
def TestB1(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B1"):
  """Test B1: learn one sequence of unique patterns with a single training
  repetition and verify perfect one-step prediction. Returns the number of
  failed test runs (0 on success)."""
  numCols = 100
  sequenceLength = numUniquePatterns
  nFailed = 0
  for numSequences in [1]:
    print "Test "+name+" (sequence memory - 1 repetition - 1 sequence)"
    for k in range(nTests): # Test that configuration several times
      trainingSet = buildTrainingSet(numSequences =numSequences,
                                     sequenceLength = sequenceLength,
                                     pctShared = 0.0,
                                     subsequenceStartPos = 0,
                                     numCols = numCols,
                                     minOnes = 15, maxOnes = 20)
      numFailures, numStrictErrors, numPerfect, tm = \
          testSequence(trainingSet,
                       nTrainingReps = 1,
                       numberOfCols = numCols,
                       cellsPerColumn = cellsPerColumn,
                       initialPerm = .8,
                       connectedPerm = .7,
                       minThreshold = 8,
                       newSynapseCount = 11,
                       permanenceInc = .4,
                       permanenceDec = 0,
                       permanenceMax = 1,
                       globalDecay = .0,
                       activationThreshold = 8,
                       doPooling = False)
      if numFailures == 0:
        print "Test "+name+" ok"
      else:
        print "Test "+name+" failed"
        nFailed = nFailed + 1
        print "numFailures=", numFailures
        print "numStrictErrors=", numStrictErrors
        print "numPerfect=", numPerfect
  return nFailed
def TestB7(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B7"):
  """Test B7: same as B1 but with slow learning — low initial permanence and
  small increments, so 4 training repetitions are needed before prediction
  becomes perfect. Returns the number of failed test runs."""
  numCols = 100
  sequenceLength = numUniquePatterns
  nFailed = 0
  for numSequences in [1]:
    print "Test "+name+" (sequence memory - 4 repetition - 1 sequence - slow learning)"
    for _ in range(nTests): # Test that configuration several times
      trainingSet = buildTrainingSet(numSequences =numSequences,
                                     sequenceLength = sequenceLength,
                                     pctShared = 0.0,
                                     subsequenceStartPos = 0,
                                     numCols = numCols,
                                     minOnes = 15, maxOnes = 20)
      numFailures, numStrictErrors, numPerfect, tm = \
          testSequence(trainingSet,
                       nTrainingReps = 4,
                       numberOfCols = numCols,
                       cellsPerColumn = cellsPerColumn,
                       minThreshold = 11,
                       newSynapseCount = 11,
                       activationThreshold = 11,
                       initialPerm = .2,
                       connectedPerm = .6,
                       permanenceInc = .2,
                       permanenceDec = 0,
                       permanenceMax = 1,
                       globalDecay = .0,
                       doPooling = False)
      if numFailures == 0:
        print "Test "+name+" ok"
      else:
        print "Test "+name+" failed"
        nFailed = nFailed + 1
        print "numFailures=", numFailures,
        print "numStrictErrors=", numStrictErrors,
        print "numPerfect=", numPerfect
  return nFailed
def TestB2(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B2"):
  """Test B2: training on the same sequence twice must not create any more
  segments or synapses than training once. Returns the number of failed
  test runs."""
  numCols = 100
  sequenceLength = numUniquePatterns
  nFailed = 0
  for numSequences in [1]: # TestC has multiple sequences
    print "Test",name,"(sequence memory - second repetition of the same sequence" +\
          " should not add synapses)"
    print "Num patterns in sequence =", numUniquePatterns,
    print "cellsPerColumn=",cellsPerColumn
    for _ in range(nTests): # Test that configuration several times
      trainingSet = buildTrainingSet(numSequences =numSequences,
                                     sequenceLength = sequenceLength,
                                     pctShared = 0.0,
                                     subsequenceStartPos = 0,
                                     numCols = numCols,
                                     minOnes = 15, maxOnes = 20)
      # Do one pass through the training set
      numFailures1, numStrictErrors1, numPerfect1, tm1 = \
          testSequence(trainingSet,
                       nTrainingReps = 1,
                       numberOfCols = numCols,
                       cellsPerColumn = cellsPerColumn,
                       initialPerm = .8,
                       connectedPerm = .7,
                       minThreshold = 8,
                       newSynapseCount = 11,
                       permanenceInc = .4,
                       permanenceDec = 0,
                       permanenceMax = 1,
                       globalDecay = .0,
                       activationThreshold = 8)
      # Do two passes through the training set
      numFailures, numStrictErrors, numPerfect, tm2 = \
          testSequence(trainingSet,
                       nTrainingReps = 2,
                       numberOfCols = numCols,
                       cellsPerColumn = cellsPerColumn,
                       initialPerm = .8,
                       connectedPerm = .7,
                       minThreshold = 8,
                       newSynapseCount = 11,
                       permanenceInc = .4,
                       permanenceDec = 0,
                       permanenceMax = 1,
                       globalDecay = .0,
                       activationThreshold = 8)
      # Check that training with a second pass did not result in more synapses
      segmentInfo1 = tm1.getSegmentInfo()
      segmentInfo2 = tm2.getSegmentInfo()
      if (segmentInfo1[0] != segmentInfo2[0]) or \
         (segmentInfo1[1] != segmentInfo2[1]) :
        print "Training twice incorrectly resulted in more segments or synapses"
        print "Number of segments: ", segmentInfo1[0], segmentInfo2[0]
        numFailures += 1
      if numFailures == 0:
        print "Test",name,"ok"
      else:
        print "Test",name,"failed"
        nFailed = nFailed + 1
        print "numFailures=", numFailures
        print "numStrictErrors=", numStrictErrors
        print "numPerfect=", numPerfect
  return nFailed
def TestB3(numUniquePatterns, nTests):
  """Test B3: learn several disjoint sequences (2 and 5) with 2 training
  repetitions and 4 cells per column; verify prediction. Returns the number
  of failed test runs."""
  numCols = 100
  sequenceLength = numUniquePatterns
  nFailed = 0
  for numSequences in [2,5]:
    print "Test B3 (sequence memory - 2 repetitions -", numSequences, "sequences)"
    for _ in range(nTests): # Test that configuration several times
      trainingSet = buildTrainingSet(numSequences =numSequences,
                                     sequenceLength = sequenceLength,
                                     pctShared = 0.0,
                                     subsequenceStartPos = 0,
                                     numCols = numCols,
                                     minOnes = 15, maxOnes = 20)
      numFailures, numStrictErrors, numPerfect, tm = \
          testSequence(trainingSet,
                       nTrainingReps = 2,
                       numberOfCols = numCols,
                       cellsPerColumn = 4,
                       initialPerm = .8,
                       connectedPerm = .7,
                       minThreshold = 11,
                       permanenceInc = .4,
                       permanenceDec = 0,
                       permanenceMax = 1,
                       globalDecay = .0,
                       newSynapseCount = 11,
                       activationThreshold = 8,
                       doPooling = False)
      if numFailures == 0:
        print "Test B3 ok"
      else:
        print "Test B3 failed"
        nFailed = nFailed + 1
        print "numFailures=", numFailures
        print "numStrictErrors=", numStrictErrors
        print "numPerfect=", numPerfect
  return nFailed
def TestH0(numOnes = 5,nMultiStepPrediction=0):
  """Higher order test 0: two simple overlapping sequences from
  buildSimpleTrainingSet, trained for 20 reps. Returns 0 on PASS, 1 on
  FAILED."""
  cellsPerColumn = 4
  print "Higher order test 0 with cellsPerColumn=",cellsPerColumn
  trainingSet = buildSimpleTrainingSet(numOnes)
  numFailures, numStrictErrors, numPerfect, tm = \
      testSequence(trainingSet,
                   nTrainingReps = 20,
                   numberOfCols = trainingSet[0][0][0].size,
                   cellsPerColumn = cellsPerColumn,
                   initialPerm = .8,
                   connectedPerm = .7,
                   minThreshold = 6,
                   permanenceInc = .4,
                   permanenceDec = .2,
                   permanenceMax = 1,
                   globalDecay = .0,
                   newSynapseCount = 5,
                   activationThreshold = 4,
                   doPooling = False,
                   nMultiStepPrediction=nMultiStepPrediction)
  # Expect perfect prediction for every step except the first of each sequence.
  if numFailures == 0 and \
     numStrictErrors == 0 and \
     numPerfect == len(trainingSet[0])*(len(trainingSet[0][0]) - 1):
    print "Test PASS"
    return 0
  else:
    print "Test FAILED"
    print "numFailures=", numFailures
    print "numStrictErrors=", numStrictErrors
    print "numPerfect=", numPerfect
    return 1
def TestH(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2],
          pctShared = 0.1, seqGenMode = 'shared sequence', nTrainingReps = 2,
          shouldFail = False, compareToPy = False, highOrder = False):
  """Generic higher-order test: sequences with a shared subsequence
  (controlled by pctShared/seqGenMode) must still be predicted correctly.
  shouldFail inverts the pass criterion for configurations expected to
  fail. Returns the number of failed test runs."""
  nFailed = 0
  subsequenceStartPos = 10
  assert subsequenceStartPos < sequenceLength
  for numSequences in nSequences:
    print "Higher order test with sequenceLength=",sequenceLength,
    print "cellsPerColumn=",cellsPerColumn,"nTests=",nTests,
    print "numSequences=",numSequences, "pctShared=", pctShared
    for _ in range(nTests): # Test that configuration several times
      trainingSet = buildTrainingSet(numSequences = numSequences,
                                     sequenceLength = sequenceLength,
                                     pctShared = pctShared, seqGenMode = seqGenMode,
                                     subsequenceStartPos = subsequenceStartPos,
                                     numCols = numCols,
                                     minOnes = 21, maxOnes = 25)
      numFailures, numStrictErrors, numPerfect, tm = \
          testSequence(trainingSet,
                       nTrainingReps = nTrainingReps,
                       numberOfCols = numCols,
                       cellsPerColumn = cellsPerColumn,
                       initialPerm = .8,
                       connectedPerm = .7,
                       minThreshold = 12,
                       permanenceInc = .4,
                       permanenceDec = .1,
                       permanenceMax = 1,
                       globalDecay = .0,
                       newSynapseCount = 11,
                       activationThreshold = 8,
                       doPooling = False,
                       shouldFail = shouldFail,
                       compareToPy = compareToPy,
                       highOrder = highOrder)
      # PASS when there are no failures, unless this configuration is
      # expected to fail, in which case failures are required.
      if numFailures == 0 and not shouldFail \
         or numFailures > 0 and shouldFail:
        print "Test PASS",
        if shouldFail:
          print '(should fail, and failed)'
        else:
          print
      else:
        print "Test FAILED"
        nFailed = nFailed + 1
        print "numFailures=", numFailures
        print "numStrictErrors=", numStrictErrors
        print "numPerfect=", numPerfect
  return nFailed
def TestH11(numOnes = 3):
cellsPerColumn = 4
print "Higher order test 11 with cellsPerColumn=",cellsPerColumn
trainingSet = buildAlternatingTrainingSet(numOnes= 3)
numFailures, numStrictErrors, numPerfect, tm = \
testSequence(trainingSet,
nTrainingReps = 1,
numberOfCols = trainingSet[0][0][0].size,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 6,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = 1,
activationThreshold = 1,
doPooling = False)
if numFailures == 0 and \
numStrictErrors == 0 and \
numPerfect == len(trainingSet[0])*(len(trainingSet[0][0]) - 1):
print "Test PASS"
return 0
else:
print "Test FAILED"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return 1
def TestH2a(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2],
           pctShared = 0.02, seqGenMode = 'shared sequence',
           shouldFail = False):
  """
  Test H2a: repeated training on overlapping sequences should not keep
  adding segments/synapses. Trains the same set for 10, 2 and 1 reps and
  compares segment/synapse counts between the 1-rep and 2-rep runs.
  Returns the number of failed test runs.

  Still need to test:
  Two overlapping sequences. OK to get new segments but check that we can
  get correct high order prediction after multiple reps.
  """
  print "Test H2a - second repetition of the same sequence should not add synapses"
  nFailed = 0
  subsequenceStartPos = 10
  assert subsequenceStartPos < sequenceLength
  for numSequences in nSequences:
    print "Higher order test with sequenceLength=",sequenceLength,
    print "cellsPerColumn=",cellsPerColumn,"nTests=",nTests,"numCols=", numCols
    print "numSequences=",numSequences, "pctShared=", pctShared,
    print "sharing mode=", seqGenMode
    for _ in range(nTests): # Test that configuration several times
      trainingSet = buildTrainingSet(numSequences = numSequences,
                                     sequenceLength = sequenceLength,
                                     pctShared = pctShared, seqGenMode = seqGenMode,
                                     subsequenceStartPos = subsequenceStartPos,
                                     numCols = numCols,
                                     minOnes = 21, maxOnes = 25)
      # 10 training repetitions (slow learning: low initialPerm, with decay)
      print "============== 10 ======================"
      numFailures3, numStrictErrors3, numPerfect3, tm3 = \
          testSequence(trainingSet,
                       nTrainingReps = 10,
                       numberOfCols = numCols,
                       cellsPerColumn = cellsPerColumn,
                       initialPerm = .4,
                       connectedPerm = .7,
                       minThreshold = 12,
                       permanenceInc = .1,
                       permanenceDec = 0.1,
                       permanenceMax = 1,
                       globalDecay = .0,
                       newSynapseCount = 15,
                       activationThreshold = 12,
                       doPooling = False,
                       shouldFail = shouldFail)
      # 2 training repetitions
      print "============== 2 ======================"
      numFailures, numStrictErrors, numPerfect, tm2 = \
          testSequence(trainingSet,
                       nTrainingReps = 2,
                       numberOfCols = numCols,
                       cellsPerColumn = cellsPerColumn,
                       initialPerm = .8,
                       connectedPerm = .7,
                       minThreshold = 12,
                       permanenceInc = .1,
                       permanenceDec = 0,
                       permanenceMax = 1,
                       globalDecay = .0,
                       newSynapseCount = 15,
                       activationThreshold = 12,
                       doPooling = False,
                       shouldFail = shouldFail)
      # 1 training repetition
      print "============== 1 ======================"
      numFailures1, numStrictErrors1, numPerfect1, tm1 = \
          testSequence(trainingSet,
                       nTrainingReps = 1,
                       numberOfCols = numCols,
                       cellsPerColumn = cellsPerColumn,
                       initialPerm = .8,
                       connectedPerm = .7,
                       minThreshold = 12,
                       permanenceInc = .1,
                       permanenceDec = 0,
                       permanenceMax = 1,
                       globalDecay = .0,
                       newSynapseCount = 15,
                       activationThreshold = 12,
                       doPooling = False,
                       shouldFail = shouldFail)
      # Check that training with a second pass did not result in more synapses
      # (small tolerance: up to 3 extra segments / 3*15 extra synapses).
      segmentInfo1 = tm1.getSegmentInfo()
      segmentInfo2 = tm2.getSegmentInfo()
      if (abs(segmentInfo1[0] - segmentInfo2[0]) > 3) or \
         (abs(segmentInfo1[1] - segmentInfo2[1]) > 3*15) :
        print "Training twice incorrectly resulted in too many segments or synapses"
        print segmentInfo1
        print segmentInfo2
        print tm3.getSegmentInfo()
        tm3.trimSegments()
        print tm3.getSegmentInfo()
        print "Failures for 1, 2, and N reps"
        print numFailures1, numStrictErrors1, numPerfect1
        print numFailures, numStrictErrors, numPerfect
        print numFailures3, numStrictErrors3, numPerfect3
        numFailures += 1
      if numFailures == 0 and not shouldFail \
         or numFailures > 0 and shouldFail:
        print "Test PASS",
        if shouldFail:
          print '(should fail, and failed)'
        else:
          print
      else:
        print "Test FAILED"
        nFailed = nFailed + 1
        print "numFailures=", numFailures
        print "numStrictErrors=", numStrictErrors
        print "numPerfect=", numPerfect
  return nFailed
def TestP(sequenceLength, nTests, cellsPerColumn, numCols =300, nSequences =[2],
pctShared = 0.1, seqGenMode = 'shared subsequence', nTrainingReps = 2):
nFailed = 0
newSynapseCount = 7
activationThreshold = newSynapseCount - 2
minOnes = 1.5 * newSynapseCount
maxOnes = .3 * numCols / nTrainingReps
for numSequences in nSequences:
print "Pooling test with sequenceLength=",sequenceLength,
print 'numCols=', numCols,
print "cellsPerColumn=",cellsPerColumn,"nTests=",nTests,
print "numSequences=",numSequences, "pctShared=", pctShared,
print "nTrainingReps=", nTrainingReps, "minOnes=", minOnes,
print "maxOnes=", maxOnes
for _ in range(nTests): # Test that configuration several times
minOnes = 1.5 * newSynapseCount
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = pctShared, seqGenMode = seqGenMode,
subsequenceStartPos = 10,
numCols = numCols,
minOnes = minOnes, maxOnes = maxOnes)
numFailures, numStrictErrors, numPerfect, tm = \
testSequence(trainingSet,
nTrainingReps = nTrainingReps,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 11,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = newSynapseCount,
activationThreshold = activationThreshold,
doPooling = True)
if numFailures == 0 and \
numStrictErrors == 0 and \
numPerfect == numSequences*(sequenceLength - 1):
print "Test PASS"
else:
print "Test FAILED"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
nFailed = nFailed + 1
return nFailed
def TestHL0a(numOnes = 5):
  """HiLo test 0a: train once on the HL0a training set and verify the trained
  TM does not grow an excessive number of segments/synapses after trimming.

  Returns 0 on success, 1 on failure.
  NOTE(review): the numOnes parameter is never used in this body.
  """
  cellsPerColumn = 4
  newSynapseCount = 5
  # Segments must gather full support (threshold == newSynapseCount) to fire.
  activationThreshold = newSynapseCount

  print "HiLo test 0a with cellsPerColumn=",cellsPerColumn

  trainingSet, testSet = buildHL0aTrainingSet()
  # Pattern width determines the column count.
  numCols = trainingSet[0][0].size

  # Single training pass; testSequence also evaluates testSet and reports
  # failure/error/perfect counts (see its definition elsewhere in this file).
  numFailures, numStrictErrors, numPerfect, tm = \
      testSequence([trainingSet],
                   nTrainingReps = 1,
                   numberOfCols = numCols,
                   cellsPerColumn = cellsPerColumn,
                   initialPerm = .2,
                   connectedPerm = .7,
                   permanenceInc = .2,
                   permanenceDec = 0.05,
                   permanenceMax = 1,
                   globalDecay = .0,
                   minThreshold = activationThreshold,
                   newSynapseCount = newSynapseCount,
                   activationThreshold = activationThreshold,
                   pamLength = 2,
                   doPooling = False,
                   testSequences = testSet)

  # Drop under-supported segments, then check resource usage stays bounded.
  tm.trimSegments()
  retAfter = tm.getSegmentInfo()
  print retAfter[0], retAfter[1]
  if retAfter[0] > 20:  # segment count bound (empirical)
    print "Too many segments"
    numFailures += 1
  if retAfter[1] > 100:  # synapse count bound (empirical)
    print "Too many synapses"
    numFailures += 1

  if numFailures == 0:
    print "Test HL0a ok"
    return 0
  else:
    print "Test HL0a failed"
    print "numFailures=", numFailures
    print "numStrictErrors=", numStrictErrors
    print "numPerfect=", numPerfect
    return 1
def TestHL0b(numOnes = 5):
  """HiLo test 0b: train once on the HL0b training set, trim segments, and
  print the cell state for manual inspection.

  Returns 0 on success, 1 on failure.
  NOTE(review): numOnes is never used, and retAfter is computed but unused.
  """
  cellsPerColumn = 4
  newSynapseCount = 5
  activationThreshold = newSynapseCount

  print "HiLo test 0b with cellsPerColumn=",cellsPerColumn

  trainingSet, testSet = buildHL0bTrainingSet()
  numCols = trainingSet[0][0].size
  print "numCols=", numCols

  numFailures, numStrictErrors, numPerfect, tm = \
      testSequence([trainingSet],
                   nTrainingReps = 1,
                   numberOfCols = numCols,
                   cellsPerColumn = cellsPerColumn,
                   initialPerm = .2,
                   connectedPerm = .7,
                   permanenceInc = .2,
                   permanenceDec = 0.05,
                   permanenceMax = 1,
                   globalDecay = .0,
                   minThreshold = activationThreshold,
                   newSynapseCount = newSynapseCount,
                   activationThreshold = activationThreshold,
                   doPooling = False,
                   testSequences = testSet)

  tm.trimSegments()
  retAfter = tm.getSegmentInfo()  # NOTE(review): unused — diagnostic leftover?
  tm.printCells()

  if numFailures == 0:
    print "Test HL0 ok"
    return 0
  else:
    print "Test HL0 failed"
    print "numFailures=", numFailures
    print "numStrictErrors=", numStrictErrors
    print "numPerfect=", numPerfect
    return 1
def TestHL(sequenceLength, nTests, cellsPerColumn, numCols =200, nSequences =[2],
           pctShared = 0.1, seqGenMode = 'shared subsequence', nTrainingReps = 3,
           noiseModel = 'xor binomial in learning only', noiseLevel = 0.1,
           hiloOn = True):
  """HiLo regression test: train on (optionally noisy) generated sequences and
  require perfect prediction of every transition.

  Returns the number of failed configurations (0 means all passed).
  NOTE(review): nSequences=[2] is a mutable default argument (never mutated
  here, so harmless in practice, but a tuple would be safer).
  """
  nFailed = 0

  newSynapseCount = 8
  activationThreshold = newSynapseCount
  minOnes = 1.5 * newSynapseCount
  maxOnes = 0.3 * numCols / nTrainingReps

  if hiloOn == False:
    # NOTE(review): this local is never read afterwards — testSequence below
    # receives minThreshold = activationThreshold regardless. Looks like dead
    # code / an unfinished knob; confirm intent before removing.
    minThreshold = 0.9

  for numSequences in nSequences:
    print "Hilo test with sequenceLength=", sequenceLength,
    print "cellsPerColumn=", cellsPerColumn, "nTests=", nTests,
    print "numSequences=", numSequences, "pctShared=", pctShared,
    print "nTrainingReps=", nTrainingReps, "minOnes=", minOnes,
    print "maxOnes=", maxOnes,
    print 'noiseModel=', noiseModel, 'noiseLevel=', noiseLevel

    for _ in range(nTests): # Test that configuration several times

      minOnes = 1.5 * newSynapseCount

      trainingSet = buildTrainingSet(numSequences =numSequences,
                                     sequenceLength = sequenceLength,
                                     pctShared = pctShared, seqGenMode = seqGenMode,
                                     subsequenceStartPos = 10,
                                     numCols = numCols,
                                     minOnes = minOnes, maxOnes = maxOnes)

      numFailures, numStrictErrors, numPerfect, tm = \
          testSequence(trainingSet,
                       nTrainingReps = nTrainingReps,
                       numberOfCols = numCols,
                       cellsPerColumn = cellsPerColumn,
                       initialPerm = .2,
                       connectedPerm = .7,
                       minThreshold = activationThreshold,
                       newSynapseCount = newSynapseCount,
                       activationThreshold = activationThreshold,
                       permanenceInc = .2,
                       permanenceDec = 0.05,
                       permanenceMax = 1,
                       globalDecay = .0,
                       doPooling = False,
                       noiseModel = noiseModel,
                       noiseLevel = noiseLevel)

      # Pass requires zero failures/strict errors and a perfect prediction
      # for each of the numSequences*(sequenceLength-1) transitions.
      if numFailures == 0 and \
         numStrictErrors == 0 and \
         numPerfect == numSequences*(sequenceLength - 1):
        print "Test PASS"
      else:
        print "Test FAILED"
        print "numFailures=", numFailures
        print "numStrictErrors=", numStrictErrors
        print "numPerfect=", numPerfect
        nFailed = nFailed + 1

  return nFailed
def worker(x):
  """Worker function to use in parallel hub capacity test below.

  x is a 2-tuple (cellsPerColumn, numSequences) supplied by Pool.map via
  itertools.product in hubCapacity().

  Runs testSequence twice on freshly generated training sets:
    1. sequences sharing a single hub pattern (predJustAfterHubOnly=5, which
       makes testSequence return an extra 'atHub' value — note the 5-tuple
       unpack below vs. the 4-tuple for the second run);
    2. sequences with no shared subsequence (baseline).

  Returns a 9-tuple:
    (cellsPerColumn, numSequences,
     numFailures1, numStrictErrors1, numPerfect1, atHub,
     numFailures2, numStrictErrors2, numPerfect2)
  """
  cellsPerColumn, numSequences = x[0], x[1]
  nTrainingReps = 1
  sequenceLength = 10
  numCols = 200
  print 'Started', cellsPerColumn, numSequences

  # Run 1: all sequences share one hub pattern starting at position 5.
  seqGenMode = 'shared subsequence, one pattern'
  subsequenceStartPos = 5

  trainingSet = buildTrainingSet(numSequences = numSequences,
                                 sequenceLength = sequenceLength,
                                 pctShared = .1, seqGenMode = seqGenMode,
                                 subsequenceStartPos = subsequenceStartPos,
                                 numCols = numCols,
                                 minOnes = 21, maxOnes = 25)

  numFailures1, numStrictErrors1, numPerfect1, atHub, tm = \
      testSequence(trainingSet,
                   nTrainingReps = nTrainingReps,
                   numberOfCols = numCols,
                   cellsPerColumn = cellsPerColumn,
                   initialPerm = .8,
                   connectedPerm = .7,
                   minThreshold = 11,
                   permanenceInc = .4,
                   permanenceDec = 0,
                   permanenceMax = 1,
                   globalDecay = .0,
                   newSynapseCount = 8,
                   activationThreshold = 8,
                   doPooling = False,
                   shouldFail = False,
                   predJustAfterHubOnly = 5)

  # Run 2: control — independent sequences, nothing shared.
  seqGenMode = 'no shared subsequence'

  trainingSet = buildTrainingSet(numSequences = numSequences,
                                 sequenceLength = sequenceLength,
                                 pctShared = 0, seqGenMode = seqGenMode,
                                 subsequenceStartPos = 0,
                                 numCols = numCols,
                                 minOnes = 21, maxOnes = 25)

  numFailures2, numStrictErrors2, numPerfect2, tm = \
      testSequence(trainingSet,
                   nTrainingReps = nTrainingReps,
                   numberOfCols = numCols,
                   cellsPerColumn = cellsPerColumn,
                   initialPerm = .8,
                   connectedPerm = .7,
                   minThreshold = 11,
                   permanenceInc = .4,
                   permanenceDec = 0,
                   permanenceMax = 1,
                   globalDecay = .0,
                   newSynapseCount = 8,
                   activationThreshold = 8,
                   doPooling = False,
                   shouldFail = False)

  print 'Completed',
  print cellsPerColumn, numSequences, numFailures1, numStrictErrors1, numPerfect1, atHub, \
        numFailures2, numStrictErrors2, numPerfect2

  return cellsPerColumn, numSequences, numFailures1, numStrictErrors1, numPerfect1, atHub, \
         numFailures2, numStrictErrors2, numPerfect2
def hubCapacity():
  """
  Study hub capacity. Figure out how many sequences can share a pattern
  for a given number of cells per column till we the system fails.
  DON'T RUN IN BUILD SYSTEM!!! (takes too long)

  Fans worker() out over a 2-process pool across the cross product of
  cellsPerColumn in 1..8 and numSequences in 1,201,401,...,1801, then dumps
  each worker's 9-tuple result as a brace-delimited row to a results file.
  NOTE(review): Python 2 only — uses xrange and the `print >>f` redirection
  syntax.
  """
  from multiprocessing import Pool
  import itertools

  print "Hub capacity test"
  # scalar value on predictions by looking at max perm over column
  p = Pool(2)

  results = p.map(worker, itertools.product([1,2,3,4,5,6,7,8], xrange(1,2000,200)))

  f = open('results-numPerfect.11.22.10.txt', 'w')
  for i,r in enumerate(results):
    print >>f, '{%d,%d,%d,%d,%d,%d,%d,%d,%d},' % r
  f.close()
def runTests(testLength = "short"):
  """Run the TM regression suite.

  testLength is "short", "autobuild", or "long"; "long" enables extra B-series
  tests. Each TestXX call returns its number of failed configurations, which
  is accumulated in `tests`; the function asserts (assert 0) if any test
  reported failures. Relies on module globals numUniquePatterns and nTests set
  in the __main__ block.
  """
  # Data structure to collect results of tests
  # TODO: put numFailures, numStrictErrors and numPerfect in here for reporting
  tests = {}

  # always run this one: if that one fails, we can't do anything
  basicTest()
  print

  #---------------------------------------------------------------------------------
  # B-series: basic sequence learning. Extra variants only in --long runs.
  if testLength == "long":
    tests['B1'] = TestB1(numUniquePatterns, nTests)
    tests['B2'] = TestB2(numUniquePatterns, nTests)
    tests['B8'] = TestB7(4, nTests, cellsPerColumn = 4, name="B8")
    tests['B10'] = TestB2(numUniquePatterns, nTests, cellsPerColumn = 4,
                          name = "B10")

  # Run these always
  tests['B3'] = TestB3(numUniquePatterns, nTests)
  tests['B6'] = TestB1(numUniquePatterns, nTests,
                       cellsPerColumn = 4, name="B6")
  tests['B7'] = TestB7(numUniquePatterns, nTests)
  print

  #---------------------------------------------------------------------------------
  # H-series: high-order sequences. The if True/if False guards act as crude
  # per-group enable switches.
  #print "Test H11"
  #tests['H11'] = TestH11()
  if True:
    print "Test H0"
    tests['H0'] = TestH0(numOnes = 5)
    print "Test H2"
    #tests['H2'] = TestH(numUniquePatterns, nTests, cellsPerColumn = 4,
    #                    nTrainingReps = numUniquePatterns, compareToPy = False)
    print "Test H3"
    tests['H3'] = TestH(numUniquePatterns, nTests,
                        numCols = 200,
                        cellsPerColumn = 20,
                        pctShared = 0.3, nTrainingReps=numUniquePatterns,
                        compareToPy = False,
                        highOrder = True)
    print "Test H4" # Produces 3 false positives, but otherwise fine.
    # TODO: investigate initial false positives?
    tests['H4'] = TestH(numUniquePatterns, nTests,
                        cellsPerColumn = 20,
                        pctShared = 0.1,
                        seqGenMode='shared subsequence at beginning')
  if True:
    print "Test H0 with multistep prediction"
    tests['H0_MS'] = TestH0(numOnes = 5, nMultiStepPrediction=2)
  if True:
    print "Test H1" # - Should Fail
    tests['H1'] = TestH(numUniquePatterns, nTests,
                        cellsPerColumn = 1, nTrainingReps = 1,
                        shouldFail = True)
    # Also fails in --long mode. See H2 above
    #print "Test H2a"
    #tests['H2a'] = TestH2a(numUniquePatterns,
    #        nTests, pctShared = 0.02, numCols = 300, cellsPerColumn = 4)
  if False:  # disabled group: shuffled-sequence variants
    print "Test H5" # make sure seqs are good even with shuffling, fast learning
    tests['H5'] = TestH(numUniquePatterns, nTests,
                        cellsPerColumn = 10,
                        pctShared = 0.0,
                        seqGenMode='shuffle, no shared subsequence')
    print "Test H6" # should work
    tests['H6'] = TestH(numUniquePatterns, nTests,
                        cellsPerColumn = 10,
                        pctShared = 0.4,
                        seqGenMode='shuffle, shared subsequence')
    # Try with 2 sequences, then 3 sequences interleaved so that there is
    # always a shared pattern, but it belongs to 2 different sequences each
    # time!
    #print "Test H7"
    #tests['H7'] = TestH(numUniquePatterns, nTests,
    #                    cellsPerColumn = 10,
    #                    pctShared = 0.4,
    #                    seqGenMode='shuffle, shared subsequence')
    # tricky: if start predicting in middle of subsequence, several predictions
    # are possible
    #print "Test H8"
    #tests['H8'] = TestH(numUniquePatterns, nTests,
    #                    cellsPerColumn = 10,
    #                    pctShared = 0.4,
    #                    seqGenMode='shuffle, shared subsequence')
    print "Test H9" # plot hub capacity
    tests['H9'] = TestH(numUniquePatterns, nTests,
                        cellsPerColumn = 10,
                        pctShared = 0.4,
                        seqGenMode='shuffle, shared subsequence')
    #print "Test H10" # plot
    #tests['H10'] = TestH(numUniquePatterns, nTests,
    #                     cellsPerColumn = 10,
    #                     pctShared = 0.4,
    #                     seqGenMode='shuffle, shared subsequence')
  print

  #---------------------------------------------------------------------------------
  # P-series: pooling tests (all currently disabled).
  if False:
    print "Test P1"
    tests['P1'] = TestP(numUniquePatterns, nTests,
                        cellsPerColumn = 4,
                        pctShared = 0.0,
                        seqGenMode = 'no shared subsequence',
                        nTrainingReps = 3)
  if False:
    print "Test P2"
    tests['P2'] = TestP(numUniquePatterns, nTests,
                        cellsPerColumn = 4,
                        pctShared = 0.0,
                        seqGenMode = 'no shared subsequence',
                        nTrainingReps = 5)
    print "Test P3"
    tests['P3'] = TestP(numUniquePatterns, nTests,
                        cellsPerColumn = 4,
                        pctShared = 0.0,
                        seqGenMode = 'no shared subsequence',
                        nSequences = [2] if testLength == 'short' else [2,5],
                        nTrainingReps = 5)
    print "Test P4"
    tests['P4'] = TestP(numUniquePatterns, nTests,
                        cellsPerColumn = 4,
                        pctShared = 0.0,
                        seqGenMode = 'shared subsequence',
                        nSequences = [2] if testLength == 'short' else [2,5],
                        nTrainingReps = 5)
  print

  #---------------------------------------------------------------------------------
  # HL-series: HiLo tests. Only HL0a is currently enabled.
  if True:
    print "Test HL0a"
    tests['HL0a'] = TestHL0a(numOnes = 5)
  if False:
    # NOTE(review): this disabled group would raise TypeError if re-enabled:
    # TestHL (defined above) accepts neither a doResets nor a
    # numUniquePatterns keyword, yet every call below passes doResets and
    # HL2-HL6 pass numUniquePatterns. Fix the signatures before enabling.
    print "Test HL0b"
    tests['HL0b'] = TestHL0b(numOnes = 5)
    print "Test HL1"
    tests['HL1'] = TestHL(sequenceLength = 20,
                          nTests = nTests,
                          numCols = 100,
                          nSequences = [1],
                          nTrainingReps = 3,
                          cellsPerColumn = 1,
                          seqGenMode = 'no shared subsequence',
                          noiseModel = 'xor binomial in learning only',
                          noiseLevel = 0.1,
                          doResets = False)
    print "Test HL2"
    tests['HL2'] = TestHL(numUniquePatterns = 20,
                          nTests = nTests,
                          numCols = 200,
                          nSequences = [1],
                          nTrainingReps = 3,
                          cellsPerColumn = 1,
                          seqGenMode = 'no shared subsequence',
                          noiseModel = 'xor binomial in learning only',
                          noiseLevel = 0.1,
                          doResets = False)
    print "Test HL3"
    tests['HL3'] = TestHL(numUniquePatterns = 30,
                          nTests = nTests,
                          numCols = 200,
                          nSequences = [2],
                          pctShared = 0.66,
                          nTrainingReps = 3,
                          cellsPerColumn = 1,
                          seqGenMode = 'shared subsequence',
                          noiseModel = None,
                          noiseLevel = 0.0,
                          doResets = True)
    print "Test HL4"
    tests['HL4'] = TestHL(numUniquePatterns = 30,
                          nTests = nTests,
                          numCols = 200,
                          nSequences = [2],
                          pctShared = 0.66,
                          nTrainingReps = 3,
                          cellsPerColumn = 1,
                          seqGenMode = 'shared subsequence',
                          noiseModel = None,
                          noiseLevel = 0.0,
                          doResets = False)
    print "Test HL5"
    tests['HL5'] = TestHL(numUniquePatterns = 30,
                          nTests = nTests,
                          numCols = 200,
                          nSequences = [2],
                          pctShared = 0.66,
                          nTrainingReps = 3,
                          cellsPerColumn = 1,
                          seqGenMode = 'shared subsequence',
                          noiseModel = 'xor binomial in learning only',
                          noiseLevel = 0.1,
                          doResets = False)
    print "Test HL6"
    tests['HL6'] = nTests - TestHL(numUniquePatterns = 20,
                                   nTests = nTests,
                                   numCols = 200,
                                   nSequences = [1],
                                   nTrainingReps = 3,
                                   cellsPerColumn = 1,
                                   seqGenMode = 'no shared subsequence',
                                   noiseModel = 'xor binomial in learning only',
                                   noiseLevel = 0.1,
                                   doResets = True,
                                   hiloOn = False)
  print

  #---------------------------------------------------------------------------------
  # Tally failures across all enabled tests (Python 2 dict.iteritems).
  nFailures = 0
  for k,v in tests.iteritems():
    nFailures = nFailures + v
  if nFailures > 0: # 1 to account for H1
    print "There are failed tests"
    print "Test\tn failures"
    for k,v in tests.iteritems():
      print k, "\t", v
    assert 0
  else:
    print "All tests pass"
#---------------------------------------------------------------------------------
# Keep
# Disabled manual-profiling scaffold for TestB2. NOTE(review): hotshot is a
# Python 2-only profiler that was removed in Python 3; this block is dead code
# kept for reference.
if False:
  import hotshot
  import hotshot.stats
  prof = hotshot.Profile("profile.prof")
  prof.runcall(TestB2, numUniquePatterns=100, nTests=2)
  prof.close()
  stats = hotshot.stats.load("profile.prof")
  stats.strip_dirs()
  stats.sort_stats('time', 'calls')
  stats.print_stats(50)
if __name__=="__main__":
  # Script entry point: parse the ad-hoc command line, seed the RNG, size the
  # suite, and run it once for the Python TM and (if enabled) once for the
  # C++ TM. TEST_CPP_TM, SEED, VERBOSITY, BacktrackingTM and BacktrackingTMCPP
  # are module globals defined earlier in the file (not visible in this chunk).
  if not TEST_CPP_TM:
    print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    print "!!  WARNING: C++ TM testing is DISABLED until it can be updated."
    print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"

  # Three different test lengths are passed in through the command line.
  # Developer tests use --short. Autobuild does not pass in anything.
  # Acceptance tests pass in --long. testLength reflects these possibilities
  # as "autobuild", "short", and "long"
  testLength = "autobuild"

  # Scan command line arguments to see what to do for the seed
  # TODO: make default be a random seed, once we're sure it will pass reliably!
  # NOTE(review): substring matching on each argv entry; '--seed N' and
  # '--verbosity N' read the NEXT argv entry, so a trailing flag with no value
  # would raise IndexError on sys.argv[i+1].
  for i,arg in enumerate(sys.argv):
    if 'seed' in arg:
      try:
        # used specified seed
        SEED = int(sys.argv[i+1])
      except ValueError as e:
        # random seed
        SEED = numpy.random.randint(100)
    if 'verbosity' in arg:
      VERBOSITY = int(sys.argv[i+1])
    if 'help' in arg:
      print "TMTest.py --short|long --seed number|'rand' --verbosity number"
      sys.exit()
    if "short" in arg:
      testLength = "short"
    if "long" in arg:
      testLength = "long"

  rgen = numpy.random.RandomState(SEED) # always call this rgen, NOT random

  # Setup the severity and length of the tests
  if testLength == "short":
    numUniquePatterns = 50
    nTests = 1
  elif testLength == "autobuild":
    print "Running autobuild tests"
    numUniquePatterns = 50
    nTests = 1
  elif testLength == "long":
    numUniquePatterns = 100
    nTests = 3

  print "TM tests", testLength, "numUniquePatterns=", numUniquePatterns, "nTests=", nTests,
  print "seed=", SEED
  print

  # Long runs also exercise the pure-Python BacktrackingTM implementation.
  if testLength == "long":
    print 'Testing BacktrackingTM'
    TMClass = BacktrackingTM
    runTests(testLength)

  if testLength != 'long':
    checkSynapseConsistency = False
  else:
    # Setting this to True causes test to take way too long
    # Temporarily turned off so we can investigate
    checkSynapseConsistency = False

  if TEST_CPP_TM:
    print 'Testing C++ TM'
    TMClass = BacktrackingTMCPP
    runTests(testLength)
25,737 | hello_tm.py | numenta_nupic-legacy/examples/tm/hello_tm.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Introductory banner (Python 2 print statement): the triple-quoted string is
# printed verbatim when the script starts.
print """
This program shows how to access the Temporal Memory directly by demonstrating
how to create a TM instance, train it with vectors, get predictions, and
inspect the state.
The code here runs a very simple version of sequence learning, with one
cell per column. The TM is trained with the simple sequence A->B->C->D->E
HOMEWORK: once you have understood exactly what is going on here, try changing
cellsPerColumn to 4. What is the difference between once cell per column and 4
cells per column?
PLEASE READ THROUGH THE CODE COMMENTS - THEY EXPLAIN THE OUTPUT IN DETAIL
"""
# Can't live without numpy
import numpy
from itertools import izip as zip, count
from nupic.algorithms.temporal_memory import TemporalMemory as TM
# Utility routine for printing the input vector
def formatRow(x):
  """Render the sequence *x* as concatenated symbols with a gap every 10.

  Each element is str()-ified; an extra space is inserted before every 10th
  element (except the first), and a single trailing space is appended.
  """
  pieces = []
  for idx, sym in enumerate(x):
    if idx and idx % 10 == 0:
      pieces.append(' ')  # visual break between groups of ten symbols
    pieces.append(str(sym))
  pieces.append(' ')  # trailing space, matching the original output
  return ''.join(pieces)
# Step 1: create Temporal Memory instance with appropriate parameters
# (the comment originally said "Temporal Pooler"; the object built is a TM).
tm = TM(columnDimensions = (50,),
        cellsPerColumn=2,
        initialPermanence=0.5,
        connectedPermanence=0.5,
        minThreshold=8,
        maxNewSynapseCount=20,
        permanenceIncrement=0.1,
        permanenceDecrement=0.0,
        activationThreshold=8,
        )

# Step 2: create input vectors to feed to the temporal memory. Each input vector
# must be numberOfCols wide. Here we create a simple sequence of 5 vectors
# representing the sequence A -> B -> C -> D -> E
x = numpy.zeros((5, tm.numberOfColumns()), dtype="uint32")
x[0, 0:10] = 1    # Input SDR representing "A", corresponding to columns 0-9
x[1, 10:20] = 1   # Input SDR representing "B", corresponding to columns 10-19
x[2, 20:30] = 1   # Input SDR representing "C", corresponding to columns 20-29
x[3, 30:40] = 1   # Input SDR representing "D", corresponding to columns 30-39
x[4, 40:50] = 1   # Input SDR representing "E", corresponding to columns 40-49

# Step 3: send this simple sequence to the temporal memory for learning
# We repeat the sequence 10 times
for i in range(10):

  # Send each letter in the sequence in order
  for j in range(5):
    # Indices of the on-bits of x[j]. NOTE(review): `zip` is itertools.izip
    # (see imports) and the Python 2 list comprehension leaks its i/j loop
    # variables into this scope, shadowing the outer loop indices; harmless
    # here because both are re-assigned on the next iteration.
    activeColumns = set([i for i, j in zip(count(), x[j]) if j == 1])

    # The compute method performs one step of learning and/or inference. Note:
    # here we just perform learning but you can perform prediction/inference and
    # learning in the same step if you want (online learning).
    tm.compute(activeColumns, learn = True)

    # The following print statements can be ignored.
    # Useful for tracing internal states
    print("active cells " + str(tm.getActiveCells()))
    print("predictive cells " + str(tm.getPredictiveCells()))
    print("winner cells " + str(tm.getWinnerCells()))
    print("# of active segments " + str(tm.connections.numSegments()))

  # The reset command tells the TM that a sequence just ended and essentially
  # zeros out all the states. It is not strictly necessary but it's a bit
  # messier without resets, and the TM learns quicker with resets.
  tm.reset()


#######################################################################
#
# Step 4: send the same sequence of vectors and look at predictions made by
# temporal memory (renumbered; the original comment reused "Step 3")
for j in range(5):
  print "\n\n--------","ABCDE"[j],"-----------"
  print "Raw input vector : " + formatRow(x[j])
  activeColumns = set([i for i, j in zip(count(), x[j]) if j == 1])

  # Send each vector to the TM, with learning turned off
  tm.compute(activeColumns, learn = False)

  # The following print statements prints out the active cells, predictive
  # cells, active segments and winner cells.
  #
  # What you should notice is that the columns where active state is 1
  # represent the SDR for the current input pattern and the columns where
  # predicted state is 1 represent the SDR for the next expected pattern
  print "\nAll the active and predicted cells:"
  print("active cells " + str(tm.getActiveCells()))
  print("predictive cells " + str(tm.getPredictiveCells()))
  print("winner cells " + str(tm.getWinnerCells()))
  print("# of active segments " + str(tm.connections.numSegments()))

  activeColumnsIndeces = [tm.columnForCell(i) for i in tm.getActiveCells()]
  predictedColumnIndeces = [tm.columnForCell(i) for i in tm.getPredictiveCells()]

  # Reconstructing the active and inactive columns with 1 as active and 0 as
  # inactive representation.
  actColState = ['1' if i in activeColumnsIndeces else '0' for i in range(tm.numberOfColumns())]
  actColStr = ("".join(actColState))
  predColState = ['1' if i in predictedColumnIndeces else '0' for i in range(tm.numberOfColumns())]
  predColStr = ("".join(predColState))

  # For convenience the cells are grouped
  # 10 at a time. When there are multiple cells per column the printout
  # is arranged so the cells in a column are stacked together
  print "Active columns: " + formatRow(actColStr)
  print "Predicted columns: " + formatRow(predColStr)

  # predictedCells[c][i] represents the state of the i'th cell in the c'th
  # column. To see if a column is predicted, we can simply take the OR
  # across all the cells in that column. In numpy we can do this by taking
  # the max along axis 1.
25,738 | svm_how_to.py | numenta_nupic-legacy/examples/bindings/svm_how_to.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from random import *
import numpy
import pylab
import nupic.bindings.algorithms as algo
from nupic.bindings.math import GetNumpyDataType
# Element dtype used for every sample array in this demo.
type = GetNumpyDataType('NTA_Real')  # NOTE(review): immediately overwritten below
type = 'float32'  # hard-coded override; also shadows the `type` builtin module-wide
def simple():
  """Walk through the dense SVM API: build a 4-class 2-D toy problem, train,
  inspect problem/model internals, cross-validate, and predict.

  Uses the module-level `type` dtype string ('float32', shadowing the
  builtin). Plotting branches are compiled but skipped (do_plot = False).
  """
  print "Simple"
  numpy.random.seed(42)

  # Toy problem: 200 2-D points in 4 classes.
  n_dims = 2
  n_class = 4
  size = 200
  labels = numpy.random.random_integers(0, n_class-1, size)
  samples = numpy.zeros((size, n_dims), dtype=type)

  do_plot = False

  print "Generating data"
  # Each class is a noisy ring around one corner of the unit square (scaled x2).
  centers = numpy.array([[0,0],[0,1],[1,0],[1,1]])

  for i in range(0, size):
    t = 6.28 * numpy.random.random_sample()
    samples[i][0] = 2 * centers[labels[i]][0] + .5*numpy.random.random() * numpy.cos(t)
    samples[i][1] = 2 * centers[labels[i]][1] + .5*numpy.random.random() * numpy.sin(t)

  classifier = algo.svm_dense(0, n_dims, probability=True, seed=42)

  print "Adding sample vectors"
  for y, x_list in zip(labels, samples):
    x = numpy.array(x_list, dtype=type)
    classifier.add_sample(float(y), x)

  print "Displaying problem"
  problem = classifier.get_problem()
  print "Problem size:", problem.size()
  print "Problem dimensionality:", problem.n_dims()
  print "Problem samples:"
  # One row per sample: label in column 0, features after (hence n_dims()+1).
  s = numpy.zeros((problem.size(), problem.n_dims()+1), dtype=type)
  problem.get_samples(s)
  print s

  if do_plot:
    pylab.ion()
    pylab.plot(s[s[:,0]==0,1], s[s[:,0]==0,2], '.', color='r')
    pylab.plot(s[s[:,0]==1,1], s[s[:,0]==1,2], '+', color='b')
    pylab.plot(s[s[:,0]==2,1], s[s[:,0]==2,2], '^', color='g')
    pylab.plot(s[s[:,0]==3,1], s[s[:,0]==3,2], 'v', color='g')

  print "Training"
  classifier.train(gamma = 1./3., C = 100, eps=1e-1)

  print "Displaying model"
  model = classifier.get_model()
  print "Number of support vectors:", model.size()
  print "Number of classes:", model.n_class()
  print "Number of dimensions: ", model.n_dims()
  print "Support vectors:"
  sv = numpy.zeros((model.size(), model.n_dims()), dtype=type)
  model.get_support_vectors(sv)
  print sv

  if do_plot:
    pylab.plot(sv[:,0], sv[:,1], 'o', color='g')

  print "Support vector coefficients:"
  svc = numpy.zeros((model.n_class()-1, model.size()), dtype=type)
  model.get_support_vector_coefficients(svc)
  print svc

  print "Hyperplanes (for linear kernel only):"
  h = model.get_hyperplanes()
  print h

  if do_plot:
    # Contour of P(class 0) over a coarse 10x10 grid of the sample range.
    xmin = numpy.min(samples[:,0])
    xmax = numpy.max(samples[:,0])
    xstep = (xmax - xmin) / 10
    X = numpy.arange(xmin, xmax, xstep)
    ymin = numpy.min(samples[:,1])
    ymax = numpy.max(samples[:,1])
    ystep = (ymax - ymin) / 10
    Y = numpy.arange(ymin, ymax, ystep)
    points = numpy.zeros((len(X), len(Y)))
    for i,x in enumerate(X):
      for j,y in enumerate(Y):
        proba = numpy.zeros(model.n_class(), dtype=type)
        classifier.predict_probability(numpy.array([x,y]), proba)
        points[i,j] = proba[0]
    pylab.contour(X,Y,points)

  print "Cross-validation"
  print classifier.cross_validate(2, gamma = .5, C = 10, eps = 1e-3)

  print "Predicting"
  for y, x_list in zip(labels, samples):
    x = numpy.array(x_list, dtype=type)
    proba = numpy.zeros(model.n_class(), dtype=type)
    print x, ': real=', y,
    print 'p1=', classifier.predict(x),
    print 'p2=', classifier.predict_probability(x, proba),
    print 'proba=', proba

  print "Discarding problem"
  # Frees the training data; the trained model must keep working afterwards.
  classifier.discard_problem()

  print "Predicting after discarding the problem"
  for y, x_list in zip(labels, samples):
    x = numpy.array(x_list, dtype=type)
    proba = numpy.zeros(model.n_class(), dtype=type)
    print x, ': real=', y,
    print 'p1=', classifier.predict(x),
    print 'p2=', classifier.predict_probability(x, proba),
    print 'proba=', proba
def persistence():
  """Demonstrate Cap'n Proto round-trip serialization of svm_dense and svm_01
  classifiers via their getSchema()/write()/read() API.

  NOTE(review): n_class = 12 is declared but labels are drawn from 0..256 and
  n_class is otherwise unused here — presumably leftover; confirm intent.
  """
  print "Persistence"
  numpy.random.seed(42)

  n_dims = 2
  n_class = 12
  size = 100
  labels = numpy.random.random_integers(0, 256, size)
  samples = numpy.zeros((size, n_dims), dtype=type)

  print "Generating data"
  # Each sample lies on a small circle around (2*label, 2*label).
  for i in range(0, size):
    t = 6.28 * numpy.random.random_sample()
    samples[i][0] = 2 * labels[i] + 1.5 * numpy.cos(t)
    samples[i][1] = 2 * labels[i] + 1.5 * numpy.sin(t)

  print "Creating dense classifier"
  classifier = algo.svm_dense(0, n_dims = n_dims, seed=42)

  print "Adding sample vectors to dense classifier"
  for y, x_list in zip(labels, samples):
    x = numpy.array(x_list, dtype=type)
    classifier.add_sample(float(y), x)

  print "Serializing dense classifier"
  schema = classifier.getSchema()
  with open("test", "w+b") as f:
    # Save
    proto = schema.new_message()
    classifier.write(proto)
    proto.write(f)

    # Load: rewind and rebuild the classifier from the serialized message.
    f.seek(0)
    proto2 = schema.read(f)
    classifier = algo.svm_dense.read(proto2)

  print "Training dense classifier"
  classifier.train(gamma = 1, C = 10, eps=1e-1)

  print "Predicting with dense classifier"
  print classifier.predict(samples[0])

  print "Creating 0/1 classifier"
  classifier01 = algo.svm_01(n_dims = n_dims, seed=42)

  print "Adding sample vectors to 0/1 classifier"
  for y, x_list in zip(labels, samples):
    x = numpy.array(x_list, dtype=type)
    classifier01.add_sample(float(y), x)

  print "Training 0/1 classifier"
  classifier01.train(gamma = 1./3., C = 100, eps=1e-1)

  print "Serializing 0/1 classifier"
  schema = classifier01.getSchema()
  with open("test", "w+b") as f:
    # Save
    proto = schema.new_message()
    classifier01.write(proto)
    proto.write(f)

    # Load
    f.seek(0)
    proto2 = schema.read(f)
    classifier01 = algo.svm_01.read(proto2)

  print "Predicting with 0/1 classifier"
  print classifier01.predict(numpy.array(samples[0], dtype=type))
def cross_validation():
  """Cross-validation demo — currently a no-op.

  NOTE(review): the bare `return` on the first line deliberately (?) disables
  everything below. The dead body also uses cPickle, which is not imported in
  this module, so re-enabling it as-is would raise NameError.
  """
  return
  print "Cross validation"
  numpy.random.seed(42)

  labels = [0, 1, 1, 2, 1, 2]
  samples = [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [1, 1, 0], [0, 1, 1]]

  classifier = algo.svm_dense(0, n_dims = 3, seed=42)

  print "Adding sample vectors"
  for y, x_list in zip(labels, samples):
    x = numpy.array(x_list, dtype=type)
    classifier.add_sample(float(y), x)

  # Pickle round-trip before training (cPickle is NOT imported — see note).
  cPickle.dump(classifier, open('test', 'wb'))
  classifier = cPickle.load(open('test', 'rb'))

  print "Training"
  classifier.train(gamma = 1./3., C = 100, eps=1e-1)

  print "Cross validation =",
  print classifier.cross_validate(3, gamma = .5, C = 10, eps = 1e-3)
#--------------------------------------------------------------------------------
# Run the three walkthroughs in order. NOTE(review): no __main__ guard, so
# merely importing this module executes all of them.
simple()
persistence()
cross_validation()
| 7,836 | Python | .py | 193 | 34.455959 | 91 | 0.614246 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,739 | sparse_matrix_how_to.py | numenta_nupic-legacy/examples/bindings/sparse_matrix_how_to.py |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# SparseMatrix is a versatile class that offers a wide range of functionality.
# This tutorial will introduce you to the main features of SparseMatrix.
# SparseMatrix is located in nupic.bindings.math, and here is the import you need:
from nupic.bindings.math import *
# 1. Types of sparse matrices:
# ===========================
# There are three types of SparseMatrix, depending on the precision you need
# in your application: 32, 64 and 128 bits. To create a SparseMatrix holding
# floating point values of the desired precision, simply specify it as the
# 'dtype' parameter in the constructor:
s = SparseMatrix(dtype='Float32')
# 2. Global Epsilon:
# =================
# By default, NuPIC is compiled to handle only 32 bits of precision at max,
# and sparse matrices consider a floating point value to be zero if it's less than
# 1e-6 (the best precision possible with 32 bits floats). This value of 1e-6 is
# called "epsilon", and it is a global value used throughout NuPIC to deal with
# near-zero floating point numbers.
# If this is not enough, NuPIC can be recompiled to access more precision.
# With NTA_DOUBLE_PRECISION or NTA_QUAD_PRECISION set at compile time, NuPIC can
# use 64 or 128 bits to represent floating point values. The global epsilon can
# then be set to smaller values via the variable nupic::Epsilon in nupic/math/math.hpp
print '\nGlobal epsilon :', getGlobalEpsilon()
# 3. Creation of sparse matrices:
# ==============================
# There are several convenient ways to create sparse matrices.
# You can create a SparseMatrix by passing it a 2D array:
s = SparseMatrix([[1,2],[3,4]], dtype='Float32')
print '\nFrom array 32\n', s
# ... or by passing it a numpy.array:
s = SparseMatrix(numpy.array([[1,2],[3,4]]),dtype='Float32')
print '\nFrom numpy array 32\n', s
# ... or by using one of the precision-specific shortcuts, such as SM32:
s = SM32([[1,2],[3,4]])
print '\nWith shortcut 32\n', s
# It is also possible to create an empty SparseMatrix, or a copy of another
# SparseMatrix, or a SparseMatrix from a string in CSR format:
s_empty = SM32()
print '\nEmpty sparse matrix\n', s_empty
s_string = SM32('sm_csr_1.5 26 2 2 4 2 0 1 1 2 2 0 3 1 4')
print '\nSparse matrix from string\n', s_string
# A sparse matrix can be converted to a dense one via toDense:
a = numpy.array(s_string.toDense())
print '\ntoDense\n', a
# To set a sparse matrix from a dense one, one can use fromDense:
s = SM32()
s.fromDense(numpy.random.random((4,4)))
print '\nfromDense\n', s
# A sparse matrix can be serialized (via its Cap'n Proto schema):
schema = SM32.getSchema()
with open("sm.bin", "w+b") as f:
  # Save: populate a new message from the matrix and write it to the file.
  proto = schema.new_message()
  s.write(proto)
  proto.write(f)
  # Load: rewind and read the message back into a fresh matrix.
  f.seek(0)
  proto2 = schema.read(f)
  s2 = SM32()
  s2.read(proto2)
print '\nSerializing\n', s2
# 4. Simple queries:
# =================
# You can print a SparseMatrix, and query it for its number of rows, columns,
# non-zeros per row or column... There are many query methods available.
# All row operations are mirrored by the equivalent column operations
# Most operations are available either for a given row, or a given col, or
# all rows or all cols simultaneously. All col operations can be pretty efficient,
# even if the internal storage is CSR.
# Build a random 4x4 matrix and sparsify it (threshold introduces zeros) so
# the queries below have interesting answers.
s = SM32(numpy.random.random((4,4)))
s.threshold(.5)
print '\nPrint\n', s
print '\nNumber of rows ', s.nRows()
print 'Number of columns ', s.nCols()
print 'Is matrix zero? ', s.isZero()
print 'Total number of non zeros ', s.nNonZeros()
print 'Sum of all values ', s.sum()
print 'Prod of non-zeros ', s.prod()
print 'Maximum value and its location ', s.max()
print 'Minimum value and its location ', s.min()
print 'Number of non-zeros on row 0 ', s.nNonZerosOnRow(0)
print 'If first row zero? ', s.isRowZero(0)
print 'Number of non-zeros on each row ', s.nNonZerosPerRow()
print 'Minimum on row 0 ', s.rowMin(0)
print 'Minimum values and locations for all rows', s.rowMin()
print 'Maximum on row 0 ', s.rowMax(0)
print 'Maximum values and locations for all rows', s.rowMax()
print 'Sum of values on row 0 ', s.rowSum(0)
print 'Sum of each row ', s.rowSums()
print 'Product of non-zeros on row 1', s.rowProd(1)
print 'Product of each row ', s.rowProds()
print 'Number of non-zeros on col 0 ', s.nNonZerosOnCol(0)
print 'If first col zero? ', s.isColZero(0)
print 'Number of non-zeros on each col ', s.nNonZerosPerCol()
print 'Minimum on col 0 ', s.colMin(0)
print 'Minimum values and locations for all cols', s.colMin()
print 'Maximum on col 0 ', s.colMax(0)
print 'Maximum values and locations for all cols', s.colMax()
print 'Sum of values on col 0 ', s.colSum(0)
print 'Sum of each col ', s.colSums()
print 'Product of non-zeros on col 1', s.colProd(1)
print 'Product of each col ', s.colProds()
# 5. Element access and slicing:
# =============================
# It is very easy to access individual elements:
print '\n', s
print '\ns[0,0] = ', s[0,0], 's[1,1] = ', s[1,1]
s[0,0] = 3.5
print 'Set [0,0] to 3.5 ', s[0,0]
# There are powerful slicing operations:
print '\ngetOuter\n', s.getOuter([0,2],[0,2])
s.setOuter([0,2],[0,2],[[1,2],[3,4]])
print '\nsetOuter\n', s
s.setElements([0,1,2],[0,1,2],[1,1,1])
print '\nsetElements\n', s
print '\ngetElements\n', s.getElements([0,1,2],[0,1,2])
s2 = s.getSlice(0,2,0,3)
print '\ngetSlice\n', s2
s.setSlice(1,1, s2)
print '\nsetSlice\n', s
# A whole row or col can be set to zero with one call:
s.setRowToZero(1)
print '\nsetRowToZero\n', s
s.setColToZero(1)
print '\nsetColToZero\n', s
# Individual rows and cols can be retrieved as sparse or dense vectors:
print '\nrowNonZeros ', s.rowNonZeros(0)
print 'colNonZeros ', s.colNonZeros(0)
print 'getRow ', s.getRow(0)
print 'getCol ', s.getCol(0)
# 6. Dynamic features:
# ===================
# SparseMatrix is very dynamic. Rows and columns can be added and deleted.
# A sparse matrix can also be resized and reshaped.
print '\n', s
s.reshape(2,8)
print '\nreshape 2 8\n', s
s.reshape(8,2)
print '\nreshape 8 2\n', s
s.reshape(1,16)
print '\nreshape 1 16\n', s
s.reshape(4,4)
print '\nreshape 4 4\n', s
s.resize(5,5)
print '\nresize 5 5\n', s
s.resize(3,3)
print '\nresize 3 3\n', s
s.resize(4,4)
print '\nresize 4 4\n', s
s.deleteRows([3])
print '\ndelete row 3\n', s
s.deleteCols([1])
print '\ndelete col 1\n', s
s.addRow([1,2,3])
print '\nadd row 1 2 3\n', s
s.addCol([1,2,3,4])
print '\nadd col 1 2 3 4\n', s
s.deleteRows([0,3])
print '\ndelete rows 0 and 3\n', s
s.deleteCols([1,2])
print '\ndelete cols 1 and 2\n', s
# It is also possible to threshold a row, column or whole sparse matrix.
# This operation usually introduces zeros.
s.normalize()
print '\n', s
s.thresholdRow(0, .1)
print '\nthreshold row 0 .1\n', s
s.thresholdCol(1, .1)
print '\nthreshold col 1 .1\n', s
s.threshold(.1)
print '\nthreshold .1\n', s
# 7. Element wise operations:
# ==========================
# Element wise operations are prefixed with 'element'. There are row-oriented,
# column-oriented and whole matrix element-wise operations.
s = SM32(numpy.random.random((4,4)))
print '\n', s
s.elementNZInverse()
print '\nelementNZInverse\n', s
s.elementNZLog()
print '\nelementNZLog\n', s
s = abs(s)
print '\nabs\n', s
s.elementSqrt()
print '\nelementSqrt\n', s
s.add(4)
print '\nadd 4\n', s
# The normalize family rescales a row/col/matrix so its sum hits a target
# value (default 1):
s.normalizeRow(1, 10)
print '\nnormalizeRow 1 10\n', s
print 'sum row 1 = ', s.rowSum(1)
s.normalizeCol(0, 3)
print '\nnormalizeCol 0 3\n', s
print 'sum col 0 = ', s.colSum(0)
s.normalize(5)
print '\nnormalize to 5\n', s
print 'sum = ', s.sum()
s.normalize()
print '\nnormalize\n', s
print 'sum = ', s.sum()
s.transpose()
print '\ntranspose\n', s
s2 = SM32(numpy.random.random((3,4)))
print '\n', s2
s2.transpose()
print '\ntranspose rectangular\n', s2
s2.transpose()
print '\ntranspose rectangular again\n', s2
# 8. Matrix vector and matrix matrix operations:
# =============================================
# SparseMatrix provides matrix vector multiplication on the right and left,
# as well as specialized operations between a vector and the rows
# of the SparseMatrix.
x = numpy.array([1,2,3,4])
print '\nx = ', x
print 'Product on the right:\n', s.rightVecProd(x)
print 'Product on the left:\n', s.leftVecProd(x)
print 'Product of x elements corresponding to nz on each row:\n', s.rightVecProdAtNZ(x)
print 'Product of x elements and nz:\n', s.rowVecProd(x)
print 'Max of x elements corresponding to nz:\n', s.vecMaxAtNZ(x)
print 'Max of products of x elements and nz:\n', s.vecMaxProd(x)
print 'Max of elements of x corresponding to nz:\n', s.vecMaxAtNZ(x)
# axby computes linear combinations of rows and vectors
s.axby(0, 1.5, 1.5, x)
print '\naxby 0 1.5 1.5\n', s
s.axby(1.5, 1.5, x)
print '\naxby 1.5 1.5\n', s
# The multiplication operator can be used both for inner and outer product,
# depending on the shape of its operands, when using SparseMatrix instances:
s_row = SM32([[1,2,3,4]])
s_col = SM32([[1],[2],[3],[4]])
print '\nInner product: ', s_row * s_col
print '\nOuter product:\n', s_col * s_row
# SparseMatrix supports matrix matrix multiplication:
s1 = SM32(numpy.random.random((4,4)))
s2 = SM32(numpy.random.random((4,4)))
print '\nmatrix matrix multiplication\n', s1 * s2
# The block matrix vector multiplication treats the matrix as if it were
# a collection of narrower matrices. The following multiplies a1 by x and then a2 by x,
# where a1 is the sub-matrix of size (4,2) obtained by considering
# only the first two columns of a, and a2 the sub-matrix obtained by considering only
# the last two columns of a.
a = SM32([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]])
x = [1,2,3,4]
print a.blockRightVecProd(2, x)
# To do an element multiplication of two matrices, do:
print a
b = SM32(numpy.random.randint(0,2,(4,4)))
print b
a.elementNZMultiply(b)
print a
# In general, the "element..." operations implement element by element operations.
# 9. Arithmetic operators:
# =======================
# It is possible to use all 4 arithmetic operators, with scalars or matrices:
print '\ns + 3\n', s + 3
print '\n3 + s\n', 3 + s
print '\ns - 1\n', s - 1
print '\n1 - s\n', 1 - s
print '\ns + s\n', s + s
print '\ns * 3\n', s * 3
print '\n3 * s\n', 3 * s
print '\ns * s\n', s * s
print '\ns / 3.1\n', s / 3.1
# ... and to write arbitrary linear combinations of sparse matrices:
print '\ns1 + 2 * s - s2 / 3.1\n', s1 + 2 * s - s2 / 3.1
# In place operators are supported:
s += 3.5
print '\n+= 3.5\n', s
s -= 3.2
print '\n-= 3.2\n', s
s *= 3.1
print '\n*= 3.1\n', s
s /= -1.5
print '\n/= -1.5\n', s
# 10. Count/find:
# ==============
# Use countWhereEqual and whereEqual to count or find the elements that have
# a specific value. The first four parameters define a box in which to look:
# [begin_row, end_row) X [begin_col, end_col). The indices returned by whereEqual
# are relative to the original matrix. countWhereEqual is faster than using len()
# on the list returned by whereEqual.
s = SM32(numpy.random.randint(0,3,(5,5)))
print '\nThe matrix is now:\n', s
print '\nNumber of elements equal to 0=', s.countWhereEqual(0,5,0,5,0)
print 'Number of elements equal to 1=', s.countWhereEqual(0,5,0,5,1)
print 'Number of elements equal to 2=', s.countWhereEqual(0,5,0,5,2)
print '\nIndices of the elements == 0:', s.whereEqual(0,5,0,5,0)
print '\nIndices of the elements == 1:', s.whereEqual(0,5,0,5,1)
print '\nIndices of the elements == 2:', s.whereEqual(0,5,0,5,2)
# ... and there is even more:
print '\nAll ' + str(len(dir(s))) + ' methods:\n', dir(s)
| 12,453 | Python | .py | 311 | 38.575563 | 87 | 0.695305 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,740 | custom_region_demo.py | numenta_nupic-legacy/examples/network/custom_region_demo.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import csv
import json
import os
import sys
from pkg_resources import resource_filename
from nupic.data.file_record_stream import FileRecordStream
from nupic.encoders import MultiEncoder, ScalarEncoder, DateEncoder
from nupic.engine import Network
_VERBOSITY = 0  # how chatty the demo should be
_SEED = 1956  # the random seed used throughout
_INPUT_FILE_PATH = resource_filename(
  "nupic.datafiles", "extra/hotgym/rec-center-hourly.csv"
)
_OUTPUT_PATH = "network-demo-output.csv"  # written next to this script
_NUM_RECORDS = 2000  # number of input records to feed through the network
def createEncoder():
  """Build and return the MultiEncoder used to encode each input record.

  Combines a scalar encoding of the consumption value with a time-of-day
  encoding of the timestamp field.
  """
  encoder = MultiEncoder()
  encoder.addEncoder(
      "consumption",
      ScalarEncoder(21, 0.0, 100.0, n=50, name="consumption", clipInput=True))
  encoder.addEncoder(
      "timestamp",
      DateEncoder(timeOfDay=(21, 9.5), name="timestamp_timeOfDay"))
  return encoder
def createNetwork(dataSource):
  """Create the Network instance.

  The network has a sensor region reading data from `dataSource` and passing
  the encoded representation to an Identity Region.

  :param dataSource: a RecordStream instance to get data from
  :returns: a Network instance ready to run
  """
  network = Network()
  # Our input is sensor data from the gym file. The RecordSensor region
  # allows us to specify a file record stream as the input source via the
  # dataSource attribute.
  network.addRegion("sensor", "py.RecordSensor",
                    json.dumps({"verbosity": _VERBOSITY}))
  sensor = network.regions["sensor"].getSelf()
  # The RecordSensor needs to know how to encode the input values
  sensor.encoder = createEncoder()
  # Specify the dataSource as a file record stream instance
  sensor.dataSource = dataSource
  # CUSTOM REGION
  # Add path to custom region to PYTHONPATH
  # NOTE: Before using a custom region, please modify your PYTHONPATH
  # export PYTHONPATH="<path to custom region module>:$PYTHONPATH"
  # In this demo, we have modified it using sys.path.append since we need it to
  # have an effect on this program.
  sys.path.append(os.path.dirname(os.path.abspath(__file__)))
  from custom_region.identity_region import IdentityRegion
  # Register the custom region class with the engine so the addRegion call
  # below can instantiate it by its "py.IdentityRegion" name.
  Network.registerRegion(IdentityRegion)
  # Create a custom region sized to the sensor's encoded output width.
  network.addRegion("identityRegion", "py.IdentityRegion",
                    json.dumps({
                      "dataWidth": sensor.encoder.getWidth(),
                    }))
  # Link the Identity region to the sensor output
  network.link("sensor", "identityRegion", "UniformLink", "")
  network.initialize()
  return network
def runNetwork(network, writer):
  """Run the network for _NUM_RECORDS iterations and record each encoding.

  :param network: a Network instance to run
  :param writer: a csv.writer that receives one (record index, encoding)
      row per iteration
  """
  region = network.regions["identityRegion"]
  for recordNum in xrange(_NUM_RECORDS):
    # Advance the network by exactly one input record.
    network.run(1)
    # Capture the custom region's "out" output for this record.
    writer.writerow((recordNum, region.getOutputData("out")))
if __name__ == "__main__":
  # Feed the hotgym csv through the network and save each encoding to csv.
  dataSource = FileRecordStream(streamID=_INPUT_FILE_PATH)
  network = createNetwork(dataSource)
  outputPath = os.path.join(os.path.dirname(__file__), _OUTPUT_PATH)
  with open(outputPath, "w") as outputFile:
    writer = csv.writer(outputFile)
    print "Writing output to %s" % outputPath
    runNetwork(network, writer)
| 4,519 | Python | .py | 102 | 40.519608 | 79 | 0.721095 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,741 | hierarchy_network_demo.py | numenta_nupic-legacy/examples/network/hierarchy_network_demo.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
An example of a hierarchy of cortical regions in a Network. There are two
levels in this demo each with a Spatial Pooler, Temporal Memory,
and a classifier. Anomaly scores are output to a file while classification
scores are output to console.
"""
import copy
import csv
import json
import os
import math
import numpy
from pkg_resources import resource_filename
from nupic.data.file_record_stream import FileRecordStream
from nupic.engine import Network
from nupic.encoders import MultiEncoder
# Level of detail of console output. Int value from 0 (none)
# to 3 (super detailed)
_VERBOSITY = 0
# Seed used for random number generation
_SEED = 2045
_INPUT_FILE_PATH = resource_filename(
  "nupic.datafiles", "extra/hotgym/rec-center-hourly.csv"
)
_OUTPUT_FILE_NAME = "hierarchy-demo-output.csv"
# Parameter dict for SPRegion; inputWidth is filled in per level by
# createSpatialPooler().
SP_PARAMS = {"spVerbosity": _VERBOSITY,
             "spatialImp": "cpp",
             "seed": _SEED,
             # determined and set during network creation
             "inputWidth": 0,
             # @see nupic.research.spatial_pooler.SpatialPooler for explanations
             "globalInhibition": 1,
             "columnCount": 2048,
             "numActiveColumnsPerInhArea": 40,
             "potentialPct": 0.8,
             "synPermConnected": 0.1,
             "synPermActiveInc": 0.0001,
             "synPermInactiveDec": 0.0005,
             "boostStrength": 0.0}
# Parameter dict for TPRegion
TM_PARAMS = {"verbosity": _VERBOSITY,
             "temporalImp": "cpp",
             "seed": _SEED,
             # @see nupic.research.temporal_memory.TemporalMemory
             # for explanations
             "columnCount": 2048,
             "cellsPerColumn": 12,
             "inputWidth": 2048,
             "newSynapseCount": 20,
             "maxSynapsesPerSegment": 32,
             "maxSegmentsPerCell": 128,
             "initialPerm": 0.21,
             "permanenceInc": 0.1,
             "permanenceDec": 0.1,
             "globalDecay": 0.0,
             "maxAge": 0,
             "minThreshold": 9,
             "activationThreshold": 12,
             "outputType": "normal",
             "pamLength": 3}
# Region names used when building and querying the two-level network.
_RECORD_SENSOR = "sensorRegion"
_L1_SPATIAL_POOLER = "l1SpatialPoolerRegion"
_L1_TEMPORAL_MEMORY = "l1TemporalMemoryRegion"
_L1_CLASSIFIER = "l1Classifier"
_L2_SPATIAL_POOLER = "l2SpatialPoolerRegion"
_L2_TEMPORAL_MEMORY = "l2TemporalMemoryRegion"
_L2_CLASSIFIER = "l2Classifier"
def createEncoder():
  """
  Creates and returns a #MultiEncoder including a ScalarEncoder for
  energy consumption and a DateEncoder for the time of the day.

  @see nupic/encoders/__init__.py for type to file-name mapping
  @see nupic/encoders for encoder source files
  @returns a MultiEncoder covering both input fields
  """
  encoder = MultiEncoder()
  encoder.addMultipleEncoders({
      "consumption": {"fieldname": u"consumption",
                      "type": "ScalarEncoder",
                      "name": u"consumption",
                      "minval": 0.0,
                      "maxval": 100.0,
                      "clipInput": True,
                      "w": 21,
                      "n": 500},
      "timestamp_timeOfDay": {"fieldname": u"timestamp",
                              "type": "DateEncoder",
                              "name": u"timestamp_timeOfDay",
                              "timeOfDay": (21, 9.5)}
  })
  return encoder
def createRecordSensor(network, name, dataSource):
  """
  Creates a RecordSensor region that allows us to specify a file record
  stream as the input source.

  @param network: the Network instance to add the sensor region to
  @param name: name to give the new region
  @param dataSource: a RecordStream instance feeding the sensor
  @returns the sensor region implementation (via getSelf)
  """
  # Specific type of region. Possible options can be found in /nupic/regions/
  regionType = "py.RecordSensor"
  # Creates a json from specified dictionary.
  regionParams = json.dumps({"verbosity": _VERBOSITY})
  network.addRegion(name, regionType, regionParams)
  # getSelf returns the actual region, instead of a region wrapper
  sensorRegion = network.regions[name].getSelf()
  # Specify how RecordSensor encodes input values
  sensorRegion.encoder = createEncoder()
  # Specify which sub-encoder should be used for "actValueOut"
  network.regions[name].setParameter("predictedField", "consumption")
  # Specify the dataSource as a file record stream instance
  sensorRegion.dataSource = dataSource
  return sensorRegion
def createSpatialPooler(network, name, inputWidth):
  """
  Creates an SPRegion named `name` with learning enabled and returns it.

  @param network: the Network instance to add the region to
  @param name: name to give the new region
  @param inputWidth: width (in bits) of the input this spatial pooler reads
  @returns the SPRegion that was added to the network
  """
  # Work on a per-call copy so the module-level SP_PARAMS template is not
  # mutated; this function is called once per level with different widths.
  spParams = dict(SP_PARAMS)
  spParams["inputWidth"] = inputWidth
  spatialPoolerRegion = network.addRegion(name, "py.SPRegion",
                                          json.dumps(spParams))
  # Make sure learning is enabled
  spatialPoolerRegion.setParameter("learningMode", True)
  # We want temporal anomalies so disable anomalyMode in the SP. This mode is
  # used for computing anomalies in a non-temporal model.
  spatialPoolerRegion.setParameter("anomalyMode", False)
  return spatialPoolerRegion
def createTemporalMemory(network, name):
  """Add a TMRegion called `name`, configured from TM_PARAMS, and return it."""
  region = network.addRegion(name, "py.TMRegion", json.dumps(TM_PARAMS))
  # - topDownMode: expose the predicted columns on the "topDownOut" output
  # - learningMode: keep learning enabled (this is the default)
  # - inferenceMode: produce predictions
  # - anomalyMode: request anomaly score computation; this does not actually
  #   work right now, so the demo instead derives anomalies from topDownOut
  #   (predicted columns) and the SP's bottomUpOut (active columns).
  for parameter, value in (("topDownMode", True),
                           ("learningMode", True),
                           ("inferenceMode", True),
                           ("anomalyMode", True)):
    region.setParameter(parameter, value)
  return region
def createNetwork(dataSource):
  """Creates and returns a new Network with a sensor region reading data from
  'dataSource'. There are two hierarchical levels, each with one SP and one TM.

  @param dataSource - A RecordStream containing the input data
  @returns a Network ready to run
  """
  network = Network()
  linkType = "UniformLink"
  linkParams = ""

  # Classifier configuration shared by both levels.
  classifierParams = {
      # Learning rate. Higher values make it adapt faster.
      'alpha': 0.005,
      # A comma separated list of the number of steps the
      # classifier predicts in the future. The classifier will
      # learn predictions of each order specified.
      'steps': '1',
      # The specific implementation of the classifier to use
      # See SDRClassifierFactory#create for options
      'implementation': 'py',
      # Diagnostic output verbosity control;
      # 0: silent; [1..6]: increasing levels of verbosity
      'verbosity': 0}

  def addClassifier(name, tmName):
    # Add an SDRClassifierRegion `name` and wire it to the TM `tmName`
    # plus the sensor's category/bucket/actual-value outputs.
    classifier = network.addRegion(name, "py.SDRClassifierRegion",
                                   json.dumps(classifierParams))
    classifier.setParameter('inferenceMode', True)
    classifier.setParameter('learningMode', True)
    network.link(tmName, name, linkType, linkParams,
                 srcOutput="bottomUpOut", destInput="bottomUpIn")
    for srcOutput, destInput in (("categoryOut", "categoryIn"),
                                 ("bucketIdxOut", "bucketIdxIn"),
                                 ("actValueOut", "actValueIn")):
      network.link(_RECORD_SENSOR, name, linkType, linkParams,
                   srcOutput=srcOutput, destInput=destInput)

  # First level: sensor -> SP -> TM -> classifier.
  sensor = createRecordSensor(network, name=_RECORD_SENSOR,
                              dataSource=dataSource)
  createSpatialPooler(network, name=_L1_SPATIAL_POOLER,
                      inputWidth=sensor.encoder.getWidth())
  network.link(_RECORD_SENSOR, _L1_SPATIAL_POOLER, linkType, linkParams)
  l1TemporalMemory = createTemporalMemory(network, _L1_TEMPORAL_MEMORY)
  network.link(_L1_SPATIAL_POOLER, _L1_TEMPORAL_MEMORY, linkType, linkParams)
  addClassifier(_L1_CLASSIFIER, _L1_TEMPORAL_MEMORY)

  # Second level: the L1 TM output feeds the L2 SP.
  l2InputWidth = l1TemporalMemory.getSelf().getOutputElementCount("bottomUpOut")
  createSpatialPooler(network, name=_L2_SPATIAL_POOLER, inputWidth=l2InputWidth)
  network.link(_L1_TEMPORAL_MEMORY, _L2_SPATIAL_POOLER, linkType, linkParams)
  createTemporalMemory(network, _L2_TEMPORAL_MEMORY)
  network.link(_L2_SPATIAL_POOLER, _L2_TEMPORAL_MEMORY, linkType, linkParams)
  addClassifier(_L2_CLASSIFIER, _L2_TEMPORAL_MEMORY)

  return network
def runNetwork(network, numRecords, writer):
"""
Runs specified Network writing the ensuing anomaly
scores to writer.
@param network: The Network instance to be run
@param writer: A csv.writer used to write to output file.
"""
sensorRegion = network.regions[_RECORD_SENSOR]
l1SpRegion = network.regions[_L1_SPATIAL_POOLER]
l1TpRegion = network.regions[_L1_TEMPORAL_MEMORY]
l1Classifier = network.regions[_L1_CLASSIFIER]
l2SpRegion = network.regions[_L2_SPATIAL_POOLER]
l2TpRegion = network.regions[_L2_TEMPORAL_MEMORY]
l2Classifier = network.regions[_L2_CLASSIFIER]
l1PreviousPredictedColumns = []
l2PreviousPredictedColumns = []
l1PreviousPrediction = None
l2PreviousPrediction = None
l1ErrorSum = 0.0
l2ErrorSum = 0.0
for record in xrange(numRecords):
# Run the network for a single iteration
network.run(1)
actual = float(sensorRegion.getOutputData("actValueOut")[0])
l1Predictions = l1Classifier.getOutputData("actualValues")
l1Probabilities = l1Classifier.getOutputData("probabilities")
l1Prediction = l1Predictions[l1Probabilities.argmax()]
if l1PreviousPrediction is not None:
l1ErrorSum += math.fabs(l1PreviousPrediction - actual)
l1PreviousPrediction = l1Prediction
l2Predictions = l2Classifier.getOutputData("actualValues")
l2Probabilities = l2Classifier.getOutputData("probabilities")
l2Prediction = l2Predictions[l2Probabilities.argmax()]
if l2PreviousPrediction is not None:
l2ErrorSum += math.fabs(l2PreviousPrediction - actual)
l2PreviousPrediction = l2Prediction
l1AnomalyScore = l1TpRegion.getOutputData("anomalyScore")[0]
l2AnomalyScore = l2TpRegion.getOutputData("anomalyScore")[0]
# Write record number, actualInput, and anomaly scores
writer.writerow((record, actual, l1PreviousPrediction, l1AnomalyScore, l2PreviousPrediction, l2AnomalyScore))
# Store the predicted columns for the next timestep
l1PredictedColumns = l1TpRegion.getOutputData("topDownOut").nonzero()[0]
l1PreviousPredictedColumns = copy.deepcopy(l1PredictedColumns)
#
l2PredictedColumns = l2TpRegion.getOutputData("topDownOut").nonzero()[0]
l2PreviousPredictedColumns = copy.deepcopy(l2PredictedColumns)
# Output absolute average error for each level
if numRecords > 1:
print "L1 ave abs class. error: %f" % (l1ErrorSum / (numRecords - 1))
print "L2 ave abs class. error: %f" % (l2ErrorSum / (numRecords - 1))
def runDemo():
  """Build the two-level network, run it over the whole input csv, and
  write per-record results to _OUTPUT_FILE_NAME next to this script."""
  dataSource = FileRecordStream(streamID=_INPUT_FILE_PATH)
  numRecords = dataSource.getDataRowCount()
  print "Creating network"
  network = createNetwork(dataSource)
  outputPath = os.path.join(os.path.dirname(__file__), _OUTPUT_FILE_NAME)
  with open(outputPath, "w") as outputFile:
    writer = csv.writer(outputFile)
    print "Running network"
    print "Writing output to: %s" % outputPath
    runNetwork(network, numRecords, writer)
  print "Hierarchy demo finished"
if __name__ == "__main__":
  runDemo()
| 13,394 | Python | .py | 283 | 40.328622 | 113 | 0.695492 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,742 | core_encoders_demo.py | numenta_nupic-legacy/examples/network/core_encoders_demo.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
import json
from datetime import datetime
from pkg_resources import resource_filename
from nupic.engine import Network
from nupic.encoders import DateEncoder
def createNetwork():
  """Build and return the demo Network.

  Two sensor regions — a C++ ScalarSensor for the consumption value and a
  Python PluggableEncoderSensor carrying a DateEncoder for the timestamp —
  feed a spatial pooler region, whose output feeds a temporal memory region.
  """
  network = Network()

  #
  # Sensors
  #

  # C++ sensor: configured entirely through region parameters.
  consumptionParams = {'n': 120,
                       'w': 21,
                       'minValue': 0.0,
                       'maxValue': 100.0,
                       'clipInput': True}
  consumptionSensor = network.addRegion('consumptionSensor', 'ScalarSensor',
                                        json.dumps(consumptionParams))

  # Python sensor: the encoder object is plugged in directly.
  timestampSensor = network.addRegion("timestampSensor",
                                      'py.PluggableEncoderSensor', "")
  timestampSensor.getSelf().encoder = DateEncoder(timeOfDay=(21, 9.5),
                                                  name="timestamp_timeOfDay")

  #
  # Add a SPRegion, a region containing a spatial pooler
  #
  # Its input width is the two encoder widths combined.
  consumptionEncoderN = consumptionSensor.getParameter('n')
  timestampEncoderN = timestampSensor.getSelf().encoder.getWidth()
  spParams = {"spatialImp": "cpp",
              "globalInhibition": 1,
              "columnCount": 2048,
              "inputWidth": consumptionEncoderN + timestampEncoderN,
              "numActiveColumnsPerInhArea": 40,
              "seed": 1956,
              "potentialPct": 0.8,
              "synPermConnected": 0.1,
              "synPermActiveInc": 0.0001,
              "synPermInactiveDec": 0.0005,
              "boostStrength": 0.0}
  network.addRegion("sp", "py.SPRegion", json.dumps(spParams))

  # Both encodings are concatenated as the spatial pooler's input.
  network.link("consumptionSensor", "sp", "UniformLink", "")
  network.link("timestampSensor", "sp", "UniformLink", "")

  #
  # Add a TPRegion, a region containing a Temporal Memory
  #
  tmParams = {"columnCount": 2048,
              "cellsPerColumn": 32,
              "inputWidth": 2048,
              "seed": 1960,
              "temporalImp": "cpp",
              "newSynapseCount": 20,
              "maxSynapsesPerSegment": 32,
              "maxSegmentsPerCell": 128,
              "initialPerm": 0.21,
              "permanenceInc": 0.1,
              "permanenceDec": 0.1,
              "globalDecay": 0.0,
              "maxAge": 0,
              "minThreshold": 9,
              "activationThreshold": 12,
              "outputType": "normal",
              "pamLength": 3}
  network.addRegion("tm", "py.TMRegion", json.dumps(tmParams))
  network.link("sp", "tm", "UniformLink", "")
  network.link("tm", "sp", "UniformLink", "", srcOutput="topDownOut",
               destInput="topDownIn")

  # Enable anomalyMode so the tm calculates anomaly scores, and
  # inferenceMode so it produces predictions.
  network.regions['tm'].setParameter("anomalyMode", True)
  network.regions['tm'].setParameter("inferenceMode", True)
  return network
def runNetwork(network):
consumptionSensor = network.regions['consumptionSensor']
timestampSensor = network.regions['timestampSensor']
tmRegion = network.regions['tm']
filename = resource_filename("nupic.datafiles",
"extra/hotgym/rec-center-hourly.csv")
csvReader = csv.reader(open(filename, 'r'))
csvReader.next()
csvReader.next()
csvReader.next()
for row in csvReader:
timestampStr, consumptionStr = row
# For core encoders, use the network API.
consumptionSensor.setParameter('sensedValue', float(consumptionStr))
# For Python encoders, circumvent the Network API.
# The inputs are often crazy Python types, for example:
t = datetime.strptime(timestampStr, "%m/%d/%y %H:%M")
timestampSensor.getSelf().setSensedValue(t)
network.run(1)
anomalyScore = tmRegion.getOutputData('anomalyScore')[0]
print "Consumption: %s, Anomaly score: %f" % (consumptionStr, anomalyScore)
if __name__ == "__main__":
  # Build the network once, then stream the whole csv through it.
  network = createNetwork()
  runNetwork(network)
| 5,291 | Python | .py | 123 | 32.04878 | 79 | 0.571901 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,743 | network_api_demo.py | numenta_nupic-legacy/examples/network/network_api_demo.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015-2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import csv
import json
import os
from pkg_resources import resource_filename
from nupic.data.file_record_stream import FileRecordStream
from nupic.engine import Network
from nupic.encoders import MultiEncoder, ScalarEncoder, DateEncoder
from nupic.regions.sp_region import SPRegion
from nupic.regions.tm_region import TMRegion
_VERBOSITY = 0  # how chatty the demo should be
# NOTE(review): _SEED is not referenced anywhere in this file's visible code;
# SP_PARAMS/TM_PARAMS hard-code their own seed values below — confirm before
# removing.
_SEED = 1956  # the random seed used throughout
_INPUT_FILE_PATH = resource_filename(
  "nupic.datafiles", "extra/hotgym/rec-center-hourly.csv"
)
_OUTPUT_PATH = "network-demo-anomaly-output.csv"
_NUM_RECORDS = 2000
# Config field for SPRegion
SP_PARAMS = {
    "spVerbosity": _VERBOSITY,
    "spatialImp": "cpp",
    "globalInhibition": 1,
    "columnCount": 2048,
    # This must be set before creating the SPRegion
    # (createNetwork overwrites it with the encoder's output width).
    "inputWidth": 0,
    "numActiveColumnsPerInhArea": 40,
    "seed": 1956,
    "potentialPct": 0.8,
    "synPermConnected": 0.1,
    "synPermActiveInc": 0.0001,
    "synPermInactiveDec": 0.0005,
    "boostStrength": 0.0,
}
# Config field for TMRegion
TM_PARAMS = {
    "verbosity": _VERBOSITY,
    "columnCount": 2048,
    "cellsPerColumn": 32,
    # Must equal SP_PARAMS["columnCount"]: the TM consumes the SP output.
    "inputWidth": 2048,
    "seed": 1960,
    "temporalImp": "cpp",
    "newSynapseCount": 20,
    "maxSynapsesPerSegment": 32,
    "maxSegmentsPerCell": 128,
    "initialPerm": 0.21,
    "permanenceInc": 0.1,
    "permanenceDec": 0.1,
    "globalDecay": 0.0,
    "maxAge": 0,
    "minThreshold": 9,
    "activationThreshold": 12,
    "outputType": "normal",
    "pamLength": 3,
}
def createEncoder():
  """Build and return the MultiEncoder used by the sensor region.

  Combines a scalar encoder for the energy-consumption value with a date
  encoder for the time-of-day component of the timestamp.
  """
  multi = MultiEncoder()
  multi.addEncoder(
      "consumption",
      ScalarEncoder(21, 0.0, 100.0, n=50, name="consumption", clipInput=True))
  multi.addEncoder(
      "timestamp",
      DateEncoder(timeOfDay=(21, 9.5), name="timestamp_timeOfDay"))
  return multi
def createNetwork(dataSource):
  """Create the Network instance.
  The network has a sensor region reading data from `dataSource` and passing
  the encoded representation to an SPRegion. The SPRegion output is passed to
  a TMRegion, whose anomaly score feeds an AnomalyLikelihoodRegion.
  :param dataSource: a RecordStream instance to get data from
  :returns: a Network instance ready to run
  """
  network = Network()
  # Our input is sensor data from the gym file. The RecordSensor region
  # allows us to specify a file record stream as the input source via the
  # dataSource attribute.
  network.addRegion("sensor", "py.RecordSensor",
                    json.dumps({"verbosity": _VERBOSITY}))
  sensor = network.regions["sensor"].getSelf()
  # The RecordSensor needs to know how to encode the input values
  sensor.encoder = createEncoder()
  # Specify the dataSource as a file record stream instance
  sensor.dataSource = dataSource
  # Create the spatial pooler region
  # NOTE: this mutates the module-level SP_PARAMS dict in place.
  SP_PARAMS["inputWidth"] = sensor.encoder.getWidth()
  network.addRegion("spatialPoolerRegion", "py.SPRegion", json.dumps(SP_PARAMS))
  # Link the SP region to the sensor input (feed-forward, reset, and the two
  # top-down feedback paths expected by the RecordSensor).
  network.link("sensor", "spatialPoolerRegion", "UniformLink", "")
  network.link("sensor", "spatialPoolerRegion", "UniformLink", "",
               srcOutput="resetOut", destInput="resetIn")
  network.link("spatialPoolerRegion", "sensor", "UniformLink", "",
               srcOutput="spatialTopDownOut", destInput="spatialTopDownIn")
  network.link("spatialPoolerRegion", "sensor", "UniformLink", "",
               srcOutput="temporalTopDownOut", destInput="temporalTopDownIn")
  # Add the TMRegion on top of the SPRegion
  network.addRegion("temporalPoolerRegion", "py.TMRegion",
                    json.dumps(TM_PARAMS))
  network.link("spatialPoolerRegion", "temporalPoolerRegion", "UniformLink", "")
  network.link("temporalPoolerRegion", "spatialPoolerRegion", "UniformLink", "",
               srcOutput="topDownOut", destInput="topDownIn")
  # Add the AnomalyLikelihoodRegion on top of the TMRegion; it receives the
  # raw anomaly score from the TM and the raw metric value from the sensor.
  network.addRegion("anomalyLikelihoodRegion", "py.AnomalyLikelihoodRegion",
                    json.dumps({}))
  network.link("temporalPoolerRegion", "anomalyLikelihoodRegion", "UniformLink",
               "", srcOutput="anomalyScore", destInput="rawAnomalyScore")
  network.link("sensor", "anomalyLikelihoodRegion", "UniformLink", "",
               srcOutput="sourceOut", destInput="metricValue")
  spatialPoolerRegion = network.regions["spatialPoolerRegion"]
  # Make sure learning is enabled
  spatialPoolerRegion.setParameter("learningMode", True)
  # We want temporal anomalies so disable anomalyMode in the SP. This mode is
  # used for computing anomalies in a non-temporal model.
  spatialPoolerRegion.setParameter("anomalyMode", False)
  temporalPoolerRegion = network.regions["temporalPoolerRegion"]
  # Enable topDownMode to get the predicted columns output
  temporalPoolerRegion.setParameter("topDownMode", True)
  # Make sure learning is enabled (this is the default)
  temporalPoolerRegion.setParameter("learningMode", True)
  # Enable inference mode so we get predictions
  temporalPoolerRegion.setParameter("inferenceMode", True)
  # Enable anomalyMode to compute the anomaly score to be passed to the anomaly
  # likelihood region.
  temporalPoolerRegion.setParameter("anomalyMode", True)
  return network
def runNetwork(network, writer):
  """Run the network and write output to writer.

  Runs _NUM_RECORDS single-record iterations; after each one, appends the
  row ``(index, consumption, anomalyScore, anomalyLikelihood)`` via writer.

  :param network: a Network instance to run
  :param writer: a csv.writer instance to write output to
  """
  sensorRegion = network.regions["sensor"]
  temporalPoolerRegion = network.regions["temporalPoolerRegion"]
  anomalyLikelihoodRegion = network.regions["anomalyLikelihoodRegion"]
  # (Removed unused locals from the original: "spatialPoolerRegion" and
  # "prevPredictedColumns" were assigned but never read.)
  for i in xrange(_NUM_RECORDS):
    # Run the network for a single iteration
    network.run(1)
    # Write out the anomaly likelihood along with the record number and
    # consumption value.
    consumption = sensorRegion.getOutputData("sourceOut")[0]
    anomalyScore = temporalPoolerRegion.getOutputData("anomalyScore")[0]
    anomalyLikelihood = anomalyLikelihoodRegion.getOutputData(
        "anomalyLikelihood")[0]
    writer.writerow((i, consumption, anomalyScore, anomalyLikelihood))
if __name__ == "__main__":
  # Build the network from the packaged hot gym CSV and initialize it so the
  # region algorithm instances exist before we introspect them below.
  dataSource = FileRecordStream(streamID=_INPUT_FILE_PATH)
  network = createNetwork(dataSource)
  network.initialize()
  # Print some diagnostics about the SP region/algorithm.
  spRegion = network.getRegionsByType(SPRegion)[0]
  sp = spRegion.getSelf().getAlgorithmInstance()
  print "spatial pooler region inputs: {0}".format(spRegion.getInputNames())
  print "spatial pooler region outputs: {0}".format(spRegion.getOutputNames())
  print "# spatial pooler columns: {0}".format(sp.getNumColumns())
  print
  # Same diagnostics for the TM region/algorithm.
  tmRegion = network.getRegionsByType(TMRegion)[0]
  tm = tmRegion.getSelf().getAlgorithmInstance()
  print "temporal memory region inputs: {0}".format(tmRegion.getInputNames())
  print "temporal memory region outputs: {0}".format(tmRegion.getOutputNames())
  print "# temporal memory columns: {0}".format(tm.numberOfCols)
  print
  # Stream the data through the network and log results next to this script.
  outputPath = os.path.join(os.path.dirname(__file__), _OUTPUT_PATH)
  with open(outputPath, "w") as outputFile:
    writer = csv.writer(outputFile)
    print "Writing output to %s" % outputPath
    runNetwork(network, writer)
| 8,279 | Python | .py | 183 | 41.377049 | 85 | 0.735809 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,744 | temporal_anomaly_network_demo.py | numenta_nupic-legacy/examples/network/temporal_anomaly_network_demo.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015-2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
import json
import os
from pkg_resources import resource_filename
from nupic.data.file_record_stream import FileRecordStream
from nupic.engine import Network
from nupic.encoders import MultiEncoder, ScalarEncoder, DateEncoder
_VERBOSITY = 0  # how chatty this demo should be
# Default config fields for SPRegion
_SP_PARAMS = {
    "spVerbosity": _VERBOSITY,
    "spatialImp": "cpp",
    "globalInhibition": 1,
    "columnCount": 2048,
    # Placeholder: createTemporalAnomaly overwrites this with the encoder's
    # actual output width before the region is built.
    "inputWidth": 0,
    "numActiveColumnsPerInhArea": 40,
    "seed": 1956,
    "potentialPct": 0.8,
    "synPermConnected": 0.1,
    "synPermActiveInc": 0.0001,
    "synPermInactiveDec": 0.0005,
    "boostStrength": 0.0,
}
# Default config fields for TPRegion
_TM_PARAMS = {
    "verbosity": _VERBOSITY,
    "columnCount": 2048,
    "cellsPerColumn": 32,
    # Must equal _SP_PARAMS["columnCount"]: the TM consumes the SP output.
    "inputWidth": 2048,
    "seed": 1960,
    "temporalImp": "cpp",
    "newSynapseCount": 20,
    "maxSynapsesPerSegment": 32,
    "maxSegmentsPerCell": 128,
    "initialPerm": 0.21,
    "permanenceInc": 0.1,
    "permanenceDec": 0.1,
    "globalDecay": 0.0,
    "maxAge": 0,
    "minThreshold": 9,
    "activationThreshold": 12,
    "outputType": "normal",
    "pamLength": 3,
}
_INPUT_FILE_PATH = resource_filename(
  "nupic.datafiles", "extra/hotgym/rec-center-hourly.csv"
)
_OUTPUT_PATH = "network-demo2-output.csv"
_NUM_RECORDS = 2000
def createTemporalAnomaly(recordParams, spatialParams=_SP_PARAMS,
                          temporalParams=_TM_PARAMS,
                          verbosity=_VERBOSITY):
  """Generates a Network with connected RecordSensor, SP, TM.
  This function takes care of generating regions and the canonical links.
  The network has a sensor region reading data from a specified input and
  passing the encoded representation to an SPRegion.
  The SPRegion output is passed to a TMRegion.
  Note: this function returns a network that needs to be initialized. This
  allows the user to extend the network by adding further regions and
  connections.
  :param recordParams: a dict with parameters for creating RecordSensor region.
  :param spatialParams: a dict with parameters for creating SPRegion.
  :param temporalParams: a dict with parameters for creating TMRegion.
  :param verbosity: an integer representing how chatty the network will be.
  :returns: an uninitialized Network instance.
  """
  # recordParams must supply the input path and the kwargs for both encoders.
  inputFilePath = recordParams["inputFilePath"]
  scalarEncoderArgs = recordParams["scalarEncoderArgs"]
  dateEncoderArgs = recordParams["dateEncoderArgs"]
  scalarEncoder = ScalarEncoder(**scalarEncoderArgs)
  dateEncoder = DateEncoder(**dateEncoderArgs)
  encoder = MultiEncoder()
  encoder.addEncoder(scalarEncoderArgs["name"], scalarEncoder)
  encoder.addEncoder(dateEncoderArgs["name"], dateEncoder)
  network = Network()
  network.addRegion("sensor", "py.RecordSensor",
                    json.dumps({"verbosity": verbosity}))
  sensor = network.regions["sensor"].getSelf()
  sensor.encoder = encoder
  sensor.dataSource = FileRecordStream(streamID=inputFilePath)
  # Create the spatial pooler region
  # NOTE(review): mutable default argument — when called with the default,
  # this writes "inputWidth" into the module-level _SP_PARAMS dict in place.
  spatialParams["inputWidth"] = sensor.encoder.getWidth()
  network.addRegion("spatialPoolerRegion", "py.SPRegion",
                    json.dumps(spatialParams))
  # Link the SP region to the sensor input (feed-forward, reset, and the two
  # top-down feedback paths expected by the RecordSensor).
  network.link("sensor", "spatialPoolerRegion", "UniformLink", "")
  network.link("sensor", "spatialPoolerRegion", "UniformLink", "",
               srcOutput="resetOut", destInput="resetIn")
  network.link("spatialPoolerRegion", "sensor", "UniformLink", "",
               srcOutput="spatialTopDownOut", destInput="spatialTopDownIn")
  network.link("spatialPoolerRegion", "sensor", "UniformLink", "",
               srcOutput="temporalTopDownOut", destInput="temporalTopDownIn")
  # Add the TPRegion on top of the SPRegion
  network.addRegion("temporalPoolerRegion", "py.TMRegion",
                    json.dumps(temporalParams))
  network.link("spatialPoolerRegion", "temporalPoolerRegion", "UniformLink", "")
  network.link("temporalPoolerRegion", "spatialPoolerRegion", "UniformLink", "",
               srcOutput="topDownOut", destInput="topDownIn")
  spatialPoolerRegion = network.regions["spatialPoolerRegion"]
  # Make sure learning is enabled
  spatialPoolerRegion.setParameter("learningMode", True)
  # We want temporal anomalies so disable anomalyMode in the SP. This mode is
  # used for computing anomalies in a non-temporal model.
  spatialPoolerRegion.setParameter("anomalyMode", False)
  temporalPoolerRegion = network.regions["temporalPoolerRegion"]
  # Enable topDownMode to get the predicted columns output
  temporalPoolerRegion.setParameter("topDownMode", True)
  # Make sure learning is enabled (this is the default)
  temporalPoolerRegion.setParameter("learningMode", True)
  # Enable inference mode so we get predictions
  temporalPoolerRegion.setParameter("inferenceMode", True)
  # Enable anomalyMode to compute the anomaly score.
  temporalPoolerRegion.setParameter("anomalyMode", True)
  return network
def runNetwork(network, writer):
  """Stream _NUM_RECORDS records through ``network`` and log the results.

  After every single-record iteration the row
  ``(recordIndex, consumption, anomalyScore)`` is appended via ``writer``.

  :param network: a Network instance to run
  :param writer: a csv.writer instance to write output to
  """
  sensor = network.regions["sensor"]
  temporalMemory = network.regions["temporalPoolerRegion"]
  for recordIndex in xrange(_NUM_RECORDS):
    # Advance the network by exactly one record.
    network.run(1)
    # Pull the anomaly score and the raw consumption value for this record.
    score = temporalMemory.getOutputData("anomalyScore")[0]
    consumption = sensor.getOutputData("sourceOut")[0]
    writer.writerow((recordIndex, consumption, score))
if __name__ == "__main__":
inputFilePath = resource_filename(
"nupic.datafiles", "extra/hotgym/rec-center-hourly.csv"
)
scalarEncoderArgs = {
"w": 21,
"minval": 0.0,
"maxval": 100.0,
"periodic": False,
"n": 50,
"radius": 0,
"resolution": 0,
"name": "consumption",
"verbosity": 0,
"clipInput": True,
"forced": False,
}
dateEncoderArgs = {
"season": 0,
"dayOfWeek": 0,
"weekend": 0,
"holiday": 0,
"timeOfDay": (21, 9.5),
"customDays": 0,
"name": "timestamp",
"forced": True
}
recordParams = {
"inputFilePath": _INPUT_FILE_PATH,
"scalarEncoderArgs": scalarEncoderArgs,
"dateEncoderArgs": dateEncoderArgs,
}
network = createTemporalAnomaly(recordParams)
outputPath = os.path.join(os.path.dirname(__file__), _OUTPUT_PATH)
with open(outputPath, "w") as outputFile:
writer = csv.writer(outputFile)
print "Writing output to %s" % outputPath
runNetwork(network, writer)
| 7,578 | Python | .py | 186 | 36.456989 | 80 | 0.717998 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,745 | identity_region.py | numenta_nupic-legacy/examples/network/custom_region/identity_region.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.bindings.regions.PyRegion import PyRegion
class IdentityRegion(PyRegion):
  """
  IdentityRegion is designed to implement a dummy region that returns the input
  as the output.
  """

  def __init__(self, dataWidth):
    # The width must be strictly positive: it sizes the "out" output below.
    if dataWidth <= 0:
      raise ValueError("Parameter dataWidth must be > 0")
    self._dataWidth = dataWidth

  def initialize(self):
    # Nothing to set up: this region is stateless apart from its width.
    pass

  def compute(self, inputs, outputs):
    """Copy the input vector straight through to the output (one iteration)."""
    outputs["out"][:] = inputs["in"]

  @classmethod
  def getSpec(cls):
    """Return the Spec for IdentityRegion."""
    # Build each sub-spec separately, then assemble the full region spec.
    inputSpec = {
        "description": "The input vector.",
        "dataType": "Real32",
        "count": 0,
        "required": True,
        "regionLevel": False,
        "isDefaultInput": True,
        "requireSplitterMap": False,
    }
    outputSpec = {
        "description": "A copy of the input vector.",
        "dataType": "Real32",
        "count": 0,
        "regionLevel": True,
        "isDefaultOutput": True,
    }
    widthParam = {
        "description": "Size of inputs",
        "accessMode": "Read",
        "dataType": "UInt32",
        "count": 1,
        "constraints": "",
    }
    return {
        "description": IdentityRegion.__doc__,
        "singleNodeOnly": True,
        "inputs": {"in": inputSpec},
        "outputs": {"out": outputSpec},
        "parameters": {"dataWidth": widthParam},
    }

  def getOutputElementCount(self, name):
    # Only the "out" output exists; anything else is a caller error.
    if name != "out":
      raise Exception("Unrecognized output: " + name)
    return self._dataWidth
| 2,601 | Python | .py | 77 | 27.480519 | 79 | 0.599681 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,746 | webdata.py | numenta_nupic-legacy/examples/prediction/category_prediction/webdata.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This example shows how to use the `SDRCategoryEncoder` with `HTMPredictionModel`
to analyze web site traffic data by extracting temporal patterns from user
sessions described as a sequences of web page categories.
We will use the [MSNBC.com Anonymous Web Data][1] data set provided by
[UCI Machine Learning Repository][2] to predict the next page the user is more
likely to click. In this data set each page is assigned a category and the user
behavior is recorded as navigating from one page to another.
Dataset characteristics:
- Number of users: 989,818
- Average number of visits per user: 5.7
- Number of categories: 17
- Number of URLs per category: 10 to 5,000
See [dataset][1] description for more information.
References:
1. https://archive.ics.uci.edu/ml/datasets/MSNBC.com+Anonymous+Web+Data
2. Lichman, M. (2013). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
Irvine, CA: University of California, School of Information and Computer Science
"""
import os
import random
import sys
import zipfile
from operator import itemgetter
import numpy as np
import prettytable
from prettytable import PrettyTable
from nupic.frameworks.opf.model_factory import ModelFactory
# List of page categories used in the dataset
PAGE_CATEGORIES = [
    "frontpage", "news", "tech", "local", "opinion", "on-air", "misc", "weather",
    "msn-news", "health", "living", "business", "msn-sports", "sports", "summary",
    "bbs", "travel"
]
# Configure the sensor/input region using the "SDRCategoryEncoder" to encode
# the page category into SDRs suitable for processing directly by the TM
SENSOR_PARAMS = {
    "verbosity": 0,
    "encoders": {
        "page": {
            "fieldname": "page",
            "name": "page",
            "type": "SDRCategoryEncoder",
            # The output of this encoder will be passed directly to the TM region,
            # therefore the number of bits should match TM's "inputWidth" parameter
            "n": 1024,
            # Use ~2% sparsity
            "w": 21
        },
    },
}
# Configure the temporal memory to learn a sequence of page SDRs and make
# predictions on the next page of the sequence.
TM_PARAMS = {
    "seed": 1960,
    # Use "nupic.bindings.algorithms.TemporalMemoryCPP" algorithm
    "temporalImp": "tm_cpp",
    # Should match the encoder output
    "inputWidth": 1024,
    "columnCount": 1024,
    # Use 1 cell per column for first order prediction.
    # Use more cells per column for variable order predictions.
    "cellsPerColumn": 1,
}
# Configure the output region with a classifier used to decode TM SDRs back
# into pages
CL_PARAMS = {
    "implementation": "cpp",
    "regionName": "SDRClassifierRegion",
    # alpha parameter controls how fast the classifier learns/forgets. Higher
    # values make it adapt faster and forget older patterns faster.
    "alpha": 0.001,
    "steps": 1,
}
# Create a simple HTM network that will receive the current page as input, pass
# the encoded page SDR to the temporal memory to learn the sequences and
# interpret the output SDRs from the temporary memory using the SDRClassifier
# whose output will be a list of predicted next pages and their probabilities.
#
# page => [encoder] => [TM] => [classifier] => prediction
#
# NOTE: MODEL_PARAMS embeds the dicts above by reference, so any later
# mutation of SENSOR_PARAMS/TM_PARAMS/CL_PARAMS is visible here too.
MODEL_PARAMS = {
    "version": 1,
    "model": "HTMPrediction",
    "modelParams": {
        "inferenceType": "TemporalMultiStep",
        "sensorParams": SENSOR_PARAMS,
        # The purpose of the spatial pooler is to create a stable representation of
        # the input SDRs. In our case the category encoder output is already a
        # stable representation of the category therefore adding the spatial pooler
        # to this network would not help and could potentially slow down the
        # learning process
        "spEnable": False,
        "spParams": {},
        "tmEnable": True,
        "tmParams": TM_PARAMS,
        "clParams": CL_PARAMS,
    },
}
# Learn page sequences from the first 10,000 user sessions.
# We chose 10,000 because it gives results that are good enough for this example
# Use more records for learning to improve the prediction accuracy
LEARNING_RECORDS = 10000
def computeAccuracy(model, size, top):
  """
  Compute prediction accuracy by checking if the next page in the sequence is
  within the top N predictions calculated by the model

  Args:
    model: HTM model
    size: Sample size
    top: top N predictions to use

  Returns: Probability the next page in the sequence is within the top N
           predicted pages
  """
  hits = []
  # Load MSNBC web data file
  archivePath = os.path.join(os.path.dirname(__file__), "msnbc990928.zip")
  with zipfile.ZipFile(archivePath) as archive:
    with archive.open("msnbc990928.seq") as datafile:
      # Skip the 7 header lines plus the lines already consumed for learning,
      # so accuracy is measured only on unseen sessions.
      for _ in xrange(7 + LEARNING_RECORDS):
        next(datafile)
      for _ in xrange(size):
        session = readUserSession(datafile)
        model.resetSequenceStates()
        # Walk consecutive (current, next) page pairs of the session.
        for current, following in zip(session, session[1:]):
          result = model.run({"page": current})
          inferences = result.inferences["multiStepPredictions"][1]
          # Rank candidate next pages by descending probability.
          ranked = sorted(inferences.items(), key=itemgetter(1), reverse=True)
          topPages = [page for page, _prob in ranked[:top]]
          hits.append(1 if following in topPages else 0)
  return np.mean(hits)
def readUserSession(datafile):
  """
  Reads the user session record from the file's cursor position

  Args:
    datafile: Data file whose cursor points at the beginning of the record

  Returns:
    list of pages in the order clicked by the user
  """
  for record in datafile:
    tokens = record.split()
    # Keep only sessions with 2..500 page views: shorter sessions carry no
    # sequence information, longer ones are treated as outliers.
    if not 2 <= len(tokens) <= 500:
      continue
    # Page indices in the file are 1-based; map them to category names.
    return [PAGE_CATEGORIES[int(token) - 1] for token in tokens]
  # End of file reached without finding a usable session.
  return []
def main():
  """Train an HTM model on MSNBC user sessions, then demo inference/accuracy.

  Side effects: reads msnbc990928.zip next to this script and prints tables
  and progress to stdout.
  """
  # Create HTM prediction model and enable inference on the page field
  model = ModelFactory.create(MODEL_PARAMS)
  model.enableInference({"predictedField": "page"})
  # Use the model encoder to display the encoded SDRs the model will learn
  sdr_table = PrettyTable(field_names=["Page Category",
                                       "Encoded SDR (on bit indices)"],
                          sortby="Page Category")
  sdr_table.align = "l"
  encoder = model._getEncoder()
  sdrout = np.zeros(encoder.getWidth(), dtype=np.bool)
  for page in PAGE_CATEGORIES:
    # Encode in place, then show only the indices of the ON bits.
    encoder.encodeIntoArray({"page": page}, sdrout)
    sdr_table.add_row([page, sdrout.nonzero()[0]])
  print "The following table shows the encoded SDRs for every page " \
        "category in the dataset"
  print sdr_table
  # At this point our model is configured and ready to learn the user sessions
  # Extract the learning data from MSNBC archive and stream it to the model
  filename = os.path.join(os.path.dirname(__file__), "msnbc990928.zip")
  with zipfile.ZipFile(filename) as archive:
    with archive.open("msnbc990928.seq") as datafile:
      # Skip header lines (first 7 lines)
      for _ in xrange(7):
        next(datafile)
      print
      print "Start learning page sequences using the first {} user " \
            "sessions".format(LEARNING_RECORDS)
      model.enableLearning()
      for count in xrange(LEARNING_RECORDS):
        # Learn each user session as a single sequence
        session = readUserSession(datafile)
        model.resetSequenceStates()
        for page in session:
          model.run({"page": page})
        # Simple progress status
        sys.stdout.write("\rLearned {} Sessions".format(count + 1))
        sys.stdout.flush()
      print "\nFinished learning"
      model.disableLearning()
      # Use the new HTM model to predict next user session
      # The test data starts right after the learning data
      print
      print "Start Inference using a new user session from the dataset"
      prediction_table = PrettyTable(field_names=["Page", "Prediction"],
                                     hrules=prettytable.ALL)
      prediction_table.align["Prediction"] = "l"
      # Infer one page of the sequence at the time
      model.resetSequenceStates()
      session = readUserSession(datafile)
      for page in session:
        result = model.run({"page": page})
        inferences = result.inferences["multiStepPredictions"][1]
        # Print predictions ordered by probabilities
        predicted = sorted(inferences.items(),
                           key=itemgetter(1),
                           reverse=True)
        prediction_table.add_row([page, zip(*predicted)[0]])
      print "User Session to Predict: ", session
      print prediction_table
  print
  print "Compute prediction accuracy by checking if the next page in the " \
        "sequence is within the predicted pages calculated by the model:"
  # Top-1 accuracy, then a more forgiving top-3 accuracy, on 100 sessions.
  accuracy = computeAccuracy(model, 100, 1)
  print " - Prediction Accuracy:", accuracy
  accuracy = computeAccuracy(model, 100, 3)
  print " - Accuracy Predicting Top 3 Pages:", accuracy
if __name__ == "__main__":
  # Fix both the stdlib and numpy RNG seeds so the SDRCategoryEncoder output
  # and results are reproducible across runs.
  random.seed(1)
  np.random.seed(1)
  main()
| 10,376 | Python | .py | 249 | 36.835341 | 89 | 0.69863 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,747 | inspect_test.py | numenta_nupic-legacy/examples/prediction/experiments/inspect_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.engine import *
import os
import sys
# Usage: inspect_test.py <experiment_dir> — opens the experiment's trained
# network and launches the inspector UI.
os.chdir(sys.argv[1] + '/networks')
# Prefer 'trained_l1.nta', falling back to 'trained.nta'.
# NOTE(review): if neither file exists, the loop falls through with
# name == 'trained.nta' and Network() will fail on the missing file.
for name in ('trained_l1.nta', 'trained.nta'):
  if os.path.exists(name):
    break
n = Network(name)
n.inspect()
| 1,178 | Python | .py | 29 | 39.275862 | 72 | 0.673362 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,748 | description.py | numenta_nupic-legacy/examples/prediction/experiments/dutyCycle/bSDRSameDC/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.frameworks.prediction.helpers import importBaseDescription
# Sub-experiment overrides applied on top of ../base/description.py; any key
# left commented out falls back to the base experiment's default.
config = dict(
  #sensorVerbosity=3,
  iterationCount = 25000,
  spPeriodicStats = 0,
  #numAValues = 25,
  #numBValues = 25,
  #encodingFieldStyleA = 'contiguous',
  #encodingFieldWidthA = 50,
  #encodingOnBitsA = 5,
  #encodingFieldStyleB = 'contiguous',
  #encodingFieldWidthB = 25,
  #encodingOnBitsB = 5,
  # None means B values are uniformly likely (no bias towards B == 0).
  b0Likelihood = None,
  )
# Pull in the base experiment with these overrides and re-export everything it
# defines as this module's globals.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 1,578 | Python | .py | 37 | 38.378378 | 72 | 0.657011 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,749 | description.py | numenta_nupic-legacy/examples/prediction/experiments/dutyCycle/base/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import random
from nupic.frameworks.prediction.helpers import (updateConfigFromSubConfig,
getSubExpDir)
from nupic.encoders import (LogEncoder,
DateEncoder,
MultiEncoder,
CategoryEncoder,
ScalarEncoder,
SDRCategoryEncoder)
#from nupic.data import TextFileSource
from nupic.data.file_record_stream import FileRecordStream
from nupic.frameworks.prediction.callbacks import (printSPCoincidences,
displaySPCoincidences,
setAttribute,
sensorOpen)
# ========================================================================
# Define this experiment's base configuration, and adjust for any modifications
# if imported from a sub-experiment.
# Base experiment configuration; sub-experiments override entries via
# updateConfigFromSubConfig() below.
config = dict(
  sensorVerbosity = 0,
  spVerbosity = 0,
  ppVerbosity = 0,
  spPeriodicStats = 500,
  spNumActivePerInhArea = 11,
  spSynPermInactiveDec = 0.005,
  spCoincCount = 300,
  spMinPctDutyCycleAfterInh = 0.001,
  # None means derived below from spNumActivePerInhArea.
  tpActivationThresholds = None,
  trainSP = True,
  iterationCount = 50000,
  #iterationCount = 100,
  trainingSet = "trainingData.csv",
  testingSet = "testingData.csv",
  # Data set and encoding
  numAValues = 25,
  numBValues = 25,
  b0Likelihood = 0.90, # Likelihood of getting 0 out of field B. None means
                       # not any more likely than any other B value.
  testSetPct = 0.0, # What percent of unique combinations to reserve
  encodingFieldStyleA = 'sdr', # contiguous, sdr
  encodingFieldWidthA = 50,
  encodingOnBitsA = 21,
  encodingFieldStyleB = 'sdr', # contiguous, sdr
  encodingFieldWidthB = 50, # 15, None means set same as A
  encodingOnBitsB = 23, # 3, None means set same as A
  )
updateConfigFromSubConfig(config)
# Fill in derived defaults: field B mirrors field A unless overridden, and the
# TP activation thresholds sweep from 8 up to the SP's active-column count.
if config['encodingFieldWidthB'] is None:
  config['encodingFieldWidthB'] = config['encodingFieldWidthA']
if config['encodingOnBitsB'] is None:
  config['encodingOnBitsB'] = config['encodingOnBitsA']
if config['tpActivationThresholds'] is None:
  config['tpActivationThresholds'] = range(8, config['spNumActivePerInhArea']+1)
def getBaseDatasets():
  """Return the pre-existing datasets this experiment depends on.

  All data for this experiment is generated on the fly (see getDatasets),
  so the mapping is always empty.
  """
  return {}
def getDatasets(baseDatasets, generate=False):
# We're going to put datasets in data/dutyCycle/expname_<file>.csv
expDir = getSubExpDir()
if expDir is None:
name = "base"
else:
name = os.path.basename(expDir)
dataDir = "data/dutyCycle"
trainingFilename = os.path.join(dataDir, name + "_" + config['trainingSet'])
datasets = dict(trainingFilename=trainingFilename)
numUnique = config['numAValues'] * config['numBValues']
testSetSize = int(config['testSetPct'] * numUnique)
if testSetSize > 0:
testingFilename = os.path.join(dataDir, config['testingSet'])
datasets['testingFilename'] = testingFilename
else:
testingFilename = None
if not generate:
return datasets
# ========================================================================
# Create the data files. We create a training set and a testing set. The
# testing set contains combinations of A and B that do not appear in the
# training set
#
if not os.path.exists(dataDir):
os.makedirs(dataDir)
if (not os.path.exists(trainingFilename)) or \
(testingFilename is not None and not os.path.exists(testingFilename)):
print "====================================================================="
print "Creating data set..."
# Create the pool of A values
aValues = range(config['numAValues'])
# Create the pool of B values, allowing for unequal distribution
bValues = range(config['numBValues'])
# Pick a random A and B value
random.seed(42)
def generateSample():
a = random.sample(aValues, 1)[0]
b = random.sample(bValues, 1)[0]
return (a, b)
if config['b0Likelihood'] is not None:
print "In the B dataset, there is a %d%% chance of getting a B value of 0" \
% (int(100 * config['b0Likelihood']))
# likelihood of B0 is: (numB0) / (numB0 + numBvalues)
# solving for numB0 = numBValues / (1 - likelihood)
numB0Values = int(round(len(bValues) / (1.0 - config['b0Likelihood'])))
bValues.extend([0]*numB0Values) # 90% chance of getting first B value
else:
print "All values in B are equally likely"
print
# -----------------------------------------------------------------------
fields = [('fieldA', 'int', ''), ('fieldB', 'int', '')]
# Generate the test set
testSet = set()
if testSetSize > 0:
# Hold back 10% of the possible combinations for the test set
while len(testSet) < testSetSize:
testSet.add(generateSample())
testList = list(testSet)
testList.sort()
print "These (A,B) combinations are reserved for the test set:", testList
print
# Write out the test set
print "Creating test set: %s..." % (testingFilename)
print "Contains %d unique combinations of A and B chosen from the %d possible" \
% (testSetSize, numUnique)
with File(testingFilename, fields=fields) as o:
numSamples = 0
while numSamples < config['iterationCount']:
sample = generateSample()
if sample in testSet:
o.write(list(sample))
#print >>fd, "%d, %d" % (sample[0], sample[1])
numSamples += 1
print
# ------------------------------------------------------------------------
# Write out the training set
print "Creating training set: %s..." % (trainingFilename)
if len(testSet) > 0:
print "Contains %d samples, chosen from %d of the possible %d combinations " \
"that are not in the test set" % (config['iterationCount'],
numUnique - testSetSize, numUnique)
else:
print "Contains %d samples" % (config['iterationCount'])
print
with FileRecordStream(trainingFilename, write=True, fields=fields) as o:
numSamples = 0
while numSamples < config['iterationCount']:
sample = generateSample()
if sample in testSet:
continue
#print >>fd, "%d, %d" % (sample[0], sample[1])
o.appendRecord(list(sample))
numSamples += 1
return datasets
def getDescription(datasets):
  """Build the prediction-framework experiment description for this network.

  datasets: dict from getDatasets(); must contain 'trainingFilename' and may
            contain 'testingFilename'.
  retval:   description dict with 'options', 'network', 'spTrain' and 'infer'
            sections, consumed by the prediction framework runner.
  """
  # ========================================================================
  # Encoder for the sensor
  encoder = MultiEncoder()
  # Field A: periodic scalar encoding ('contiguous') or random SDR category
  # encoding ('sdr'), selected by config.
  if config['encodingFieldStyleA'] == 'contiguous':
    encoder.addEncoder('fieldA', ScalarEncoder(w=config['encodingOnBitsA'],
                  n=config['encodingFieldWidthA'], minval=0,
                  maxval=config['numAValues'], periodic=True, name='fieldA'))
  elif config['encodingFieldStyleA'] == 'sdr':
    encoder.addEncoder('fieldA', SDRCategoryEncoder(w=config['encodingOnBitsA'],
                  n=config['encodingFieldWidthA'],
                  categoryList=range(config['numAValues']), name='fieldA'))
  else:
    assert False
  # Field B: same two encoding choices, with independent width/on-bit counts.
  if config['encodingFieldStyleB'] == 'contiguous':
    encoder.addEncoder('fieldB', ScalarEncoder(w=config['encodingOnBitsB'],
                  n=config['encodingFieldWidthB'], minval=0,
                  maxval=config['numBValues'], periodic=True, name='fieldB'))
  elif config['encodingFieldStyleB'] == 'sdr':
    encoder.addEncoder('fieldB', SDRCategoryEncoder(w=config['encodingOnBitsB'],
                  n=config['encodingFieldWidthB'],
                  categoryList=range(config['numBValues']), name='fieldB'))
  else:
    assert False
  # ========================================================================
  # Network definition
  # ------------------------------------------------------------------
  # Node params
  # The inputs are long, horizontal vectors
  inputDimensions = (1, encoder.getWidth())
  # Layout the coincidences vertically stacked on top of each other, each
  # looking at the entire input field.
  columnDimensions = (config['spCoincCount'], 1)
  sensorParams = dict(
    # encoder/datasource are not parameters so don't include here
    verbosity=config['sensorVerbosity']
  )
  CLAParams = dict(
    inputDimensions = inputDimensions,
    columnDimensions = columnDimensions,
    potentialRadius = inputDimensions[1]/2,  # NOTE(review): integer division under Python 2
    potentialPct = 1.0,
    gaussianDist = 0,
    commonDistributions = 0,    # should be False if possibly not training
    localAreaDensity = -1, #0.05,
    numActiveColumnsPerInhArea = config['spNumActivePerInhArea'],
    dutyCyclePeriod = 1000,
    stimulusThreshold = 1,
    synPermInactiveDec = config['spSynPermInactiveDec'],
    synPermActiveInc = 0.02,
    synPermActiveSharedDec=0.0,
    synPermOrphanDec = 0.0,
    minPctDutyCycleBeforeInh = 0.001,
    minPctDutyCycleAfterInh = config['spMinPctDutyCycleAfterInh'],
    minDistance = 0.05,
    computeTopDown = 1,
    spVerbosity = config['spVerbosity'],
    spSeed = 1,
    printPeriodicStats = int(config['spPeriodicStats']),
    # TM params
    disableTemporal = 1,
    # General params
    trainingStep = 'spatial',
    )
  trainingDataSource = FileRecordStream(datasets['trainingFilename'])
  description = dict(
    options = dict(
      logOutputsDuringInference = False,
    ),
    network = dict(
      sensorDataSource = trainingDataSource,
      sensorEncoder = encoder,
      sensorParams = sensorParams,
      CLAType = 'py.CLARegion',
      CLAParams = CLAParams,
      classifierType = None,
      classifierParams = None),
    )
  if config['trainSP']:
    description['spTrain'] = dict(
      iterationCount=config['iterationCount'],
      #iter=displaySPCoincidences(50),
      finish=printSPCoincidences()
      ),  # NOTE(review): trailing comma wraps the dict in a 1-tuple, unlike
          # the else branch below -- confirm the framework accepts a sequence
  else:
    description['spTrain'] = dict(
      # need to train with one iteration just to initialize data structures
      iterationCount=1)
  # ============================================================================
  # Inference tests
  inferSteps = []
  # ----------------------------------------
  # Training dataset
  if True:
    datasetName = 'bothTraining'
    # Baseline pass over the training data (prints learned coincidences).
    inferSteps.append(
      dict(name = '%s_baseline' % datasetName,
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['trainingFilename'])],
           ppOptions = dict(printLearnedCoincidences=True),
          )
      )
    # Classification-accuracy pass over the training data.
    inferSteps.append(
      dict(name = '%s_acc' % datasetName,
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['trainingFilename'])],
           ppOptions = dict(onlyClassificationAcc=True,
                            tpActivationThresholds=config['tpActivationThresholds'],
                            computeDistances=True,
                            verbosity = 1),
          )
      )
  # ----------------------------------------
  # Testing dataset
  if 'testingFilename' in datasets:
    datasetName = 'bothTesting'
    inferSteps.append(
      dict(name = '%s_baseline' % datasetName,
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['testingFilename'])],
           ppOptions = dict(printLearnedCoincidences=False),
          )
      )
    inferSteps.append(
      dict(name = '%s_acc' % datasetName,
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['testingFilename'])],
           ppOptions = dict(onlyClassificationAcc=True,
                            tpActivationThresholds=config['tpActivationThresholds']),
          )
      )
  description['infer'] = inferSteps
  return description
| 12,866 | Python | .py | 305 | 34.770492 | 86 | 0.610551 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,750 | permutationsActiveCount.py | numenta_nupic-legacy/examples/prediction/experiments/dutyCycle/base/permutationsActiveCount.py |
# Parameter sweep: vary the number of SP active columns per inhibition area
# (with matching TM activation thresholds) at a fixed iteration count.
permutations = {
  'iterationCount': [25000],
  'spPeriodicStats': [0],
  #'spCoincCount': [200, 300, 400, 500],
  'spNumActivePerInhArea': [9, 11, 13, 15, 17],
  'tpActivationThresholds': [range(8, 18)],
  #'spSynPermInactiveDec': [0.005, 0.01, 0.02, 0.04],
}
# Result fields (regular expressions) to include in the permutations report.
report = [
  'overallTime',
  '.*classificationSamples.*',
  '.*classificationAccPct.*',
  '.*tpFitnessScore.*',
]
| 517 | Python | .py | 13 | 26.769231 | 66 | 0.479042 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,751 | permutationsEncoder.py | numenta_nupic-legacy/examples/prediction/experiments/dutyCycle/base/permutationsEncoder.py |
# Parameter sweep over encoder on-bit counts for fields A and B at a fixed
# 256-bit field width, with a uniform B distribution (b0Likelihood = 0).
permutations = dict(
  iterationCount = [50000],
  spPeriodicStats = [0],
  #encodingFieldStyleA = ['contiguous', 'sdr'],
  encodingFieldWidthA = [256],
  encodingFieldWidthB = [256],
  # BUGFIX: "[...] + range(...)" relies on range() returning a list, which is
  # only true on Python 2; build explicit lists so the sweep values are
  # identical on either interpreter.
  encodingOnBitsA = [5, 7, 9] + list(range(11, 40, 4)) + list(range(43, 100, 8)),
  encodingOnBitsB = [5, 7, 9] + list(range(11, 40, 4)) + list(range(43, 100, 8)),
  numAValues = [25],
  numBValues = [25],
  b0Likelihood = [0],
  #spSynPermInactiveDec = [0.0, 0.005],
  )
# Result fields (regular expressions) to include in the permutations report.
report = ['overallTime',
          '.*classificationSamples.*',
          '.*classificationAccPct.*',
          '.*tpFitnessScore.*',
          ]
| 843 | Python | .py | 18 | 29.888889 | 87 | 0.441558 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,752 | permutations.py | numenta_nupic-legacy/examples/prediction/experiments/dutyCycle/base/permutations.py |
# Parameter sweep: vary the SP inactive-synapse permanence decrement.
permutations = {
  'iterationCount': [50000],
  'spPeriodicStats': [0],
  #'spCoincCount': [200, 300, 400, 500],
  #'spNumActivePerInhArea': [3, 5, 7, 9, 11],
  'spSynPermInactiveDec': [0.005, 0.01, 0.02, 0.04],
}
# Result fields (regular expressions) to include in the permutations report.
report = [
  'overallTime',
  '.*classificationSamples.*',
  '.*classificationAccPct.*',
  '.*tpFitnessScore.*',
  '.*outputRepresentationChangePctAvg.*',
  '.*unusedCellsCount.*',
]
| 541 | Python | .py | 14 | 26.285714 | 65 | 0.486641 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,753 | permutationsData.py | numenta_nupic-legacy/examples/prediction/experiments/dutyCycle/base/permutationsData.py |
# Parameter sweep over dataset shape: pool sizes for fields A and B, and
# whether the B distribution is uniform or skewed 90% toward 0.
permutations = {
  'iterationCount': [50000],
  'spPeriodicStats': [500],
  #'encodingFieldStyleA': ['contiguous', 'sdr'],
  #'encodingFieldWidthA': [50],
  #'encodingFieldWidthB': [50],
  #'encodingOnBitsA': [11],
  #'encodingOnBitsB': [5],
  'numAValues': [2, 10, 25, 50],
  'numBValues': [2, 10, 25, 50],
  'b0Likelihood': [0, 0.90],
  #'spSynPermInactiveDec': [0.0, 0.005],
}
# Result fields (regular expressions) to include in the permutations report.
report = [
  'overallTime',
  '.*classificationSamples.*',
  '.*classificationAccPct.*',
  '.*tpFitnessScore.*',
  '.*outputRepresentationChangePctAvg.*',
  '.*unusedCellsCount.*',
]
| 819 | Python | .py | 20 | 27.15 | 61 | 0.447799 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,754 | description.py | numenta_nupic-legacy/examples/prediction/experiments/dutyCycle/bSDRHighDC/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.frameworks.prediction.helpers import importBaseDescription
# Sub-experiment overrides applied on top of ../base/description.py: a shorter
# run with periodic SP stats disabled and the B field heavily skewed toward 0.
config = {
  #'sensorVerbosity': 1,
  'iterationCount': 25000,
  'spPeriodicStats': 0,
  #'numAValues': 25,
  #'numBValues': 25,
  #'encodingFieldStyleA': 'contiguous',
  #'encodingFieldWidthA': 50,
  #'encodingOnBitsA': 5,
  #'encodingFieldStyleB': 'contiguous',
  #'encodingFieldWidthB': 25,
  #'encodingOnBitsB': 5,
  'b0Likelihood': 0.90,
}
# Load the base experiment with these overrides and re-export its symbols.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 1,579 | Python | .py | 37 | 38.378378 | 72 | 0.656353 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,755 | description.py | numenta_nupic-legacy/examples/prediction/experiments/dutyCycle/problem/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.frameworks.prediction.helpers import importBaseDescription
# Sub-experiment overrides applied on top of ../base/description.py: a short
# run over small, uniformly distributed A/B pools.
config = {
  #'sensorVerbosity': 3,
  'iterationCount': 1000,
  'numAValues': 10,
  'numBValues': 10,
  #'encodingFieldStyleA': 'contiguous',
  'encodingFieldWidthA': 50,
  #'encodingOnBitsA': 5,
  #'encodingFieldStyleB': 'contiguous',
  'encodingFieldWidthB': 50,
  #'encodingOnBitsB': 5,
  'b0Likelihood': None,
}
# Load the base experiment with these overrides and re-export its symbols.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 1,547 | Python | .py | 36 | 38.75 | 72 | 0.659274 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,756 | description.py | numenta_nupic-legacy/examples/prediction/experiments/confidenceTest/2/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Tests the following set of sequences:
z-a-b-c: (1X)
a-b-c: (6X)
a-d-e: (2X)
a-f-g-a-h: (1X)
We want to insure that when we see 'a', that we predict 'b' with highest
confidence, then 'd', then 'f' and 'h' with equally low confidence.
We expect the following prediction scores:
inputPredScore_at1 : 0.7
inputPredScore_at2 : 1.0
inputPredScore_at3 : 1.0
inputPredScore_at4 : 1.0
"""
from nupic.frameworks.prediction.helpers import importBaseDescription
# Sub-experiment overrides applied on top of ../base/description.py:
# train/test on the confidence2 sequence set with extra TM training passes.
config = {
  'sensorVerbosity': 0,
  'spVerbosity': 0,
  'tpVerbosity': 0,
  'ppVerbosity': 2,
  'filenameTrain': 'confidence/confidence2.csv',
  'filenameTest': 'confidence/confidence2.csv',
  'iterationCountTrain': None,
  'iterationCountTest': None,
  'trainTPRepeats': 5,
  'trainTP': True,
}
# Load the base experiment with these overrides and re-export its symbols.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 2,034 | Python | .py | 49 | 36.244898 | 72 | 0.641119 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,757 | description.py | numenta_nupic-legacy/examples/prediction/experiments/confidenceTest/3/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Problem case:
1.) learn a-b-c (2X, b-c probability = 0.2 after seeing a)
2.) learn b-c, ends up using same b-c connections as #1, (11X)
3.) learn a-b-d (4X, b-d probability = 0.4 after seeing a)
4.) learn a-b-e (3X, b-e probability = 0.4 after seeing a)
Without startCell mode, when we see a-b, we will predict c with
highest probability because of #2, when in fact we should predict d:
inputPredScore_at1 : 1.0
inputPredScore_at2 : 0.222222222222
inputPredScore_at3 : 0.0
inputPredScore_at4 : 0.0
If we use startCell mode, we shouldn't have this problem and should get the
following scores:
inputPredScore_at1 : 1.0
inputPredScore_at2 : 0.444444444444
inputPredScore_at3 : 0.0
inputPredScore_at4 : 0.0
"""
from nupic.frameworks.prediction.helpers import importBaseDescription
# Sub-experiment overrides applied on top of ../base/description.py:
# train/test on the confidence3 sequence set with extra TM training passes.
config = {
  'sensorVerbosity': 0,
  'spVerbosity': 0,
  'tpVerbosity': 0,
  'ppVerbosity': 2,
  'filenameTrain': 'confidence/confidence3.csv',
  'filenameTest': 'confidence/confidence3.csv',
  'iterationCountTrain': None,
  'iterationCountTest': None,
  'trainTPRepeats': 5,
  'trainTP': True,
}
# Load the base experiment with these overrides and re-export its symbols.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 2,384 | Python | .py | 54 | 39.240741 | 75 | 0.655233 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,758 | description.py | numenta_nupic-legacy/examples/prediction/experiments/confidenceTest/firstOrder/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
The test generates 1st order sequences using a dictionary of 5 different
elements using a transition table with the following probabilities:
1 2 3 4 5
---------------------------
1: 0%, 0%, 65%, 0%, 34%,
2: 14%, 11%, 36%, 31%, 5%,
3: 2%, 17%, 3%, 51%, 25%
4: 52%, 19%, 13%, 0%, 14%
5: 35%, 61%, 2%, 0%, 0%
What this says is that if you see a '1', the best thing to predict is a '3',
if you see a '3', the best thing to predict is a '4', etc.
There is also a reset generated every 10 elements, although this should have
little to no effect on the prediction accuracy.
When you run this dataset against 1st order n-grams, you get 52.6% accuracy,
so we would expect roughly the same accuracy using the TM:
inputPredScore_burnIn1 : 0.526
"""
from nupic.frameworks.prediction.helpers import importBaseDescription
# Sub-experiment overrides applied on top of ../base/description.py: run the
# generated first-order dataset with a single cell per column so the TM can
# only learn first-order transitions.
config = {
  'sensorVerbosity': 0,
  'spVerbosity': 0,
  'tpVerbosity': 0,
  'ppVerbosity': 0,
  'dataSetPackage': 'firstOrder',
  'iterationCountTest': 1000,
  'spNumActivePerInhArea': 1,
  #'temporalImp': 'cpp',
  'tpNCellsPerCol': 1,
  'tpInitialPerm': 0.11,
  'tpPermanenceInc': 0.10,
  'tpPermanenceDec': 0.10,
  'tpGlobalDecay': 0.10,
  'tpMaxAge': 50,
  'tpTimingEvery': 500,
}
# Load the base experiment with these overrides and re-export its symbols.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 2,596 | Python | .py | 58 | 38.051724 | 76 | 0.616987 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,759 | description.py | numenta_nupic-legacy/examples/prediction/experiments/confidenceTest/1/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Tests the following set of sequences:
a-b-c: (7X)
a-d-e: (2X)
a-f-g-a-h: (1X)
We want to insure that when we see 'a', that we predict 'b' with highest
confidence, then 'd', then 'f' and 'h' with equally low confidence.
We expect the following input prediction scores:
inputPredScore_at1 : 0.7
inputPredScore_at2 : 1.0
inputPredScore_at3 : 1.0
inputPredScore_at4 : 1.0
"""
from nupic.frameworks.prediction.helpers import importBaseDescription
# Sub-experiment overrides applied on top of ../base/description.py:
# train/test on the confidence1 sequence set with extra TM training passes.
config = {
  'sensorVerbosity': 0,
  'spVerbosity': 0,
  'tpVerbosity': 0,
  'ppVerbosity': 3,
  'filenameTrain': 'confidence/confidence1.csv',
  'filenameTest': 'confidence/confidence1.csv',
  'iterationCountTrain': None,
  'iterationCountTest': None,
  'trainTPRepeats': 3,
  'trainTP': True,
}
# Load the base experiment with these overrides and re-export its symbols.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 2,024 | Python | .py | 48 | 36.791667 | 72 | 0.643935 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,760 | description.py | numenta_nupic-legacy/examples/prediction/experiments/confidenceTest/base/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import imp
from nupic.encoders import (LogEncoder,
DateEncoder,
MultiEncoder,
CategoryEncoder,
SDRCategoryEncoder,
ScalarEncoder)
from nupic.data.file_record_stream import FileRecordStream
from nupic.frameworks.prediction.callbacks import (printSPCoincidences,
printTPCells,
printTPTiming,
displaySPCoincidences,
setAttribute,
sensorRewind,
sensorOpen)
from nupic.frameworks.prediction.helpers import updateConfigFromSubConfig
# ----------------------------------------------------------------------
# Define this experiment's base configuration, and adjust for any modifications
# if imported from a sub-experiment.
# Base experiment configuration; a sub-experiment may override any key via
# updateConfigFromSubConfig().
config = dict(
  sensorVerbosity = 0,
  spVerbosity = 0,
  tpVerbosity = 0,
  ppVerbosity = 0,
  dataSetPackage = None,  # This can be specified in place of the next 6:
  filenameTrain = 'confidence/confidence1.csv',
  filenameTest = 'confidence/confidence1.csv',
  filenameCategory = None,
  dataGenScript = None,
  dataDesc = None,
  dataGenNumCategories = None,
  dataGenNumTraining = None,
  dataGenNumTesting = None,
  noiseAmts = [],
  iterationCountTrain = None,
  iterationCountTest = None,
  evalTrainingSetNumIterations = 10000, # Set to 0 to disable completely
  trainSP = True,
  trainTP = True,
  trainTPRepeats = 1,
  computeTopDown = 1,
  # Encoder
  overlappingPatterns = 0,
  # SP params
  disableSpatial = 1,
  spPrintPeriodicStats = 0,   # An integer N: print stats every N iterations
  spCoincCount = 200,
  spNumActivePerInhArea = 3,
  # TM params
  tpNCellsPerCol = 20,
  tpInitialPerm = 0.6,
  tpPermanenceInc = 0.1,
  tpPermanenceDec = 0.000,
  tpGlobalDecay = 0.0,
  tpPAMLength = 1,
  tpMaxSeqLength = 0,
  tpMaxAge = 1,
  tpTimingEvery = 0,
  temporalImp = 'cpp',
  )
updateConfigFromSubConfig(config)
# ==========================================================================
# Was a complete dataset package specified? This is an alternate way to
# specify a bunch of dataset related config parameters at once. They are
# especially helpful when running permutations - it keeps the permutations
# directory names shorter.
if config['dataSetPackage'] is not None:
  # A package may only be used when none of the individual dataset parameters
  # were overridden (they must still hold their defaults).
  # FIX: None comparisons below now use "is None" per PEP 8 (was "== None").
  assert (config['filenameTrain'] == 'confidence/confidence1.csv')
  assert (config['filenameTest'] == 'confidence/confidence1.csv')
  assert (config['filenameCategory'] is None)
  assert (config['dataGenScript'] is None)
  assert (config['dataDesc'] is None)
  assert (config['dataGenNumCategories'] is None)
  assert (config['dataGenNumTraining'] is None)
  assert (config['dataGenNumTesting'] is None)
  if config['dataSetPackage'] == 'firstOrder':
    config['filenameTrain'] = 'extra/firstOrder/fo_1000_10_train_resets.csv'
    config['filenameTest'] = 'extra/firstOrder/fo_10000_10_test_resets.csv'
    config['filenameCategory'] = 'extra/firstOrder/categories.txt'
  elif config['dataSetPackage'] == 'secondOrder0':
    config['filenameTrain'] = None
    config['filenameTest'] = None
    config['filenameCategory'] = None
    config['dataGenScript'] = 'extra/secondOrder/makeDataset.py'
    config['dataDesc'] = 'model0'
    config['dataGenNumCategories'] = 20
    config['dataGenNumTraining'] = 5000
    config['dataGenNumTesting'] = 1000
  elif config['dataSetPackage'] == 'secondOrder1':
    config['filenameTrain'] = None
    config['filenameTest'] = None
    config['filenameCategory'] = None
    config['dataGenScript'] = 'extra/secondOrder/makeDataset.py'
    config['dataDesc'] = 'model1'
    config['dataGenNumCategories'] = 25
    config['dataGenNumTraining'] = 5000
    config['dataGenNumTesting'] = 1000
  elif config['dataSetPackage'] == 'secondOrder2':
    config['filenameTrain'] = None
    config['filenameTest'] = None
    config['filenameCategory'] = None
    config['dataGenScript'] = 'extra/secondOrder/makeDataset.py'
    config['dataDesc'] = 'model2'
    config['dataGenNumCategories'] = 5
    config['dataGenNumTraining'] = 5000
    config['dataGenNumTesting'] = 1000
  else:
    # Unknown package name; fail loudly (kept as assert for compatibility).
    assert False
def getBaseDatasets():
  """Return the dataset-related config entries that are actually set.

  Maps each of the four dataset config keys to its configured value,
  omitting any key whose value is None.
  """
  keys = ('filenameTrain', 'filenameTest', 'filenameCategory', 'dataGenScript')
  return {k: config[k] for k in keys if config[k] is not None}
def getDatasets(baseDatasets, generate=False):
  """Resolve the experiment's dataset paths and optionally generate the data.

  Parameters:
  ----------------------------------------------------------------------
  baseDatasets:  dict from getBaseDatasets()
  generate:      if True, run the configured dataGenScript to write the
                 train/test/category files
  retval:        copy of baseDatasets with 'filenameTrain', 'filenameTest'
                 and 'filenameCategory' pointing at the generated files;
                 returned unchanged when no generation script is configured
  """
  # nothing to generate if no script
  # FIX: idiomatic membership test (was "if not 'dataGenScript' in ...").
  if 'dataGenScript' not in baseDatasets:
    return baseDatasets
  # -------------------------------------------------------------------
  # Form the path to each dataset
  datasets = dict(baseDatasets)
  dataPath = os.path.dirname(baseDatasets['dataGenScript'])
  # At some point, this prefix will be modified to be unique for each
  # possible variation of parameters into the data generation script.
  prefix = '%s' % (config['dataDesc'])
  datasets['filenameTrain'] = os.path.join(dataPath,
                                           '%s_train.csv' % prefix)
  datasets['filenameTest'] = os.path.join(dataPath,
                                          '%s_test.csv' % prefix)
  datasets['filenameCategory'] = os.path.join(dataPath,
                                              '%s_categories.txt' % prefix)
  if not generate:
    return datasets
  # -------------------------------------------------------------------
  # Generate our data by loading and invoking the generation script.
  makeDataset = imp.load_source('makeDataset', baseDatasets['dataGenScript'])
  makeDataset.generate(model = config['dataDesc'],
                       filenameTrain = datasets['filenameTrain'],
                       filenameTest = datasets['filenameTest'],
                       filenameCategory = datasets['filenameCategory'],
                       numCategories=config['dataGenNumCategories'],
                       numTrainingRecords=config['dataGenNumTraining'],
                       numTestingRecords=config['dataGenNumTesting'],
                       numNoise=0, resetsEvery=None)
  return datasets
def getDescription(datasets):
  """Build the prediction-framework experiment description.

  datasets: dict from getDatasets(); must contain 'filenameTrain', may
            contain 'filenameTest' and 'filenameCategory'.
  retval:   description dict with 'options', 'network', 'spTrain',
            optionally 'tpTrain', and 'infer' sections.
  """
  # ========================================================================
  # Network definition
  # Encoder for the sensor
  encoder = MultiEncoder()
  if 'filenameCategory' in datasets:
    # BUGFIX: previously "open(...).xreadlines()", which leaked the file
    # handle (and xreadlines() is a Python-2-only API).  Iterating the file
    # inside a with-block yields the same lines and closes the handle.
    with open(datasets['filenameCategory']) as catFile:
      categories = [x.strip() for x in catFile]
  else:
    categories = [chr(x+ord('a')) for x in range(26)]
  # Category field: either overlapping random SDRs or disjoint category codes.
  if config['overlappingPatterns']:
    encoder.addEncoder("name", SDRCategoryEncoder(n=200,
      w=config['spNumActivePerInhArea'], categoryList=categories, name="name"))
  else:
    encoder.addEncoder("name", CategoryEncoder(w=config['spNumActivePerInhArea'],
      categoryList=categories, name="name"))
  # ------------------------------------------------------------------
  # Node params
  # The inputs are long, horizontal vectors
  inputDimensions = (1, encoder.getWidth())
  # Layout the coincidences vertically stacked on top of each other, each
  # looking at the entire input field.
  columnDimensions = (config['spCoincCount'], 1)
  # If we have disableSpatial, then set the number of "coincidences" to be the
  # same as the encoder width
  if config['disableSpatial']:
    columnDimensions = (encoder.getWidth(), 1)
    config['trainSP'] = 0
  sensorParams = dict(
    # encoder/datasource are not parameters so don't include here
    verbosity=config['sensorVerbosity']
  )
  CLAParams = dict(
    # SP params
    disableSpatial = config['disableSpatial'],
    inputDimensions = inputDimensions,
    columnDimensions = columnDimensions,
    potentialRadius = inputDimensions[1]/2,  # NOTE(review): integer division under Python 2
    potentialPct = 1.00,
    gaussianDist = 0,
    commonDistributions = 0,    # should be False if possibly not training
    localAreaDensity = -1, #0.05,
    numActiveColumnsPerInhArea = config['spNumActivePerInhArea'],
    dutyCyclePeriod = 1000,
    stimulusThreshold = 1,
    synPermInactiveDec=0.11,
    synPermActiveInc=0.11,
    synPermActiveSharedDec=0.0,
    synPermOrphanDec = 0.0,
    minPctDutyCycleBeforeInh = 0.001,
    minPctDutyCycleAfterInh = 0.001,
    spVerbosity = config['spVerbosity'],
    spSeed = 1,
    printPeriodicStats = int(config['spPrintPeriodicStats']),
    # TM params
    tpSeed = 1,
    disableTemporal = 0 if config['trainTP'] else 1,
    temporalImp = config['temporalImp'],
    nCellsPerCol = config['tpNCellsPerCol'] if config['trainTP'] else 1,
    collectStats = 1,
    burnIn = 2,
    verbosity = config['tpVerbosity'],
    newSynapseCount = config['spNumActivePerInhArea'],
    minThreshold = config['spNumActivePerInhArea'],
    activationThreshold = config['spNumActivePerInhArea'],
    initialPerm = config['tpInitialPerm'],
    connectedPerm = 0.5,
    permanenceInc = config['tpPermanenceInc'],
    permanenceDec = config['tpPermanenceDec'],  # perhaps tune this
    globalDecay = config['tpGlobalDecay'],
    pamLength = config['tpPAMLength'],
    maxSeqLength = config['tpMaxSeqLength'],
    maxAge = config['tpMaxAge'],
    # General params
    computeTopDown = config['computeTopDown'],
    trainingStep = 'spatial',
    )
  dataSource = FileRecordStream(datasets['filenameTrain'])
  description = dict(
    options = dict(
      logOutputsDuringInference = False,
    ),
    network = dict(
      sensorDataSource = dataSource,
      sensorEncoder = encoder,
      sensorParams = sensorParams,
      CLAType = 'py.CLARegion',
      CLAParams = CLAParams,
      classifierType = None,
      classifierParams = None),
    )
  if config['trainSP']:
    description['spTrain'] = dict(
      iterationCount=config['iterationCountTrain'],
      #iter=displaySPCoincidences(50),
      #finish=printSPCoincidences()
      ),  # NOTE(review): trailing comma wraps the dict in a 1-tuple, unlike
          # the else branch below -- confirm the framework accepts a sequence
  else:
    description['spTrain'] = dict(
      # need to train with one iteration just to initialize data structures
      iterationCount=1)
  if config['trainTP']:
    # One tpTrain step per requested repeat, each rewinding the sensor first.
    description['tpTrain'] = []
    for i in xrange(config['trainTPRepeats']):
      stepDict = dict(name='step_%d' % (i),
                      setup=sensorRewind,
                      iterationCount=config['iterationCountTrain'],
                     )
      if config['tpTimingEvery'] > 0:
        stepDict['iter'] = printTPTiming(config['tpTimingEvery'])
        stepDict['finish'] = [printTPTiming(), printTPCells]
      description['tpTrain'].append(stepDict)
  # ----------------------------------------------------------------------------
  # Inference tests
  inferSteps = []
  if config['evalTrainingSetNumIterations'] > 0:
    # The training set. Used to train the n-grams.
    # NOTE(review): min(int, None) relies on Python-2 ordering when
    # iterationCountTrain is None.
    inferSteps.append(
      dict(name = 'confidenceTrain_baseline',
           iterationCount = min(config['evalTrainingSetNumIterations'],
                                config['iterationCountTrain']),
           ppOptions = dict(verbosity=config['ppVerbosity'],
                            printLearnedCoincidences=True,
                            nGrams='train',
                            #ipsDetailsFor = "name,None,2",
                           ),
           #finish=printTPCells,
          )
      )
    # Testing the training set on both the TM and n-grams.
    inferSteps.append(
      dict(name = 'confidenceTrain_nonoise',
           iterationCount = min(config['evalTrainingSetNumIterations'],
                                config['iterationCountTrain']),
           setup = [sensorOpen(datasets['filenameTrain'])],
           ppOptions = dict(verbosity=config['ppVerbosity'],
                            printLearnedCoincidences=False,
                            nGrams='test',
                            burnIns = [1,2,3,4],
                            #ipsDetailsFor = "name,None,2",
                            #ipsAt = [1,2,3,4],
                            ),
          )
      )
    # The test set
    if True:
      if datasets['filenameTest'] != datasets['filenameTrain']:
        inferSteps.append(
          dict(name = 'confidenceTest_baseline',
               iterationCount = config['iterationCountTest'],
               setup = [sensorOpen(datasets['filenameTest'])],
               ppOptions = dict(verbosity=config['ppVerbosity'],
                                printLearnedCoincidences=False,
                                nGrams='test',
                                burnIns = [1,2,3,4],
                                #ipsAt = [1,2,3,4],
                                ipsDetailsFor = "name,None,2",
                                ),
              )
          )
  description['infer'] = inferSteps
  return description
25,761 | permutations_quick.py | numenta_nupic-legacy/examples/prediction/experiments/confidenceTest/base/permutations_quick.py |
# Swarm permutation search space for the "secondOrder1" confidence test.
# Each key names a model parameter; each value lists the settings the
# permutation framework should explore.
permutations = {
    'dataSetPackage': ['secondOrder1'],

    'iterationCountTrain': [10, 25, 50, 100],
    'iterationCountTest': [150],
    'evalTrainingSetNumIterations': [0],

    'spNumActivePerInhArea': [5],

    'temporalImp': ['py'],
    'tpNCellsPerCol': [4],
    'tpInitialPerm': [0.11, 0.31, 0.51],
    'tpPermanenceInc': [0.10],
    'tpGlobalDecay': [0.10],
    'tpMaxAge': [1, 3, 5, 7, 15],
}

# Result fields to include in the permutation report.
report = [
    'overallTime',
    'postProc_confidenceTest_baseline:inputPredScore_burnIn1',
]

# Metric the permutation search optimizes.
optimize = 'postProc_confidenceTest_baseline:inputPredScore_burnIn1'
| 774 | Python | .py | 17 | 28.764706 | 68 | 0.520567 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,762 | permutations_firstOrder.py | numenta_nupic-legacy/examples/prediction/experiments/confidenceTest/base/permutations_firstOrder.py |
# Swarm permutation search space for the "firstOrder" confidence test.
# Each key names a model parameter; each value lists the settings the
# permutation framework should explore.
permutations = {
    'dataSetPackage': ['firstOrder'],

    'iterationCountTest': [1000],

    'spNumActivePerInhArea': [1],

    'tpNCellsPerCol': [1],
    'tpInitialPerm': [0.11],
    'tpPermanenceInc': [0.10],
    'tpGlobalDecay': [0.10],
    'tpMaxAge': [5, 10, 20, 50, 100],
    'tpPAMLength': [1],
}

# Result fields (regular expressions allowed) for the permutation report.
report = [
    'overallTime',
    '.*:inputPredScore_burnIn1',
    '.*:ngram:inputPredScore_n1_burnIn1',
]

# Metric the permutation search optimizes.
optimize = 'postProc_confidenceTest_baseline:inputPredScore_burnIn1'
| 668 | Python | .py | 16 | 25.5625 | 68 | 0.504983 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,763 | permutations_secondOrder0.py | numenta_nupic-legacy/examples/prediction/experiments/confidenceTest/base/permutations_secondOrder0.py |
# Swarm permutation search space for the "secondOrder0" confidence test.
# Each key names a model parameter; each value lists the settings the
# permutation framework should explore.
permutations = {
    'dataSetPackage': ['secondOrder0'],

    'iterationCountTrain': [250, 500, 1000, 1500],
    'iterationCountTest': [250, 500],

    'spNumActivePerInhArea': [5],

    'tpNCellsPerCol': [5],
    'tpInitialPerm': [0.11, 0.21, 0.31, 0.41],
    'tpPermanenceInc': [0.05, 0.10],
    'tpGlobalDecay': [0.05, 0.10],
    'tpMaxAge': [50, 75, 100, 200, 300],
}

# Result fields to include in the permutation report.
report = [
    'overallTime',
    'postProc_confidenceTest_baseline:inputPredScore_burnIn1',
    'postProc_confidenceTest_baseline:ngram:inputPredScore_n2_burnIn1',
]

# Metric the permutation search optimizes.
optimize = 'postProc_confidenceTest_baseline:inputPredScore_burnIn1'
def filter(perm):
  """Keep only permutations where tpPermanenceInc equals tpGlobalDecay.

  This is the permutation framework's filtering hook: it is called once for
  every candidate combination drawn from the permutations dict and must
  return True for combinations that should be run and False for those that
  should be skipped. (The function name is mandated by the framework and
  intentionally shadows the builtin ``filter``.)

  Parameters:
  ---------------------------------------------------------
  perm: dict mapping parameter names to one concrete value each,
        chosen from the permutations dict above.
  """
  return perm['tpPermanenceInc'] == perm['tpGlobalDecay']
| 1,401 | Python | .py | 29 | 36.310345 | 78 | 0.608527 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,764 | description.py | numenta_nupic-legacy/examples/prediction/experiments/confidenceTest/secondOrder0/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This test trains and tests on the following set of sequence probabilities:
1-2-3 (4X)
1-2-4 (1X)
5-2-3 (1X)
5-2-4 (4X)
We would expect it to learn 4 sub-sequences. We are setting the
permanence decrement decay high enough that an element won't remain
connected to the previous unless it follows at least 2 out of 3 times.
This constraint makes the 3-1, 3-5, 4-1, 4-5 transitions remain
un-connected since they occur only 1 out of every 2 times. It also makes
the 2-4 (in the 1-2-4 sequence) and the 2-3 (in the 5-2-3) sequence remain
unconnected.
If we successfully learn the above set of sequences, we would expect to
be able to predict with the following accuracy:
1 - 2: 5 out of 5
1,2 - 3: 4 out of 4
1,2 - 4: 0 out of 1
1,2,3 - 1: 1 out of 2
1,2,3 - 5: 1 out of 2
1,2,4 - 1: 0.25 out of 0.5
1,2,4 - 5: 0.25 out of 0.5
5 - 2: 5 out of 5
5,2 - 4: 4 out of 4
5,2 - 3: 0 out of 1
5,2,4 - 5: 1 out of 2
5,2,4 - 1: 1 out of 2
5,2,3 - 1: 0.25 out of 0.5
5,2,3 - 5: 0.25 out of 0.5
Total: 23 out of 30 = 0.76666 probability
If things are working correctly, you should get approximately the following
result from post-processing:
inputPredScore_burnIn1 : 0.7666666666
"""
from nupic.frameworks.prediction.helpers import importBaseDescription
# Experiment parameter overrides applied on top of ../base/description.py.
config = {
    'sensorVerbosity': 0,
    'spVerbosity': 0,
    'tpVerbosity': 0,
    'ppVerbosity': 0,

    'dataSetPackage': 'secondOrder0',

    'iterationCountTrain': 250,
    'iterationCountTest': 500,
    # 'evalTrainingSetNumIterations': 0,
    'trainTPRepeats': 1,

    'spNumActivePerInhArea': 5,

    'tpNCellsPerCol': 5,
    'tpInitialPerm': 0.11,
    'tpPermanenceInc': 0.10,
    'tpPermanenceDec': 0.10,
    'tpGlobalDecay': 0.10,
    'tpMaxAge': 50,
    'tpPAMLength': 1,
    # 'tpMaxSeqLength': 4,
    'tpTimingEvery': 100,
}

# Build the full description from the shared base file with these overrides
# and expose its symbols at module level, as the prediction framework expects.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 3,175 | Python | .py | 78 | 34.820513 | 81 | 0.638602 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,765 | description.py | numenta_nupic-legacy/examples/prediction/experiments/confidenceTest/secondOrder2/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This test trains and tests on data generated from a second order markov source model.
If things are working correctly, you should do as well as a 2-gram model
"""
from nupic.frameworks.prediction.helpers import importBaseDescription
# Experiment parameter overrides applied on top of ../base/description.py.
config = {
    'sensorVerbosity': 0,
    'spVerbosity': 0,
    'tpVerbosity': 0,
    'ppVerbosity': 0,

    'dataSetPackage': 'secondOrder2',

    'iterationCountTrain': 3000,
    'iterationCountTest': 250,
    'trainTPRepeats': 1,

    'spNumActivePerInhArea': 5,

    'trainTP': True,
    'tpNCellsPerCol': 10,
    'tpInitialPerm': 0.11,
    'tpPermanenceInc': 0.05,
    'tpPermanenceDec': 0.10,
    'tpGlobalDecay': 0.05,
    'tpMaxAge': 100,
    'tpPAMLength': 1,
    # 'tpMaxSeqLength': 3,
    'tpTimingEvery': 250,
}

# Build the full description from the shared base file with these overrides
# and expose its symbols at module level, as the prediction framework expects.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 2,092 | Python | .py | 48 | 34.895833 | 87 | 0.620531 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,766 | description.py | numenta_nupic-legacy/examples/prediction/experiments/confidenceTest/secondOrder1/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This test trains and tests on the following set of sequence probabilities:
1-11-16 (1X)
1-12-17 (1X)
1-13-18 (1X)
1-14-19 (1X)
1-15-20 (1X)
2-11-21 (1X)
2-12-22 (1X)
2-13-23 (1X)
2-14-24 (1X)
2-15-25 (1X)
We would expect it to learn 10 sub-sequences. This set of sequences exposes
a problem in the way permanenceDec is used to determine what are sub-sequences.
Basically, it only looks at fan-out - if an element has high fan-out, it will
NOT maintain connections to the following elements and thus we will lose any
high order context when we go through a high fan-out element.
In the example above, the transitions out of 0 have a high fan-out. With any
non-zero permanenceDec and sufficient fan-out, you will always end up remaining
unconnected from 1 to 11,12,13,.... This means you will then burst on the
element following the 1 (for example on the 11), and you will not be able to
predict that you should go to 16 instead of to 21.
If we successfully learn the above set of sequences, we would expect to
be able to predict with the following accuracy:
1 - 11: 0.2 out of 1
11 - 16: 1 out of 1
16 - 1: 0.5 out of 1
16 - 2: 0.5 out of 1
(same for 1-12-17..., 2-11-21...)
Total: 2.2 out of 4 = 0.55 probability
If things are working correctly, you should get approximately the following
result from post-processing:
inputPredScore_burnIn1 : 0.55
"""
from nupic.frameworks.prediction.helpers import importBaseDescription
# Experiment parameter overrides applied on top of ../base/description.py.
config = {
    'sensorVerbosity': 0,
    'spVerbosity': 0,
    'tpVerbosity': 0,
    'ppVerbosity': 0,

    'dataSetPackage': 'secondOrder1',

    'iterationCountTrain': 2500,
    'iterationCountTest': 250,
    'trainTPRepeats': 1,

    'spNumActivePerInhArea': 5,

    'trainTP': True,
    'tpNCellsPerCol': 10,
    'tpInitialPerm': 0.11,
    'tpPermanenceInc': 0.05,
    'tpPermanenceDec': 0.10,
    'tpGlobalDecay': 0.05,
    'tpMaxAge': 50,
    'tpPAMLength': 1,
    # 'tpMaxSeqLength': 4,
    'tpTimingEvery': 250,
}

# Build the full description from the shared base file with these overrides
# and expose its symbols at module level, as the prediction framework expects.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 3,324 | Python | .py | 78 | 36.74359 | 81 | 0.662226 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,767 | description.py | numenta_nupic-legacy/examples/prediction/experiments/generated_data/description.py | from nupic.encoders import (LogEncoder, DateEncoder, MultiEncoder, ScalarEncoder)
from nupic.data import FunctionSource
from nupic.frameworks.prediction.callbacks import displaySPCoincidences, printSPCoincidences
from nupic.data.dict_utils import DictObj
# Number of SP columns ("coincidences") in the single-node network below.
nCoincidences = 30
# NOTE(review): iterationCount appears unused in this file; the spTrain step
# in getDescription() hard-codes iterationCount=5000 — confirm which is
# intended.
iterationCount = 10000
# How many extra random scalar fields are appended to every generated record,
# and the upper bound of their integer values.
nRandomFields = 1
randomFieldWidth = 66

# Controls behavior of iteration callback
showSPCoincs = True
def generateFunction(info):
  """Generate one synthetic transaction record for the FunctionSource.

  Parameters:
  ----------
  info: dict with keys 'nRandomFields' (number of extra random scalar
        fields to add) and 'randomFieldWidth' (upper bound of their
        integer values).

  Returns a DictObj with 'date' (datetime), 'amount' (int) and one
  'random<i>' integer field per requested random field. Key names must
  match the encoder names registered in getDescription().
  """
  # This function needs to be self-contained so that it can work
  # after de-serialization.
  # These imports shouldn't be too slow after the first import
  import datetime
  import random
  d = DictObj()
  # Generate a random time in a one-month period
  # NOTE(review): 30*86000 is presumably ~30 days of seconds, but one day
  # is 86400 s — off by 400 s/day; confirm whether this matters.
  t = datetime.datetime.fromtimestamp(1289409426 + random.randint(0, 30*86000))
  # Amount varies as follows:
  # Most of the day, it has a 90% chance of being between 1 and 10.00
  # and a 10% chance of being between 100 and 1000)
  # between 8PM and 11PM, the probabilities are reversed
  # p = probability of high value
  # NOTE(review): with p = 1.0 the high amount is chosen 100% of the time
  # outside 20:00-23:00 and never inside that window, which contradicts
  # the 90%/10% description above — confirm whether p should be 0.9.
  p = 1.0
  if 20 <= t.hour < 23:
    p = 1.0 - p
  if random.random() < p:
    amount = random.randint(100, 1000)
  else:
    amount = random.randint(1, 10)
  # Dictionary keys must match the names in the multiencoder
  d["date"] = t
  d["amount"] = amount
  for i in xrange(info['nRandomFields']):
    d["random%d" %i] = random.randint(0, info['randomFieldWidth'])
  return d
def getBaseDatasets():
  """Return an empty mapping: this experiment generates its data on the fly
  via a FunctionSource, so there are no base datasets to declare."""
  return {}
def getDatasets(baseDatasets, generate=False):
  """Return the base datasets unchanged.

  Nothing needs generating here (data comes from a FunctionSource); the
  ``generate`` flag is accepted for framework compatibility and ignored.
  """
  return baseDatasets
def getDescription(datasets):
  """Build the prediction-framework experiment description.

  Parameters:
  ----------
  datasets: not used here; the data comes from a FunctionSource wrapping
            generateFunction() instead of files.

  Returns a dict with 'options', 'network', 'spTrain', 'tpTrain' and
  'infer' entries in the format the prediction framework expects.
  """
  # Encode the date (time of day), a log-scaled amount, and the extra
  # random scalar fields. Encoder names must match the keys produced by
  # generateFunction().
  encoder = MultiEncoder()
  encoder.addEncoder("date", DateEncoder(timeOfDay=3))
  encoder.addEncoder("amount", LogEncoder(name="amount", maxval=1000))
  for i in xrange(0, nRandomFields):
    s = ScalarEncoder(name="scalar", minval=0, maxval=randomFieldWidth, resolution=1, w=3)
    encoder.addEncoder("random%d" % i, s)

  dataSource = FunctionSource(generateFunction, dict(nRandomFields=nRandomFields,
                                                     randomFieldWidth=randomFieldWidth))

  # One-row input whose width is whatever the combined encoders produce.
  inputDimensions = (1, encoder.getWidth())

  # Layout the coincidences vertically stacked on top of each other, each
  # looking at the entire input field.
  columnDimensions = (nCoincidences, 1)

  nodeParams = dict()

  # Spatial pooler parameters (commented-out lines record values tried
  # during tuning).
  spParams = dict(
      commonDistributions=0,
      inputDimensions = inputDimensions,
      columnDimensions = columnDimensions,
      potentialRadius = inputDimensions[1]/2,
      potentialPct = 0.75,
      gaussianDist = 0,
      localAreaDensity = 0.10,
      # localAreaDensity = 0.04,
      numActiveColumnsPerInhArea = -1,
      dutyCyclePeriod = 1000,
      stimulusThreshold = 5,
      synPermInactiveDec=0.08,
      # synPermInactiveDec=0.02,
      synPermActiveInc=0.02,
      synPermActiveSharedDec=0.0,
      synPermOrphanDec = 0.0,
      minPctDutyCycleBeforeInh = 0.05,
      # minPctDutyCycleAfterInh = 0.1,
      # minPctDutyCycleBeforeInh = 0.05,
      minPctDutyCycleAfterInh = 0.05,
      # minPctDutyCycleAfterInh = 0.4,
      seed = 1,
      )

  # Spatial-only experiment: the temporal stage is disabled.
  otherParams = dict(
      disableTemporal=1,
      trainingStep='spatial',
      )

  nodeParams.update(spParams)
  nodeParams.update(otherParams)

  def mySetupCallback(experiment):
    # Demonstration hook invoked by the framework when the step starts.
    print "Setup function called"

  description = dict(
      options = dict(
          logOutputsDuringInference = False,
          ),

      network = dict(
          sensorDataSource = dataSource,
          sensorEncoder = encoder,
          CLAType = "py.CLARegion",
          CLAParams = nodeParams,
          classifierType = None,
          classifierParams = None),

      # step
      # NOTE(review): this hard-codes 5000 iterations rather than using the
      # module-level iterationCount (10000) — confirm intent.
      spTrain = dict(
          name="phase1",
          setup=mySetupCallback,
          iterationCount=5000,
          #iter=displaySPCoincidences(100),
          finish=printSPCoincidences()),

      tpTrain = None,          # same format as sptrain if non-empty
      infer = None,            # same format as sptrain if non-empty
      )

  return description
| 3,919 | Python | .py | 109 | 30.550459 | 92 | 0.704684 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,768 | test_db.py | numenta_nupic-legacy/examples/swarm/test_db.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This script lets the user know which NuPIC config file is being used
and whether or not they are able to connect to the mysql database and create
databases/tables given the configuration information provided.
"""
import pymysql
from nupic.support.configuration import Configuration
DEFAULT_CONFIG = "nupic-default.xml"
USER_CONFIG = "nupic-site.xml"
def getFileUsed():
  """Return the name of the NuPIC configuration file that is in effect.

  Returns USER_CONFIG when the user override file can be found on the
  standard configuration search paths, otherwise DEFAULT_CONFIG.
  """
  # _readConfigFile returns {} when the requested file cannot be located on
  # the standard paths returned by Configuration._getConfigPaths.
  userSettings = Configuration._readConfigFile(USER_CONFIG) #pylint: disable=protected-access
  return USER_CONFIG if userSettings != {} else DEFAULT_CONFIG
def testDbConnection(host, port, user, passwd):
  """
  Verify that the given MySQL host/port/user/passwd can connect and has the
  privileges swarming needs: create a test database, create a table in it,
  insert a row, then drop the table and the database.

  Raises pymysql.err.OperationalError (after printing guidance) if the
  connection or any of the operations fails; returns None on success.
  """
  try:
    conn = pymysql.connect(host=host, port=port, user=user, passwd=passwd)
    cursor = conn.cursor()
    # Exercise every privilege required, then clean up after ourselves so
    # repeated runs start from a blank slate.
    cursor.execute("CREATE DATABASE IF NOT EXISTS nupic_db_test")
    conn.select_db("nupic_db_test")
    cursor.execute("CREATE TABLE db_test \
                    (teststring VARCHAR(255),\
                     someint INT)")
    cursor.execute("INSERT INTO db_test VALUES ('testing123', 123)")
    cursor.execute("DROP TABLE IF EXISTS db_test")
    cursor.execute("DROP DATABASE IF EXISTS nupic_db_test")
  except pymysql.err.OperationalError:
    print ("Couldn't connect to the database or you don't have the "
           "permissions required to create databases and tables. "
           "Please ensure you have MySQL\n installed, running, "
           "accessible using the NuPIC configuration settings, "
           "and the user specified has permission to create both "
           "databases and tables.")
    raise
def dbValidator():
  """
  Report which NuPIC configuration file is in effect and verify that the
  MySQL settings it contains are usable for swarming.

  Prints the configuration being used (masking the password), then raises
  via testDbConnection() if the database cannot be reached or lacks the
  required privileges.
  """
  fileused = getFileUsed()

  # Get the values we need from NuPIC's configuration
  host = Configuration.get("nupic.cluster.database.host")
  port = int(Configuration.get("nupic.cluster.database.port"))
  user = Configuration.get("nupic.cluster.database.user")
  passwd = Configuration.get("nupic.cluster.database.passwd")

  print "This script will validate that your MySQL is setup correctly for "
  print "NuPIC. MySQL is required for NuPIC swarming. The settings are"
  print "defined in a configuration file found in "
  print "$NUPIC/src/nupic/support/nupic-default.xml Out of the box those "
  print "settings contain MySQL's default access credentials."
  print
  print "The nupic-default.xml can be duplicated to define user specific "
  print "changes calling the copied file "
  print "$NUPIC/src/nupic/support/nupic-site.xml Refer to the "
  print "nupic-default.xml for additional instructions."
  print
  print "Defaults: localhost, 3306, root, no password"
  print
  print "Retrieved the following NuPIC configuration using: ", fileused
  print "    host   :    ", host
  print "    port   :    ", port
  print "    user   :    ", user
  # Only the password's length is shown, never the password itself.
  print "    passwd :    ", "*" * len(passwd)

  testDbConnection(host, port, user, passwd)
  print "Connection successful!!"
# Run the validator when this file is executed directly as a script.
if __name__ == "__main__":
  dbValidator()
| 4,683 | Python | .py | 103 | 41.485437 | 87 | 0.707558 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,769 | hello_sp.py | numenta_nupic-legacy/examples/sp/hello_sp.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""A simple program that demonstrates the working of the spatial pooler"""
import numpy as np
import random
from nupic.bindings.algorithms import SpatialPooler as SP
uintType = "uint32"
class Example(object):
  """A minimal Spatial Pooler (SP) demonstration.

  Bundles an SP instance with a random binary input vector and the array of
  active columns the SP produces for it. Fixed random seeds (here and inside
  the SP) make every run reproducible.

  TODO: Get rid of this class, it just makes it more difficult to read the
  code.
  """


  def __init__(self, inputDimensions, columnDimensions):
    """
    Parameters:
    ----------
    _inputDimensions: The size of the input. (m,n) will give a size m x n
    _columnDimensions: The size of the 2 dimensional array of columns
    """
    self.inputDimensions = inputDimensions
    self.columnDimensions = columnDimensions
    # Flat sizes: total number of input bits / columns.
    self.inputSize = np.array(inputDimensions).prod()
    self.columnNumber = np.array(columnDimensions).prod()
    self.inputArray = np.zeros(self.inputSize, dtype=uintType)
    self.activeArray = np.zeros(self.columnNumber, dtype=uintType)

    random.seed(1)

    self.sp = SP(self.inputDimensions,
                 self.columnDimensions,
                 # Every column can potentially see the entire input field.
                 potentialRadius = self.inputSize,
                 # Target sparsity: ~2% of the columns active.
                 numActiveColumnsPerInhArea = int(0.02*self.columnNumber),
                 globalInhibition = True,
                 seed = 1,
                 synPermActiveInc = 0.01,
                 synPermInactiveDec = 0.008)


  def createInput(self):
    """Fill self.inputArray with a fresh random binary vector."""
    print "-" * 70 + "Creating a random input vector" + "-" * 70

    #clear the inputArray to zero before creating a new input vector
    self.inputArray[0:] = 0

    for i in range(self.inputSize):
      #randrange returns 0 or 1
      self.inputArray[i] = random.randrange(2)


  def run(self):
    """Run the spatial pooler with the input vector and print the indices
    of the resulting active columns (the SDR)."""
    print "-" * 80 + "Computing the SDR" + "-" * 80

    #activeArray[column]=1 if column is active after spatial pooling
    self.sp.compute(self.inputArray, True, self.activeArray)

    print self.activeArray.nonzero()


  def addNoise(self, noiseLevel):
    """Flip approximately noiseLevel percent of the input bits (add noise).

    :param noiseLevel: fraction (0..1) of total input bits to flip.

    NOTE: positions are drawn independently and may repeat, so a bit can be
    flipped back and the number of distinct flipped bits may be lower than
    noiseLevel * inputSize.
    """
    for _ in range(int(noiseLevel * self.inputSize)):
      # 0.1*self.inputSize represents 10% of the total input bits
      # random.random() returns a float between 0 and 1
      randomPosition = int(random.random() * self.inputSize)

      # Flipping the bit at the randomly picked position
      if self.inputArray[randomPosition] == 1:
        self.inputArray[randomPosition] = 0

      else:
        self.inputArray[randomPosition] = 1

      # Uncomment the following line to know which positions had been flipped.
      # print "The value at " + str(randomPosition) + " has been flipped"
# Demo script: a 32x32 binary input feeding a 64x64 column array.
example = Example((32, 32), (64, 64))

# Lesson 1
print "\n \nFollowing columns represent the SDR"
print "Different set of columns each time since we randomize the input"
print "Lesson - different input vectors give different SDRs\n\n"

# Trying random vectors
for i in range(3):
  example.createInput()
  example.run()

# Lesson 2
print "\n\nIdentical SDRs because we give identical inputs"
print "Lesson - identical inputs give identical SDRs\n\n"

print "-" * 75 + "Using identical input vectors" + "-" * 75

# Trying identical vectors (note: no createInput() call, so the same
# input is presented again)
for i in range(2):
  example.run()

# Lesson 3
print "\n\nNow we are changing the input vector slightly."
print "We change a small percentage of 1s to 0s and 0s to 1s."
print "The resulting SDRs are similar, but not identical to the original SDR"
print "Lesson - Similar input vectors give similar SDRs\n\n"

# Adding 10% noise to the input vector
# Notice how the output SDR hardly changes at all
print "-" * 75 + "After adding 10% noise to the input vector" + "-" * 75
example.addNoise(0.1)
example.run()

# Adding another 20% noise to the already modified input vector
# The output SDR should differ considerably from that of the previous output
print "-" * 75 + "After adding another 20% noise to the input vector" + "-" * 75
example.addNoise(0.2)
example.run()
| 5,011 | Python | .py | 112 | 40.410714 | 80 | 0.696296 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,770 | sp_tutorial.py | numenta_nupic-legacy/examples/sp/sp_tutorial.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A simple tutorial that shows some features of the Spatial Pooler.
The following program has the purpose of presenting some
basic properties of the Spatial Pooler. It reproduces Figs.
5, 7 and 9 from this paper: http://arxiv.org/abs/1505.02142
To learn more about the Spatial Pooler have a look at BAMI:
http://numenta.com/biological-and-machine-intelligence/
or at its class reference in the NuPIC documentation:
http://numenta.org/docs/nupic/classnupic_1_1research_1_1spatial__pooler_1_1_spatial_pooler.html
The purpose of the Spatial Pooler is to create a sparse representation
of its inputs in such a way that similar inputs will be mapped to similar
sparse representations. Thus, the Spatial Pooler should exhibit some resilience
to noise in its input.
"""
import matplotlib
import numpy as np
import random
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from nupic.algorithms.spatial_pooler import SpatialPooler as SP
def percentOverlap(x1, x2, size):
  """
  Computes the percentage of overlap between binary vectors x1 and x2.

  @param x1   (array) binary vector
  @param x2   (array) binary vector
  @param size (int)   length of the binary vectors (unused; kept for
                      interface compatibility)

  @return (float) overlapping active bits divided by the active-bit count
          of the sparser vector; 0 when either vector is all zeros
  """
  # Normalize by the vector with fewer active bits.
  minActiveCount = min(np.count_nonzero(x1), np.count_nonzero(x2))
  if minActiveCount == 0:
    return 0
  return float(np.dot(x1, x2)) / float(minActiveCount)
def corruptVector(vector, noiseLevel):
  """
  Corrupts a binary vector in place by flipping each of its bits
  independently with probability noiseLevel.

  @param vector     (array) binary vector to be corrupted
  @param noiseLevel (float) per-bit probability of being flipped
  """
  for position in range(len(vector)):
    # One random draw per bit keeps corruption independent per position.
    if random.random() < noiseLevel:
      vector[position] = 0 if vector[position] == 1 else 1
def resetVector(x1, x2):
  """
  Copies the contents of vector x1 into vector x2, element by element.

  @param x1 (array) source binary vector
  @param x2 (array) destination vector; its first len(x1) entries are
            overwritten, any remaining entries are left untouched
  """
  for index, value in enumerate(x1):
    x2[index] = value
# Fix the RNG so every run of this tutorial generates identical inputs/figures.
random.seed(1)

uintType = "uint32"
inputDimensions = (1000,1)
columnDimensions = (2048,1)
inputSize = np.array(inputDimensions).prod()
columnNumber = np.array(columnDimensions).prod()

# Build a random binary input vector: each bit is 0 or 1 with equal probability.
inputArray = np.zeros(inputSize, dtype=uintType)
for i in range(inputSize):
  inputArray[i] = random.randrange(2)

# Output buffer: sp.compute() writes the active-column bitmap into this array.
activeCols = np.zeros(columnNumber, dtype=uintType)

# Spatial Pooler: each column can connect to half the input (potentialRadius)
# and roughly 2% of the columns win the global inhibition competition per step.
sp = SP(inputDimensions,
        columnDimensions,
        potentialRadius = int(0.5*inputSize),
        numActiveColumnsPerInhArea = int(0.02*columnNumber),
        globalInhibition = True,
        seed = 1,
        synPermActiveInc = 0.01,
        synPermInactiveDec = 0.008
        )
# Part 1:
# -------
# A column connects to a subset of the input vector (specified
# by both the potentialRadius and potentialPct). The overlap score
# for a column is the number of connections to the input that become
# active when presented with a vector. When learning is 'on' in the SP,
# the active connections are reinforced, whereas those inactive are
# depressed (according to parameters synPermActiveInc and synPermInactiveDec.
# In order for the SP to create a sparse representation of the input, it
# will select a small percentage (usually 2%) of its most active columns,
# ie. columns with the largest overlap score.
# In this first part, we will create a histogram showing the overlap scores
# of the Spatial Pooler (SP) after feeding it with a random binary
# input. As well, the histogram will show the scores of those columns
# that are chosen to build the sparse representation of the input.

# Feed the input once with learning off, then fetch per-column overlap scores.
sp.compute(inputArray, False, activeCols)
overlaps = sp.getOverlaps()
activeColsScores = []
# activeCols.nonzero() returns a one-element tuple of index arrays, so this
# loop executes once and appends the whole array of active-column scores
# (activeColsScores[0] is later used in Part 3 for its length).
for i in activeCols.nonzero():
  activeColsScores.append(overlaps[i])

print ""
print "---------------------------------"
print "Figure 1 shows a histogram of the overlap scores"
print "from all the columns in the spatial pooler, as well as the"
print "overlap scores of those columns that were selected to build a"
print "sparse representation of the input (shown in green)."
print "The SP chooses 2% of the columns with the largest overlap score"
print "to make such sparse representation."
print "---------------------------------"
print ""

# Histogram of all column overlaps overlaid with the winners' overlaps.
bins = np.linspace(min(overlaps), max(overlaps), 28)
plt.hist(overlaps, bins, alpha=0.5, label="All cols")
plt.hist(activeColsScores, bins, alpha=0.5, label="Active cols")
plt.legend(loc="upper right")
plt.xlabel("Overlap scores")
plt.ylabel("Frequency")
plt.title("Figure 1: Column overlap of a SP with random input.")
plt.savefig("figure_1")
plt.close()
# Part 2a:
# -------
# The input overlap between two binary vectors is defined as their dot product.
# In order to normalize this value we divide by the minimum number of active
# inputs (in either vector). This means we are considering the sparser vector as
# reference. Two identical binary vectors will have an input overlap of 1,
# whereas two completely different vectors (one is the logical NOT of the other)
# will yield an overlap of 0. In this section we will see how the input overlap
# of two binary vectors decrease as we add noise to one of them.

# Scratch buffers reused throughout Parts 2a/2b.
inputX1 = np.zeros(inputSize, dtype=uintType)
inputX2 = np.zeros(inputSize, dtype=uintType)
outputX1 = np.zeros(columnNumber, dtype=uintType)
outputX2 = np.zeros(columnNumber, dtype=uintType)

# A fresh random binary reference vector.
for i in range(inputSize):
  inputX1[i] = random.randrange(2)

# Sweep noise from 0% to 100% in 10% increments and record the input overlap.
x = []
y = []
for noiseLevel in np.arange(0, 1.1, 0.1):
  resetVector(inputX1, inputX2)
  corruptVector(inputX2, noiseLevel)
  x.append(noiseLevel)
  y.append(percentOverlap(inputX1, inputX2, inputSize))

print ""
print "---------------------------------"
print "Figure 2 shows the input overlap between 2 identical binary vectors in"
print "function of the noise applied to one of them."
print "0 noise level means that the vector remains the same, whereas"
print "1 means that the vector is the logical negation of the original vector. "
print "The relationship between overlap and noise level is practically linear "
print "and monotonically decreasing."
print "---------------------------------"
print ""

plt.plot(x, y)
plt.xlabel("Noise level")
plt.ylabel("Input overlap")
plt.title("Figure 2: Input overlap between 2 identical vectors in function of "
          "noiseLevel.")
plt.savefig("figure_2")
plt.close()
# Part 2b:
# -------
# The output overlap between two binary input vectors is the overlap of the
# columns that become active once they are fed to the SP. In this part we
# turn learning off, and observe the output of the SP as we input two binary
# input vectors with varying level of noise.
# Starting from two identical vectors (that yield the same active columns)
# we would expect that as we add noise to one of them their output overlap
# decreases.
# In this part we will show how the output overlap behaves in function of the
# input overlap between two vectors.
# Even with an untrained spatial pooler, we see some noise resilience.
# Note that due to the non-linear properties of high dimensional SDRs, overlaps
# greater than 10 bits, or 25% in this example, are considered significant.

# For each noise level, feed both the clean and corrupted vectors to the SP
# (learning off) and compare the resulting active-column bitmaps.
x = []
y = []
for noiseLevel in np.arange(0, 1.1, 0.1):
  resetVector(inputX1, inputX2)
  corruptVector(inputX2, noiseLevel)
  sp.compute(inputX1, False, outputX1)
  sp.compute(inputX2, False, outputX2)
  x.append(percentOverlap(inputX1, inputX2, inputSize))
  y.append(percentOverlap(outputX1, outputX2, columnNumber))

print ""
print "---------------------------------"
print "Figure 3 shows the output overlap between two sparse representations"
print "in function of their input overlap. Starting from two identical binary "
print "vectors (which yield the same active columns) we add noise two one of "
print "them, feed it to the SP, and estimate the output overlap between the two"
print "representations in terms of the common active columns between them."
print "As expected, as the input overlap decreases, so does the output overlap."
print "---------------------------------"
print ""

plt.plot(x, y)
plt.xlabel("Input overlap")
plt.ylabel("Output overlap")
plt.title("Figure 3: Output overlap in function of input overlap in a SP "
          "without training")
plt.savefig("figure_3")
plt.close()
# Part 3:
# -------
# After training, a SP can become less sensitive to noise. For this purpose, we
# train the SP by turning learning on, and by exposing it to a variety of random
# binary vectors. We will expose the SP to a repetition of input patterns in
# order to make it learn and distinguish them once learning is over. This will
# result in robustness to noise in the inputs. In this section we will reproduce
# the plot in the last section after the SP has learned a series of inputs. Here
# we will see how the SP exhibits increased resilience to noise after learning.
# We will present 10 random vectors to the SP, and repeat this 30 times.
# Later you can try changing the number of times we do this to see how it
# changes the last plot. Then, you could also modify the number of examples to
# see how the SP behaves. Is there a relationship between the number of examples
# and the number of times that we expose them to the SP?

# Build the training set: numExamples random binary vectors.
numExamples = 10
inputVectors = np.zeros((numExamples, inputSize), dtype=uintType)
outputColumns = np.zeros((numExamples, columnNumber), dtype=uintType)
for i in range(numExamples):
  for j in range(inputSize):
    inputVectors[i][j] = random.randrange(2)

# This is the number of times that we will present the input vectors to the SP
epochs = 30
for _ in range(epochs):
  for i in range(numExamples):
    # Feed the examples to the SP with learning enabled (second arg True).
    sp.compute(inputVectors[i][:], True, outputColumns[i][:])

print ""
print "---------------------------------"
print "Figure 4a shows the sorted overlap scores of all columns in a spatial"
print "pooler with random input, before and after learning. The top 2% of "
print "columns with the largest overlap scores, comprising the active columns "
print "of the output sparse representation, are highlighted in green."
print "---------------------------------"
print ""

# 'overlaps' still holds the pre-training scores from Part 1; plot them before
# refreshing the variable with post-training scores.
plt.plot(sorted(overlaps)[::-1], label="Before learning")
overlaps = sp.getOverlaps()
plt.plot(sorted(overlaps)[::-1], label="After learning")
# Shade the region covered by the active columns found in Part 1.
plt.axvspan(0, len(activeColsScores[0]), facecolor="g", alpha=0.3,
            label="Active columns")
plt.legend(loc="upper right")
plt.xlabel("Columns")
plt.ylabel("Overlap scores")
plt.title("Figure 4a: Sorted column overlaps of a SP with random "
          "input.")
plt.savefig("figure_4a")
plt.close()

inputVectorsCorrupted = np.zeros((numExamples, inputSize), dtype=uintType)
outputColumnsCorrupted = np.zeros((numExamples, columnNumber), dtype=uintType)

x = []
y = []
# We will repeat the experiment in the last section for only one input vector
# in the set of input vectors.  Note: numpy basic slicing ([:]) returns views,
# so resetVector/corruptVector mutate the underlying rows in place.
for noiseLevel in np.arange(0, 1.1, 0.1):
  resetVector(inputVectors[0][:], inputVectorsCorrupted[0][:])
  corruptVector(inputVectorsCorrupted[0][:], noiseLevel)
  sp.compute(inputVectors[0][:], False, outputColumns[0][:])
  sp.compute(inputVectorsCorrupted[0][:], False, outputColumnsCorrupted[0][:])
  x.append(percentOverlap(inputVectors[0][:], inputVectorsCorrupted[0][:],
                          inputSize))
  y.append(percentOverlap(outputColumns[0][:], outputColumnsCorrupted[0][:],
                          columnNumber))

print ""
print "---------------------------------"
print "How robust is the SP to noise after learning?"
print "Figure 4 shows again the output overlap between two binary vectors in "
print "function of their input overlap. After training, the SP exhibits more "
print "robustness to noise in its input, resulting in a -almost- sigmoid curve."
print "This implies that even if a previous input is presented again with a "
print "certain amount of noise its sparse representation still resembles its "
print "original."
print "---------------------------------"
print ""

plt.plot(x, y)
plt.xlabel("Input overlap")
plt.ylabel("Output overlap")
plt.title("Figure 4: Output overlap in function of input overlap in a SP after "
          "training")
plt.savefig("figure_4")
plt.close()

print ""
print "+++++++++++++++++++++++++++++++++++++++++++++++++++"
print " All images generated by this script will be saved"
print " in your current working directory."
print "+++++++++++++++++++++++++++++++++++++++++++++++++++"
print ""
| 13,471 | Python | .py | 300 | 42.673333 | 95 | 0.72685 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,771 | demo.py | numenta_nupic-legacy/examples/opf/simple_server/demo.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Shows how to use the simple_server.py NuPIC server using data from the
hotgym dataset.
To run this demo, make sure simple server is running (on port 8888) using the command:
$ ./nupic/simple_server.py 8888
Then run this demo in another terminal window.
"""
import json
import logging
from optparse import OptionParser
import sys
import requests
from pkg_resources import resource_filename
from nupic.data.file_record_stream import FileRecordStream
import model_params
# Module-level logger for this demo script.
_LOGGER = logging.getLogger(__name__)

# Hotgym CSV shipped inside the nupic package, resolved to an absolute path.
_INPUT_FILE_PATH = resource_filename(
    "nupic.datafiles", "extra/hotgym/rec-center-hourly.csv"
)

# Maximum number of input rows to stream to the server.
_NUM_RECORDS = 1000
def createModel(server, port):
  """Create the 'demo' model on the NuPIC simple server.

  Issues a POST to http://<server>:<port>/models/demo with the model
  parameters and the predicted field name as a JSON body.

  @param server (string) server host name (scheme is prepended here)
  @param port   (int)    server port
  """
  url = "http://{server}:{port}/models/demo".format(server=server, port=port)
  payload = json.dumps({"modelParams": model_params.MODEL_PARAMS,
                        "predictedFieldName": "consumption"})
  requests.post(url, payload)
def runDemo(server, port):
  """Create the demo model on the server and stream hotgym records to it.

  Reads up to _NUM_RECORDS rows from the packaged hotgym CSV and POSTs each
  one to the server's /models/demo/run endpoint, printing every response.

  @param server (string) server host name
  @param port   (int)    server port
  """
  createModel(server, port)

  with FileRecordStream(_INPUT_FILE_PATH) as f:
    headers = f.getFieldNames()
    for i in range(_NUM_RECORDS):
      record = f.getNextRecord()
      if record is None:
        # Input file exhausted before _NUM_RECORDS rows were read.
        # (The old check `i == _NUM_RECORDS` could never be True because
        # i only reaches _NUM_RECORDS - 1, and it ignored short files.)
        break
      modelInput = dict(zip(headers, record))
      # The server expects JSON-serializable values: plain float and a
      # formatted timestamp string.
      modelInput["consumption"] = float(modelInput["consumption"])
      modelInput["timestamp"] = modelInput["timestamp"].strftime("%m/%d/%y %H:%M")
      res = requests.post(
          "http://{server}:{port}/models/demo/run".format(server=server,
                                                          port=port),
          json.dumps(modelInput))
      # Parenthesized form works identically under Python 2 and 3.
      print("result = %s" % res.text)
if __name__ == "__main__":
  parser = OptionParser()
  # NOTE: createModel()/runDemo() prepend "http://" when building URLs, so
  # the default must be a bare host name.  The previous default
  # "http://localhost" produced malformed URLs like
  # "http://http://localhost:8888/models/demo".
  parser.add_option("-s", help="Server host (default: %default)",
                    dest="server", default="localhost")
  # Fixed unbalanced parenthesis in the help text below.
  parser.add_option("-p", help="Server port (default: %default)",
                    dest="port", default=8888)

  opt, arg = parser.parse_args(sys.argv[1:])

  runDemo(opt.server, opt.port)
| 2,955 | Python | .py | 70 | 37.571429 | 86 | 0.668531 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,772 | model_params.py | numenta_nupic-legacy/examples/opf/simple_server/model_params.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
MODEL_PARAMS = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': {'days': 0,
'fields': [('consumption', 'sum')],
'hours': 1,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Include the encoders we use
'encoders': {
u'consumption': {
'fieldname': u'consumption',
'resolution': 0.88,
'seed': 1,
'name': u'consumption',
'type': 'RandomDistributedScalarEncoder',
},
'timestamp_timeOfDay': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (21, 1),
'type': 'DateEncoder'},
'timestamp_weekend': { 'fieldname': u'timestamp',
'name': u'timestamp_weekend',
'type': 'DateEncoder',
'weekend': 21}
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
# Spatial Pooler implementation selector.
# Options: 'py', 'cpp' (speed optimized, new)
'spatialImp' : 'cpp',
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses.
'potentialPct': 0.85,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10.
'synPermConnected': 0.1,
'synPermActiveInc': 0.04,
'synPermInactiveDec': 0.005,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.0001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1,5',
'implementation': 'py',
},
'anomalyParams': { u'anomalyCacheRecords': None,
u'autoDetectThreshold': None,
u'autoDetectWaitRecords': 2184},
'trainSPNetOnlyIfRequested': False,
},
}
| 9,318 | Python | .py | 200 | 33.585 | 108 | 0.554184 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,773 | model_params.py | numenta_nupic-legacy/examples/opf/clients/nyctaxi/model_params.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# OPF model configuration for the NYC taxi anomaly-detection demo.
# FIX: the original spParams dict listed "spatialImp": "cpp" twice; a
# duplicate key in a dict literal is silently dropped (the last one wins),
# so the redundant entry has been removed.
MODEL_PARAMS = {
    "inferenceArgs": {
        "predictionSteps": [1],
        "predictedField": "value",
        "inputPredictedField": "auto"
    },
    # No aggregation: all interval fields are zero.
    "aggregationInfo": {
        "seconds": 0,
        "fields": [],
        "months": 0,
        "days": 0,
        "years": 0,
        "hours": 0,
        "microseconds": 0,
        "weeks": 0,
        "minutes": 0,
        "milliseconds": 0
    },
    "model": "HTMPrediction",
    "version": 1,
    "predictAheadTime": None,
    "modelParams": {
        "inferenceType": "TemporalAnomaly",
        "sensorParams": {
            "encoders": {
                "timestamp_timeOfDay": {
                    "type": "DateEncoder",
                    "timeOfDay": [
                        21,
                        9.49
                    ],
                    "fieldname": "timestamp",
                    "name": "timestamp"
                },
                # Day-of-week and weekend encodings are deliberately disabled.
                "timestamp_dayOfWeek": None,
                "timestamp_weekend": None,
                "value": {
                    "name": "value",
                    "fieldname": "value",
                    "seed": 42,
                    # "numBuckets" is popped and converted into "resolution"
                    # by _setRandomEncoderResolution() in nyctaxi_anomaly.py.
                    "numBuckets": 130,
                    "type": "RandomDistributedScalarEncoder"
                }
            },
            "sensorAutoReset": None,
            "verbosity": 0
        },
        "spEnable": True,
        "spParams": {
            # Spatial Pooler implementation: speed-optimized C++ version.
            "spatialImp": "cpp",
            "potentialPct": 0.8,
            "columnCount": 2048,
            "globalInhibition": 1,
            "inputWidth": 0,
            "boostStrength": 0.0,
            "numActiveColumnsPerInhArea": 40,
            "seed": 1956,
            "spVerbosity": 0,
            "synPermActiveInc": 0.003,
            "synPermConnected": 0.2,
            "synPermInactiveDec": 0.0005
        },
        "trainSPNetOnlyIfRequested": False,
        "tmEnable": True,
        "tmParams": {
            "activationThreshold": 13,
            "cellsPerColumn": 32,
            "columnCount": 2048,
            "globalDecay": 0.0,
            "initialPerm": 0.21,
            "inputWidth": 2048,
            "maxAge": 0,
            "maxSegmentsPerCell": 128,
            "maxSynapsesPerSegment": 32,
            "minThreshold": 10,
            "newSynapseCount": 20,
            "outputType": "normal",
            "pamLength": 3,
            "permanenceDec": 0.1,
            "permanenceInc": 0.1,
            "seed": 1960,
            "temporalImp": "cpp",
            "verbosity": 0
        },
        # The classifier is disabled: only the anomaly score is consumed.
        "clEnable": False,
        "clParams": {
            "alpha": 0.035828933612157998,
            "regionName": "SDRClassifierRegion",
            "steps": "1",
            "verbosity": 0
        },
        "anomalyParams": {
            "anomalyCacheRecords": None,
            "autoDetectThreshold": None,
            "autoDetectWaitRecords": 5030
        }
    }
}
| 3,359 | Python | .py | 119 | 22.378151 | 72 | 0.574251 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,774 | nyctaxi_anomaly.py | numenta_nupic-legacy/examples/opf/clients/nyctaxi/nyctaxi_anomaly.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A simple client to create a HTM anomaly detection model for nyctaxi dataset.
The script prints out all records that have an abnormally high anomaly
score.
"""
import csv
import datetime
import logging
from pkg_resources import resource_filename
from nupic.frameworks.opf.model_factory import ModelFactory
import model_params
# Module-level logger for this script.
_LOGGER = logging.getLogger(__name__)

# NYC taxi CSV shipped inside the nupic package, resolved to an absolute path.
_INPUT_DATA_FILE = resource_filename(
    "nupic.datafiles", "extra/nyctaxi/nycTaxi.csv"
)
# Output CSV written next to the current working directory.
_OUTPUT_PATH = "anomaly_scores.csv"

# Records scoring above this anomaly score are logged as anomalies.
_ANOMALY_THRESHOLD = 0.9

# minimum metric value of nycTaxi.csv
_INPUT_MIN = 8

# maximum metric value of nycTaxi.csv
_INPUT_MAX = 39197
def _setRandomEncoderResolution(minResolution=0.001):
  """Derive the RDSE resolution from the data range and store it in place.

  Converts the "numBuckets" entry of the "value" encoder in
  model_params.MODEL_PARAMS into a "resolution" entry: the padded input range
  divided by the bucket count, floored at minResolution.  MODEL_PARAMS is
  mutated ("numBuckets" is removed).  No-op for non-RDSE encoder types.

  @param minResolution (float) lower bound for the computed resolution
  """
  valueEncoder = (
      model_params.MODEL_PARAMS["modelParams"]["sensorParams"]["encoders"]["value"]
  )
  if valueEncoder["type"] != "RandomDistributedScalarEncoder":
    return
  # Pad the observed data range by 20% on each side before bucketing.
  padding = abs(_INPUT_MAX - _INPUT_MIN) * 0.2
  span = (_INPUT_MAX + padding) - (_INPUT_MIN - padding)
  numBuckets = valueEncoder.pop("numBuckets")
  valueEncoder["resolution"] = max(minResolution, span / numBuckets)
def createModel():
  """Build the HTM anomaly model after fixing up the encoder resolution.

  Mutates model_params.MODEL_PARAMS (via _setRandomEncoderResolution) before
  handing it to the OPF ModelFactory; returns the constructed model.
  """
  _setRandomEncoderResolution()
  return ModelFactory.create(model_params.MODEL_PARAMS)
def runNYCTaxiAnomaly():
  """Stream the NYC taxi CSV through the model and record anomaly scores.

  Writes one (timestamp, value, anomaly_score) row per input record to
  _OUTPUT_PATH and logs every record whose score exceeds _ANOMALY_THRESHOLD.
  Python 2 only (uses `reader.next()` and statement-form print).
  """
  model = createModel()
  model.enableInference({'predictedField': 'value'})
  with open(_INPUT_DATA_FILE) as fin:
    reader = csv.reader(fin)
    # NOTE(review): the output file handle is never explicitly closed; it is
    # only released when the writer is garbage-collected.
    csvWriter = csv.writer(open(_OUTPUT_PATH,"wb"))
    csvWriter.writerow(["timestamp", "value", "anomaly_score"])
    # First row of the input CSV holds the column names.
    headers = reader.next()
    for i, record in enumerate(reader, start=1):
      modelInput = dict(zip(headers, record))
      # Convert CSV strings to the types the encoders expect.
      modelInput["value"] = float(modelInput["value"])
      modelInput["timestamp"] = datetime.datetime.strptime(
          modelInput["timestamp"], "%Y-%m-%d %H:%M:%S")
      result = model.run(modelInput)
      anomalyScore = result.inferences['anomalyScore']
      csvWriter.writerow([modelInput["timestamp"], modelInput["value"],
                          "%.3f" % anomalyScore])
      if anomalyScore > _ANOMALY_THRESHOLD:
        _LOGGER.info("Anomaly detected at [%s]. Anomaly score: %f.",
                     result.rawInput["timestamp"], anomalyScore)

  print "Anomaly scores have been written to",_OUTPUT_PATH
if __name__ == "__main__":
  # INFO level so the anomaly notifications from _LOGGER are visible.
  logging.basicConfig(level=logging.INFO)
  runNYCTaxiAnomaly()
| 3,499 | Python | .py | 84 | 37.75 | 81 | 0.694052 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,775 | model_params.py | numenta_nupic-legacy/examples/opf/clients/cpu/model_params.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# OPF model configuration for the CPU-usage prediction client (cpu.py).
MODEL_PARAMS = {
    # Type of model that the rest of these parameters apply to.
    'model': "HTMPrediction",

    # Version that specifies the format of the config.
    'version': 1,

    'predictAheadTime': None,

    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'TemporalMultiStep',

        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity' : 0,

            # CPU usage encoder: scalar percentage in [0, 100].
            'encoders': {
                'cpu': {
                    'fieldname': u'cpu',
                    'n': 200,
                    'name': u'cpu',
                    'type': 'ScalarEncoder',
                    'minval': 0.0,
                    'maxval': 100.0,
                    'w': 21
                }
            },

            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys is the desired combination of the following:
            #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            'sensorAutoReset' : None,
        },

        'spEnable': True,

        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity' : 0,

            'globalInhibition': 1,

            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            'inputWidth': 0,

            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 40,

            'seed': 1956,

            # potentialPct
            # What percent of the columns's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2
            'potentialPct': 0.5,

            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10. Cells whose activity
            # level before inhibition falls below minDutyCycleBeforeInh
            # will have their own internal synPermConnectedCell
            # threshold set below this default value.
            # (This concept applies to both SP and TM and so 'cells'
            # is correct here as opposed to 'columns')
            'synPermConnected': 0.1,

            'synPermActiveInc': 0.1,

            'synPermInactiveDec': 0.01,
        },

        # Controls whether TM is enabled or disabled;
        # TM is necessary for making temporal predictions, such as predicting
        # the next inputs. Without TM, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tmEnable' : True,

        'tmParams': {
            # TM diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py
            # and backtracking_tm_cpp.py)
            'verbosity': 0,

            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 32,

            'inputWidth': 2048,

            'seed': 1960,

            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',

            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 20,

            # Maximum number of synapses per segment
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,

            # Maximum number of segments per cell
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,

            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,

            # Permanence Increment
            'permanenceInc': 0.1,

            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec' : 0.1,

            'globalDecay': 0.0,

            'maxAge': 0,

            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 12,

            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            'activationThreshold': 16,

            'outputType': 'normal',

            # "Pay Attention Mode" length. This tells the TM how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 1,
        },

        'clParams': {
            # Classifier implementation selection.
            'implementation': 'cpp',

            'regionName' : 'SDRClassifierRegion',

            # Classifier diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            'verbosity' : 0,

            # This controls how fast the classifier learns/forgets. Higher values
            # make it adapt faster and forget older patterns faster.
            'alpha': 0.0001,

            # This is set after the call to updateConfigFromSubConfig and is
            # computed from the aggregationInfo and predictAheadTime.
            # '5' matches the 5-step-ahead prediction read by cpu.py.
            'steps': '5',
        },

        'trainSPNetOnlyIfRequested': False,
    },
}
| 8,343 | Python | .py | 181 | 34.690608 | 108 | 0.5832 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,776 | cpu.py | numenta_nupic-legacy/examples/opf/clients/cpu/cpu.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""A simple client to read CPU usage and predict it in real time."""
from collections import deque
import time
import psutil
import matplotlib.pyplot as plt
from nupic.data.inference_shifter import InferenceShifter
from nupic.frameworks.opf.model_factory import ModelFactory
import model_params
# Seconds between CPU samples; one model step is run per sample.
SECONDS_PER_STEP = 2
# Number of trailing data points kept and drawn on the chart.
WINDOW = 60
# turn matplotlib interactive mode on (ion)
plt.ion()
fig = plt.figure()
# plot title, legend, etc
plt.title('CPU prediction example')
plt.xlabel('time [s]')
plt.ylabel('CPU usage [%]')
def runCPU():
  """Poll CPU usage, make predictions, and plot the results. Runs forever.

  Builds an HTM model from ``model_params.MODEL_PARAMS``, then loops:
  sample CPU usage with psutil, feed it to the model, and redraw the
  actual vs. 5-steps-ahead predicted values over a sliding window.
  """
  # Create the model for predicting CPU usage.
  model = ModelFactory.create(model_params.MODEL_PARAMS)
  model.enableInference({'predictedField': 'cpu'})
  # The shifter will align prediction and actual values.
  shifter = InferenceShifter()
  # Keep the last WINDOW predicted and actual values for plotting.
  # Use WINDOW for maxlen (was a hardcoded 60) so the deques stay in sync
  # with the plot width if WINDOW is ever changed.
  actHistory = deque([0.0] * WINDOW, maxlen=WINDOW)
  predHistory = deque([0.0] * WINDOW, maxlen=WINDOW)
  # Initialize the plot lines that we will update with each new record.
  actline, = plt.plot(range(WINDOW), actHistory)
  predline, = plt.plot(range(WINDOW), predHistory)
  # Set the y-axis range (CPU usage is a percentage).
  actline.axes.set_ylim(0, 100)
  predline.axes.set_ylim(0, 100)
  while True:
    # Get the CPU usage.
    cpu = psutil.cpu_percent()
    # Run the input through the model and shift the resulting prediction.
    modelInput = {'cpu': cpu}
    result = shifter.shift(model.run(modelInput))
    # Update the trailing predicted and actual value deques.
    inference = result.inferences['multiStepBestPredictions'][5]
    if inference is not None:
      actHistory.append(result.rawInput['cpu'])
      predHistory.append(inference)
    # Redraw the chart with the new data.
    actline.set_ydata(actHistory)  # update the data
    predline.set_ydata(predHistory)  # update the data
    plt.draw()
    plt.legend(('actual', 'predicted'))
    # Make sure we wait a total of 2 seconds per iteration.
    try:
      plt.pause(SECONDS_PER_STEP)
    except Exception:
      # plt.pause can raise once the plot window is closed; keep polling.
      # (The previous bare `except:` also swallowed KeyboardInterrupt.)
      pass
# Script entry point: run the monitor/predict/plot loop until interrupted.
if __name__ == "__main__":
  runCPU()
| 3,122 | Python | .py | 77 | 37.701299 | 77 | 0.708815 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,777 | hotgym_anomaly.py | numenta_nupic-legacy/examples/opf/clients/hotgym/anomaly/hotgym_anomaly.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A simple client to create a HTM anomaly detection model for hotgym.
The script prints out all records that have an abnormally high anomaly
score.
"""
import csv
import datetime
import logging
from pkg_resources import resource_filename
from nupic.frameworks.opf.model_factory import ModelFactory
import model_params
_LOGGER = logging.getLogger(__name__)
# Bundled sample data: hourly energy consumption for one gym.
_INPUT_DATA_FILE = resource_filename(
  "nupic.datafiles", "extra/hotgym/rec-center-hourly.csv"
)
# Per-record anomaly scores are written here.
_OUTPUT_PATH = "anomaly_scores.csv"
# Records scoring above this are logged as detected anomalies.
_ANOMALY_THRESHOLD = 0.9
def createModel():
  """Instantiate an OPF model from the params in the local model_params module."""
  return ModelFactory.create(model_params.MODEL_PARAMS)
def runHotgymAnomaly():
model = createModel()
model.enableInference({'predictedField': 'consumption'})
with open (_INPUT_DATA_FILE) as fin:
reader = csv.reader(fin)
csvWriter = csv.writer(open(_OUTPUT_PATH,"wb"))
csvWriter.writerow(["timestamp", "consumption", "anomaly_score"])
headers = reader.next()
reader.next()
reader.next()
for i, record in enumerate(reader, start=1):
modelInput = dict(zip(headers, record))
modelInput["consumption"] = float(modelInput["consumption"])
modelInput["timestamp"] = datetime.datetime.strptime(
modelInput["timestamp"], "%m/%d/%y %H:%M")
result = model.run(modelInput)
anomalyScore = result.inferences['anomalyScore']
csvWriter.writerow([modelInput["timestamp"], modelInput["consumption"],
anomalyScore])
if anomalyScore > _ANOMALY_THRESHOLD:
_LOGGER.info("Anomaly detected at [%s]. Anomaly score: %f.",
result.rawInput["timestamp"], anomalyScore)
print "Anomaly scores have been written to",_OUTPUT_PATH
# Script entry point: enable INFO logging so anomaly hits are visible.
if __name__ == "__main__":
  logging.basicConfig(level=logging.INFO)
  runHotgymAnomaly()
| 2,757 | Python | .py | 65 | 38.861538 | 77 | 0.696415 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,778 | model_params.py | numenta_nupic-legacy/examples/opf/clients/hotgym/anomaly/model_params.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Model configuration for the hotgym anomaly example.  Parameter values are
# unchanged from the swarmed originals; commentary has been condensed.
MODEL_PARAMS = {
  # Model type and config format version.
  'model': "HTMPrediction",
  'version': 1,
  # Aggregation: sum consumption (c1) per hour, keep the first timestamp (c0).
  'aggregationInfo': {
    'days': 0,
    'fields': [(u'c1', 'sum'), (u'c0', 'first')],
    'hours': 1,
    'microseconds': 0,
    'milliseconds': 0,
    'minutes': 0,
    'months': 0,
    'seconds': 0,
    'weeks': 0,
    'years': 0,
  },
  'predictAheadTime': None,
  'modelParams': {
    # TemporalAnomaly models emit an anomalyScore with every inference.
    'inferenceType': 'TemporalAnomaly',
    'sensorParams': {
      # 0 = silent sensor region; higher values print debug info.
      'verbosity': 0,
      # Encode time-of-day plus the scalar consumption value; day-of-week
      # and weekend sub-encodings are disabled.
      'encoders': {
        u'timestamp_timeOfDay': {
          'fieldname': u'timestamp',
          'name': u'timestamp_timeOfDay',
          'timeOfDay': (21, 9.5),
          'type': 'DateEncoder',
        },
        u'timestamp_dayOfWeek': None,
        u'timestamp_weekend': None,
        u'consumption': {
          'clipInput': True,
          'fieldname': u'consumption',
          'maxval': 100.0,
          'minval': 0.0,
          'n': 50,
          'name': u'consumption',
          'type': 'ScalarEncoder',
          'w': 21,
        },
      },
      # No automatically generated resets.
      'sensorAutoReset': None,
    },
    # --- Spatial Pooler ---
    'spEnable': True,
    'spParams': {
      'spVerbosity': 0,
      'spatialImp': 'cpp',          # speed-optimized C++ implementation
      'globalInhibition': 1,
      'columnCount': 2048,          # must match tmParams columnCount
      'inputWidth': 0,
      'numActiveColumnsPerInhArea': 40,
      'seed': 1956,
      'potentialPct': 0.8,
      'synPermConnected': 0.1,
      'synPermActiveInc': 0.0001,
      'synPermInactiveDec': 0.0005,
      'boostStrength': 0.0,         # 0.0 disables boosting
    },
    # --- Temporal Memory (needed for temporal predictions/anomaly) ---
    'tmEnable': True,
    'tmParams': {
      'verbosity': 0,
      'columnCount': 2048,
      'cellsPerColumn': 32,
      'inputWidth': 2048,
      'seed': 1960,
      'temporalImp': 'cpp',
      'newSynapseCount': 20,
      'maxSynapsesPerSegment': 32,  # > 0 selects the fixed-size variant
      'maxSegmentsPerCell': 128,
      'initialPerm': 0.21,
      'permanenceInc': 0.1,
      'permanenceDec': 0.1,
      'globalDecay': 0.0,
      'maxAge': 0,
      'minThreshold': 9,            # min active synapses for best-match search
      'activationThreshold': 12,    # connected active synapses to fire segment
      'outputType': 'normal',
      'pamLength': 3,               # "pay attention mode" length
    },
    # No classifier: this model produces anomaly scores, not predictions.
    'clEnable': False,
    'clParams': None,
    'anomalyParams': {
      u'anomalyCacheRecords': None,
      u'autoDetectThreshold': None,
      u'autoDetectWaitRecords': 2184,
    },
    'trainSPNetOnlyIfRequested': False,
  },
}
| 8,821 | Python | .py | 198 | 33.070707 | 108 | 0.567832 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,779 | remove_tuesdays.py | numenta_nupic-legacy/examples/opf/clients/hotgym/anomaly/one_gym/remove_tuesdays.py | import csv
import shutil
import datetime
# Input data file (rewritten in place) and its safety backup copy.
ORIGINAL = "rec-center-hourly.csv"
BACKUP = "rec-center-hourly-backup.csv"
# Timestamp format used by the hotgym CSV, e.g. '7/2/10 0:00'.
DATE_FORMAT = "%m/%d/%y %H:%M"
def isTuesday(date):
  """Return True if `date` falls on a Tuesday (weekday index 1).

  Uses ``==`` rather than ``is``: comparing ints with ``is`` only works by
  accident of CPython's small-int caching and is not a correctness guarantee.
  """
  return date.weekday() == 1
def withinOctober(date):
  """Return True if `date` lies in October 2010 (start inclusive, end exclusive)."""
  monthStart = datetime.datetime(2010, 10, 1)
  monthEnd = datetime.datetime(2010, 11, 1)
  return monthStart <= date < monthEnd
def run():
  """Rewrite ORIGINAL so every Tuesday in October 2010 reads 5.0 kW.

  A copy of the untouched data is saved to BACKUP first.  The three NuPIC
  header rows (names, types, flags) are passed through unchanged.
  """
  # Backup original
  shutil.copyfile(ORIGINAL, BACKUP)
  # Collect output lines in a list and join once at the end; the previous
  # repeated string += was quadratic in the number of rows.
  outputLines = []
  with open(ORIGINAL, 'rb') as inputFile:
    reader = csv.reader(inputFile)
    headers = reader.next()
    types = reader.next()
    flags = reader.next()
    for row in [headers, types, flags]:
      outputLines.append(",".join(row) + "\n")
    for row in reader:
      dateString = row[0]
      date = datetime.datetime.strptime(dateString, DATE_FORMAT)
      consumption = float(row[1])
      if isTuesday(date) and withinOctober(date):
        # Flatten the target Tuesdays to a constant, artificial value.
        consumption = 5.0
      outputLines.append("%s,%f\n" % (dateString, consumption))
  with open(ORIGINAL, 'wb') as outputFile:
    outputFile.write("".join(outputLines))
# Script entry point.
if __name__ == "__main__":
  run()
| 1,039 | Python | .py | 32 | 28.21875 | 80 | 0.673716 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,780 | run.py | numenta_nupic-legacy/examples/opf/clients/hotgym/anomaly/one_gym/run.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Groups together code used for creating a NuPIC model and dealing with IO.
(This is a component of the One Hot Gym Anomaly Tutorial.)
"""
import importlib
import sys
import csv
import datetime
from nupic.data.inference_shifter import InferenceShifter
from nupic.frameworks.opf.model_factory import ModelFactory
import nupic_anomaly_output
# Help text printed when the script starts.
DESCRIPTION = (
  "Starts a NuPIC model from the model params returned by the swarm\n"
  "and pushes each line of input from the gym into the model. Results\n"
  "are written to an output file (default) or plotted dynamically if\n"
  "the --plot option is specified.\n"
)
# Base name shared by the input CSV and the model-params module.
GYM_NAME = "rec-center-hourly"
DATA_DIR = "."
MODEL_PARAMS_DIR = "./model_params"
# '7/2/10 0:00'
DATE_FORMAT = "%m/%d/%y %H:%M"
def createModel(modelParams):
  """
  Build an OPF model from a params dictionary, with inference switched on
  for the kw_energy_consumption field.
  :param modelParams: Model params dict
  :return: OPF Model object
  """
  opfModel = ModelFactory.create(modelParams)
  opfModel.enableInference({"predictedField": "kw_energy_consumption"})
  return opfModel
def getModelParamsFromName(gymName):
  """
  Given a gym name, assumes a matching model params python module exists within
  the model_params directory and attempts to import it.
  :param gymName: Gym name, used to guess the model params module name.
  :return: OPF Model params dictionary
  :raises Exception: if no matching model_params module can be imported.
  """
  # Gym names may contain spaces/dashes; module names cannot.
  sanitized = gymName.replace(" ", "_").replace("-", "_")
  importName = "model_params.%s_model_params" % sanitized
  # Single-argument print() behaves identically under Python 2 and 3.
  print("Importing model params from %s" % importName)
  try:
    paramsModule = importlib.import_module(importName)
  except ImportError:
    raise Exception("No model params exist for '%s'. Run swarm first!"
                    % gymName)
  # Attribute access kept outside the try: a missing MODEL_PARAMS should
  # surface as an AttributeError, not be mistaken for a failed import.
  return paramsModule.MODEL_PARAMS
def runIoThroughNupic(inputData, model, gymName, plot):
  """
  Handles looping over the input data and passing each row into the given model
  object, as well as extracting the result object and passing it into an output
  handler.
  :param inputData: file path to input data CSV
  :param model: OPF Model object
  :param gymName: Gym name, used for output handler naming
  :param plot: Whether to use matplotlib or not. If false, uses file output.
  """
  shifter = InferenceShifter()
  if plot:
    output = nupic_anomaly_output.NuPICPlotOutput(gymName)
  else:
    output = nupic_anomaly_output.NuPICFileOutput(gymName)
  # `with` guarantees the input file is closed even if the model raises;
  # previously it stayed open on any error.
  with open(inputData, "rb") as inputFile:
    csvReader = csv.reader(inputFile)
    # Skip the three NuPIC header rows (names, types, flags).
    csvReader.next()
    csvReader.next()
    csvReader.next()
    counter = 0
    for row in csvReader:
      counter += 1
      if (counter % 100 == 0):
        # Single-argument print() behaves identically under Python 2 and 3.
        print("Read %i lines..." % counter)
      timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
      consumption = float(row[1])
      result = model.run({
        "timestamp": timestamp,
        "kw_energy_consumption": consumption
      })
      if plot:
        # Align each prediction with the actual value it predicted.
        result = shifter.shift(result)
      prediction = result.inferences["multiStepBestPredictions"][1]
      anomalyScore = result.inferences["anomalyScore"]
      output.write(timestamp, consumption, prediction, anomalyScore)
  output.close()
def runModel(gymName, plot=False):
  """
  Create a model for the named gym and stream its CSV data through it.
  Expects a like-named params module under model_params/ and a like-named
  CSV file in the current directory.
  :param gymName: Important for finding model params and input CSV file
  :param plot: Plot in matplotlib? Don't use this unless matplotlib is
  installed.
  """
  # Single-argument print() behaves identically under Python 2 and 3.
  print("Creating model from %s..." % gymName)
  params = getModelParamsFromName(gymName)
  model = createModel(params)
  inputData = "%s/%s.csv" % (DATA_DIR, gymName.replace(" ", "_"))
  runIoThroughNupic(inputData, model, gymName, plot)
if __name__ == "__main__":
  print DESCRIPTION
  plot = False
  args = sys.argv[1:]
  # Optional --plot flag switches from CSV output to live matplotlib charts.
  if "--plot" in args:
    plot = True
  runModel(GYM_NAME, plot=plot)
| 4,917 | Python | .py | 128 | 35.390625 | 79 | 0.72073 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,781 | nupic_anomaly_output.py | numenta_nupic-legacy/examples/opf/clients/hotgym/anomaly/one_gym/nupic_anomaly_output.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Provides two classes with the same signature for writing data out of NuPIC
models.
(This is a component of the One Hot Gym Anomaly Tutorial.)
"""
import csv
from collections import deque
from abc import ABCMeta, abstractmethod
from nupic.algorithms import anomaly_likelihood
# Try to import matplotlib, but we don't have to.
try:
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.dates import date2num, DateFormatter
except ImportError:
pass
# Number of trailing records shown in the live plot.
WINDOW = 300
# Transparency of the highlight bands drawn behind the charts.
HIGHLIGHT_ALPHA = 0.3
ANOMALY_HIGHLIGHT_COLOR = 'red'
WEEKEND_HIGHLIGHT_COLOR = 'yellow'
# Anomaly-likelihood level at/above which a region is marked anomalous.
ANOMALY_THRESHOLD = 0.9
class NuPICOutput(object):
  """Abstract base for writing model results out (to a file or a live plot)."""
  __metaclass__ = ABCMeta
  def __init__(self, name):
    # Output label: subclasses use it as a file-name prefix / chart title.
    self.name = name
    # Converts raw per-step anomaly scores into anomaly likelihoods.
    self.anomalyLikelihoodHelper = anomaly_likelihood.AnomalyLikelihood()
  @abstractmethod
  def write(self, timestamp, value, predicted, anomalyScore):
    """Record one step: actual value, prediction and anomaly score."""
    pass
  @abstractmethod
  def close(self):
    """Flush and release any resources held by the output."""
    pass
class NuPICFileOutput(NuPICOutput):
  """Writes one CSV row per model step to '<name>_out.csv'."""
  def __init__(self, *args, **kwargs):
    super(NuPICFileOutput, self).__init__(*args, **kwargs)
    # NOTE(review): outputFiles/outputWriters appear unused in this class —
    # possibly leftovers from a multi-output version; verify before removing.
    self.outputFiles = []
    self.outputWriters = []
    self.lineCount = 0
    headerRow = [
      'timestamp', 'kw_energy_consumption', 'prediction',
      'anomaly_score', 'anomaly_likelihood'
    ]
    outputFileName = "%s_out.csv" % self.name
    print "Preparing to output %s data to %s" % (self.name, outputFileName)
    self.outputFile = open(outputFileName, "w")
    self.outputWriter = csv.writer(self.outputFile)
    self.outputWriter.writerow(headerRow)
  def write(self, timestamp, value, predicted, anomalyScore):
    """Append one data row; records with no timestamp are silently skipped."""
    if timestamp is not None:
      # Derive the anomaly likelihood from the raw score before writing.
      anomalyLikelihood = self.anomalyLikelihoodHelper.anomalyProbability(
        value, anomalyScore, timestamp
      )
      outputRow = [timestamp, value, predicted, anomalyScore, anomalyLikelihood]
      self.outputWriter.writerow(outputRow)
      self.lineCount += 1
  def close(self):
    """Close the CSV file and report how many data rows were written."""
    self.outputFile.close()
    print "Done. Wrote %i data lines to %s." % (self.lineCount, self.name)
def extractWeekendHighlights(dates):
  """Find runs of weekend days in `dates`.

  Returns (startIndex, endIndex, color, alpha) tuples suitable for
  highlightChart(); a run still open at the end of the list is closed at
  len(dates) - 1.
  """
  highlights = []
  runStart = None
  for idx, day in enumerate(dates):
    onWeekend = day.weekday() in (5, 6)
    if onWeekend and runStart is None:
      # A new weekend run begins here.
      runStart = idx
    elif not onWeekend and runStart is not None:
      # The run just ended; record it.
      highlights.append(
          (runStart, idx, WEEKEND_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA))
      runStart = None
  if runStart is not None:
    # The list ended mid-weekend; close the run at the last index.
    highlights.append(
        (runStart, len(dates) - 1, WEEKEND_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA))
  return highlights
def extractAnomalyIndices(anomalyLikelihood):
  """Find runs of consecutive likelihoods at or above ANOMALY_THRESHOLD.

  Returns (startIndex, endIndex, color, alpha) tuples suitable for
  highlightChart(); a run still open at the end of the series is closed at
  the last index.
  """
  highlights = []
  runStart = None
  for idx, likelihood in enumerate(anomalyLikelihood):
    isAnomalous = likelihood >= ANOMALY_THRESHOLD
    if isAnomalous and runStart is None:
      # A new anomalous run begins here.
      runStart = idx
    elif not isAnomalous and runStart is not None:
      # The run just ended; record it.
      highlights.append(
          (runStart, idx, ANOMALY_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA))
      runStart = None
  if runStart is not None:
    # The series ended mid-anomaly; close the run at the last index.
    highlights.append(
        (runStart, len(anomalyLikelihood) - 1,
         ANOMALY_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA))
  return highlights
class NuPICPlotOutput(NuPICOutput):
  """Live matplotlib output: a main value chart plus an anomaly chart,
  both showing the trailing WINDOW records, with weekend and anomaly
  regions highlighted."""
  def __init__(self, *args, **kwargs):
    super(NuPICPlotOutput, self).__init__(*args, **kwargs)
    # Turn matplotlib interactive mode on.
    plt.ion()
    # Rolling data buffers; replaced with fixed-size deques on first write().
    self.dates = []
    self.convertedDates = []
    self.value = []
    self.allValues = []
    self.predicted = []
    self.anomalyScore = []
    self.anomalyLikelihood = []
    self.actualLine = None
    self.predictedLine = None
    self.anomalyScoreLine = None
    self.anomalyLikelihoodLine = None
    self.linesInitialized = False
    self._chartHighlights = []
    fig = plt.figure(figsize=(16, 10))
    # Main chart gets 3x the height of the anomaly chart.
    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
    self._mainGraph = fig.add_subplot(gs[0, 0])
    plt.title(self.name)
    plt.ylabel('KW Energy Consumption')
    plt.xlabel('Date')
    self._anomalyGraph = fig.add_subplot(gs[1])
    plt.ylabel('Percentage')
    plt.xlabel('Date')
    # Maximizes window
    mng = plt.get_current_fig_manager()
    mng.resize(*mng.window.maxsize())
    plt.tight_layout()
  def initializeLines(self, timestamp):
    """Create the plot lines, seeded at the first record's timestamp."""
    print "initializing %s" % self.name
    anomalyRange = (0.0, 1.0)
    self.dates = deque([timestamp] * WINDOW, maxlen=WINDOW)
    self.convertedDates = deque(
      [date2num(date) for date in self.dates], maxlen=WINDOW
    )
    self.value = deque([0.0] * WINDOW, maxlen=WINDOW)
    self.predicted = deque([0.0] * WINDOW, maxlen=WINDOW)
    self.anomalyScore = deque([0.0] * WINDOW, maxlen=WINDOW)
    self.anomalyLikelihood = deque([0.0] * WINDOW, maxlen=WINDOW)
    actualPlot, = self._mainGraph.plot(self.dates, self.value)
    self.actualLine = actualPlot
    predictedPlot, = self._mainGraph.plot(self.dates, self.predicted)
    self.predictedLine = predictedPlot
    self._mainGraph.legend(tuple(['actual', 'predicted']), loc=3)
    anomalyScorePlot, = self._anomalyGraph.plot(
      self.dates, self.anomalyScore, 'm'
    )
    anomalyScorePlot.axes.set_ylim(anomalyRange)
    self.anomalyScoreLine = anomalyScorePlot
    # NOTE(review): this line is seeded from self.anomalyScore rather than
    # self.anomalyLikelihood; both deques are all zeros here and write()
    # replaces the y-data each step, so rendering is unaffected.
    anomalyLikelihoodPlot, = self._anomalyGraph.plot(
      self.dates, self.anomalyScore, 'r'
    )
    anomalyLikelihoodPlot.axes.set_ylim(anomalyRange)
    self.anomalyLikelihoodLine = anomalyLikelihoodPlot
    self._anomalyGraph.legend(
      tuple(['anomaly score', 'anomaly likelihood']), loc=3
    )
    dateFormatter = DateFormatter('%m/%d %H:%M')
    self._mainGraph.xaxis.set_major_formatter(dateFormatter)
    self._anomalyGraph.xaxis.set_major_formatter(dateFormatter)
    self._mainGraph.relim()
    self._mainGraph.autoscale_view(True, True, True)
    self.linesInitialized = True
  def highlightChart(self, highlights, chart):
    """Draw vertical highlight bands on `chart`, tracking them for removal."""
    for highlight in highlights:
      # Each highlight contains [start-index, stop-index, color, alpha]
      self._chartHighlights.append(chart.axvspan(
        self.convertedDates[highlight[0]], self.convertedDates[highlight[1]],
        color=highlight[2], alpha=highlight[3]
      ))
  def write(self, timestamp, value, predicted, anomalyScore):
    """Append one record to the rolling buffers and redraw both charts."""
    # We need the first timestamp to initialize the lines at the right X value,
    # so do that check first.
    if not self.linesInitialized:
      self.initializeLines(timestamp)
    anomalyLikelihood = self.anomalyLikelihoodHelper.anomalyProbability(
      value, anomalyScore, timestamp
    )
    self.dates.append(timestamp)
    self.convertedDates.append(date2num(timestamp))
    self.value.append(value)
    # allValues grows without bound; it feeds the y-axis maximum below.
    self.allValues.append(value)
    self.predicted.append(predicted)
    self.anomalyScore.append(anomalyScore)
    self.anomalyLikelihood.append(anomalyLikelihood)
    # Update main chart data
    self.actualLine.set_xdata(self.convertedDates)
    self.actualLine.set_ydata(self.value)
    self.predictedLine.set_xdata(self.convertedDates)
    self.predictedLine.set_ydata(self.predicted)
    # Update anomaly chart data
    self.anomalyScoreLine.set_xdata(self.convertedDates)
    self.anomalyScoreLine.set_ydata(self.anomalyScore)
    self.anomalyLikelihoodLine.set_xdata(self.convertedDates)
    self.anomalyLikelihoodLine.set_ydata(self.anomalyLikelihood)
    # Remove previous highlighted regions
    for poly in self._chartHighlights:
      poly.remove()
    self._chartHighlights = []
    weekends = extractWeekendHighlights(self.dates)
    anomalies = extractAnomalyIndices(self.anomalyLikelihood)
    # Highlight weekends in main chart
    self.highlightChart(weekends, self._mainGraph)
    # Highlight anomalies in anomaly chart
    self.highlightChart(anomalies, self._anomalyGraph)
    # Keep 2% headroom above the largest value ever seen.
    maxValue = max(self.allValues)
    self._mainGraph.relim()
    self._mainGraph.axes.set_ylim(0, maxValue + (maxValue * 0.02))
    self._mainGraph.relim()
    self._mainGraph.autoscale_view(True, scaley=False)
    self._anomalyGraph.relim()
    self._anomalyGraph.autoscale_view(True, True, True)
    plt.draw()
  def close(self):
    """Leave interactive mode and block on the final figure."""
    plt.ioff()
    plt.show()
# Explicit but redundant: both classes already subclass NuPICOutput, so
# isinstance/issubclass would recognize them without these register() calls.
NuPICOutput.register(NuPICFileOutput)
NuPICOutput.register(NuPICPlotOutput)
| 9,450 | Python | .py | 245 | 33.763265 | 80 | 0.715225 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,782 | __init__.py | numenta_nupic-legacy/examples/opf/clients/hotgym/anomaly/one_gym/model_params/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
| 976 | Python | .py | 20 | 47.8 | 72 | 0.665272 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
# Model configuration produced by swarming over the rec-center-hourly data.
# Parameter values are unchanged; the layout has been normalized from the
# original pprint-style dump.
MODEL_PARAMS = {
  # No aggregation: records are fed to the model as-is.
  'aggregationInfo': {
    'days': 0,
    'fields': [],
    'hours': 0,
    'microseconds': 0,
    'milliseconds': 0,
    'minutes': 0,
    'months': 0,
    'seconds': 0,
    'weeks': 0,
    'years': 0,
  },
  'model': 'HTMPrediction',
  'modelParams': {
    'anomalyParams': {
      u'anomalyCacheRecords': None,
      u'autoDetectThreshold': None,
      u'autoDetectWaitRecords': None,
    },
    # SDR classifier producing 1-step-ahead predictions.
    'clParams': {
      'alpha': 0.01962508905154251,
      'verbosity': 0,
      'regionName': 'SDRClassifierRegion',
      'steps': '1',
    },
    'inferenceType': 'TemporalAnomaly',
    'sensorParams': {
      'encoders': {
        # Higher-resolution encoding fed only to the classifier.
        '_classifierInput': {
          'classifierOnly': True,
          'clipInput': True,
          'fieldname': 'kw_energy_consumption',
          'maxval': 53.0,
          'minval': 0.0,
          'n': 115,
          'name': '_classifierInput',
          'type': 'ScalarEncoder',
          'w': 21,
        },
        u'kw_energy_consumption': {
          'clipInput': True,
          'fieldname': 'kw_energy_consumption',
          'maxval': 53.0,
          'minval': 0.0,
          'n': 29,
          'name': 'kw_energy_consumption',
          'type': 'ScalarEncoder',
          'w': 21,
        },
        u'timestamp_dayOfWeek': None,
        u'timestamp_timeOfDay': {
          'fieldname': 'timestamp',
          'name': 'timestamp',
          'timeOfDay': (21, 6.090344152692538),
          'type': 'DateEncoder',
        },
        u'timestamp_weekend': {
          'fieldname': 'timestamp',
          'name': 'timestamp',
          'type': 'DateEncoder',
          'weekend': (21, 1),
        },
      },
      'sensorAutoReset': None,
      'verbosity': 0,
    },
    # --- Spatial Pooler ---
    'spEnable': True,
    'spParams': {
      'columnCount': 2048,
      'globalInhibition': 1,
      'inputWidth': 0,
      'boostStrength': 2.0,
      'numActiveColumnsPerInhArea': 40,
      'potentialPct': 0.8,
      'seed': 1956,
      'spVerbosity': 0,
      'spatialImp': 'cpp',
      'synPermActiveInc': 0.05,
      'synPermConnected': 0.1,
      'synPermInactiveDec': 0.08568228006654939,
    },
    # --- Temporal Memory ---
    'tmEnable': True,
    'tmParams': {
      'activationThreshold': 12,
      'cellsPerColumn': 32,
      'columnCount': 2048,
      'globalDecay': 0.0,
      'initialPerm': 0.21,
      'inputWidth': 2048,
      'maxAge': 0,
      'maxSegmentsPerCell': 128,
      'maxSynapsesPerSegment': 32,
      'minThreshold': 10,
      'newSynapseCount': 20,
      'outputType': 'normal',
      'pamLength': 1,
      'permanenceDec': 0.1,
      'permanenceInc': 0.1,
      'seed': 1960,
      'temporalImp': 'cpp',
      'verbosity': 0,
    },
    'trainSPNetOnlyIfRequested': False,
  },
  'predictAheadTime': None,
  'version': 1,
}
| 5,565 | Python | .py | 85 | 23.094118 | 116 | 0.25292 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,784 | hotgym.py | numenta_nupic-legacy/examples/opf/clients/hotgym/simple/hotgym.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""A simple client to create a CLA model for hotgym."""
import csv
import datetime
import logging
from pkg_resources import resource_filename
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.model_factory import ModelFactory
from nupic.frameworks.opf.prediction_metrics_manager import MetricsManager
import model_params
_LOGGER = logging.getLogger(__name__)
_INPUT_FILE_PATH = resource_filename(
"nupic.datafiles", "extra/hotgym/rec-center-hourly.csv"
)
# Metrics tracked while the model runs: the model's 1-step multiStep
# predictions versus a trivial (last-value) baseline, each scored with both
# aae and altMAPE over a 1000-record sliding window.
_METRIC_SPECS = (
    MetricSpec(field='consumption', metric='multiStep',
               inferenceElement='multiStepBestPredictions',
               params={'errorMetric': 'aae', 'window': 1000, 'steps': 1}),
    MetricSpec(field='consumption', metric='trivial',
               inferenceElement='prediction',
               params={'errorMetric': 'aae', 'window': 1000, 'steps': 1}),
    MetricSpec(field='consumption', metric='multiStep',
               inferenceElement='multiStepBestPredictions',
               params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': 1}),
    MetricSpec(field='consumption', metric='trivial',
               inferenceElement='prediction',
               params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': 1}),
)
# Number of input records to feed into the model before stopping.
_NUM_RECORDS = 4000
def createModel():
  """Build the OPF model from the canned params in model_params.py."""
  params = model_params.MODEL_PARAMS
  return ModelFactory.create(params)
def runHotgym():
  """Run the hotgym demo: stream the rec-center CSV through the model.

  Creates the model, feeds each record in, updates the aae/altMAPE metrics,
  logs the 1-step altMAPE every 100 records, and stops after _NUM_RECORDS.
  """
  model = createModel()
  model.enableInference({'predictedField': 'consumption'})
  metricsManager = MetricsManager(_METRIC_SPECS, model.getFieldInfo(),
                                  model.getInferenceType())
  # 'with' guarantees the file is closed even if model.run() raises.
  with open(_INPUT_FILE_PATH) as fin:
    reader = csv.reader(fin)
    # next() works on both Python 2 and 3; reader.next() is Python 2 only.
    headers = next(reader)
    # Skip the two remaining NuPIC header rows (field types and flags).
    next(reader)
    next(reader)
    for i, record in enumerate(reader, start=1):
      modelInput = dict(zip(headers, record))
      modelInput["consumption"] = float(modelInput["consumption"])
      modelInput["timestamp"] = datetime.datetime.strptime(
          modelInput["timestamp"], "%m/%d/%y %H:%M")
      result = model.run(modelInput)
      result.metrics = metricsManager.update(result)
      isLast = i == _NUM_RECORDS
      if i % 100 == 0 or isLast:
        _LOGGER.info("After %i records, 1-step altMAPE=%f", i,
                     result.metrics["multiStepBestPredictions:multiStep:"
                                    "errorMetric='altMAPE':steps=1:window=1000:"
                                    "field=consumption"])
      if isLast:
        break
if __name__ == "__main__":
  # Script entry point: INFO level makes the periodic altMAPE lines visible.
  logging.basicConfig(level=logging.INFO)
  runHotgym()
| 3,515 | Python | .py | 78 | 39.089744 | 79 | 0.660427 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,785 | model_params.py | numenta_nupic-legacy/examples/opf/clients/hotgym/simple/model_params.py |
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
MODEL_PARAMS = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [('consumption', 'sum')],
'hours': 1,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Include the encoders we use
'encoders': {
u'consumption': {
'fieldname': u'consumption',
'resolution': 0.88,
'seed': 1,
'name': u'consumption',
'type': 'RandomDistributedScalarEncoder',
},
'timestamp_timeOfDay': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (21, 1),
'type': 'DateEncoder'},
'timestamp_weekend': { 'fieldname': u'timestamp',
'name': u'timestamp_weekend',
'type': 'DateEncoder',
'weekend': 21}
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset': None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity': 0,
# Spatial Pooler implementation selector.
# Options: 'py', 'cpp' (speed optimized, new)
'spatialImp': 'cpp',
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses.
'potentialPct': 0.85,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10.
'synPermConnected': 0.1,
'synPermActiveInc': 0.04,
'synPermInactiveDec': 0.005,
# boostStrength controls the strength of boosting. It should be a
# a number greater or equal than 0.0. No boosting is applied if
# boostStrength=0.0. Boosting encourages efficient usage of SP columns.
'boostStrength': 3.0,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Memory implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# The number of synapses added to a segment during learning
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial permanence for newly created synapses
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec': 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName': 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity': 0,
# This controls how fast the classifier learns/forgets. Higher
# values make it adapt faster and forget older patterns faster.
'alpha': 0.1,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1,5',
'implementation': 'cpp',
},
'trainSPNetOnlyIfRequested': False,
},
}
| 9,208 | Python | .py | 199 | 34.055276 | 108 | 0.573039 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,786 | run.py | numenta_nupic-legacy/examples/opf/clients/hotgym/prediction/one_gym/run.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Groups together code used for creating a NuPIC model and dealing with IO.
(This is a component of the One Hot Gym Prediction Tutorial.)
"""
import importlib
import sys
import csv
import datetime
from nupic.data.inference_shifter import InferenceShifter
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.model_factory import ModelFactory
from nupic.frameworks.opf.prediction_metrics_manager import MetricsManager
import nupic_output
DESCRIPTION = (
"Starts a NuPIC model from the model params returned by the swarm\n"
"and pushes each line of input from the gym into the model. Results\n"
"are written to an output file (default) or plotted dynamically if\n"
"the --plot option is specified.\n"
"NOTE: You must run ./swarm.py before this, because model parameters\n"
"are required to run NuPIC.\n"
)
GYM_NAME = "rec-center-hourly" # or use "rec-center-every-15m-large"
DATA_DIR = "."
MODEL_PARAMS_DIR = "./model_params"
# '7/2/10 0:00'
DATE_FORMAT = "%m/%d/%y %H:%M"
_METRIC_SPECS = (
MetricSpec(field='kw_energy_consumption', metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': 'aae', 'window': 1000, 'steps': 1}),
MetricSpec(field='kw_energy_consumption', metric='trivial',
inferenceElement='prediction',
params={'errorMetric': 'aae', 'window': 1000, 'steps': 1}),
MetricSpec(field='kw_energy_consumption', metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': 1}),
MetricSpec(field='kw_energy_consumption', metric='trivial',
inferenceElement='prediction',
params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': 1}),
)
def createModel(modelParams):
  """Create an OPF model from *modelParams*, predicting kw_energy_consumption."""
  opfModel = ModelFactory.create(modelParams)
  opfModel.enableInference({"predictedField": "kw_energy_consumption"})
  return opfModel
def getModelParamsFromName(gymName):
  """Import and return the MODEL_PARAMS dict for the given gym name.

  The gym name has spaces and dashes normalized to underscores and is
  resolved as the module model_params.<cleanName>_model_params.

  :param gymName: name of the gym (e.g. "rec-center-hourly")
  :raises Exception: if no matching model params module exists (i.e. the
      swarm has not been run yet)
  """
  importName = "model_params.%s_model_params" % (
    gymName.replace(" ", "_").replace("-", "_")
  )
  # print() form works on Python 2 and 3; the bare statement did not.
  print("Importing model params from %s" % importName)
  try:
    importedModelParams = importlib.import_module(importName).MODEL_PARAMS
  except ImportError:
    raise Exception("No model params exist for '%s'. Run swarm first!"
                    % gymName)
  return importedModelParams
def runIoThroughNupic(inputData, model, gymName, plot):
  """Stream the gym CSV through the model, writing or plotting predictions.

  :param inputData: path to the input CSV (NuPIC 3-row header format)
  :param model: OPF model with inference already enabled
  :param gymName: label used for the output file / plot title
  :param plot: when True render a live plot instead of writing a CSV
  """
  shifter = InferenceShifter()
  if plot:
    output = nupic_output.NuPICPlotOutput([gymName])
  else:
    output = nupic_output.NuPICFileOutput([gymName])
  metricsManager = MetricsManager(_METRIC_SPECS, model.getFieldInfo(),
                                  model.getInferenceType())
  counter = 0
  # "rb" is the correct csv mode on Python 2 (csv does its own newline
  # handling there).
  inputFile = open(inputData, "rb")
  try:
    csvReader = csv.reader(inputFile)
    # Skip the three NuPIC header rows (field names, types, special flags).
    # next() works on Python 2 and 3; .next() does not.
    next(csvReader)
    next(csvReader)
    next(csvReader)
    for row in csvReader:
      counter += 1
      timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
      consumption = float(row[1])
      result = model.run({
        "timestamp": timestamp,
        "kw_energy_consumption": consumption
      })
      result.metrics = metricsManager.update(result)
      if counter % 100 == 0:
        print("Read %i lines..." % counter)
        print("After %i records, 1-step altMAPE=%f" % (counter,
              result.metrics["multiStepBestPredictions:multiStep:"
                             "errorMetric='altMAPE':steps=1:window=1000:"
                             "field=kw_energy_consumption"]))
      if plot:
        # Align predictions with the actual values they predict.
        result = shifter.shift(result)
      prediction = result.inferences["multiStepBestPredictions"][1]
      output.write([timestamp], [consumption], [prediction])
      if plot and counter % 20 == 0:
        output.refreshGUI()
  finally:
    # Close the input even when model.run() raises (the original leaked it).
    inputFile.close()
  output.close()
def runModel(gymName, plot=False):
  """Create a model for *gymName* and run its CSV through it.

  The input file is resolved as DATA_DIR/<gymName with spaces as "_">.csv.
  """
  print "Creating model from %s..." % gymName
  model = createModel(getModelParamsFromName(gymName))
  inputData = "%s/%s.csv" % (DATA_DIR, gymName.replace(" ", "_"))
  runIoThroughNupic(inputData, model, gymName, plot)
if __name__ == "__main__":
  print DESCRIPTION
  plot = False
  args = sys.argv[1:]
  # --plot switches from CSV output to a live matplotlib plot.
  if "--plot" in args:
    plot = True
  runModel(GYM_NAME, plot=plot)
| 5,217 | Python | .py | 125 | 36.976 | 78 | 0.683577 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,787 | swarm_description.py | numenta_nupic-legacy/examples/opf/clients/hotgym/prediction/one_gym/swarm_description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Swarm configuration: describes the input stream, the fields to consider,
# and what to predict. Consumed by swarm.py via permutations_runner.
SWARM_DESCRIPTION = {
  "includedFields": [
    {
      "fieldName": "timestamp",
      "fieldType": "datetime"
    },
    {
      "fieldName": "kw_energy_consumption",
      "fieldType": "float",
      # min/max bound the value range considered by the scalar encoder.
      "maxValue": 53.0,
      "minValue": 0.0
    }
  ],
  "streamDef": {
    "info": "kw_energy_consumption",
    "version": 1,
    "streams": [
      {
        "info": "Rec Center",
        # Path resolved relative to the working directory when swarming.
        "source": "file://rec-center-hourly.csv",
        "columns": [
          "*"
        ]
      }
    ]
  },
  # Predict kw_energy_consumption one step ahead.
  "inferenceType": "TemporalMultiStep",
  "inferenceArgs": {
    "predictionSteps": [
      1
    ],
    "predictedField": "kw_energy_consumption"
  },
  # -1 means iterate over the entire input stream.
  "iterationCount": -1,
  "swarmSize": "medium"
}
| 1,675 | Python | .py | 56 | 26.053571 | 72 | 0.606679 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,788 | cleanup.py | numenta_nupic-legacy/examples/opf/clients/hotgym/prediction/one_gym/cleanup.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Simple module used for cleaning up the file system after running the One Hot Gym
Prediction Tutorial.
"""
import os
import shutil
import re
DESCRIPTION = "Removes all generated files so you can start from scratch.\n"
def cleanDirectoryCruft(directory):
  """Delete generated files (*_out.csv, *.pyc) directly inside *directory*.

  A missing directory is silently ignored.
  """
  if os.path.exists(directory):
    for f in os.listdir(directory):
      # str.endswith with a tuple replaces the two regex searches.
      if f.endswith(("_out.csv", ".pyc")):
        print("Removing %s" % f)
        os.remove(os.path.join(directory, f))


def cleanUp(directory=None, workingDirs=None):
  """Remove generated cruft from *directory* (default: current directory).

  Also cleans the model_params dir of .pyc files and removes each of
  *workingDirs* (resolved relative to *directory*) entirely.
  """
  if directory is None:
    directory = os.getcwd()
  # Cleanup this dir.
  cleanDirectoryCruft(directory)
  # Cleanup model_params dir (for pyc files).
  cleanDirectoryCruft("model_params")
  # Cleanup working dirs.
  if workingDirs is not None:
    for doomed in workingDirs:
      doomedPath = os.path.join(directory, doomed)
      if os.path.exists(doomedPath):
        print("Removing %s" % doomedPath)
        shutil.rmtree(doomedPath)
if __name__ == "__main__":
  print DESCRIPTION
  # Default invocation also removes the swarm working directory.
  cleanUp(workingDirs=["swarm"])
| 2,028 | Python | .py | 52 | 36.134615 | 80 | 0.688358 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,789 | nupic_output.py | numenta_nupic-legacy/examples/opf/clients/hotgym/prediction/one_gym/nupic_output.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Provides two classes with the same signature for writing data out of NuPIC
models.
(This is a component of the One Hot Gym Prediction Tutorial.)
"""
import csv
from collections import deque
from abc import ABCMeta, abstractmethod
# Try to import matplotlib, but we don't have to.
try:
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.dates import date2num
except ImportError:
pass
WINDOW = 100
class NuPICOutput(object):
  """Abstract interface for emitting model results (to file or to a plot).

  Python 2 style abstract base: subclasses must implement write() and
  close().
  """
  __metaclass__ = ABCMeta

  def __init__(self, names, showAnomalyScore=False):
    self.names, self.showAnomalyScore = names, showAnomalyScore

  @abstractmethod
  def write(self, timestamps, actualValues, predictedValues,
            predictionStep=1):
    """Record one batch of (timestamp, actual, predicted) values."""

  @abstractmethod
  def close(self):
    """Flush and release any underlying resources."""
class NuPICFileOutput(NuPICOutput):
  """NuPICOutput that appends rows to one <name>_out.csv file per name."""

  def __init__(self, *args, **kwargs):
    super(NuPICFileOutput, self).__init__(*args, **kwargs)
    self.outputFiles, self.outputWriters, self.lineCounts = [], [], []
    headerRow = ['timestamp', 'kw_energy_consumption', 'prediction']
    for name in self.names:
      outputFileName = "%s_out.csv" % name
      print("Preparing to output %s data to %s" % (name, outputFileName))
      handle = open(outputFileName, "w")
      writer = csv.writer(handle)
      writer.writerow(headerRow)
      self.lineCounts.append(0)
      self.outputFiles.append(handle)
      self.outputWriters.append(writer)

  def write(self, timestamps, actualValues, predictedValues,
            predictionStep=1):
    """Append one (timestamp, actual, prediction) row per tracked name.

    Entries whose timestamp is None are skipped.
    """
    assert len(timestamps) == len(actualValues) == len(predictedValues)
    for index, writer in enumerate(self.outputWriters):
      timestamp = timestamps[index]
      if timestamp is not None:
        writer.writerow([timestamp, actualValues[index],
                         predictedValues[index]])
        self.lineCounts[index] += 1

  def close(self):
    """Close every output file and report how many rows each received."""
    for index, name in enumerate(self.names):
      self.outputFiles[index].close()
      print("Done. Wrote %i data lines to %s." % (self.lineCounts[index],
                                                  name))
class NuPICPlotOutput(NuPICOutput):
  """NuPICOutput that renders a live matplotlib plot per tracked name.

  Keeps a sliding window of the most recent WINDOW points for each plot.
  Requires matplotlib (imported optionally at module scope).
  """

  def __init__(self, *args, **kwargs):
    super(NuPICPlotOutput, self).__init__(*args, **kwargs)
    # Turn matplotlib interactive mode on.
    plt.ion()
    self.dates = []
    self.convertedDates = []
    self.actualValues = []
    self.predictedValues = []
    self.actualLines = []
    self.predictedLines = []
    self.linesInitialized = False
    self.graphs = []
    plotCount = len(self.names)
    plotHeight = max(plotCount * 3, 6)
    fig = plt.figure(figsize=(14, plotHeight))
    gs = gridspec.GridSpec(plotCount, 1)
    # One stacked subplot per name.
    for index in range(len(self.names)):
      self.graphs.append(fig.add_subplot(gs[index, 0]))
      plt.title(self.names[index])
      plt.ylabel('KW Energy Consumption')
      plt.xlabel('Date')
    plt.tight_layout()

  def initializeLines(self, timestamps):
    # Lazily create the line artists once the first timestamps are known,
    # seeding each sliding window with WINDOW copies of the first timestamp.
    for index in range(len(self.names)):
      print "initializing %s" % self.names[index]
      # graph = self.graphs[index]
      self.dates.append(deque([timestamps[index]] * WINDOW, maxlen=WINDOW))
      self.convertedDates.append(deque(
        [date2num(date) for date in self.dates[index]], maxlen=WINDOW
      ))
      self.actualValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
      self.predictedValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
      actualPlot, = self.graphs[index].plot(
        self.dates[index], self.actualValues[index]
      )
      self.actualLines.append(actualPlot)
      predictedPlot, = self.graphs[index].plot(
        self.dates[index], self.predictedValues[index]
      )
      self.predictedLines.append(predictedPlot)
    self.linesInitialized = True

  def write(self, timestamps, actualValues, predictedValues,
            predictionStep=1):
    """Push one batch of points into each sliding window and redraw."""
    assert len(timestamps) == len(actualValues) == len(predictedValues)
    # We need the first timestamp to initialize the lines at the right X value,
    # so do that check first.
    if not self.linesInitialized:
      self.initializeLines(timestamps)
    for index in range(len(self.names)):
      self.dates[index].append(timestamps[index])
      self.convertedDates[index].append(date2num(timestamps[index]))
      self.actualValues[index].append(actualValues[index])
      self.predictedValues[index].append(predictedValues[index])
      # Update data
      self.actualLines[index].set_xdata(self.convertedDates[index])
      self.actualLines[index].set_ydata(self.actualValues[index])
      self.predictedLines[index].set_xdata(self.convertedDates[index])
      self.predictedLines[index].set_ydata(self.predictedValues[index])
      self.graphs[index].relim()
      self.graphs[index].autoscale_view(True, True, True)
    plt.draw()
    plt.legend(('actual','predicted'), loc=3)

  def refreshGUI(self):
    """Give plot a pause, so data is drawn and GUI's event loop can run.
    """
    plt.pause(0.0001)

  def close(self):
    # Leave interactive mode and block on the final window.
    plt.ioff()
    plt.show()
# Redundant with the inheritance above, but makes the ABC registration
# explicit for both concrete implementations.
NuPICOutput.register(NuPICFileOutput)
NuPICOutput.register(NuPICPlotOutput)
| 6,193 | Python | .py | 154 | 35.194805 | 79 | 0.697162 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,790 | swarm.py | numenta_nupic-legacy/examples/opf/clients/hotgym/prediction/one_gym/swarm.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Groups together the code dealing with swarming.
(This is a component of the One Hot Gym Prediction Tutorial.)
"""
import os
import pprint
# add logging to output errors to stdout
import logging
logging.basicConfig()
from nupic.swarming import permutations_runner
from swarm_description import SWARM_DESCRIPTION
INPUT_FILE = "rec-center-hourly.csv"
DESCRIPTION = (
"This script runs a swarm on the input data (rec-center-hourly.csv) and\n"
"creates a model parameters file in the `model_params` directory containing\n"
"the best model found by the swarm. Dumps a bunch of crud to stdout because\n"
"that is just what swarming does at this point. You really don't need to\n"
"pay any attention to it.\n"
)
def modelParamsToString(modelParams):
  """Return *modelParams* pretty-printed with a 2-space indent."""
  return pprint.pformat(modelParams, indent=2)


def writeModelParamsToFile(modelParams, name):
  """Write *modelParams* into model_params/<cleanName>_model_params.py.

  Spaces and dashes in *name* are normalized to underscores, and the
  model_params directory gets an __init__.py so it is importable.

  :returns: absolute path of the file written
  """
  cleanName = name.replace(" ", "_").replace("-", "_")
  paramsName = "%s_model_params.py" % cleanName
  outDir = os.path.join(os.getcwd(), 'model_params')
  if not os.path.isdir(outDir):
    os.mkdir(outDir)
  # Create an __init__.py so the params are recognized as a package.
  initPath = os.path.join(outDir, '__init__.py')
  open(initPath, 'a').close()
  # Reuse outDir instead of rebuilding the path from os.getcwd() again.
  outPath = os.path.join(outDir, paramsName)
  # Text mode ("w", not "wb"): we write a str, which also stays correct
  # under Python 3; 'with' guarantees the file is closed.
  with open(outPath, "w") as outFile:
    outFile.write("MODEL_PARAMS = \\\n%s" % modelParamsToString(modelParams))
  return outPath
def swarmForBestModelParams(swarmConfig, name, maxWorkers=4):
  """Run a swarm over *swarmConfig* and persist the best params found.

  :param swarmConfig: swarm description dict (see swarm_description.py)
  :param name: label used both for swarm output and the params file name
  :param maxWorkers: number of parallel swarm worker processes
  :returns: path of the written model params file
  """
  outputLabel = name
  # All swarm scratch output goes under ./swarm.
  permWorkDir = os.path.abspath('swarm')
  if not os.path.exists(permWorkDir):
    os.mkdir(permWorkDir)
  modelParams = permutations_runner.runWithConfig(
    swarmConfig,
    {"maxWorkers": maxWorkers, "overwrite": True},
    outputLabel=outputLabel,
    outDir=permWorkDir,
    permWorkDir=permWorkDir,
    verbosity=0
  )
  modelParamsFile = writeModelParamsToFile(modelParams, name)
  return modelParamsFile
def printSwarmSizeWarning(size):
  """Print an expectation-setting banner for the given swarm size.

  Fixed: the original compared with ``is``, which tests object identity and
  only matched string literals by the accident of CPython interning; ``==``
  compares values.
  """
  if size == "small":
    print("= THIS IS A DEBUG SWARM. DON'T EXPECT YOUR MODEL RESULTS TO BE GOOD.")
  elif size == "medium":
    print("= Medium swarm. Sit back and relax, this could take awhile.")
  else:
    print("= LARGE SWARM! Might as well load up the Star Wars Trilogy.")
def swarm(filePath):
  """Swarm over the CSV at *filePath* and report the params file written.

  The swarm label is the CSV's base name without extension.
  """
  name = os.path.splitext(os.path.basename(filePath))[0]
  print "================================================="
  print "= Swarming on %s data..." % name
  printSwarmSizeWarning(SWARM_DESCRIPTION["swarmSize"])
  print "================================================="
  modelParams = swarmForBestModelParams(SWARM_DESCRIPTION, name)
  print "\nWrote the following model param files:"
  print "\t%s" % modelParams
if __name__ == "__main__":
  # Script entry point: swarm over the bundled rec-center-hourly data.
  print DESCRIPTION
  swarm(INPUT_FILE)
| 3,776 | Python | .py | 90 | 39.3 | 80 | 0.703138 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,791 | data_generator.pyw | numenta_nupic-legacy/examples/opf/tools/data_generator.pyw | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Author: Surabhi Gupta
from Tkinter import *
import math
from nupic.data.generators import data_generator
from nupic.data.generators import distributions
class DataGeneratorApp():
  def __init__(self, master, width=1200, height=500):
    """This class can be used to generate artificial datasets for the purpose of
    testing, debugging and evaluation. Freehand drawing of data patterns is
    supported using a gui in addition to predefined distributions. The gui also
    facilitates selection of the sampling rate using a slider
    """
    self.canvas = Canvas(master, bg= 'grey', width=width, height=height)
    self.canvas.grid(row=0, column=0, columnspan=3, rowspan=1)
    self.width, self.height = (width, height)
    self.color='midnight blue'
    self.master=master
    # _defineButtons is defined elsewhere in this class (outside this excerpt);
    # presumably it also creates self.slider and self.log used below — verify.
    self._defineButtons(master)
    self.x, self.y= None, None #Initializing mouse position
    self.draw=False
    self.numLines, self.records=[], {}
    self.pointer = None
    # Backing data generator; x/y pixel positions are recorded as int fields.
    self.dg=data_generator.DataGenerator()
    self.dg.defineField('xPos', dict(dataType='int',minval=0,maxval=self.width,
                                     forced=True))
    self.dg.defineField('yPos', dict(dataType='int',minval=0,maxval=self.height,
                                     forced=True))
    #Drawing the vertical grid lines
    # NOTE(review): width/10 relies on Python 2 integer division.
    for i in range(width/10, width, width/10):
      self.canvas.create_line(i, 0, i, height, fill='alice blue')
    #Drawing the x-axis
    self.canvas.create_line(0, height*8.5/9, width, height*8.5/9, fill='alice blue')
  def _drawFreeForm(self, event):
    """ The last and current x,y cursor coordinates are updated. If in drawing
    mode, records are created from the x and y coordinates.
    """
    self.lastx, self.lasty = self.x, self.y
    self.x, self.y=event.x, event.y
    # NOTE(review): 'str' shadows the builtin; harmless here but worth renaming.
    str = "mouse at x=%d y=%d" % (self.x, self.y)
    self.master.title(str)
    # Replace the previous hover pointer, if any.
    if self.pointer is not None:
      self.canvas.delete(self.pointer)
    drawPointer=True
    # Snap the pointer to a recorded y-value within +/-5 px of the cursor x;
    # otherwise draw it on the x-axis baseline below.
    for x in range(self.x-5, self.x+5):
      if drawPointer:
        if x in self.records:
          self.pointer = self.canvas.create_oval(x-4, self.records[x][1]-4, x+4,\
                          self.records[x][1]+4, width=0, fill=self.color)
          drawPointer=False
    if drawPointer:
      self.pointer = self.canvas.create_oval(self.x-4, self.height*8.5/9-4, self.x+4, \
                      self.height*8.5/9+4, width=0, fill=self.color)
def _createLine(self, fromX, fromY, toX, toY, width=2):
line = self.canvas.create_line(fromX, fromY, toX, toY, fill=self.color, width=width)
self.numLines.append(line)
return line
  def motionCallback(self, event, freeForm=True):
    """ Free form drawing is permitted whenever the mouse is moving."""
    # Delegates to _drawFreeForm; the freeForm flag is currently unused.
    self._drawFreeForm(event)
def buttonReleaseCallback(self, event):
if (self.lastx, self.lasty)<>(None, None):
self._createLine(self.lastx, self.lasty, self.lastx, self.height*8.5/9)
self._createLine(self.x, self.lasty, self.x, self.y)
############################################################################
def mousePressedMotionCallback(self, event):
self.lastx, self.lasty = self.x, self.y
self.x, self.y=event.x, event.y
str = "mouse at x=%d y=%d" % (self.x, self.y)
self.master.title(str)
if (self.lastx, self.lasty)<>(None, None):
line = self._createLine(self.lastx, self.lasty, self.x, self.y)
self.dg.generateRecord([self.x, self.y])
self.records[self.x]=[line, self.y]
self._addToLog([self.x, self.y], 'Adding')
for i in range(self.lastx, self.x):
self.records[i]=['',(self.lasty+self.y)/2]
############################################################################
def mousePressCallback(self, event):
"""Callback for mouse press. The cursor y-position is marked with a vertical
line.
"""
self.lastx, self.lasty = self.x, self.y
self.x, self.y=event.x, event.y
if (self.lastx, self.lasty)<>(None, None):
self._createLine(self.lastx, self.lasty, self.lastx, self.height*8.5/9)
self._createLine(self.x, self.lasty, self.x, self.y)
def refreshCallback(self):
"""Callback for the refresh button. All the currently displayed lines except
the x-axis and y-axis are erased and all stored records are removed.
"""
for i in self.numLines:
self.canvas.delete(i)
self.records={}
self.log.insert('1.0', "Erasing the drawing board\n")
self.dg.removeAllRecords()
    def sineWave(self):
        """Callback for drawing sine waves (amplitude 0.4, period taken from
        the slider)."""
        sine = distributions.SineWave(dict(amplitude=0.4, period=self.slider.get()))
        records = sine.getData(1000)
        records = [r+0.5 for r in records]  # shift up so all values are positive for display
        self.drawWaveform(records, factor=2)
    def squareWave(self):
        """Callback for drawing square waves"""
        records=[]
        # NOTE(review): the loop variable 'i' is reused by all three nested
        # loops, so the two outer ranges act only as repeat counts; also,
        # square_function(i, 1) with integer timesteps always returns 1
        # (fmod(i, 1) == 0) - confirm both are intended.
        for i in range(0,500,10):
            for i in range(0,15,10):
                for i in range(24):
                    waveValue = self.square_function(i, 1)
                    records.append(waveValue)
        for i in range(0,15,10):
            for i in range(24):
                waveValue = self.square_function(i, 1)
                if waveValue >= 1:
                    waveValue = waveValue*2  # double the high level for the trailing cycles
                records.append(waveValue)
        records = [r/2.01 for r in records]  # scale into (0, 1) for display
        self.drawWaveform(records, factor=1)
def sinePlusNoise(self):
"""Callback for drawing noisy sine waves"""
records=[]
for i in range(15):
for i in range(1,360,5):
waveValue = self.sine_function(math.radians(i), 1)
secondWaveValue = self.sine_function(math.radians(i), 32)/4
finalValue = waveValue + secondWaveValue
records.append(finalValue)
records = [r+1.0 for r in records]
self.drawWaveform(records, factor=5)
def sawToothWave(self):
"""Callback for drawing sawtooth waves"""
records=[]
for i in range(15):
for i in range(1,360, int(self.slider.get())):
waveValue = self.sawtooth_function(math.radians(i), 1)
records.append(waveValue)
records = [r+1.0 for r in records]
self.drawWaveform(records, factor=5)
def sineCompositeWave(self):
"""Callback for drawing composite sine waves"""
records=[]
for i in range(500):
for i in range(1,360,10):
waveValue = self.sine_function(math.radians(i), 1)
secondWaveValue = self.sine_function(math.radians(i), 32) / 4
finalValue = waveValue + secondWaveValue
records.append(finalValue)
records = [r+1.0 for r in records]
self.drawWaveform(records,factor=2)
def triangleWave(self):
"""Callback for drawing triangle waves"""
records=[]
for i in range(15):
for i in range(1,360,int(self.slider.get())):
waveValue = self.triangle_function(math.radians(i), 1)
records.append(waveValue)
records = [r+1.0 for r in records]
self.drawWaveform(records,factor=6)
def adjustValues(self, records):
""" The data points that constitute a waveform in the range (0, 1) are
scaled to the height of the window
"""
for i in xrange(len(records)):
#records[i]=records[i]*(self.height*(8.4/9)*0.5)
records[i]=records[i]*(self.height*(8.4/9))
return records
def drawWaveform(self, records, factor=5):
"""Refresh and draw a waveform adjusted to the width of the screen and the
horizontal density of the waveform"""
self.refreshCallback()
records = self.adjustValues(records)
factor = self.slider.get()
for i in range(1,len(records)):
#print (i-1)*factor, records[i-1], i*factor, records[i]
line = self.canvas.create_line((i-1)*factor, records[i-1]+2, i*factor,\
records[i]+2, fill=self.color, width=2)
self.records[i*factor]=[line,records[i]]
self.numLines.append(line)
############################################################################
def _addToLog(self, record, operation):
"""Report creation of new record in the log window."""
self.log.insert('1.0', "%s record %s \n" %(operation, str(record)))
self.log.mark_set(INSERT, '0.0')
self.log.focus()
def _defineButtons(self, master, height=2):
"""Define the buttons and text box and position them"""
twoSine=Button(master, text="Sine Wave", fg="gray77", bg="chocolate1",\
command=self.sineWave)
noisySine=Button(master, text="Noisy Sine", fg="gray77", bg="chocolate1", command=self.sinePlusNoise)
save=Button(master, text="Save", fg="gray77", bg="chocolate1", command=self.saveFile)
refresh=Button(master, text="Clear", fg="gray77", bg="chocolate1", command=self.refreshCallback)
triangle=Button(master, text="Triangle", fg="gray77", bg="chocolate1", command=self.triangleWave)
sineComposite=Button(master, text="Sine Composite", fg="gray77", bg="chocolate1", command=self.sineCompositeWave)
sawTooth=Button(master, text="Saw Tooth", fg="gray77", bg="chocolate1", command=self.sawToothWave)
square=Button(master, text="Square Wave", fg="gray77", bg="chocolate1", command=self.squareWave)
self.slider=Scale(master, from_=1, to=12, orient=HORIZONTAL, resolution=0.1, bg='gray77', bd=4)
#Positioning buttons
refresh.grid(row=2, column=0, rowspan=1, sticky=E+W)
save.grid(row=3, column=0, rowspan=1, sticky=E+W)
noisySine.grid(row=4, column=0, rowspan=1, sticky=E+W)
sineComposite.grid(row=2, column=1, rowspan=1, sticky=E+W)
triangle.grid(row=3, column=1, rowspan=1, sticky=E+W)
sawTooth.grid(row=4, column=1, rowspan=1, sticky=E+W)
square.grid(row=5, column=0, rowspan=1, sticky=E+W)
twoSine.grid(row=5, column=1, rowspan=1, sticky=E+W)
self.slider.grid(row=6, column=0, columnspan=2, rowspan=1, sticky=E+W)
#Text box with scrollbar
frame = Frame(master, bd=1, relief=SUNKEN)
frame.grid(row=2, column=2, rowspan=6)
frame.grid_rowconfigure(0, pad=0, weight=1)
frame.grid_columnconfigure(0, pad=0,weight=1)
xscrollbar = Scrollbar(frame, orient=HORIZONTAL)
xscrollbar.grid(row=1, column=0, sticky=E+W)
yscrollbar = Scrollbar(frame)
yscrollbar.grid(row=0, column=1, sticky=N+S)
self.log=Text(frame, wrap=NONE, bd=0, xscrollcommand=xscrollbar.set, \
yscrollcommand=yscrollbar.set, bg="Black", fg="gray70", height=15, width=70)
self.log.grid(row=0, column=0, sticky=N+S, in_=frame)
xscrollbar.config(command=self.log.xview)
yscrollbar.config(command=self.log.yview)
############################################################################
def saveFile(self, path='output'):
"""Save the records to a file in numenta format."""
self.dg.saveRecords(path=path)
self.log.insert('1.0', "Saving %s records to file %s \n" \
%(str(len(self.records)), str(path+'.csv')))
#Note: The following function definitions will be ported to the distributions
#class in future versions
def sine_function(self,t, f):
return math.sin(t*f)
def triangle_function(self,t,f):
'''
T is our timestep
F changes the speed we get to an inversion point
'''
value = t * f
# Reduce our value range to 0 to 1
remainder = math.fmod(value, 1)
# Mulitply by 4 so we end up with both positive and negative values
q = remainder * 4
# Don't go over 1, invert if we do
if q > 1:
q = 2-q
# Don't go under -1, invert if we do
if q < -1:
rv = -2-q
else:
rv = q
return rv
def square_function(self,t,f):
if(f == 0): return 0
q = 0.5 - math.fmod(t*f,1)
return (0,1)[q > 0]
def sawtooth_function(self,t,f):
# Get our initial y value
value = t * f
# Make sure our values fall between .5 and 1
remainder = math.fmod(value + 0.5, 1)
# Make sure our values fall between 1 and 2
rv = remainder * 2.0
# Make sure our values fall between -1 and 1
rv = rv - 1.0
return rv
############################################################################
def callBacks(app):
    """Wire the canvas mouse events to the application's drawing handlers."""
    canvas = app.canvas
    canvas.bind("<Motion>", app.motionCallback)
    canvas.bind("<ButtonPress-1>", app.mousePressCallback)
    canvas.bind("<B1-Motion>", app.mousePressedMotionCallback)
    canvas.bind("<ButtonRelease-1>", app.buttonReleaseCallback)
# Build the Tk application, wire up the mouse-event handlers, and run the GUI.
root = Tk()
app = DataGeneratorApp(root)
callBacks(app)
root.mainloop()  # blocks until the window is closed
| 13,486 | Python | .py | 292 | 39.609589 | 117 | 0.645181 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,792 | testDiagnostics.py | numenta_nupic-legacy/examples/opf/tools/testDiagnostics.py | import numpy as np
def printMatrix(inputs, spOutput):
  ''' Plot the "difference matrix": cell (i, j) holds the number of input
  pairs whose encodings differ in i bits and whose activated coincidences
  differ in j cells.
  Parameters:
  --------------------------------------------------------------------
  inputs: the input encodings
  spOutput: the coincidences activated in response to each input
  '''
  from pylab import matplotlib as mat
  w = len(np.nonzero(inputs[0])[0])
  numActive = len(np.nonzero(spOutput[0])[0])
  matrix = np.zeros([2*w+1, 2*numActive+1])
  # Tally pairwise distances: each record against itself and every later one.
  for idx in range(len(inputs)):
    inputDists = [_hammingDistance(inputs[idx], other) for other in inputs[idx:]]
    cellDists = [_hammingDistance(spOutput[idx], other) for other in spOutput[idx:]]
    for inDist, cellDist in zip(inputDists, cellDists):
      matrix[inDist, cellDist] += 1
  # Lift small non-zero counts to at least 100 so they remain visible.
  for row in range(len(matrix)):
    matrix[row] = [max(10*count, 100) if (count < 100 and count > 0) else count
                   for count in matrix[row]]
  cdict = {'red': ((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.7),(1.0,1.0,1.0)),
           'green': ((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.0),(1.0,1.0,1.0)),
           'blue': ((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.0),(1.0,0.5,1.0))}
  my_cmap = mat.colors.LinearSegmentedColormap('my_colormap', cdict, 256)
  pyl = mat.pyplot
  pyl.matshow(matrix, cmap = my_cmap)
  pyl.colorbar()
  pyl.ylabel('Number of bits by which the inputs differ')
  pyl.xlabel('Number of cells by which input and output differ')
  pyl.title('The difference matrix')
  pyl.show()
def _hammingDistance(s1, s2):
"""Hamming distance between two numpy arrays s1 and s2"""
return sum(abs(s1-s2))
| 1,606 | Python | .py | 34 | 42.852941 | 99 | 0.615979 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,793 | sp_plotter.py | numenta_nupic-legacy/examples/opf/tools/sp_plotter.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import csv
import matplotlib
import numpy as np
import os
import sys
import time
from nupic.bindings.math import GetNTAReal
from nupic.algorithms.spatial_pooler import SpatialPooler
matplotlib.use('Agg')
import matplotlib.pyplot as plt
realDType = GetNTAReal()
def generatePlot(outputs, origData):
  """ Build a frequency table relating input distance to output distance.

  Cell (x, y) counts record pairs whose SP outputs differ by ~x% (of the 40
  active columns) and whose original encodings differ by ~y% (of the 42
  active input bits).  A few fixed cells are also set so the rendered plot
  carries its own color scale.
  """
  PLOT_PRECISION = 100
  distribMatrix = np.zeros((PLOT_PRECISION + 1, PLOT_PRECISION + 1))
  numRecords = len(outputs)
  for first in range(numRecords):
    for second in range(first + 1, numRecords):
      # Hamming-style distance between the two SP output vectors...
      outDiff = (abs(outputs[first] - outputs[second]) > 0.1)
      outDist = int(outDiff.sum() / 2 + 0.1)
      # ...and between the two original encodings.
      origDiff = (abs(origData[first] - origData[second]) > 0.1)
      origDist = int(origDiff.sum() / 2 + 0.1)
      if outDist < 2 and origDist > 10:
        print('Elements %d,%d has very small SP distance: %d' % (first, second, outDist))
        print('Input elements distance is %d' % origDist)
      x = int(PLOT_PRECISION * outDist / 40.0)
      y = int(PLOT_PRECISION * origDist / 42.0)
      if distribMatrix[x, y] < 0.1:
        distribMatrix[x, y] = 3
      elif distribMatrix[x, y] < 10:
        # Saturate at 10 so a few dense cells do not dominate the plot.
        distribMatrix[x, y] += 1

  # Fixed cells that act as a color scale when the table is drawn.
  distribMatrix[4, 50] = 3
  distribMatrix[4, 52] = 4
  distribMatrix[4, 54] = 5
  distribMatrix[4, 56] = 6
  distribMatrix[4, 58] = 7
  distribMatrix[4, 60] = 8
  distribMatrix[4, 62] = 9
  distribMatrix[4, 64] = 10

  return distribMatrix
def generateRandomInput(numRecords, elemSize = 400, numSet = 42):
  """ Generate a set of random binary input records.
  Params:
        numRecords - how many records to generate
        elemSize - the size of each record (num 0s or 1s)
        numSet - how many 1s in each record
  Returns: a list of numpy arrays, each with exactly numSet active bits
  """
  inputs = []
  for _ in range(numRecords):
    record = np.zeros(elemSize, dtype=realDType)
    # Keep switching on random bits until exactly numSet of them are active;
    # hitting an already-active bit simply retries.
    while abs(record.sum() - numSet) > 0.1:
      # np.random.random_integers (inclusive bounds) was deprecated and later
      # removed from NumPy; randint with an exclusive upper bound replaces it.
      ind = np.random.randint(0, elemSize, 1)[0]
      record[ind] = 1
    inputs.append(record)

  return inputs
def appendInputWithSimilarValues(inputs):
  """ For every record already in `inputs`, append a 'one-off' copy in which
  the first '10' bit pair (scanned left to right) is flipped to '01'.  The
  list is extended in place; records without such a pair get no copy.
  """
  originalCount = len(inputs)
  for idx in range(originalCount):
    record = inputs[idx]
    for pos in range(len(record) - 1):
      if record[pos] == 1 and record[pos + 1] == 0:
        shifted = copy.deepcopy(record)
        shifted[pos] = 0
        shifted[pos + 1] = 1
        inputs.append(shifted)
        break
def appendInputWithNSimilarValues(inputs, numNear = 10):
  """ For every record already in `inputs`, append up to `numNear` neighbours.

  Each neighbour is derived from the previous one by shifting a single '10'
  bit pair of the ORIGINAL record (scanned left to right, skipping the
  position just written) to '01', so successive neighbours drift gradually
  away from the original.  The list is extended in place.
  """
  originalCount = len(inputs)
  # NOTE: the skip flag deliberately carries over between records, matching
  # the original scanning behavior.
  skipNext = False
  for recordIdx in range(originalCount):
    record = inputs[recordIdx]
    shiftsDone = 0
    neighbour = copy.deepcopy(record)
    for bit in range(len(record) - 1):
      if skipNext:
        skipNext = False
        continue
      if record[bit] == 1 and record[bit + 1] == 0:
        neighbour[bit] = 0
        neighbour[bit + 1] = 1
        inputs.append(neighbour)
        neighbour = copy.deepcopy(neighbour)
        shiftsDone += 1
        skipNext = True
        if shiftsDone == numNear:
          break
def modifyBits(inputVal, maxChanges):
  """ Shift up to maxChanges randomly chosen active bits of inputVal one
  position to the left (a '01' pair becomes '10'), in place.

  The number of candidate changes is drawn uniformly from [0, maxChanges];
  candidates are addressed by their running index among the active bits
  (drawn from [0, 41], so records are assumed to have <= 42 active bits).
  Returns the (possibly modified) inputVal.
  """
  # np.random.random_integers (inclusive bounds) was removed from NumPy;
  # randint with an exclusive upper bound is the drop-in replacement.
  changes = np.random.randint(0, maxChanges + 1, 1)[0]
  if changes == 0:
    return inputVal

  inputWidth = len(inputVal)
  whatToChange = np.random.randint(0, 42, changes)
  runningIndex = -1        # index among the *active* bits seen so far
  numModsDone = 0
  for i in range(inputWidth):
    if numModsDone >= changes:
      break
    if inputVal[i] == 1:
      runningIndex += 1
      if runningIndex in whatToChange:
        # Previously this counter was never incremented, so the early-exit
        # check above could never fire.  At most `changes` distinct indices
        # can match, so counting matches only stops the scan sooner - the
        # resulting bits are unchanged.
        numModsDone += 1
        if i != 0 and inputVal[i-1] == 0:
          inputVal[i-1] = 1
          inputVal[i] = 0
  return inputVal
def getRandomWithMods(inputSpace, maxChanges):
  """ Return a deep copy of a randomly selected element of inputSpace, with
  up to maxChanges of its bits randomly shifted (see modifyBits).  With
  maxChanges == 0 the copy is returned unmodified.
  """
  size = len(inputSpace)
  # randint's exclusive upper bound replaces the removed
  # np.random.random_integers(0, size-1, 1).
  ind = np.random.randint(0, size, 1)[0]

  value = copy.deepcopy(inputSpace[ind])

  if maxChanges == 0:
    return value

  return modifyBits(value, maxChanges)
def testSP():
  """ Run a SP test: for a growing number of random records (plus near
  neighbours), train a SpatialPooler and plot the distribution of input
  distance vs. SP output distance via generatePlot().
  """
  elemSize = 400
  numSet = 42

  addNear = True
  numRecords = 2

  wantPlot = True

  poolPct = 0.5
  itr = 1
  doLearn = True

  while numRecords < 3:

    # Setup a SP
    sp = SpatialPooler(
           columnDimensions=(2048, 1),
           inputDimensions=(1, elemSize),
           potentialRadius=elemSize/2,
           numActiveColumnsPerInhArea=40,
           spVerbosity=0,
           stimulusThreshold=0,
           seed=1,
           potentialPct=poolPct,
           globalInhibition=True
           )

    # Generate inputs using rand()
    inputs = generateRandomInput(numRecords, elemSize, numSet)
    if addNear:
      # Append similar entries (distance of 1)
      appendInputWithNSimilarValues(inputs, 42)
    inputSize = len(inputs)
    print 'Num random records = %d, inputs to process %d' % (numRecords, inputSize)

    # Run a number of iterations, with learning on or off,
    # retrieve results from the last iteration only
    outputs = np.zeros((inputSize,2048))

    numIter = 1
    if doLearn:
      numIter = itr

    for iter in xrange(numIter):
      for i in xrange(inputSize):
        time.sleep(0.001)
        if iter == numIter - 1:
          # Final pass: keep this record's output for the plot below.
          # TODO: See https://github.com/numenta/nupic/issues/2072
          sp.compute(inputs[i], learn=doLearn, activeArray=outputs[i])
          #print outputs[i].sum(), outputs[i]
        else:
          # Earlier passes: compute only for its learning side effect; the
          # output buffer is deliberately thrown away.
          # TODO: See https://github.com/numenta/nupic/issues/2072
          output = np.zeros(2048)
          sp.compute(inputs[i], learn=doLearn, activeArray=output)

    # Build a plot from the generated input and output and display it
    distribMatrix = generatePlot(outputs, inputs)

    # If we don't want a plot, just continue
    if wantPlot:
      plt.imshow(distribMatrix, origin='lower', interpolation = "nearest")
      plt.ylabel('SP (2048/40) distance in %')
      plt.xlabel('Input (400/42) distance in %')

      title = 'SP distribution'
      if doLearn:
        # NOTE(review): 'leaning' looks like a typo for 'learning' in this
        # title string (left unchanged here).
        title += ', leaning ON'
      else:
        title += ', learning OFF'
      title += ', inputs = %d' % len(inputs)
      title += ', iterations = %d' % numIter
      title += ', poolPct =%f' % poolPct
      plt.suptitle(title, fontsize=12)
      plt.show()
      #plt.savefig(os.path.join('~/Desktop/ExperimentResults/videos5', '%s' % numRecords))
      #plt.clf()

    numRecords += 1

  return
def testSPNew():
  """ New version of the test: drive the SP with pairs of (possibly related)
  random inputs, alternating learning phases and testing phases according to
  `pattern` = [learn-phase length, test-phase length], and accumulate an
  input-distance vs. output-distance histogram during the test phases.
  """
  elemSize = 400
  numSet = 42

  addNear = True
  numRecords = 1000

  wantPlot = False

  poolPct = 0.5
  itr = 5

  pattern = [60, 1000]
  doLearn = True
  start = 1
  learnIter = 0
  noLearnIter = 0

  numLearns = 0
  numTests = 0

  numIter = 1
  numGroups = 1000

  PLOT_PRECISION = 100.0
  distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))

  inputs = generateRandomInput(numGroups, elemSize, numSet)

  # Setup a SP
  sp = SpatialPooler(
         columnDimensions=(2048, 1),
         inputDimensions=(1, elemSize),
         potentialRadius=elemSize/2,
         numActiveColumnsPerInhArea=40,
         spVerbosity=0,
         stimulusThreshold=0,
         synPermConnected=0.12,
         seed=1,
         potentialPct=poolPct,
         globalInhibition=True
         )

  cleanPlot = False

  for i in xrange(numRecords):
    input1 = getRandomWithMods(inputs, 4)
    if i % 2 == 0:
      input2 = getRandomWithMods(inputs, 4)
    else:
      input2 = input1.copy()
      input2 = modifyBits(input2, 21)

    inDist = (abs(input1-input2) > 0.1)
    intInDist = int(inDist.sum()/2+0.1)
    #print intInDist

    # Alternate between a learning phase (pattern[0] records) and a testing
    # phase (pattern[1] records).
    if start == 0:
      doLearn = True
      learnIter += 1
      if learnIter == pattern[start]:
        numLearns += 1
        start = 1
        noLearnIter = 0
    elif start == 1:
      doLearn = False
      noLearnIter += 1
      if noLearnIter == pattern[start]:
        numTests += 1
        start = 0
        learnIter = 0
        cleanPlot = True

    # FIX: these output buffers were previously referenced without ever being
    # defined, which raised a NameError on the first compute call.
    output1 = np.zeros(2048)
    output2 = np.zeros(2048)

    # TODO: See https://github.com/numenta/nupic/issues/2072
    sp.compute(input1, learn=doLearn, activeArray=output1)
    sp.compute(input2, learn=doLearn, activeArray=output2)
    time.sleep(0.001)

    outDist = (abs(output1-output2) > 0.1)
    intOutDist = int(outDist.sum()/2+0.1)

    if not doLearn and intOutDist < 2 and intInDist > 10:
      """
      sp.spVerbosity = 10
      # TODO: See https://github.com/numenta/nupic/issues/2072
      sp.compute(input1, learn=doLearn, activeArray=output1)
      sp.compute(input2, learn=doLearn, activeArray=output2)
      sp.spVerbosity = 0

      print 'Elements has very small SP distance: %d' % intOutDist
      print output1.nonzero()
      print output2.nonzero()
      print sp._firingBoostFactors[output1.nonzero()[0]]
      print sp._synPermBoostFactors[output1.nonzero()[0]]
      print 'Input elements distance is %d' % intInDist
      print input1.nonzero()
      print input2.nonzero()
      sys.stdin.readline()
      """

    if not doLearn:
      x = int(PLOT_PRECISION*intOutDist/40.0)
      y = int(PLOT_PRECISION*intInDist/42.0)
      if distribMatrix[x, y] < 0.1:
        distribMatrix[x, y] = 3
      else:
        if distribMatrix[x, y] < 10:
          distribMatrix[x, y] += 1

    #print i

    # If we don't want a plot, just continue
    if wantPlot and cleanPlot:
      plt.imshow(distribMatrix, origin='lower', interpolation = "nearest")
      plt.ylabel('SP (2048/40) distance in %')
      plt.xlabel('Input (400/42) distance in %')

      title = 'SP distribution'

      #if doLearn:
      #  title += ', leaning ON'
      #else:
      #  title += ', learning OFF'

      title += ', learn sets = %d' % numLearns
      title += ', test sets = %d' % numTests
      title += ', iter = %d' % numIter
      title += ', groups = %d' % numGroups
      title += ', Pct =%f' % poolPct

      plt.suptitle(title, fontsize=12)
      #plt.show()

      plt.savefig(os.path.join('~/Desktop/ExperimentResults/videosNew', '%s' % i))

      plt.clf()
      distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
      cleanPlot = False
def testSPFile():
""" Run test on the data file - the file has records previously encoded.
"""
spSize = 2048
spSet = 40
poolPct = 0.5
pattern = [50, 1000]
doLearn = True
PLOT_PRECISION = 100.0
distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
inputs = []
#file = open('~/Desktop/ExperimentResults/sampleArtificial.csv', 'rb')
#elemSize = 400
#numSet = 42
#file = open('~/Desktop/ExperimentResults/sampleDataBasilOneField.csv', 'rb')
#elemSize = 499
#numSet = 7
outdir = '~/Desktop/ExperimentResults/Basil100x21'
inputFile = outdir+'.csv'
file = open(inputFile, 'rb')
elemSize = 100
numSet = 21
reader = csv.reader(file)
for row in reader:
input = np.array(map(float, row), dtype=realDType)
if len(input.nonzero()[0]) != numSet:
continue
inputs.append(input.copy())
file.close()
# Setup a SP
sp = SpatialPooler(
columnDimensions=(spSize, 1),
inputDimensions=(1, elemSize),
potentialRadius=elemSize/2,
numActiveColumnsPerInhArea=spSet,
spVerbosity=0,
stimulusThreshold=0,
synPermConnected=0.10,
seed=1,
potentialPct=poolPct,
globalInhibition=True
)
cleanPlot = False
doLearn = False
print 'Finished reading file, inputs/outputs to process =', len(inputs)
size = len(inputs)
for iter in xrange(100):
print 'Iteration', iter
# Learn
if iter != 0:
for learnRecs in xrange(pattern[0]):
# TODO: See https://github.com/numenta/nupic/issues/2072
ind = np.random.random_integers(0, size-1, 1)[0]
sp.compute(inputs[ind], learn=True, activeArray=outputs[ind])
# Test
for _ in xrange(pattern[1]):
rand1 = np.random.random_integers(0, size-1, 1)[0]
rand2 = np.random.random_integers(0, size-1, 1)[0]
sp.compute(inputs[rand1], learn=False, activeArray=output1)
sp.compute(inputs[rand2], learn=False, activeArray=output2)
outDist = (abs(output1-output2) > 0.1)
intOutDist = int(outDist.sum()/2+0.1)
inDist = (abs(inputs[rand1]-inputs[rand2]) > 0.1)
intInDist = int(inDist.sum()/2+0.1)
if intInDist != numSet or intOutDist != spSet:
print rand1, rand2, '-', intInDist, intOutDist
x = int(PLOT_PRECISION*intOutDist/spSet)
y = int(PLOT_PRECISION*intInDist/numSet)
if distribMatrix[x, y] < 0.1:
distribMatrix[x, y] = 3
else:
if distribMatrix[x, y] < 10:
distribMatrix[x, y] += 1
if True:
plt.imshow(distribMatrix, origin='lower', interpolation = "nearest")
plt.ylabel('SP (%d/%d) distance in pct' % (spSize, spSet))
plt.xlabel('Input (%d/%d) distance in pct' % (elemSize, numSet))
title = 'SP distribution'
title += ', iter = %d' % iter
title += ', Pct =%f' % poolPct
plt.suptitle(title, fontsize=12)
#plt.savefig(os.path.join('~/Desktop/ExperimentResults/videosArtData', '%s' % iter))
plt.savefig(os.path.join(outdir, '%s' % iter))
plt.clf()
distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
if __name__ == '__main__':
  np.random.seed(83)  # fixed seed so runs are reproducible
  #testSP()
  #testSPNew()
  testSPFile()
| 14,845 | Python | .py | 431 | 28.540603 | 90 | 0.644214 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,794 | mirrorImageViz.py | numenta_nupic-legacy/examples/opf/tools/MirrorImageViz/mirrorImageViz.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Author: Surabhi Gupta
import sys
import numpy as np
import matplotlib.pylab as pyl
def analyzeOverlaps(activeCoincsFile, encodingsFile, dataset):
  '''Mirror Image Visualization: Shows the encoding space juxtaposed against the
  coincidence space. The encoding space is the bottom-up sensory encoding and
  the coincidence space depicts the corresponding activation of coincidences in
  the SP. Hence, the mirror image visualization is a visual depiction of the
  mapping of SP cells to the input representations.

  Note:
  * The files spBUOut and sensorBUOut are assumed to be in the output format
  used for LPF experiment outputs.
  * BU outputs for some sample datasets are provided. Specify the name of the
  dataset as an option while running this script.
  '''
  lines = activeCoincsFile.readlines()
  inputs = encodingsFile.readlines()

  w = len(inputs[0].split(' '))-1  # number of active bits per encoding row
  patterns = set([])
  encodings = set([])
  coincs = [] #The set of all coincidences that have won at least once
  reUsedCoincs = []

  firstLine = inputs[0].split(' ')
  size = int(firstLine.pop(0))
  spOutput = np.zeros((len(lines),40))
  inputBits = np.zeros((len(lines),w))
  print 'Total n:', size
  print 'Total number of records in the file:', len(lines), '\n'
  print 'w:', w

  count = 0
  for x in xrange(len(lines)):
    inputSpace = [] #Encoded representation for each input

    spBUout = [int(z) for z in lines[x].split(' ')]
    spBUout.pop(0) #The first element of each row of spBUOut is the size of the SP
    temp = set(spBUout)

    spOutput[x]=spBUout
    input = [int(z) for z in inputs[x].split(' ')]
    input.pop(0) #The first element of each row of sensorBUout is the size of the encoding space
    tempInput = set(input)
    inputBits[x]=input

    #Creating the encoding space
    for m in xrange(size):
      if m in tempInput:
        inputSpace.append(m)
      else:
        inputSpace.append('|') #A non-active bit

    repeatedBits = tempInput.intersection(encodings) #Storing the bits that have been previously active
    reUsed = temp.intersection(patterns) #Checking if any of the active cells have been previously active

    #Dividing the coincidences into two difference categories.
    if len(reUsed)==0:
      coincs.append((count,temp,repeatedBits,inputSpace, tempInput)) #Pattern no, active cells, repeated bits, encoding (full), encoding (summary)
    else:
      reUsedCoincs.append((count,temp,repeatedBits,inputSpace, tempInput))
    patterns=patterns.union(temp) #Adding the active cells to the set of coincs that have been active at least once

    encodings = encodings.union(tempInput)
    count +=1

  # NOTE(review): 'overlap' and 'overlapVal' are never used below - confirm
  # they can be removed.
  overlap = {}
  overlapVal = 0

  seen = []
  # Report overlaps of the unique coincidences first, then the reused ones.
  seen = (printOverlaps(coincs, coincs, seen))
  print len(seen), 'sets of 40 cells'

  seen = printOverlaps(reUsedCoincs, coincs, seen)

  # Total cell overlap between each unique coincidence and all reused ones.
  Summ=[]
  for z in coincs:
    c=0
    for y in reUsedCoincs:
      c += len(z[1].intersection(y[1]))
    Summ.append(c)
  print 'Sum: ', Summ

  # Draw up to three mirror-image figures over growing slices of the data.
  for m in xrange(3):
    displayLimit = min(51, len(spOutput[m*200:]))
    if displayLimit>0:
      drawFile(dataset, np.zeros([len(inputBits[:(m+1)*displayLimit]),len(inputBits[:(m+1)*displayLimit])]), inputBits[:(m+1)*displayLimit], spOutput[:(m+1)*displayLimit], w, m+1)
    else:
      print 'No more records to display'
  pyl.show()
def drawFile(dataset, matrix, patterns, cells, w, fnum):
'''The similarity of two patterns in the bit-encoding space is displayed alongside
their similarity in the sp-coinc space.'''
score=0
count = 0
assert len(patterns)==len(cells)
for p in xrange(len(patterns)-1):
matrix[p+1:,p] = [len(set(patterns[p]).intersection(set(q)))*100/w for q in patterns[p+1:]]
matrix[p,p+1:] = [len(set(cells[p]).intersection(set(r)))*5/2 for r in cells[p+1:]]
score += sum(abs(np.array(matrix[p+1:,p])-np.array(matrix[p,p+1:])))
count += len(matrix[p+1:,p])
print 'Score', score/count
fig = pyl.figure(figsize = (10,10), num = fnum)
pyl.matshow(matrix, fignum = fnum)
pyl.colorbar()
pyl.title('Coincidence Space', verticalalignment='top', fontsize=12)
pyl.xlabel('The Mirror Image Visualization for '+dataset, fontsize=17)
pyl.ylabel('Encoding space', fontsize=12)
def printOverlaps(comparedTo, coincs, seen):
  """ Print, for each pattern in `comparedTo`, its cell overlaps with every
  pattern in `coincs` and its closest previously-seen encodings/cell sets,
  then return the updated `seen` list (each entry is a tuple of
  (active cells, encoding summary, pattern number)).

  Parameters:
  --------------------------------------------------------------------
  coincs:     Which cells are we comparing?
  comparedTo: The set of 40 cells we being compared to (they have no overlap with seen)
  seen:       Which of the cells we are comparing to have already been
              encountered. This helps glue together the unique and reused
              coincs
  """
  inputOverlap = 0
  cellOverlap = 0
  for y in comparedTo:
    closestInputs = []
    closestCells = []
    if len(seen)>0:
      # Best overlap of this pattern's encoding / cells with anything seen so far.
      inputOverlap = max([len(seen[m][1].intersection(y[4])) for m in xrange(len(seen))])
      cellOverlap = max([len(seen[m][0].intersection(y[1])) for m in xrange(len(seen))])
      for m in xrange( len(seen) ):
        if len(seen[m][1].intersection(y[4]))==inputOverlap:
          closestInputs.append(seen[m][2])
        if len(seen[m][0].intersection(y[1]))==cellOverlap:
          closestCells.append(seen[m][2])
    seen.append((y[1], y[4], y[0]))

    print 'Pattern',y[0]+1,':',' '.join(str(len(z[1].intersection(y[1]))).rjust(2) for z in coincs),'input overlap:', inputOverlap, ';', len(closestInputs), 'closest encodings:',','.join(str(m+1) for m in closestInputs).ljust(15), \
        'cell overlap:', cellOverlap, ';', len(closestCells), 'closest set(s):',','.join(str(m+1) for m in closestCells)

  return seen
if __name__=='__main__':
  if len(sys.argv)<2: # a dataset name is required; print a usage hint otherwise
    print ('Input files required. Read documentation for details.')
  else:
    dataset = sys.argv[1]
    activeCoincsPath = dataset+'/'+dataset+'_spBUOut.txt'
    encodingsPath = dataset+'/'+dataset+'_sensorBUOut.txt'
    activeCoincsFile=open(activeCoincsPath, 'r')
    encodingsFile=open(encodingsPath, 'r')
    analyzeOverlaps(activeCoincsFile, encodingsFile, dataset)
| 7,221 | Python | .py | 151 | 43.245033 | 232 | 0.679329 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,795 | make_datasets.py | numenta_nupic-legacy/examples/opf/experiments/spatial_classification/make_datasets.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate artificial datasets
"""
import os
import random
from optparse import OptionParser
from nupic.data.file_record_stream import FileRecordStream
def _generateCategory(filename="simple.csv", numSequences=2, elementsPerSeq=1,
                      numRepeats=10):
  """ Generate a simple dataset of non-overlapping category sequences.

  Parameters:
  ----------------------------------------------------
  filename:       name of the file to produce, including extension.  It is
                  created in a 'datasets' sub-directory next to this script.
  numSequences:   how many sequences to generate
  elementsPerSeq: length of each sequence
  numRepeats:     how many times to repeat each sequence in the output
  """
  # Resolve the output path and announce it.
  scriptDir = os.path.dirname(__file__)
  pathname = os.path.join(scriptDir, 'datasets', filename)
  print("Creating %s..." % (pathname))

  fields = [('classification', 'string', ''),
            ('field1', 'string', '')]
  outFile = FileRecordStream(pathname, write=True, fields=fields)

  # Non-overlapping runs of consecutive integers, one per sequence.
  sequences = [list(range(seqNum * elementsPerSeq, (seqNum + 1) * elementsPerSeq))
               for seqNum in range(numSequences)]

  # Emit every sequence numRepeats times, in globally shuffled order.
  seqIdxs = []
  for _ in range(numRepeats):
    seqIdxs += range(numSequences)
  random.shuffle(seqIdxs)

  for seqIdx in seqIdxs:
    for element in sequences[seqIdx]:
      outFile.appendRecord([str(seqIdx), str(element)])

  outFile.close()
def _generateScalar(filename="simple.csv", numSequences=2, elementsPerSeq=1,
                    numRepeats=10, stepSize=0.1, includeRandom=False):
  """Generate a simple dataset of non-overlapping scalar sequences.

  Sequence i consists of the values [i*elementsPerSeq, (i+1)*elementsPerSeq)
  scaled by stepSize. The sequences are written out in random order, one
  element per row, with the sequence index as the classification field and,
  optionally, an extra column of uniform random noise.

  Parameters:
  ----------------------------------------------------
  filename:       name of the file to produce, including extension. It will
                  be created in a 'datasets' sub-directory within the
                  directory containing this script.
  numSequences:   how many sequences to generate
  elementsPerSeq: length of each sequence
  numRepeats:     how many times to repeat each sequence in the output
  stepSize:       how far apart each scalar is
  includeRandom:  if true, include another random field
  """
  # Create the output file
  scriptDir = os.path.dirname(__file__)
  pathname = os.path.join(scriptDir, 'datasets', filename)
  # Parenthesized form works as a statement on Python 2 and a call on Python 3
  print("Creating %s..." % (pathname))
  fields = [('classification', 'float', ''),
            ('field1', 'float', '')]
  if includeRandom:
    fields += [('randomData', 'float', '')]
  outFile = FileRecordStream(pathname, write=True, fields=fields)

  # Create the sequences
  sequences = []
  for i in range(numSequences):
    sequences.append(list(range(i * elementsPerSeq, (i + 1) * elementsPerSeq)))

  # Seed *before* the shuffle and before any random.random() calls so the
  # generated file is identical from run to run.
  random.seed(42)

  # Build the randomized play order: each sequence index appears numRepeats
  # times, then the whole list is shuffled.
  seqIdxs = []
  for _ in range(numRepeats):
    seqIdxs += list(range(numSequences))
  random.shuffle(seqIdxs)

  # Ensure the stream is closed even if a write fails (the original leaked
  # the handle on error).
  try:
    for seqIdx in seqIdxs:
      for x in sequences[seqIdx]:
        # NOTE: values are written as numbers (not str()-converted as in
        # _generateCategory) to match the declared 'float' field types.
        if includeRandom:
          outFile.appendRecord([seqIdx, x * stepSize, random.random()])
        else:
          outFile.appendRecord([seqIdx, x * stepSize])
  finally:
    outFile.close()
if __name__ == '__main__':
  helpString = \
  """%prog [options] <datasetName>
Generate artifical datasets for testing classification """

  # --------------------------------------------------------------------
  # Parse the command line (this script takes options only, no arguments)
  parser = OptionParser(helpString)
  parser.add_option(
      "--verbosity", default=0, type="int",
      help="Verbosity level, either 0, 1, 2, or 3 [default: %default].")
  options, args = parser.parse_args()
  if args:
    parser.error("No arguments accepted")

  # Seed the RNG so the generated datasets are reproducible
  random.seed(42)

  # Make sure the output directory exists before writing into it
  datasetsDir = os.path.join(os.path.dirname(__file__), 'datasets')
  if not os.path.exists(datasetsDir):
    os.mkdir(datasetsDir)

  # Category-valued datasets: a small and a large number of sequences
  _generateCategory('category_0.csv', numSequences=2, elementsPerSeq=1,
                    numRepeats=20)
  _generateCategory('category_1.csv', numSequences=50, elementsPerSeq=1,
                    numRepeats=20)

  # Scalar-valued datasets; the second one adds a noise column
  _generateScalar('scalar_0.csv', numSequences=2, elementsPerSeq=1,
                  numRepeats=20, stepSize=0.1)
  _generateScalar('scalar_1.csv', numSequences=50, elementsPerSeq=1,
                  numRepeats=20, stepSize=0.1, includeRandom=True)
| 5,721 | Python | .py | 133 | 37.541353 | 80 | 0.662882 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,796 | run_exp_generator.py | numenta_nupic-legacy/examples/opf/experiments/spatial_classification/run_exp_generator.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Run ExpGenerator to generate the description and permutations file for a
spatial classification experiment
"""
import os
import json
from optparse import OptionParser
from nupic.swarming.exp_generator.experiment_generator import expGenerator
if __name__ == '__main__':
  helpString = \
  """%prog [options] searchDef
This script is used to create the description.py and permutations.py files
for an experiment. The searchDef argument should be the name of a python
script with a getSearch() method which returns the search definition as a
dict. The schema for this dict can be found at
py/nupic/swarming/exp_generator/experimentDescriptionSchema.json
"""

  # ==========================================================================
  # Process command line arguments
  parser = OptionParser(helpString)
  parser.add_option("--verbosity", default=0, type="int",
        help="Verbosity level, either 0, 1, 2, or 3 [default: %default].")
  parser.add_option("--outDir", dest='outDir', default=None,
        help="Where to place generated files. Default is in the same directory"
        " as the searchDef script.")
  (options, args) = parser.parse_args()

  # Must provide the name of a script
  if len(args) != 1:
    parser.error("Missing required 'searchDef' argument")
  searchFileName = args[0]

  # ------------------------------------------------------------------------
  # Read in the search script and get the search definition.
  # Named 'scope' (not 'vars') so we don't shadow the builtin vars();
  # 'with' guarantees the file is closed even if the script raises, and
  # exec'ing the file's *text* works on both Python 2 and 3 (exec'ing the
  # open file object directly is Python-2-only).
  scope = {}
  with open(searchFileName) as searchFile:
    exec(searchFile.read(), scope)

  getSearchFunc = scope.get('getSearch', None)
  if getSearchFunc is None:
    # BUG FIX: the original message had an unfilled %s placeholder — supply
    # the script name it was meant to reference.
    raise RuntimeError("Error: the %s python script does not provide the "
                       "required getSearch() method" % (searchFileName,))

  searchDef = getSearchFunc(os.path.dirname(__file__))
  if not isinstance(searchDef, dict):
    raise RuntimeError("The searchDef function should return a dict, but it "
                       "returned %s" % (str(searchDef)))

  # ------------------------------------------------------------------------
  # Figure out the output directory if not provided
  if options.outDir is None:
    options.outDir = os.path.dirname(searchFileName)

  # ------------------------------------------------------------------------
  # Run through expGenerator
  expGenArgs = ['--description=%s' % (json.dumps(searchDef)),
                '--version=v2',
                '--outDir=%s' % (options.outDir)]
  print("Running ExpGenerator with the following arguments: %s" % (expGenArgs,))
  expGenerator(expGenArgs)

  # Get the permutations file name
  permutationsFilename = os.path.join(options.outDir, 'permutations.py')
  print("Successfully generated permutations file: %s" % (permutationsFilename))
| 3,744 | Python | .py | 78 | 44.025641 | 80 | 0.647464 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,797 | searchDef.py | numenta_nupic-legacy/examples/opf/experiments/spatial_classification/auto_generated/searchDef.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
def getSearch(rootDir):
  """Return the search description for this experiment.

  See the following file for the schema of the dictionary this method
  returns:
  py/nupic/swarming/exp_generator/experimentDescriptionSchema.json

  The streamDef element defines the stream for this model. The schema for
  this element can be found at:
  py/nupicengine/cluster/database/StreamDef.json
  """
  # Absolute path to the input CSV, relative to the experiment root
  csvPath = os.path.abspath(os.path.join(rootDir, 'datasets', 'scalar_1.csv'))

  # Stream definition pointing at that single file
  streamDef = {
      'version': 1,
      'info': "testSpatialClassification",
      'streams': [
          {'source': "file://%s" % (csvPath),
           'info': "scalar_1.csv",
           'columns': ["*"]},
      ],
  }

  # The three fields the swarm may draw on
  includedFields = [
      {"fieldName": "field1", "fieldType": "float"},
      {"fieldName": "classification", "fieldType": "string"},
      {"fieldName": "randomData", "fieldType": "float"},
  ]

  # Assemble and return the experiment description
  return {
      "environment": 'nupic',
      "inferenceArgs": {
          "predictedField": "classification",
          "predictionSteps": [0],
      },
      "inferenceType": "MultiStep",
      "streamDef": streamDef,
      "includedFields": includedFields,
      "iterationCount": -1,
  }
| 2,277 | Python | .py | 64 | 31.046875 | 79 | 0.647301 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,798 | description.py | numenta_nupic-legacy/examples/opf/experiments/spatial_classification/category_1/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
# Sub-experiment overrides, merged into the shared base description below.
config = {
    'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                           '../datasets/category_1.csv'),
    'errorMetric': 'avg_err',
    'modelParams': {
        'sensorParams': {'verbosity': 0},
        'clParams': {'verbosity': 0},
    },
}

# Apply the overrides to the base experiment and re-export its symbols as
# this module's globals (at module level, locals() is the module dict).
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 1,550 | Python | .py | 38 | 37.894737 | 78 | 0.658925 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,799 | description.py | numenta_nupic-legacy/examples/opf/experiments/spatial_classification/scalar_1/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
# Sub-experiment overrides, merged into the shared base description below.
# (u'' literals kept so field names stay unicode under Python 2.)
config = {
    'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                           '../datasets/scalar_1.csv'),
    'errorMetric': 'aae',
    'modelParams': {
        'sensorParams': {
            'verbosity': 0,
            'encoders': {
                'field1': {
                    'clipInput': True,
                    'fieldname': u'field1',
                    'maxval': 5.0,
                    'minval': 0.0,
                    'n': 600,
                    'name': u'field1',
                    'type': 'ScalarEncoder',
                    'w': 21,
                },
                'classification': {
                    'classifierOnly': True,
                    'clipInput': True,
                    'fieldname': u'classification',
                    'maxval': 50.0,
                    'minval': 0.0,
                    'n': 600,
                    'name': u'classification',
                    'type': 'ScalarEncoder',
                    'w': 21,
                },
            },
        },
        'clParams': {'verbosity': 0},
    },
}

# Apply the overrides to the base experiment and re-export its symbols as
# this module's globals (at module level, locals() is the module dict).
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| 2,138 | Python | .py | 63 | 28.206349 | 78 | 0.585024 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |