Search is not available for this dataset
text
stringlengths
75
104k
def calc_humidity(temp, dewpoint):
    '''
    Return the relative humidity computed from air temperature and
    dewpoint (both in degrees F), using the approximation published by
    weatherwise.org.
    '''
    t_c = fahrenheit_to_celsius(temp)
    td_c = fahrenheit_to_celsius(dewpoint)
    ratio = (112 - (0.1 * t_c) + td_c) / (112 + (0.9 * t_c))
    return math.pow(ratio, 8)
def calc_dewpoint(temp, hum):
    '''
    Return the dewpoint in degrees F computed from air temperature
    (degrees F) and relative humidity, using the formula from
    weatherwise.org.
    '''
    c = fahrenheit_to_celsius(temp)
    x = 1 - 0.01 * hum
    # polynomial approximation of the dewpoint depression in degrees C
    depression = ((14.55 + 0.114 * c) * x
                  + ((2.5 + 0.007 * c) * x) ** 3
                  + (15.9 + 0.117 * c) * x ** 14)
    return celsius_to_fahrenheit(c - depression)
def publish(self):
    '''Perform HTTP session to transmit defined weather values.'''
    # delegate the actual upload to the shared _publish helper
    return self._publish(self.args, self.server, self.URI)
def get(data):
    '''return CRC calc value from raw serial data'''
    crc = 0
    # table-driven CRC-16 (CCITT), one byte at a time
    for octet in array('B', data):
        table_val = VProCRC.CRC_TABLE[(crc >> 8) ^ octet]
        crc = table_val ^ ((crc & 0xFF) << 8)
    return crc
def verify(data):
    '''
    Perform CRC check on raw serial data; return True if valid.
    A valid CRC == 0.
    '''
    if len(data) == 0:
        return False
    crc = VProCRC.get(data)
    if crc:
        log.info("CRC Bad")
        return False
    log.debug("CRC OK")
    return True
def _unpack_storm_date(date): ''' given a packed storm date field, unpack and return 'YYYY-MM-DD' string. ''' year = (date & 0x7f) + 2000 # 7 bits day = (date >> 7) & 0x01f # 5 bits month = (date >> 12) & 0x0f # 4 bits return "%s-%s-%s" % (year, month, day)
def _use_rev_b_archive(self, records, offset):
    '''return True if weather station returns Rev.B archives'''
    # short-circuit when the revision has already been determined
    if isinstance(self._ARCHIVE_REV_B, bool):
        return self._ARCHIVE_REV_B
    # otherwise assume rev. B and inspect the 'RecType' field
    data = ArchiveBStruct.unpack_from(records, offset)
    if data['RecType'] == 0:
        log.info('detected archive rev. B')
        self._ARCHIVE_REV_B = True
    else:
        log.info('detected archive rev. A')
        self._ARCHIVE_REV_B = False
    return self._ARCHIVE_REV_B
def _wakeup(self):
    '''
    issue wakeup command to device to take out of standby mode.

    Raises NoDeviceException when the console does not acknowledge
    after three attempts.
    '''
    log.info("send: WAKEUP")
    # the console may need several newlines before it answers
    for i in xrange(3):
        self.port.write('\n')  # wakeup device
        ack = self.port.read(len(self.WAKE_ACK))  # read wakeup string
        log_raw('read', ack)
        if ack == self.WAKE_ACK:
            return
    raise NoDeviceException('Can not access weather station')
def _cmd(self, cmd, *args, **kw):
    '''
    write a single command, with variable number of arguments. after
    the command, the device must return ACK

    Keyword arg ``ok=True`` means the command replies with the 'OK'
    string instead of a single ACK byte.  Raises NoDeviceException
    after three failed attempts.
    '''
    ok = kw.setdefault('ok', False)
    self._wakeup()
    if args:
        # append space-separated arguments to the command string
        cmd = "%s %s" % (cmd, ' '.join(str(a) for a in args))
    # retry the command up to three times before giving up
    for i in xrange(3):
        log.info("send: " + cmd)
        self.port.write(cmd + '\n')
        if ok:
            ack = self.port.read(len(self.OK))  # read OK
            log_raw('read', ack)
            if ack == self.OK:
                return
        else:
            ack = self.port.read(len(self.ACK))  # read ACK
            log_raw('read', ack)
            if ack == self.ACK:
                return
    raise NoDeviceException('Can not access weather station')
def _loop_cmd(self):
    '''
    Read one raw LOOP packet from the device. All reads are
    non-blocking.
    '''
    self._cmd('LOOP', 1)
    packet = self.port.read(LoopStruct.size)
    log_raw('read', packet)
    return packet
def _dmpaft_cmd(self, time_fields):
    '''
    issue a command to read the archive records after a known time
    stamp.

    Returns a list of unpacked archive records, or None when any step
    of the DMPAFT handshake fails (bad ACK or bad CRC).
    '''
    records = []
    # convert time stamp fields to buffer
    tbuf = struct.pack('2H', *time_fields)
    # 1. send 'DMPAFT' cmd
    self._cmd('DMPAFT')
    # 2. send time stamp + crc
    crc = VProCRC.get(tbuf)
    crc = struct.pack('>H', crc)  # crc in big-endian format
    log_raw('send', tbuf + crc)
    self.port.write(tbuf + crc)  # send time stamp + crc
    ack = self.port.read(len(self.ACK))  # read ACK
    log_raw('read', ack)
    if ack != self.ACK:
        return  # if bad ack, return
    # 3. read pre-amble data
    raw = self.port.read(DmpStruct.size)
    log_raw('read', raw)
    if not VProCRC.verify(raw):  # check CRC value
        log_raw('send ESC', self.ESC)
        self.port.write(self.ESC)  # if bad, escape and abort
        return
    log_raw('send ACK', self.ACK)
    self.port.write(self.ACK)  # send ACK
    # 4. loop through all page records
    dmp = DmpStruct.unpack(raw)
    log.info('reading %d pages, start offset %d' %
             (dmp['Pages'], dmp['Offset']))
    for i in xrange(dmp['Pages']):
        # 5. read page data
        raw = self.port.read(DmpPageStruct.size)
        log_raw('read', raw)
        if not VProCRC.verify(raw):  # check CRC value
            log_raw('send ESC', self.ESC)
            self.port.write(self.ESC)  # if bad, escape and abort
            return
        log_raw('send ACK', self.ACK)
        self.port.write(self.ACK)  # send ACK
        # 6. loop through archive records
        page = DmpPageStruct.unpack(raw)
        offset = 0  # assume offset at 0
        # the first page may start mid-page at the device-given offset
        if i == 0:
            offset = dmp['Offset'] * ArchiveAStruct.size
        # each page holds 5 records (rev. A and B records are the same
        # size, so the rev. A size is used for stepping in both cases)
        while offset < ArchiveAStruct.size * 5:
            log.info('page %d, reading record at offset %d' %
                     (page['Index'], offset))
            if self._use_rev_b_archive(page['Records'], offset):
                a = ArchiveBStruct.unpack_from(page['Records'], offset)
            else:
                a = ArchiveAStruct.unpack_from(page['Records'], offset)
            # 7. verify that record has valid data, and store
            if a['DateStamp'] != 0xffff and a['TimeStamp'] != 0xffff:
                records.append(a)
            offset += ArchiveAStruct.size
    log.info('read all pages')
    return records
def _get_new_archive_fields(self):
    '''
    Return a dictionary of fields from the newest archive record in
    the device; return None when no records are new.
    '''
    records = None
    # retry the archive dump up to three times
    for attempt in xrange(3):
        records = self._dmpaft_cmd(self._archive_time)
        if records is not None:
            break
        time.sleep(1)
    if records is None:
        raise NoDeviceException('Can not access weather station')
    # scan for the record with the newest (DateStamp, TimeStamp) pair,
    # advancing the stored archive time as we go
    newest = None
    for rec in records:
        stamp = (rec['DateStamp'], rec['TimeStamp'])
        if self._archive_time < stamp:
            self._archive_time = stamp
            newest = rec
    return newest
def _calc_derived_fields(self, fields):
    '''
    calculates the derived fields (those fields that are calculated)
    '''
    # convenience variables for the calculations below
    temp = fields['TempOut']
    hum = fields['HumOut']
    wind = fields['WindSpeed']
    wind10min = fields['WindSpeed10Min']
    fields['HeatIndex'] = calc_heat_index(temp, hum)
    fields['WindChill'] = calc_wind_chill(temp, wind, wind10min)
    fields['DewPoint'] = calc_dewpoint(temp, hum)
    # stamp the record with local time ...
    local = time.localtime()
    fields['DateStamp'] = time.strftime("%Y-%m-%d %H:%M:%S", local)
    fields['Year'] = local[0]
    fields['Month'] = str(local[1]).zfill(2)
    # ... and with UTC
    utc = time.gmtime()
    fields['DateStampUtc'] = time.strftime("%Y-%m-%d %H:%M:%S", utc)
    fields['YearUtc'] = utc[0]
    fields['MonthUtc'] = str(utc[1]).zfill(2)
def parse(self):
    '''
    Read and parse one set of data from the console; afterwards the
    parsed values are available in the ``fields`` attribute.
    '''
    data = self._get_loop_fields()
    data['Archive'] = self._get_new_archive_fields()
    self._calc_derived_fields(data)
    # publish the completed record
    self.fields = data
def unpack_from(self, buf, offset=0):
    '''
    Unpack data from 'buf' and return a dict of named fields. The
    fields can be post-processed by extending the _post_unpack()
    method.
    '''
    values = super(Struct, self).unpack_from(buf, offset)
    named = dict(zip(self.fields, values))
    return self._post_unpack(named)
def weather_update(station, pub_sites, interval):
    '''
    main execution loop. query weather data and post to online service.
    '''
    station.parse()  # read weather data
    # sanity check weather data before publishing anywhere
    if station.fields['TempOut'] > 200:
        raise NoSensorException(
            'Out of range temperature value: %.1f, check sensors' %
            (station.fields['TempOut'],))
    gust, gust_dir = WindGust.get(station, interval)
    # upload data in the following order:
    for ps in pub_sites:
        # try block necessary to attempt every publisher even if one
        # of them fails
        try:
            ps.set(
                pressure=station.fields['Pressure'],
                dewpoint=station.fields['DewPoint'],
                humidity=station.fields['HumOut'],
                tempf=station.fields['TempOut'],
                rainin=station.fields['RainRate'],
                rainday=station.fields['RainDay'],
                dateutc=station.fields['DateStampUtc'],
                windspeed=station.fields['WindSpeed10Min'],
                winddir=station.fields['WindDir'],
                windgust=gust,
                windgustdir=gust_dir,
            )
            ps.publish()
        except (Exception) as e:
            log.warn('publisher %s: %s' % (ps.__class__.__name__, e))
def init_log(quiet, debug):
    '''setup system logging to desired verbosity.'''
    from logging.handlers import SysLogHandler
    fmt = logging.Formatter(
        os.path.basename(sys.argv[0]) +
        ".%(name)s %(levelname)s - %(message)s")
    # always log to the system daemon facility
    syslog = SysLogHandler(address='/dev/log',
                           facility=SysLogHandler.LOG_DAEMON)
    syslog.setFormatter(fmt)
    log.addHandler(syslog)
    # mirror to the console unless silenced
    if not quiet:
        console = logging.StreamHandler()
        console.setFormatter(fmt)
        log.addHandler(console)
    log.setLevel(logging.DEBUG if debug else logging.INFO)
def get_pub_services(opts):
    '''
    use values in opts data to generate instances of publication
    services.
    '''
    sites = []
    for key, args in vars(opts).items():
        if key in PUB_SERVICES and args:
            factory = PUB_SERVICES[key]
            # multi-argument options arrive as tuples
            if isinstance(args, tuple):
                sites.append(factory(*args))
            else:
                sites.append(factory(args))
    return sites
def get_options(parser): ''' read command line options to configure program behavior. ''' # station services # publication services pub_g = optparse.OptionGroup( parser, "Publication Services", '''One or more publication service must be specified to enable upload of weather data.''', ) pub_g.add_option('-w', '--wundergound', nargs=2, type='string', dest='wug', help='Weather Underground service; WUG=[SID(station ID), PASSWORD]') pub_g.add_option('-p', '--pws', nargs=2, type='string', dest='pws', help='PWS service; PWS=[SID(station ID), PASSWORD]') pub_g.add_option('-f', '--file', nargs=1, type='string', dest='file', help='Local file; FILE=[FILE_NAME]') parser.add_option_group(pub_g) parser.add_option('-d', '--debug', dest='debug', action="store_true", default=False, help='enable verbose debug logging') parser.add_option('-q', '--quiet', dest='quiet', action="store_true", default=False, help='disable all console logging') parser.add_option('-t', '--tty', dest='tty', default='/dev/ttyS0', help='set serial port device [/dev/ttyS0]') parser.add_option('-n', '--interval', dest='interval', default=60, type='int', help='polling/update interval in seconds [60]') return parser.parse_args()
def get(self, station, interval):
    '''
    return gust data, if above threshold value and current time is
    inside reporting window period
    '''
    rec = station.fields['Archive']
    # process new data
    if rec:
        threshold = station.fields['WindSpeed10Min'] + GUST_MPH_MIN
        if rec['WindHi'] >= threshold:
            self.value = (rec['WindHi'], rec['WindHiDir'])
            # number of polls the gust remains reportable
            # NOTE(review): '/' yields a float on Python 3; confirm
            # integer division is not required here
            self.count = GUST_TTL * 60 / interval
        else:
            self.value = self.NO_VALUE
    # return gust value, if remaining time is left, and valid
    if self.count:
        self.count -= 1
    else:
        self.value = self.NO_VALUE
    log.debug('wind gust of {0} mph from {1}'.format(*self.value))
    return self.value
def set( self, pressure='NA', dewpoint='NA', humidity='NA', tempf='NA', rainin='NA', rainday='NA', dateutc='NA', windgust='NA', windgustdir='NA', windspeed='NA', winddir='NA', clouds='NA', weather='NA', *args, **kw): ''' Useful for defining weather data published to the server. Parameters not set will be reset and not sent to server. Unknown keyword args will be silently ignored, so be careful. This is necessary for publishers that support more fields than others. ''' # see: http://wiki.wunderground.com/index.php/PWS_-_Upload_Protocol # unused, but valid, parameters are: # windspdmph_avg2m, winddir_avg2m, windgustmph_10m, windgusdir_10m # soiltempf, soilmoisture, leafwetness, solarradiation, UV # indoortempf, indoorhumidity self.args.update( { 'baromin':pressure, 'clouds':clouds, 'dailyrainin':rainday, 'dateutc':dateutc, 'dewptf':dewpoint, 'humidity':humidity, 'rainin':rainin, 'tempf':tempf, 'weather':weather, 'winddir':winddir, 'windgustdir':windgustdir, 'windgustmph':windgust, 'windspeedmph':windspeed, } ) log.debug( self.args )
def set(self, **kw):
    '''Store keyword args to be written to output file.'''
    self.args = kw
    log.debug(self.args)
def publish(self):
    '''Write output file.'''
    with open(self.file_name, 'w') as out:
        # one line per stored key, values appended by _append_vals
        for key, val in self.args.iteritems():
            line = StringIO.StringIO()
            line.write(key)
            self._append_vals(line, val)
            out.write(line.getvalue() + '\n')
            line.close()
def requires(*requirements, **opts):
    """
    Standalone decorator to apply requirements to routes, either
    function handlers or class based views::

        @requires(MyRequirement())
        def a_view():
            pass

        class AView(View):
            decorators = [requires(MyRequirement())]

    :param requirements: The requirements to apply to this route
    :param throws: Optional. Exception or exception instance to throw
        if authorization fails.
    :param on_fail: Optional. Value or function to use when
        authorization fails.
    :param identity: Optional. An identity to use in place of the
        currently loaded identity.
    """
    identity = opts.get("identity")
    on_fail = opts.get("on_fail")
    throws = opts.get("throws")

    def decorator(f):
        @wraps(f)
        def allower(*args, **kwargs):
            result = allows.run(
                requirements,
                identity=identity,
                on_fail=on_fail,
                throws=throws,
                f_args=args,
                f_kwargs=kwargs,
            )
            # authorization failed: the on_fail result (if any) becomes
            # the response instead of the wrapped view
            if result is not None:
                return result
            return f(*args, **kwargs)
        return allower
    return decorator
def guard_entire(requirements, identity=None, throws=None, on_fail=None):
    """
    Protect an entire blueprint with a set of requirements.

    Register the returned callable as a ``before_request`` handler on
    the blueprint::

        my_bp = Blueprint(__name__, 'namespace')
        my_bp.before_request(guard_entire([MustBeLoggedIn()]))

    Route handlers inside the blueprint may opt out with the
    :func:`~flask_allows.views.exempt_from_requirements` decorator.

    If ``on_fail`` returns a non-None result, that result is considered
    the return value of the routing.  ``on_fail`` also receives anything
    found in ``flask.request.view_args`` as keyword arguments.  The
    guard may be applied multiple times, e.g. to invoke different
    ``on_fail`` mechanisms for different conditions.

    :param requirements: An iterable of requirements to apply to every
        request routed to the blueprint.
    :param identity: Optional. The identity that should be used for
        fulfilling requirements on the blueprint level.
    :param throws: Optional. Exception or exception type to be thrown
        if authorization fails.
    :param on_fail: Optional. Value or function to use if authorization
        fails.

    .. versionadded: 0.7.0
    """
    def guarder():
        # skip routes explicitly exempted from blueprint requirements
        if not _should_run_requirements():
            return None
        return allows.run(
            requirements,
            identity=identity,
            on_fail=on_fail,
            throws=throws,
            f_kwargs=request.view_args,
        )
    return guarder
def wants_request(f):
    """
    Helper decorator for transitioning to user-only requirements, this
    aids in situations where the request may be marked optional and
    causes an incorrect flow into user-only requirements.

    This decorator causes the requirement to look like a user-only
    requirement but passes the current request context internally to
    the requirement.

    This decorator is intended only to assist during a transitionary
    phase and will be removed in flask-allows 1.0

    See: :issue:`20,27`
    """
    @wraps(f)
    def wrapper(user):
        # inject the flask request proxy as the second argument
        return f(user, request)
    return wrapper
def And(cls, *requirements):
    """
    Short cut helper to construct a combinator that uses
    :meth:`operator.and_` to reduce requirement results and stops
    evaluating on the first False.

    This is also exported at the module level as ``And``
    """
    # until=False: short-circuit as soon as a requirement fails
    return cls(*requirements, op=operator.and_, until=False)
def Or(cls, *requirements):
    """
    Short cut helper to construct a combinator that uses
    :meth:`operator.or_` to reduce requirement results and stops
    evaluating on the first True.

    This is also exported at the module level as ``Or``
    """
    # until=True: short-circuit as soon as a requirement succeeds
    return cls(*requirements, op=operator.or_, until=True)
def init_app(self, app):
    """
    Initializes the Flask-Allows object against the provided application
    """
    if not hasattr(app, "extensions"):  # pragma: no cover
        app.extensions = {}
    app.extensions["allows"] = self

    @app.before_request
    def start_context(*args, **kwargs):
        # push fresh override/additional contexts for every request
        self.overrides.push(Override())
        self.additional.push(Additional())

    @app.after_request
    def cleanup(response):
        # tear down everything pushed during the request
        self.clear_all_overrides()
        self.clear_all_additional()
        return response
def fulfill(self, requirements, identity=None):
    """
    Checks that the provided or current identity meets each requirement
    passed to this method.

    This method takes into account both additional and overridden
    requirements, with overridden requirements taking precedence::

        allows.additional.push(Additional(Has('foo')))
        allows.overrides.push(Override(Has('foo')))
        allows.fulfill([], user_without_foo)  # return True

    :param requirements: The requirements to check the identity against.
    :param identity: Optional. Identity to use in place of the current
        identity.
    """
    identity = identity or self._identity_loader()
    # prepend any additional requirements active in this context
    extra = self.additional.current
    if extra:
        to_check = chain(iter(extra), requirements)
    else:
        to_check = iter(requirements)
    # overridden requirements are skipped entirely
    overridden = self.overrides.current
    if overridden is not None:
        to_check = (req for req in to_check if req not in overridden)
    return all(_call_requirement(req, identity, request) for req in to_check)
def run(
    self,
    requirements,
    identity=None,
    throws=None,
    on_fail=None,
    f_args=(),
    f_kwargs=ImmutableDict(),  # noqa: B008
    use_on_fail_return=True,
):
    """
    Used to perform a full run of the requirements and the options
    given, this method will invoke on_fail and/or throw the appropriate
    exception type. Can be passed arguments to call on_fail with via
    f_args (which are passed positionally) and f_kwargs (which are
    passed as keyword).

    :param requirements: The requirements to check
    :param identity: Optional. A specific identity to use for the check
    :param throws: Optional. A specific exception to throw for this
        check
    :param on_fail: Optional. A callback to invoke after failure,
        alternatively a value to return when failure happens
    :param f_args: Positional arguments to pass to the on_fail callback
    :param f_kwargs: Keyword arguments to pass to the on_fail callback
    :param use_on_fail_return: Boolean (default True) flag to determine
        if the return value should be used. If true, the return value
        will be considered, else failure will always progress to
        exception raising.
    """
    # fall back to the instance-level defaults when not given
    throws = throws or self.throws
    on_fail = _make_callable(on_fail) if on_fail is not None else self.on_fail
    if not self.fulfill(requirements, identity):
        result = on_fail(*f_args, **f_kwargs)
        # a non-None on_fail result becomes the response ...
        if use_on_fail_return and result is not None:
            return result
        # ... otherwise escalate to an exception
        raise throws
def push(self, override, use_parent=False):
    """
    Binds an override to the current context, optionally use the
    current overrides in conjunction with this override

    If ``use_parent`` is true, a new override is created from the
    parent and child overrides rather than manipulating either
    directly.
    """
    parent = self.current
    if use_parent and parent:
        override = parent + override
    _override_ctx_stack.push((self, override))
def pop(self):
    """
    Pops the latest override context.

    If the override context was pushed by a different override manager,
    a ``RuntimeError`` is raised.
    """
    popped = _override_ctx_stack.pop()
    if popped is None or popped[0] is not self:
        raise RuntimeError(
            "popped wrong override context ({} instead of {})".format(popped, self)
        )
def override(self, override, use_parent=False):
    """
    Allows temporarily pushing an override context, yields the new
    context into the following block.

    The pushed context is popped in a ``finally`` clause, so an
    exception raised inside the guarded block no longer leaves a stale
    override on the stack (the original skipped ``pop`` on error).
    """
    self.push(override, use_parent)
    try:
        yield self.current
    finally:
        self.pop()
def push(self, additional, use_parent=False):
    """
    Binds an additional to the current context, optionally use the
    current additionals in conjunction with this additional

    If ``use_parent`` is true, a new additional is created from the
    parent and child additionals rather than manipulating either
    directly.
    """
    parent = self.current
    if use_parent and parent:
        additional = parent + additional
    _additional_ctx_stack.push((self, additional))
def pop(self):
    """
    Pops the latest additional context.

    If the additional context was pushed by a different additional
    manager, a ``RuntimeError`` is raised.
    """
    popped = _additional_ctx_stack.pop()
    if popped is None or popped[0] is not self:
        raise RuntimeError(
            "popped wrong additional context ({} instead of {})".format(popped, self)
        )
def additional(self, additional, use_parent=False):
    """
    Allows temporarily pushing an additional context, yields the new
    context into the following block.

    The pushed context is popped in a ``finally`` clause, so an
    exception raised inside the guarded block no longer leaves a stale
    additional on the stack (the original skipped ``pop`` on error).
    """
    self.push(additional, use_parent)
    try:
        yield self.current
    finally:
        self.pop()
def unduplicate_field_names(field_names):
    """Append a number to duplicate field names to make them unique."""
    seen = []
    for name in field_names:
        if name in seen:
            # find the smallest numeric suffix not yet taken
            suffix = 1
            while '{}_{}'.format(name, suffix) in seen:
                suffix += 1
            name = '{}_{}'.format(name, suffix)
        seen.append(name)
    return seen
def interpret_stats(results):
    """Generates the string to be shown as updates after the execution
    of a Cypher query

    :param results: ``ResultSet`` with the raw results of the execution
        of the Cypher query
    """
    stats = results.stats
    contains_updates = stats.pop("contains_updates", False) if stats else False
    if not contains_updates:
        return '{} rows affected.'.format(len(results))
    # one line per non-zero counter, e.g. "2 nodes created."
    lines = ['']
    for stat, value in stats.items():
        if value:
            lines.append("{} {}.".format(value, stat.replace("_", " ")))
    return "\n".join(lines).strip()
def extract_params_from_query(query, user_ns):
    """Generates a dictionary with safe keys and values to pass onto
    Neo4j.

    Only JSON-serializable values are kept, since anything else cannot
    be transmitted as a Cypher parameter.

    :param query: string with the Cypher query to execute
    :param user_ns: dictionary with the IPython user space
    """
    # TODO: Optimize this function
    params = {}
    for key, value in user_ns.items():
        try:
            json.dumps(value)
        except (TypeError, ValueError):
            # not JSON-serializable (or circular); skip it instead of
            # letting a bare ``except`` hide unrelated failures
            continue
        params[key] = value
    return params
def run(query, params=None, config=None, conn=None, **kwargs):
    """Executes a query and depending on the options of the extensions
    will return raw data, a ``ResultSet``, a Pandas ``DataFrame`` or a
    NetworkX graph.

    :param query: string with the Cypher query
    :param params: dictionary with parameters for the query
        (default=``None``)
    :param config: Configurable or NamedTuple with extra IPython
        configuration details. If ``None``, a new object will be created
        (defaults=``None``)
    :param conn: connection dictionary or string for the Neo4j backend.
        If ``None``, a new connection will be created (default=``None``)
    :param **kwargs: Any of the cell configuration options.
    """
    if params is None:
        params = {}
    # resolve the connection: default URI, or look up by string
    if conn is None:
        conn = Connection.get(DEFAULT_CONFIGURABLE["uri"])
    elif isinstance(conn, string_types):
        conn = Connection.get(conn)
    if config is None:
        default_config = DEFAULT_CONFIGURABLE.copy()
        # NOTE(review): defaults overwrite caller-supplied kwargs here;
        # likely intended the reverse (default_config.update(kwargs)) --
        # confirm before changing
        kwargs.update(default_config)
        config = DefaultConfigurable(**kwargs)
    if query.strip():
        # TODO: Handle multiple queries
        params = extract_params_from_query(query, params)
        result = conn.session.query(query, params,
                                    data_contents=config.data_contents)
        if config.feedback:
            print(interpret_stats(result))
        resultset = ResultSet(result, query, config)
        if config.auto_pandas:
            return resultset.get_dataframe()
        elif config.auto_networkx:
            graph = resultset.get_graph()
            resultset.draw()
            return graph
        else:
            return resultset  # returning only last result, intentionally
    else:
        # empty query: just report the active connection
        return 'Connected: %s' % conn.name
def get_dataframe(self):
    """Returns a Pandas DataFrame instance built from the result set."""
    if pd is None:
        raise ImportError("Try installing Pandas first.")
    columns = (self and self.keys) or []
    return pd.DataFrame(self[:], columns=columns)
def get_graph(self, directed=True):
    """Returns a NetworkX multi-graph instance built from the result set

    :param directed: boolean, optional (default=`True`).
        Whether to create a directed or an undirected graph.
    """
    if nx is None:
        raise ImportError("Try installing NetworkX first.")
    graph = nx.MultiDiGraph() if directed else nx.MultiGraph()
    for item in self._results.graph:
        # nodes first, with their labels stored as an attribute
        for node in item['nodes']:
            attrs = copy.deepcopy(node['properties'])
            attrs['labels'] = node['labels']
            graph.add_node(node['id'], **attrs)
        # then relationships, keyed by relationship type
        for rel in item['relationships']:
            attrs = copy.deepcopy(rel['properties'])
            attrs.update(id=rel['id'], type=rel['type'])
            graph.add_edge(rel['startNode'], rel['endNode'],
                           key=rel.get('type'), **attrs)
    return graph
def draw(self, directed=True, layout="spring", node_label_attr=None,
         show_node_labels=True, edge_label_attr=None,
         show_edge_labels=True, node_size=1600, node_color='blue',
         node_alpha=0.3, node_text_size=12, edge_color='blue',
         edge_alpha=0.3, edge_tickness=1, edge_text_pos=0.3,
         text_font='sans-serif', ax=None):
    """Plot of a NetworkX multi-graph instance

    :param directed: boolean, optional (default=`True`).
        Whether to build a directed graph or not.
    :param layout: string, optional (default=`"spring"`).
        Layout to apply. Any of the possible NetworkX layouts will
        work: ``'circular_layout'``, ``'random_layout'``,
        ``'shell_layout'``, ``'spring_layout'``, ``'spectral_layout'``,
        or ``'fruchterman_reingold_layout'``.
    :param node_label_attr: string, optional (default=`None`).
        Attribute of the nodes that has to be used as the label.
    :param show_node_labels: boolean, optional (default=`True`).
        Whether to show the labels of the nodes.
    :param edge_label_attr: string, optional (default=`None`).
        Attribute of the edges that has to be used as the label.
    :param show_edge_labels: boolean, optional (default=`True`).
        Whether to show the labels of the edges.
    :param node_size: integer, optional (default=`1600`).
        Desired size for nodes.
    :param node_color: color string or array of floats
        (default=`'blue'`). Node color. Can be a single color format
        string, or a sequence of colors with the same length as
        nodelist. If numeric values are specified they will be mapped
        to colors using the ``cmap`` and ``vmin``, ``vmax`` parameters.
        See ``matplotlib.scatter`` for more details.
    :param node_alpha: float, optional (default=`0.3`).
        Between 0 and 1 for transparency of nodes.
    :param node_text_size: integer, optional (default=`12`).
        Size of the node text.
    :param edge_color: color string or array of floats
        (default=`'blue'`). Edge color, same semantics as
        ``node_color``.
    :param edge_alpha: float, optional (default=`0.3`).
        Transparency for the edges.
    :param edge_tickness: float or integer, optional (default=`1`).
        Thickness of the lines drawn for the edges.
    :param edge_text_pos: float, optional (default=`0.3`).
        Position of edge labels along the edge.
    :param text_font: string, optional (default=`'sans-serif'`).
        Font family used for labels.
    :param ax: ``matplotlib`` axes, optional (default=`None`).
        Axes to render the graph onto. If `None`, a new figure and
        axes are created.

    :return: a tuple ``(graph, ax, nodes)`` with the NetworkX graph,
        the axes it was drawn on, and the node collection.
    """
    graph = self.get_graph(directed=directed)
    # resolve e.g. "spring" -> nx.spring_layout
    pos = getattr(nx, "{}_layout".format(layout))(graph)
    node_labels = {}
    edge_labels = {}
    node_colors = set()
    if show_node_labels:
        for node, props in graph.nodes(data=True):
            # 'labels' doubles as the color grouping for the legend
            labels = props.pop('labels', [])
            for label in labels:
                node_colors.add(label)
            if node_label_attr is None:
                # default label: ":Label" header plus first property
                node_labels[node] = "$:{}$\n{}".format(
                    ":".join(labels),
                    next(iter(props.values())) if props else "",
                )
            else:
                props_list = ["{}: {}".format(k, v)
                              for k, v in props.items()]
                node_labels[node] = "$:{}$\n{}".format(
                    ":".join(labels), "\n".join(props_list)
                )
    node_color = []
    node_colors = list(node_colors)
    legend_colors = []
    # pick one matplotlib color per distinct node label
    colors = list(plt.matplotlib.colors.ColorConverter().cache.items())[2:]
    for _, color_rgb in colors[:len(node_colors)]:
        node_color.append(color_rgb)
        legend_colors.append(color_rgb)
    if show_edge_labels:
        for start, end, props in graph.edges(data=True):
            if edge_label_attr is None:
                edge_label = props.get("type", '')
            else:
                edge_label = props.get(edge_label_attr, '')
            edge_labels[(start, end)] = edge_label
    if not ax:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    nodes = nx.draw_networkx_nodes(
        graph, pos=pos, node_color=node_color,
        node_size=node_size, alpha=node_alpha, ax=ax
    )
    nx.draw_networkx_labels(
        graph, pos=pos, labels=node_labels,
        font_size=node_text_size, font_family=text_font, ax=ax
    )
    nx.draw_networkx_edges(
        graph, pos=pos, width=edge_tickness,
        alpha=edge_alpha, edge_color=edge_color, ax=ax
    )
    nx.draw_networkx_edge_labels(
        graph, pos=pos, edge_labels=edge_labels, ax=ax
    )
    # legend: one marker per node label, colored to match the nodes
    ax.legend(
        [plt.Line2D([0], [0], linestyle="none", marker="o",
                    alpha=node_alpha, markersize=10,
                    markerfacecolor=color)
         for color in legend_colors],
        node_colors, loc=(-0.25, 1), numpoints=1, frameon=False
    )
    ax.set_axis_off()
    return graph, ax, nodes
def pie(self, key_word_sep=" ", title=None, **kwargs):
    """Generates a pylab pie chart from the result set.

    ``matplotlib`` must be installed, and in an IPython Notebook,
    inlining must be on::

        %%matplotlib inline

    Values (pie slice sizes) are taken from the rightmost column
    (numerical values required). All other columns are used to label
    the pie slices.

    :param key_word_sep: string used to separate column values from
        each other in pie labels
    :param title: plot title, defaults to name of value column
    :kwargs: any additional keyword arguments will be passed through
        to ``matplotlib.pylab.pie``.
    """
    if not plt:
        raise ImportError("Try installing matplotlib first.")
    self.guess_pie_columns(xlabel_sep=key_word_sep)
    chart = plt.pie(self.ys[0], labels=self.xlabels, **kwargs)
    plt.title(title or self.ys[0].name)
    return chart
def plot(self, title=None, **kwargs):
    """Generates a pylab plot from the result set.

    ``matplotlib`` must be installed, and in an IPython Notebook,
    inlining must be on::

        %%matplotlib inline

    The first and last columns are taken as the X and Y values. Any
    columns between are ignored.

    :param title: plot title, defaults to names of Y value columns

    Any additional keyword arguments will be passed through to
    ``matplotlib.pylab.plot``.
    """
    if not plt:
        raise ImportError("Try installing matplotlib first.")
    self.guess_plot_columns()
    self.x = self.x or range(len(self.ys[0]))
    # interleave the shared x with every y series: (x, y1, x, y2, ...)
    coords = reduce(operator.add, [(self.x, y) for y in self.ys])
    line = plt.plot(*coords, **kwargs)
    if hasattr(self.x, 'name'):
        plt.xlabel(self.x.name)
    ylabel = ", ".join(y.name for y in self.ys)
    plt.title(title or ylabel)
    plt.ylabel(ylabel)
    return line
def bar(self, key_word_sep=" ", title=None, **kwargs):
    """Generates a pylab bar plot from the result set.

    ``matplotlib`` must be installed, and in an IPython Notebook,
    inlining must be on::

        %%matplotlib inline

    The last quantitative column is taken as the Y values; all other
    columns are combined to label the X axis.

    :param title: plot title, defaults to names of Y value columns
    :param key_word_sep: string used to separate column values from
        each other in labels

    Any additional keyword arguments will be passed through to
    ``matplotlib.pylab.bar``.
    """
    if not plt:
        raise ImportError("Try installing matplotlib first.")
    self.guess_pie_columns(xlabel_sep=key_word_sep)
    chart = plt.bar(range(len(self.ys[0])), self.ys[0], **kwargs)
    if self.xlabels:
        plt.xticks(range(len(self.xlabels)), self.xlabels, rotation=45)
        plt.xlabel(self.xlabel)
    plt.ylabel(self.ys[0].name)
    return chart
def csv(self, filename=None, **format_params):
    """Generates results in comma-separated form. Write to ``filename``
    if given. Any other parameter will be passed on to ``csv.writer``.

    :param filename: if given, the CSV will be written to filename.

    Any additional keyword arguments will be passed through to
    ``csv.writer``.
    """
    if not self.pretty:
        return None  # no results
    if filename:
        # context manager guarantees the file handle is closed even if
        # a row fails to serialize (the original leaked it on error)
        with open(filename, 'w') as outfile:
            writer = UnicodeWriter(outfile, **format_params)
            writer.writerow(self.field_names)
            for row in self:
                writer.writerow(row)
        return CsvResultDescriptor(filename)
    # no filename: render to an in-memory buffer and return the text
    outfile = StringIO()
    writer = UnicodeWriter(outfile, **format_params)
    writer.writerow(self.field_names)
    for row in self:
        writer.writerow(row)
    return outfile.getvalue()
def permission_required(perm, login_url=None, raise_exception=False):
    """
    Re-implementation of the permission_required decorator, honors settings.

    If ``DASHBOARD_REQUIRE_LOGIN`` is False, this decorator will always
    return ``True``, otherwise it will check for the permission as usual.

    :param perm: the permission string to check for.
    :param login_url: where to redirect users that fail the check.
    :param raise_exception: if True, raise ``PermissionDenied`` instead of
        redirecting to the login form.
    """
    def check_perms(user):
        # Global opt-out: when login is not required, everyone passes
        if not getattr(settings, 'DASHBOARD_REQUIRE_LOGIN',
                       app_settings.REQUIRE_LOGIN):
            return True
        # First check if the user has the permission (even anon users)
        if user.has_perm(perm):
            return True
        # In case the 403 handler should be called raise the exception
        if raise_exception:  # pragma: no cover
            raise PermissionDenied
        # As the last resort, show the login form
        return False
    return user_passes_test(check_perms, login_url=login_url)
def get_context_data(self, **kwargs):
    """
    Return the template context for rendering the full widget.

    ``is_rendered`` flags that this AJAX view produced the context: when
    the widget template does not find the flag, the page is on its first
    load and every widget still has to fetch its real data from this
    view. The widget's own context data is merged in last.
    """
    ctx = super(RenderWidgetMixin, self).get_context_data(**kwargs)
    ctx['is_rendered'] = True
    ctx['widget'] = self.widget
    ctx.update(self.widget.get_context_data())
    return ctx
def get_widgets_sorted(self):
    """Returns the widgets sorted by position."""
    triples = [(name, widget, widget.position)
               for name, widget in self.get_widgets().items()]
    return sorted(triples, key=lambda triple: triple[2])
def get_widgets_that_need_update(self):
    """
    Returns all widgets that need an update.

    This should be scheduled every minute via crontab.

    :returns: list of registered widget instances whose
        ``should_update()`` returns True.
    """
    # the widget names were unused; iterate values directly
    return [widget for widget in self.get_widgets().values()
            if widget.should_update()]
def register_widget(self, widget_cls, **widget_kwargs):
    """Registers the given widget.

    Widgets must inherit ``DashboardWidgetBase`` and you cannot register
    the same widget twice.

    :widget_cls: A class that inherits ``DashboardWidgetBase``.
    :widget_kwargs: Forwarded to the widget's constructor.
    """
    if not issubclass(widget_cls, DashboardWidgetBase):
        raise ImproperlyConfigured(
            'DashboardWidgets must be subclasses of DashboardWidgetBase,'
            ' {0} is not.'.format(widget_cls))
    instance = widget_cls(**widget_kwargs)
    name = instance.get_name()
    if name in self.widgets:
        raise WidgetAlreadyRegistered(
            'Cannot register {0}, a plugin with this name {1} is already '
            'registered.'.format(widget_cls, name))
    self.widgets[name] = instance
def unregister_widget(self, widget_cls):
    """Unregisters the given widget.

    :widget_cls: The widget class that was previously registered.
    """
    # Widgets are registered under ``widget.get_name()`` (see
    # ``register_widget``), so look up and delete with that same key;
    # the old code checked membership with ``widget_cls.__name__``,
    # which silently failed whenever get_name() differed.
    widget_name = widget_cls().get_name()
    if widget_name in self.widgets:
        del self.widgets[widget_name]
def get_last_update(self):
    """Gets or creates the last update object for this widget.

    :returns: the ``DashboardWidgetLastUpdate`` instance keyed by this
        widget's name, creating it on first access.
    """
    # the ``created`` flag is intentionally unused; only the row matters
    instance, created = \
        models.DashboardWidgetLastUpdate.objects.get_or_create(
            widget_name=self.get_name())
    return instance
def get_setting(self, setting_name, default=None):
    """
    Returns the setting for this widget from the database.

    Note: on success this returns the ``DashboardWidgetSettings``
    *instance* (not its value); when the row does not exist the raw
    ``default`` is returned instead — ``save_setting`` relies on that.

    :setting_name: The name of the setting.
    :default: Optional default value if the setting cannot be found.
    """
    try:
        setting = models.DashboardWidgetSettings.objects.get(
            widget_name=self.get_name(),
            setting_name=setting_name)
    except models.DashboardWidgetSettings.DoesNotExist:
        setting = default
    return setting
def save_setting(self, setting_name, value):
    """Saves the setting value into the database.

    Creates the ``DashboardWidgetSettings`` row when it does not exist
    yet, then assigns ``value`` and saves.

    :setting_name: The name of the setting.
    :value: The value to store.
    :returns: the saved ``DashboardWidgetSettings`` instance.
    """
    setting = self.get_setting(setting_name)
    if setting is None:
        # no row yet — create one (value is re-assigned below; harmless)
        setting = models.DashboardWidgetSettings.objects.create(
            widget_name=self.get_name(),
            setting_name=setting_name,
            value=value)
    setting.value = value
    setting.save()
    return setting
def should_update(self):
    """
    Checks if an update is needed.

    Compares the time elapsed since this widget's
    ``DashboardWidgetLastUpdate`` timestamp against
    ``self.update_interval`` (in seconds).

    This should be called by
    ``DashboardWidgetPool.get_widgets_that_need_update()``, which in
    turn should be called by an admin command which should be scheduled
    every minute via crontab.

    :returns: True when the widget is overdue for an update.
    """
    last_update = self.get_last_update()
    time_since = now() - last_update.last_update
    # ``timedelta.seconds`` only holds the sub-day remainder (0..86399),
    # so a widget not updated for >24h looked "fresh" again; use
    # total_seconds() to compare the full elapsed time.
    return time_since.total_seconds() >= self.update_interval
def getCityDetails(self, **kwargs):
    """
    :param q: query by city name
    :param lat: latitude
    :param lon: longitude
    :param city_ids: comma separated city_id values
    :param count: number of max results to display

    Find the Zomato ID and other details for a city. The city can be
    identified by name query, by coordinates, or by explicit Zomato
    city ids; any recognised keyword is forwarded to ``/cities`` and
    everything else is ignored.
    """
    recognised = ("q", "lat", "lon", "city_ids", "count")
    payload = {name: kwargs[name] for name in recognised if name in kwargs}
    return self.api.get("/cities", payload)
def getCollectionsViaCityId(self, city_id, **kwargs):
    """
    :param city_id: id of the city for which collections are needed
    :param lat: latitude
    :param lon: longitude
    :param count: number of max results to display

    Returns Zomato Restaurant Collections in a city. ``city_id`` is
    always sent; ``lat``, ``lon`` and ``count`` are forwarded to the
    ``/collections`` endpoint only when supplied.
    """
    payload = {"city_id": city_id}
    for name in ("lat", "lon", "count"):
        if name in kwargs:
            payload[name] = kwargs[name]
    return self.api.get("/collections", payload)
def getEstablishments(self, city_id, **kwargs):
    """
    :param city_id: id of the city for which establishments are needed
    :param lat: latitude
    :param lon: longitude

    Get a list of restaurant types in a city. ``city_id`` is always
    sent; ``lat`` and ``lon`` are forwarded to ``/establishments``
    only when supplied.
    """
    payload = {"city_id": city_id}
    for name in ("lat", "lon"):
        if name in kwargs:
            payload[name] = kwargs[name]
    return self.api.get("/establishments", payload)
def getByGeocode(self, lat, lon):
    """
    :param lat: latitude
    :param lon: longitude

    Get Foodie and Nightlife Index, list of popular cuisines and
    nearby restaurants around the given coordinates.
    """
    return self.api.get("/geocode", {"lat": lat, "lon": lon})
def getLocationDetails(self, entity_id, entity_type):
    """
    :param entity_id: location id obtained from locations api
    :param entity_type: location type obtained from locations api
    :return: Foodie Index, Nightlife Index, top cuisines and best
        rated restaurants in the given location
    """
    query = {"entity_id": entity_id, "entity_type": entity_type}
    return self.api.get("/location_details", query)
def getLocations(self, query, **kwargs):
    """
    :param query: suggestion for location name
    :param lat: latitude
    :param lon: longitude
    :param count: number of max results to display
    :return: json response

    Search for Zomato locations by keyword; providing coordinates
    improves the search results.
    """
    payload = {"query": query}
    payload.update({name: kwargs[name]
                    for name in ("lat", "lon", "count") if name in kwargs})
    return self.api.get("/locations", payload)
def getDailyMenu(self, restaurant_id):
    """
    :param restaurant_id: id of restaurant whose details are requested
    :return: json response

    Get daily menu using Zomato restaurant ID.
    """
    return self.api.get("/dailymenu", {"res_id": restaurant_id})
def getRestaurantDetails(self, restaurant_id):
    """
    :param restaurant_id: id of restaurant whose details are requested
    :return: json response

    Get detailed restaurant information using Zomato restaurant ID.
    Partner Access is required to access photos and reviews.
    """
    return self.api.get("/restaurant", {"res_id": restaurant_id})
def getRestaurantReviews(self, restaurant_id, **kwargs):
    """
    :param restaurant_id: id of restaurant whose reviews are requested
    :param start: fetch results after this offset
    :param count: max number of results to retrieve
    :return: json response

    Get restaurant reviews using the Zomato restaurant ID.
    """
    payload = {"res_id": restaurant_id}
    for name in ("start", "count"):
        if name in kwargs:
            payload[name] = kwargs[name]
    return self.api.get("/reviews", payload)
def search(self, **kwargs):
    """
    :param entity_id: location id
    :param entity_type: location type (city, subzone, zone, lanmark, metro, group)
    :param q: search keyword
    :param start: fetch results after offset
    :param count: max number of results to display
    :param lat: latitude
    :param lon: longitude
    :param radius: radius around (lat,lon); to define search area, defined in meters(M)
    :param cuisines: list of cuisine id's separated by comma
    :param establishment_type: establishment id obtained from establishments call
    :param collection_id: collection id obtained from collections call
    :param category: category ids obtained from categories call
    :param sort: sort restaurants by (cost, rating, real_distance)
    :param order: used with 'sort' parameter to define ascending / descending
    :return: json response

    The location input can be specified using Zomato location ID or
    coordinates. Cuisine / Establishment / Collection IDs can be
    obtained from the respective api calls. Unknown keywords are
    silently ignored. Partner Access is required to access photos
    and reviews.
    """
    recognised = (
        "entity_id", "entity_type", "q", "start", "count", "lat", "lon",
        "radius", "cuisines", "establishment_type", "collection_id",
        "category", "sort", "order")
    payload = {name: kwargs[name] for name in recognised if name in kwargs}
    return self.api.get("/search", payload)
def array(a, context=None, axis=(0,), dtype=None, npartitions=None):
    """
    Create a spark bolt array from a local array.

    Parameters
    ----------
    a : array-like
        An array, any object exposing the array interface, an
        object whose __array__ method returns an array, or any
        (nested) sequence.

    context : SparkContext
        A context running Spark. (see pyspark)

    axis : tuple, optional, default=(0,)
        Which axes to distribute the array along. The resulting
        distributed object will use keys to represent these axes,
        with the remaining axes represented by values.

    dtype : data-type, optional, default=None
        The desired data-type for the array. If None, will
        be determined from the data. (see numpy)

    npartitions : int
        Number of partitions for parallization.

    Returns
    -------
    BoltArraySpark
    """
    if dtype is None:
        arry = asarray(a)
        dtype = arry.dtype
    else:
        arry = asarray(a, dtype)
    shape = arry.shape
    ndim = len(shape)

    # handle the axes specification and transpose if necessary
    axes = ConstructSpark._format_axes(axis, arry.shape)
    key_axes, value_axes = get_kv_axes(arry.shape, axes)
    permutation = key_axes + value_axes
    arry = arry.transpose(*permutation)
    split = len(axes)

    if split < 1:
        raise ValueError("split axis must be greater than 0, got %g" % split)
    if split > len(shape):
        raise ValueError("split axis must not exceed number of axes %g, got %g" % (ndim, split))

    # NOTE(review): key/val shapes are sliced from the *pre-transpose*
    # shape — this assumes the key axes form a contiguous prefix after
    # the permutation; verify for non-prefix axis specs
    key_shape = shape[:split]
    val_shape = shape[split:]

    # one record per key: enumerate all key-tuples, pair with subarrays
    keys = zip(*unravel_index(arange(0, int(prod(key_shape))), key_shape))
    vals = arry.reshape((prod(key_shape),) + val_shape)

    rdd = context.parallelize(zip(keys, vals), npartitions)
    return BoltArraySpark(rdd, shape=shape, split=split, dtype=dtype)
def ones(shape, context=None, axis=(0,), dtype=float64, npartitions=None):
    """
    Create a spark bolt array of ones.

    Parameters
    ----------
    shape : tuple
        The desired shape of the array.

    context : SparkContext
        A context running Spark. (see pyspark)

    axis : tuple, optional, default=(0,)
        Axes to distribute the array along; these become the keys of
        the distributed object, the rest become the values.

    dtype : data-type, optional, default=float64
        The desired data-type for the array. (see numpy)

    npartitions : int
        Number of partitions for parallization.

    Returns
    -------
    BoltArraySpark
    """
    # delegate the parallelized construction to _wrap, seeded with
    # numpy's own constructor
    from numpy import ones as _numpy_ones
    return ConstructSpark._wrap(_numpy_ones, shape, context, axis, dtype, npartitions)
def concatenate(arrays, axis=0):
    """
    Join two bolt arrays together, at least one of which is in spark.

    Parameters
    ----------
    arrays : tuple
        A pair of arrays. At least one must be a spark array,
        the other can be a local bolt array, a local numpy array,
        or an array-like.

    axis : int, optional, default=0
        The axis along which the arrays will be joined.

    Returns
    -------
    BoltArraySpark
    """
    if not isinstance(arrays, tuple):
        raise ValueError("data type not understood")
    if not len(arrays) == 2:
        raise NotImplementedError("spark concatenation only supports two arrays")

    first, second = arrays
    # dispatch to the spark member; promote the local side if needed
    if isinstance(first, BoltArraySpark):
        return first.concatenate(second, axis)
    if isinstance(second, BoltArraySpark):
        promoted = ConstructSpark.array(first, second._rdd.context)
        return promoted.concatenate(second, axis)
    raise ValueError("at least one array must be a spark bolt array")
def _argcheck(*args, **kwargs):
    """
    Check that arguments are consistent with spark array construction.

    Conditions are:
    (1) a positional argument is a SparkContext
    (2) keyword arg 'context' is a SparkContext
    (3) an argument is a BoltArraySpark, or
    (4) an argument is a nested list containing a BoltArraySpark
    """
    try:
        from pyspark import SparkContext
    except ImportError:
        return False

    if isinstance(kwargs.get('context', None), SparkContext):
        return True
    for arg in args:
        if isinstance(arg, (SparkContext, BoltArraySpark)):
            return True
        if isinstance(arg, (tuple, list)):
            if any(isinstance(sub, BoltArraySpark) for sub in arg):
                return True
    return False
def _format_axes(axes, shape): """ Format target axes given an array shape """ if isinstance(axes, int): axes = (axes,) elif isinstance(axes, list) or hasattr(axes, '__iter__'): axes = tuple(axes) if not isinstance(axes, tuple): raise ValueError("axes argument %s in the constructor not specified correctly" % str(axes)) if min(axes) < 0 or max(axes) > len(shape) - 1: raise ValueError("invalid key axes %s given shape %s" % (str(axes), str(shape))) return axes
def _wrap(func, shape, context=None, axis=(0,), dtype=None, npartitions=None):
    """
    Wrap an existing numpy constructor in a parallelized construction.

    ``func`` is a numpy-style constructor (e.g. ``ones`` or ``zeros``)
    called as ``func(value_shape, dtype, order='C')`` once per key.
    """
    if isinstance(shape, int):
        shape = (shape,)
    key_shape, value_shape = get_kv_shape(shape, ConstructSpark._format_axes(axis, shape))
    split = len(key_shape)

    # make the keys
    rdd = context.parallelize(list(product(*[arange(x) for x in key_shape])), npartitions)

    # use a map to make the arrays in parallel
    rdd = rdd.map(lambda x: (x, func(value_shape, dtype, order='C')))
    return BoltArraySpark(rdd, shape=shape, split=split, dtype=dtype)
def _align(self, axes, key_shape=None): """ Align local bolt array so that axes for iteration are in the keys. This operation is applied before most functional operators. It ensures that the specified axes are valid, and might transpose/reshape the underlying array so that the functional operators can be applied over the correct records. Parameters ---------- axes: tuple[int] One or more axes that will be iterated over by a functional operator Returns ------- BoltArrayLocal """ # ensure that the key axes are valid for an ndarray of this shape inshape(self.shape, axes) # compute the set of dimensions/axes that will be used to reshape remaining = [dim for dim in range(len(self.shape)) if dim not in axes] key_shape = key_shape if key_shape else [self.shape[axis] for axis in axes] remaining_shape = [self.shape[axis] for axis in remaining] linearized_shape = [prod(key_shape)] + remaining_shape # compute the transpose permutation transpose_order = axes + remaining # transpose the array so that the keys being mapped over come first, then linearize keys reshaped = self.transpose(*transpose_order).reshape(*linearized_shape) return reshaped
def filter(self, func, axis=(0,)):
    """
    Filter array along an axis.

    Applies a boolean-valued function along one or more axes. The
    array is first aligned so the requested axes form the leading
    (record) dimension, which may involve a transpose/reshape.

    Parameters
    ----------
    func : function
        Function to apply, should return boolean

    axis : tuple or int, optional, default=(0,)
        Axis or multiple axes to filter along.

    Returns
    -------
    BoltArrayLocal
    """
    axes = sorted(tupleize(axis))
    aligned = self._align(axes)
    kept = [record for record in aligned if func(record)]
    return self._constructor(asarray(kept))
def map(self, func, axis=(0,)):
    """
    Apply a function across an axis.

    Array will be aligned so that the desired set of axes
    are in the keys, which may require a transpose/reshape.

    Parameters
    ----------
    func : function
        Function of a single array to apply

    axis : tuple or int, optional, default=(0,)
        Axis or multiple axes to apply function along.

    Returns
    -------
    BoltArrayLocal
    """
    axes = sorted(tupleize(axis))
    key_shape = [self.shape[axis] for axis in axes]
    reshaped = self._align(axes, key_shape=key_shape)

    mapped = asarray(list(map(func, reshaped)))
    # assumes func returns uniformly-shaped arrays: the first result's
    # shape is used for all (reshape below fails otherwise)
    elem_shape = mapped[0].shape

    # invert the previous reshape operation, using the shape of the map result
    linearized_shape_inv = key_shape + list(elem_shape)
    reordered = mapped.reshape(*linearized_shape_inv)

    return self._constructor(reordered)
def reduce(self, func, axis=0):
    """
    Reduce an array along an axis.

    Applies an associative/commutative function of two arguments
    cumulatively to all arrays along an axis. Array will be aligned
    so that the desired set of axes are in the keys, which may
    require a transpose/reshape.

    Parameters
    ----------
    func : function
        Function of two arrays that returns a single array

    axis : tuple or int, optional, default=(0,)
        Axis or multiple axes to reduce along.

    Returns
    -------
    BoltArrayLocal
    """
    axes = sorted(tupleize(axis))

    # if the function is a ufunc, it can automatically handle reducing over multiple axes
    if isinstance(func, ufunc):
        inshape(self.shape, axes)
        reduced = func.reduce(self, axis=tuple(axes))
    else:
        # generic callables reduce pairwise over the aligned record axis
        reshaped = self._align(axes)
        reduced = reduce(func, reshaped)

    new_array = self._constructor(reduced)

    # ensure that the shape of the reduced array is valid
    expected_shape = [self.shape[i] for i in range(len(self.shape)) if i not in axes]
    if new_array.shape != tuple(expected_shape):
        raise ValueError("reduce did not yield a BoltArray with valid dimensions")

    return new_array
def concatenate(self, arry, axis=0):
    """
    Join this array with another array.

    Parameters
    ---------
    arry : ndarray or BoltArrayLocal
        Another array to concatenate with

    axis : int, optional, default=0
        The axis along which arrays will be joined.

    Returns
    -------
    BoltArrayLocal
    """
    # guard clause first; dispatch to the top-level helper otherwise
    if not isinstance(arry, ndarray):
        raise ValueError("other must be local array, got %s" % type(arry))
    from bolt import concatenate
    return concatenate((self, arry), axis)
def tospark(self, sc, axis=0):
    """
    Converts a BoltArrayLocal into a BoltArraySpark

    Parameters
    ----------
    sc : SparkContext
        The SparkContext which will be used to create the BoltArraySpark

    axis : tuple or int, optional, default=0
        The axis (or axes) across which this array will be parallelized

    Returns
    -------
    BoltArraySpark
    """
    from bolt import array as make_spark_array
    return make_spark_array(self.toarray(), sc, axis=axis)
def tordd(self, sc, axis=0):
    """
    Converts a BoltArrayLocal into an RDD

    Parameters
    ----------
    sc : SparkContext
        The SparkContext which will be used to create the BoltArraySpark

    axis : tuple or int, optional, default=0
        The axis (or axes) across which this array will be parallelized

    Returns
    -------
    RDD[(tuple, ndarray)]
    """
    # build the distributed array first, then expose its raw RDD
    from bolt import array as make_spark_array
    distributed = make_spark_array(self.toarray(), sc, axis=axis)
    return distributed.tordd()
def stack(self, size):
    """
    Make an intermediate RDD where all records are combined into a
    list of keys and larger ndarray along a new 0th dimension.

    :param size: max number of records per stacked batch; a falsy
        value means one batch per partition.
    """
    def tostacks(partition):
        keys = []
        arrs = []
        for key, arr in partition:
            keys.append(key)
            arrs.append(arr)
            # emit a full batch once `size` records have accumulated
            if size and 0 <= size <= len(keys):
                yield (keys, asarray(arrs))
                keys, arrs = [], []
        # emit whatever remains at the end of the partition
        if keys:
            yield (keys, asarray(arrs))

    rdd = self._rdd.mapPartitions(tostacks)
    return self._constructor(rdd).__finalize__(self)
def unstack(self):
    """
    Unstack array and return a new BoltArraySpark via flatMap().
    """
    from bolt.spark.array import BoltArraySpark

    # rekeyed records are already one-per-key; otherwise split each
    # stacked batch back into individual (key, value) records
    if self._rekeyed:
        source = self._rdd
    else:
        source = self._rdd.flatMap(lambda kv: zip(kv[0], list(kv[1])))

    return BoltArraySpark(source, shape=self.shape, split=self.split)
def map(self, func):
    """
    Apply a function on each subarray.

    Parameters
    ----------
    func : function
        This is applied to each value in the intermediate RDD.

    Returns
    -------
    StackedArray
    """
    vshape = self.shape[self.split:]
    x = self._rdd.values().first()
    # probe func with two test stacks of different heights to infer
    # whether it collapses the stacking dimension
    if x.shape == vshape:
        a, b = asarray([x]), asarray([x, x])
    else:
        a, b = x, concatenate((x, x))

    try:
        atest = func(a)
        btest = func(b)
    except Exception as e:
        raise RuntimeError("Error evaluating function on test array, got error:\n %s" % e)

    if not (isinstance(atest, ndarray) and isinstance(btest, ndarray)):
        raise ValueError("Function must return ndarray")

    # different shapes map to the same new shape
    elif atest.shape == btest.shape:
        if self._rekeyed is True:
            # we've already rekeyed
            rdd = self._rdd.map(lambda kv: (kv[0], func(kv[1])))
            shape = (self.shape[0],) + atest.shape
        else:
            # do the rekeying: each output record gets a fresh integer key
            count, rdd = zip_with_index(self._rdd.values())
            rdd = rdd.map(lambda kv: ((kv[1],), func(kv[0])))
            shape = (count,) + atest.shape
        split = 1
        rekeyed = True

    # different shapes stay different (along the first dimension)
    elif atest.shape[0] == a.shape[0] and btest.shape[0] == b.shape[0]:
        shape = self.shape[0:self.split] + atest.shape[1:]
        split = self.split
        rdd = self._rdd.map(lambda kv: (kv[0], func(kv[1])))
        rekeyed = self._rekeyed

    else:
        raise ValueError("Cannot infer effect of function on shape")

    return self._constructor(rdd, rekeyed=rekeyed, shape=shape, split=split).__finalize__(self)
def _chunk(self, size="150", axis=None, padding=None):
    """
    Split values of distributed array into chunks.

    Transforms an underlying pair RDD of (key, value) into
    records of the form: (key, chunk id), (chunked value).
    Here, chunk id is a tuple identifying the chunk and
    chunked value is a subset of the data from each original value,
    that has been divided along the specified dimensions.

    Parameters
    ----------
    size : str or tuple or int
        If str, the average size (in KB) of the chunks in all value dimensions.
        If int or tuple, an explicit specification of the number chunks in
        each value dimension.

    axis : tuple, optional, default=None
        One or more axes to estimate chunks for, if provided any
        other axes will use one chunk.

    padding: tuple or int, default = None
        Number of elements per dimension that will overlap with the adjacent chunk.
        If a tuple, specifies padding along each chunked dimension; if a int, same
        padding will be applied to all chunked dimensions.
    """
    if self.split == len(self.shape) and padding is None:
        # all axes are keys: add a dummy value dimension of size 1
        self._rdd = self._rdd.map(lambda kv: (kv[0]+(0,), array(kv[1], ndmin=1)))
        self._shape = self._shape + (1,)
        self._plan = (1,)
        self._padding = array([0])
        return self

    rdd = self._rdd
    self._plan, self._padding = self.getplan(size, axis, padding)

    # a chunk (plus its padding) can never be larger than the value itself
    if any([x + y > z for x, y, z in zip(self.plan, self.padding, self.vshape)]):
        raise ValueError("Chunk sizes %s plus padding sizes %s cannot exceed value dimensions %s along any axis"
                         % (tuple(self.plan), tuple(self.padding), tuple(self.vshape)))

    if any([x > y for x, y in zip(self.padding, self.plan)]):
        raise ValueError("Padding sizes %s cannot exceed chunk sizes %s along any axis"
                         % (tuple(self.padding), tuple(self.plan)))

    # precompute the (chunk id, slice) pairs shared by every record
    slices = self.getslices(self.plan, self.padding, self.vshape)
    labels = list(product(*[list(enumerate(s)) for s in slices]))
    scheme = [list(zip(*s)) for s in labels]

    def _chunk(record):
        k, v = record[0], record[1]
        for (chk, slc) in scheme:
            if type(k) is int:
                k = (k,)
            yield k + chk, v[slc]

    rdd = rdd.flatMap(_chunk)
    return self._constructor(rdd, shape=self.shape, split=self.split,
                             dtype=self.dtype, plan=self.plan, padding=self.padding, ordered=self._ordered)
def unchunk(self):
    """
    Convert a chunked array back into a full array with (key,value) pairs
    where key is a tuple of indices, and value is an ndarray.
    """
    plan, padding, vshape, split = self.plan, self.padding, self.vshape, self.split
    nchunks = self.getnumber(plan, vshape)
    full_shape = concatenate((nchunks, plan))
    n = len(vshape)
    # interleave chunk-grid and within-chunk dims so a single reshape
    # restores the original value layout
    perm = concatenate(list(zip(range(n), range(n, 2*n))))

    if self.uniform:
        # all chunks are the same size: one big reshape/transpose works
        def _unchunk(it):
            ordered = sorted(it, key=lambda kv: kv[0][split:])
            keys, values = zip(*ordered)
            yield keys[0][:split], asarray(values).reshape(full_shape).transpose(perm).reshape(vshape)
    else:
        # ragged chunks: place each into an object grid, then stack
        def _unchunk(it):
            ordered = sorted(it, key=lambda kv: kv[0][split:])
            keys, values = zip(*ordered)
            k_chks = [k[split:] for k in keys]
            arr = empty(nchunks, dtype='object')
            for (i, d) in zip(k_chks, values):
                arr[i] = d
            yield keys[0][:split], allstack(arr.tolist())

    # remove padding
    if self.padded:
        removepad = self.removepad
        rdd = self._rdd.map(lambda kv: (kv[0], removepad(kv[0][split:], kv[1], nchunks, padding, axes=range(n))))
    else:
        rdd = self._rdd

    # skip partitionBy if there is not actually any chunking
    if array_equal(self.plan, self.vshape):
        rdd = rdd.map(lambda kv: (kv[0][:split], kv[1]))
        ordered = self._ordered
    else:
        # co-locate all chunks of the same key on one partition
        ranges = self.kshape
        npartitions = int(prod(ranges))
        if len(self.kshape) == 0:
            partitioner = lambda k: 0
        else:
            partitioner = lambda k: ravel_multi_index(k[:split], ranges)
        rdd = rdd.partitionBy(numPartitions=npartitions, partitionFunc=partitioner).mapPartitions(_unchunk)
        ordered = True

    # drop the dummy value dimension added for all-key arrays
    if array_equal(self.vshape, [1]):
        rdd = rdd.mapValues(lambda v: squeeze(v))
        newshape = self.shape[:-1]
    else:
        newshape = self.shape

    return BoltArraySpark(rdd, shape=newshape, split=self._split,
                          dtype=self.dtype, ordered=ordered)
def keys_to_values(self, axes, size=None):
    """
    Move indices in the keys into the values.

    Padding on these new value-dimensions is not currently supported and is set to 0.

    Parameters
    ----------
    axes : tuple
        Axes from keys to move to values.

    size : tuple, optional, default=None
        Size of chunks for the values along the new dimensions.
        If None, then no chunking for all axes (number of chunks = 1)

    Returns
    -------
    ChunkedArray
    """
    if len(axes) == 0:
        return self

    kmask = self.kmask(axes)

    if size is None:
        size = self.kshape[kmask]

    # update properties: moved axes prepend to the value plan,
    # stationary key axes stay in front of the new shape
    newplan = r_[size, self.plan]
    newsplit = self._split - len(axes)
    newshape = tuple(r_[self.kshape[~kmask], self.kshape[kmask], self.vshape].astype(int).tolist())
    newpadding = r_[zeros(len(axes), dtype=int), self.padding]

    result = self._constructor(None, shape=newshape, split=newsplit,
                               dtype=self.dtype, plan=newplan, padding=newpadding, ordered=True)

    # convert keys into chunk + within-chunk label
    split = self.split

    def _relabel(record):
        k, data = record
        keys, chks = asarray(k[:split], 'int'), k[split:]
        movingkeys, stationarykeys = keys[kmask], keys[~kmask]
        newchks = [int(m) for m in movingkeys/size]  # element-wise integer division that works in Python 2 and 3
        labels = mod(movingkeys, size)
        return tuple(stationarykeys) + tuple(newchks) + tuple(chks) + tuple(labels), data

    rdd = self._rdd.map(_relabel)

    # group the new chunks together
    nchunks = result.getnumber(result.plan, result.vshape)
    npartitions = int(prod(result.kshape) * prod(nchunks))
    ranges = tuple(result.kshape) + tuple(nchunks)
    n = len(axes)
    if n == 0:
        s = slice(None)
    else:
        # partition on everything except the trailing within-chunk labels
        s = slice(-n)
    partitioner = lambda k: ravel_multi_index(k[s], ranges)
    rdd = rdd.partitionBy(numPartitions=npartitions, partitionFunc=partitioner)

    # reassemble the pieces in the chunks by sorting and then stacking
    uniform = result.uniform

    def _rebuild(it):
        ordered = sorted(it, key=lambda kv: kv[0][n:])
        keys, data = zip(*ordered)

        k = keys[0][s]
        labels = asarray([x[-n:] for x in keys])

        if uniform:
            labelshape = tuple(size)
        else:
            # ragged edge chunks: infer the label extent from the data
            labelshape = tuple(amax(labels, axis=0) - amin(labels, axis=0) + 1)

        valshape = data[0].shape
        fullshape = labelshape + valshape
        yield k, asarray(data).reshape(fullshape)

    result._rdd = rdd.mapPartitions(_rebuild)

    # drop the dummy value dimension added for all-key arrays
    if array_equal(self.vshape, [1]):
        result._rdd = result._rdd.mapValues(lambda v: squeeze(v))
        result._shape = result.shape[:-1]
        result._plan = result.plan[:-1]

    return result
def map(self, func, value_shape=None, dtype=None):
    """
    Apply an array -> array function on each subarray.

    The function can change the shape of the subarray, but only along
    dimensions that are not chunked.

    Parameters
    ----------
    func : function
        Function of a single subarray to apply

    value_shape:
        Known shape of chunking plan after the map

    dtype: numpy.dtype, optional, default=None
        Known dtype of values resulting from operation

    Returns
    -------
    ChunkedArray
    """
    if value_shape is None or dtype is None:
        # try to compute the size of each mapped element by applying func to a random array
        try:
            mapped = func(random.randn(*self.plan).astype(self.dtype))
        except Exception:
            first = self._rdd.first()
            if first:
                # eval func on the first element
                # NOTE(review): if ``first`` is falsy here, ``mapped``
                # stays unbound and the lines below raise NameError —
                # confirm whether that case is reachable
                mapped = func(first[1])
        if value_shape is None:
            value_shape = mapped.shape
        if dtype is None:
            dtype = mapped.dtype

    chunked_dims = where(self.plan != self.vshape)[0]
    unchunked_dims = where(self.plan == self.vshape)[0]

    # check that no dimensions are dropped
    if len(value_shape) != len(self.plan):
        raise NotImplementedError('map on ChunkedArray cannot drop dimensions')

    # check that chunked dimensions did not change shape
    if any([value_shape[i] != self.plan[i] for i in chunked_dims]):
        raise ValueError('map cannot change the sizes of chunked dimensions')

    def check_and_apply(v):
        new = func(v)
        if len(unchunked_dims) > 0:
            if any([new.shape[i] != value_shape[i] for i in unchunked_dims]):
                raise Exception("Map operation did not produce values of uniform shape.")
        if len(chunked_dims) > 0:
            if any([v.shape[i] != new.shape[i] for i in chunked_dims]):
                raise Exception("Map operation changed the size of a chunked dimension")
        return new

    rdd = self._rdd.mapValues(check_and_apply)

    # chunked dims keep their old size; unchunked dims take the new one
    vshape = [value_shape[i] if i in unchunked_dims else self.vshape[i] for i in range(len(self.vshape))]
    newshape = r_[self.kshape, vshape].astype(int).tolist()

    return self._constructor(rdd, shape=tuple(newshape), dtype=dtype,
                             plan=asarray(value_shape)).__finalize__(self)
def map_generic(self, func):
    """
    Apply a generic array -> object to each subarray

    The resulting object is a BoltArraySpark of dtype object where the
    blocked dimensions are replaced with indices indication block ID.
    """
    def wrap(val):
        # box the arbitrary result in a 1-element object array
        boxed = empty(1, dtype="object")
        boxed[0] = func(val)
        return boxed

    rdd = self._rdd.mapValues(wrap)

    # value dims collapse to the chunk-grid dims; everything becomes keys
    nchunks = self.getnumber(self.plan, self.vshape)
    newshape = tuple(int(s) for s in r_[self.kshape, nchunks])
    newsplit = len(self.shape)
    return BoltArraySpark(rdd, shape=newshape, split=newsplit,
                          ordered=self._ordered, dtype="object")
def getplan(self, size="150", axes=None, padding=None):
    """
    Identify a plan for chunking values along each dimension.

    Generates an ndarray with the size (in number of elements) of chunks
    in each dimension. If provided, will estimate chunks for only a
    subset of axes, leaving all others to the full size of the axis.

    Parameters
    ----------
    size : string or tuple
         If str, the average size (in KB) of the chunks in all value dimensions.
         If int/tuple, an explicit specification of the number chunks in
         each moving value dimension.
         NOTE(review): despite this docstring and the error message below,
         a plain int currently falls through to the ValueError — only str
         and tuple are handled; confirm intended behavior.

    axes : tuple, optional, default=None
          One or more axes to estimate chunks for, if provided any
          other axes will use one chunk.

    padding : tuple or int, option, default=None
        Size over overlapping padding between chunks in each dimension.
        If tuple, specifies padding along each chunked dimension; if int,
        all dimensions use same padding; if None, no padding
    """
    from numpy import dtype as gettype

    # initialize with all elements in one chunk
    # NOTE(review): assigning ``plan[axes] = ...`` below mutates this
    # array in place — assumes self.vshape returns a fresh array per
    # access; verify it is not an alias of shared state
    plan = self.vshape

    # check for subset of axes
    if axes is None:
        if isinstance(size, str):
            axes = arange(len(self.vshape))
        else:
            axes = arange(len(size))
    else:
        axes = asarray(axes, 'int')

    # set padding
    pad = array(len(self.vshape)*[0, ])
    if padding is not None:
        pad[axes] = padding

    # set the plan
    if isinstance(size, tuple):
        plan[axes] = size

    elif isinstance(size, str):
        # convert from kilobytes
        size = 1000.0 * float(size)

        # calculate from dtype
        elsize = gettype(self.dtype).itemsize
        nelements = prod(self.vshape)
        dims = self.vshape[self.vmask(axes)]

        if size <= elsize:
            # target smaller than one element: one element per chunk
            s = ones(len(axes))

        else:
            remsize = 1.0 * nelements * elsize
            s = []
            for (i, d) in enumerate(dims):
                # size of all elements in remaining dimensions
                minsize = remsize/d
                if minsize >= size:
                    s.append(1)
                    remsize = minsize
                    continue
                else:
                    # this dim absorbs the budget; later dims stay whole
                    s.append(min(d, floor(size/minsize)))
                    s[i+1:] = plan[i+1:]
                    break

        plan[axes] = s

    else:
        raise ValueError("Chunk size not understood, must be tuple or int")

    return plan, pad
def removepad(idx, value, number, padding, axes=None):
    """
    Remove the padding from a chunk.

    Given a chunk and its corresponding index, use the number of chunks
    and the padding scheme to strip any padded borders from the chunk
    along the specified axes.

    Parameters
    ----------
    idx: tuple or array-like
        The chunk index, indicating which chunk this is.

    value: ndarray
        The chunk that goes along with the index.

    number: ndarray or array-like
        The number of chunks along each dimension.

    padding: ndarray or array-like
        The padding scheme.

    axes: tuple, optional, default = None
        The axes (in the values) along which to remove padding.

    Returns
    -------
    ndarray
        The chunk with padded borders removed.
    """
    if axes is None:
        axes = range(len(number))

    # mark only the dimensions that were both requested and actually padded
    mask = len(number)*[False, ]
    for i in range(len(mask)):
        if i in axes and padding[i] != 0:
            mask[i] = True

    # the first chunk along a dimension has no left pad,
    # and the last chunk has no right pad
    starts = [0 if (i == 0 or not m) else p for (i, m, p) in zip(idx, mask, padding)]
    stops = [None if (i == n-1 or not m) else -p
             for (i, m, p, n) in zip(idx, mask, padding, number)]
    slices = [slice(i1, i2) for (i1, i2) in zip(starts, stops)]

    # index with a tuple: indexing an ndarray with a *list* of slices
    # is deprecated and removed in modern NumPy
    return value[tuple(slices)]
def getnumber(plan, shape):
    """
    Obtain number of chunks for the given dimensions and chunk sizes.

    Given a plan for the number of chunks along each dimension,
    calculate the number of chunks that this will lead to.

    Parameters
    ----------
    plan: tuple or array-like
        Size of chunks (in number of elements) along each dimensions.
        Length must be equal to the number of dimensions.

    shape : tuple
         Shape of array to be chunked.

    Returns
    -------
    list of int
        Number of chunks along each dimension.
    """
    # each dimension requires ceil(extent / chunk size) chunks
    return [int(ceil(1.0 * extent / chunk)) for chunk, extent in zip(plan, shape)]
def getslices(plan, padding, shape):
    """
    Obtain slices for the given dimensions, padding, and chunks.

    Given a plan for the number of chunks along each dimension and the
    amount of padding, calculate a list of slices required to generate
    those chunks.

    Parameters
    ----------
    plan: tuple or array-like
        Size of chunks (in number of elements) along each dimensions.
        Length must be equal to the number of dimensions.

    padding: tuple or array-like
        Size of overlap (in number of elements) between chunks along
        each dimension. Length must be equal to the number of dimensions.

    shape: tuple
        Dimensions of axes to be chunked.

    Returns
    -------
    list of list of slice
        Per dimension, the slices producing each (possibly padded) chunk.
    """
    slices = []
    for size, pad, d in zip(plan, padding, shape):
        # number of full-sized chunks along this dimension
        nchunks = int(floor(d/size))
        # leftover elements that form a final, smaller chunk
        remainder = d % size
        start = 0
        # initialize so the remainder slice below is well-defined even
        # when the dimension is smaller than the chunk size (nchunks == 0);
        # previously this raised NameError on `end`
        end = 0
        dimslices = []
        for idx in range(nchunks):
            end = start + size
            # first chunk has no left padding
            left = start if idx == 0 else start - pad
            # always pad on the right; numpy clips slices that run past
            # the end of a dimension, so no clamp is needed here
            right = end + pad
            dimslices.append(slice(left, right, 1))
            start = end
        if remainder:
            # final partial chunk; clamp at 0 so a padded start cannot go
            # negative (a negative start would wrap around the array)
            dimslices.append(slice(max(end - pad, 0), d, 1))
        slices.append(dimslices)
    return slices
def getmask(inds, n): """ Obtain a binary mask by setting a subset of entries to true. Parameters ---------- inds : array-like Which indices to set as true. n : int The length of the target mask. """ inds = asarray(inds, 'int') mask = zeros(n, dtype=bool) mask[inds] = True return mask
def repartition(self, npartitions):
    """
    Repartitions the underlying RDD.

    Parameters
    ----------
    npartitions : int
        Number of partitions to repartion the underlying RDD to.

    Returns
    -------
    BoltArraySpark
    """
    # repartitioning shuffles records, so key ordering can no longer
    # be assumed afterwards
    shuffled = self._rdd.repartition(npartitions)
    return self._constructor(shuffled, ordered=False).__finalize__(self)
def stack(self, size=None):
    """
    Aggregates records of a distributed array.

    Stacking should improve the performance of vectorized operations,
    but the resulting StackedArray object only exposes a restricted set
    of operations (e.g. map, reduce). The unstack method can be used
    to restore the full bolt array.

    Parameters
    ----------
    size : int, optional, default=None
        The maximum size for each stack (number of original records),
        will aggregate groups of records per partition up to this size,
        if None will aggregate all records on each partition.

    Returns
    -------
    StackedArray
    """
    # wrap the underlying rdd, then perform the per-partition aggregation
    wrapped = StackedArray(self._rdd, shape=self.shape, split=self.split)
    return wrapped.stack(size)
def _align(self, axis):
    """
    Align spark bolt array so that axes for iteration are in the keys.

    This operation is applied before most functional operators.
    It ensures that the specified axes are valid, and swaps
    key/value axes so that functional operators can be applied
    over the correct records.

    Parameters
    ----------
    axis: tuple[int]
        One or more axes that wil be iterated over by a functional operator

    Returns
    -------
    BoltArraySpark
    """
    # ensure that the specified axes are valid
    inshape(self.shape, axis)

    # value axes (>= split) being iterated over must move into the keys
    moving_to_keys = [a - self.split for a in axis if a >= self.split]

    # key axes (< split) not being iterated over must move into the values
    moving_to_values = [a for a in range(self.split) if a not in axis]

    # nothing to move -> already aligned
    if not moving_to_keys and not moving_to_values:
        return self

    return self.swap(moving_to_values, moving_to_keys)
def first(self):
    """
    Return the first element of an array
    """
    from bolt.local.array import BoltArrayLocal

    # an unordered RDD must be sorted so "first" is deterministic
    if self._ordered:
        rdd = self._rdd
    else:
        rdd = self._rdd.sortByKey()

    return BoltArrayLocal(rdd.values().first())
def map(self, func, axis=(0,), value_shape=None, dtype=None, with_keys=False):
    """
    Apply a function across an axis.

    Array will be aligned so that the desired set of axes
    are in the keys, which may incur a swap.

    Parameters
    ----------
    func : function
        Function of a single array to apply. If with_keys=True,
        function should be of a (tuple, array) pair.

    axis : tuple or int, optional, default=(0,)
        Axis or multiple axes to apply function along.

    value_shape : tuple, optional, default=None
        Known shape of values resulting from operation

    dtype: numpy.dtype, optional, default=None
        Known dtype of values resulting from operation

    with_keys : bool, optional, default=False
        Include keys as an argument to the function

    Returns
    -------
    BoltArraySpark
    """
    axis = tupleize(axis)
    # move the requested axes into the keys (may trigger a swap)
    swapped = self._align(axis)

    # when keys are passed through, probe func with a dummy key so the
    # shape/dtype inference below can still call it with a bare array
    if with_keys:
        test_func = lambda x: func(((0,), x))
    else:
        test_func = func

    if value_shape is None or dtype is None:
        # try to compute the size of each mapped element by applying func to a random array
        try:
            mapped = test_func(random.randn(*swapped.values.shape).astype(self.dtype))
        except Exception:
            # func rejected synthetic data; fall back to evaluating it on
            # a real record pulled from the RDD
            first = swapped._rdd.first()
            if first:
                # eval func on the first element
                mapped = test_func(first[1])
        # NOTE(review): if both probes fail (e.g. `first` is falsy),
        # `mapped` is unbound here and the next lines raise NameError —
        # consider an explicit error message; confirm intended behavior
        if value_shape is None:
            value_shape = mapped.shape
        if dtype is None:
            dtype = mapped.dtype

    # result shape = iterated key axes + per-record value shape
    shape = tuple([swapped._shape[ax] for ax in range(len(axis))]) + tupleize(value_shape)

    if with_keys:
        rdd = swapped._rdd.map(lambda kv: (kv[0], func(kv)))
    else:
        rdd = swapped._rdd.mapValues(func)

    # reshaping will fail if the elements aren't uniformly shaped
    def check(v):
        if len(v.shape) > 0 and v.shape != tupleize(value_shape):
            raise Exception("Map operation did not produce values of uniform shape.")
        return v

    rdd = rdd.mapValues(lambda v: check(v))

    return self._constructor(rdd, shape=shape, dtype=dtype, split=swapped.split).__finalize__(swapped)
def filter(self, func, axis=(0,), sort=False):
    """
    Filter array along an axis.

    Applies a function which should evaluate to boolean,
    along a single axis or multiple axes. Array will be
    aligned so that the desired set of axes are in the
    keys, which may incur a swap.

    Parameters
    ----------
    func : function
        Function to apply, should return boolean

    axis : tuple or int, optional, default=(0,)
        Axis or multiple axes to filter along.

    sort: bool, optional, default=False
        Whether or not to sort by key before reindexing

    Returns
    -------
    BoltArraySpark
    """
    axis = tupleize(axis)
    # move the filtered axes into the keys (may trigger a swap)
    aligned = self._align(axis)

    # the predicate only sees the value part of each record
    filtered = aligned._rdd.filter(lambda kv: func(kv[1]))

    if sort:
        values = filtered.sortByKey().values()
    else:
        values = filtered.values()

    # count the resulting array in order to reindex (linearize) the keys
    count, zipped = zip_with_index(values)
    if not count:
        count = zipped.count()
    reindexed = zipped.map(lambda kv: (tupleize(kv[1]), kv[0]))

    # since we can only filter over one axis, the remaining shape is
    # always the original value shape past the filtered axes
    remaining = list(aligned.shape[len(axis):])
    if count != 0:
        shape = tuple([count] + remaining)
    else:
        shape = (0,)

    return self._constructor(reindexed, shape=shape, split=1).__finalize__(aligned)