text stringlengths 8 6.05M |
|---|
# Codewars.com
# If we list all the natural numbers below
# 10 that are multiples of 3 or 5,
# we get 3, 5, 6 and 9. The sum of these multiples is 23.
# Finish the solution so that it returns the
# sum of all the multiples of 3 or 5 below the number passed in.
# Note: If the number is a multiple of both 3 and 5,
# only count it once.


def sum_of_multiples(number):
    """Return the sum of all natural numbers below *number* divisible by 3 or 5.

    Numbers divisible by both 3 and 5 (e.g. 15) are counted once, because
    each candidate in the range is tested a single time.
    """
    # Generator expression replaces the original build-a-list-then-sum loop:
    # same result, no intermediate list.
    return sum(i for i in range(1, number) if i % 3 == 0 or i % 5 == 0)


if __name__ == '__main__':
    # Preserve the original script behavior: read the upper bound from
    # stdin and print the answer.
    print(sum_of_multiples(int(input())))
|
from Tkinter import *
from ucasts import ID12LA
import Tkinter as tk
import threading
import RPi.GPIO as GPIO
class App(threading.Thread):
    """Direction-kiosk GUI that shows a clinic map for each scanned RFID tag.

    The constructing thread runs the (blocking) Tk mainloop, while the
    Thread body ``run()`` blocks on the RFID reader in the background and
    updates the displayed image when a known tag is scanned.
    """

    # Map of known tag IDs to the map image for that clinic. Adding a new
    # clinic is now a data change instead of another if-branch in run().
    TAG_IMAGES = {
        '6F005C86AF': 'ClinicA.gif',
    }

    def __init__(self):
        threading.Thread.__init__(self)
        # Start the reader thread before entering mainloop(), which blocks
        # the constructing thread until the window is closed.
        self.start()
        # Main window
        self.root = tk.Tk()
        self.root.wm_title("Republic Poly Direction Kiosk")
        self.root.protocol("WM_DELETE_WINDOW", self.callback)
        # Variables
        self.RFID = StringVar()
        # BUG FIX: Tkinter keeps only a weak reference to PhotoImage objects,
        # so every image must be stored on self -- the original local
        # ``imageRP`` could be garbage collected, blanking the label. The
        # unused ClinicA-D locals were dropped for the TAG_IMAGES table.
        self.Clinic = PhotoImage()
        # LeftFrame for RP logo
        leftFrame = Frame(self.root, width=200, height=200)
        leftFrame.grid(row=0, column=0)
        Label(leftFrame, text="Welcome to RP Direction Kiosk").grid(row=0, column=0)
        self.imageRP = PhotoImage(file='RPlogo.gif')
        Label(leftFrame, image=self.imageRP).grid(row=1, column=0)
        Label(leftFrame, text="Please Scan Tag").grid(row=2, column=0)
        Entry(leftFrame, textvariable=self.RFID, width=13).grid(row=3, column=0)
        # RightFrame for Map/Route
        rightFrame = Frame(self.root, width=200, height=200)
        rightFrame.grid(row=0, column=1)
        Label(rightFrame, text="test").grid(row=0, column=1)
        self.Clinic = PhotoImage(file='Clinic.gif')
        # Keep a handle on the map label so scans reconfigure it instead of
        # stacking a brand-new Label widget on top for every scan.
        self.map_label = Label(self.root, image=self.Clinic)
        self.map_label.grid(row=0, column=1)
        self.root.mainloop()

    def callback(self):
        """Window-manager close handler: stop the Tk mainloop."""
        self.root.quit()

    def run(self):
        """Thread body: read tags forever and update the displayed map.

        The original scanned exactly one tag and exited; a kiosk must keep
        serving scans, so this loops indefinitely.
        """
        reader = ID12LA()
        while True:
            tag = reader.wait_for_scan()
            # BUG FIX: was ``print ("Scanned %s") % (tag,)`` which applies %
            # to print()'s return value and raises TypeError on Python 3.
            print("Scanned %s" % (tag,))
            self.RFID.set(tag)
            image_file = self.TAG_IMAGES.get(tag)
            if image_file is not None:
                self.Clinic = PhotoImage(file=image_file)
                self.map_label.configure(image=self.Clinic)


app = App()
|
#!/usr/bin/env python3
"""
The standard Cartesian axes used for most ProPlot figures.
"""
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
import numpy as np
from .. import constructor
from .. import scale as pscale
from .. import ticker as pticker
from ..config import _parse_format, rc
from ..internals import ic # noqa: F401
from ..internals import _not_none, _snippet_manager, docstring, rcsetup, warnings
from . import plot, shared
__all__ = ['CartesianAxes']
_alt_kwargs = ( # TODO: More systematic approach?
'lim', 'min', 'max', 'reverse', 'scale', 'label',
'tickdir', 'grid', 'gridminor',
'tickminor', 'ticklabeldir', 'tickrange', 'wraprange',
'rotation', 'formatter', 'ticklabels',
'ticks', 'locator', 'minorticks', 'minorlocator',
'bounds', 'margin', 'color', 'linewidth', 'ticklen', 'gridcolor',
'label_kw', 'scale_kw', 'locator_kw', 'formatter_kw', 'minorlocator_kw',
)
# Shared docstring
_shared_docstring = """
%(descrip)s
Parameters
----------
%(extra)s{xargs} : optional
Passed to `Axes.format`.
{args} : optional
Prepended with ``'{x}'`` and passed to `Axes.format`.
Note
----
This enforces the following default settings:
* Places the old *{x}* axis on the {x1} and the new *{x}*
axis on the {x2}.
* Makes the old {x2} spine invisible and the new {x1}, {y1},
and {y2} spines invisible.
* Adjusts the *{x}* axis tick, tick label, and axis label positions
according to the visible spine positions.
* Syncs the old and new *{y}* axis limits and scales, and makes the
new *{y}* axis labels invisible.
"""
_shared_x_keys = dict(
x='x', x1='bottom', x2='top',
y='y', y1='left', y2='right',
args=', '.join(_alt_kwargs),
xargs=', '.join('x' + key for key in _alt_kwargs),
)
_shared_y_keys = dict(
x='y', x1='left', x2='right',
y='x', y1='bottom', y2='top',
args=', '.join(_alt_kwargs),
xargs=', '.join('y' + key for key in _alt_kwargs),
)
# Alt docstrings
_alt_descrip = """
Return an axes in the same location as this one but whose {x}
axis is on the {x2}. This is an alias and more intuitive name
for `~CartesianAxes.twin{y}`, which generates two *{x}* axes
with a shared ("twin") *{y}* axes.
"""
_alt_docstring = _shared_docstring % {'descrip': _alt_descrip, 'extra': ''}
_snippet_manager['axes.altx'] = _alt_docstring.format(**_shared_x_keys)
_snippet_manager['axes.alty'] = _alt_docstring.format(**_shared_y_keys)
# Twin docstrings
_twin_descrip = """
Return an axes in the same location as this one but whose {x}
axis is on the {x2}. Mimics `matplotlib.axes.Axes.twin{y}`.
"""
_twin_docstring = _shared_docstring % {'descrip': _twin_descrip, 'extra': ''}
_snippet_manager['axes.twinx'] = _twin_docstring.format(**_shared_y_keys)
_snippet_manager['axes.twiny'] = _twin_docstring.format(**_shared_x_keys)
# Dual docstrings
_dual_descrip = """
Return a secondary *{x}* axis for denoting equivalent *{x}*
coordinates in *alternate units*.
"""
_dual_extra = """
funcscale : callable, 2-tuple of callables, or scale-spec
The scale used to transform units from the parent axis to the secondary
axis. This can be a `~proplot.scale.FuncScale` itself or a function,
(function, function) tuple, or an axis scale specification
interpreted by the `~proplot.constructor.Scale` constructor function,
any of which will be used to build a `~proplot.scale.FuncScale` and
applied to the dual axis (see `~proplot.scale.FuncScale` for details).
"""
_dual_docstring = _shared_docstring % {'descrip': _dual_descrip, 'extra': _dual_extra.lstrip()} # noqa: E501
_snippet_manager['axes.dualx'] = _dual_docstring.format(**_shared_x_keys)
_snippet_manager['axes.dualy'] = _dual_docstring.format(**_shared_y_keys)
class CartesianAxes(shared._SharedAxes, plot.PlotAxes):
"""
Axes subclass for plotting in ordinary Cartesian coordinates.
Adds the `~CartesianAxes.format` method and overrides several existing
methods.
"""
#: The registered projection name.
name = 'proplot_cartesian'
def __init__(self, *args, **kwargs):
    """
    Initialize the Cartesian axes and impose proplot defaults.

    See also
    --------
    proplot.ui.subplots
    proplot.axes.Axes
    proplot.axes.PlotAxes
    """
    # Rotation bookkeeping used by _update_rotation(): the currently applied
    # tick label rotation per axis, and whether it is still the automatic
    # default (datetime x axes get rotated automatically).
    self._xaxis_current_rotation = 'horizontal'  # current rotation
    self._yaxis_current_rotation = 'horizontal'
    self._xaxis_isdefault_rotation = True  # whether to auto rotate the axis
    self._yaxis_isdefault_rotation = True
    super().__init__(*args, **kwargs)
    # Impose default formatter. Keep isDefault_majfmt True afterwards so
    # format() and the axis-sharing machinery can still tell that the user
    # has not overridden the formatter.
    formatter = pticker.AutoFormatter()
    self.xaxis.set_major_formatter(formatter)
    self.yaxis.set_major_formatter(formatter)
    self.xaxis.isDefault_majfmt = True
    self.yaxis.isDefault_majfmt = True
    # State used by the dualx()/dualy() machinery to lock child axis limits
    # to the parent (see _dualx_scale and _dualy_scale).
    self._dualx_funcscale = None  # for scaling units on dual axes
    self._dualx_prevstate = None  # prevent excess _dualy_scale calls
    self._dualy_funcscale = None
    self._dualy_prevstate = None
def _apply_axis_sharing(self):
    """
    Enforce the "shared" axis labels and axis tick labels. If this is not
    called at drawtime, "shared" labels can be inadvertently turned off.
    """
    # X axis
    # NOTE: Critical to apply labels to *shared* axes attributes rather
    # than testing extents or we end up sharing labels with twin axes.
    # NOTE: Similar to how _align_super_labels() calls _apply_title_above() this
    # is called inside _align_axis_labels() so we align the correct text.
    # NOTE: The "panel sharing group" refers to axes and panels *above* the
    # bottommost or to the *right* of the leftmost panel. But the sharing level
    # used for the leftmost and bottommost is the *figure* sharing level.
    axis = self.xaxis
    if self._sharex is not None:
        # Panel-group members always share at the maximum level (3);
        # otherwise use the figure-wide sharing level.
        level = 3 if self._panel_sharex_group else self.figure._sharex
        if level > 0:
            self._transfer_text(axis.label, self._sharex.xaxis.label)
            axis.label.set_visible(False)
        if level > 2:
            # WARNING: Cannot set NullFormatter because shared axes share the
            # same Ticker(). Instead use approach copied from mpl subplots().
            axis.set_tick_params(which='both', labelbottom=False, labeltop=False)
    # Y axis
    axis = self.yaxis
    if self._sharey is not None:
        level = 3 if self._panel_sharey_group else self.figure._sharey
        if level > 0:
            self._transfer_text(axis.label, self._sharey.yaxis.label)
            axis.label.set_visible(False)
        if level > 2:
            axis.set_tick_params(which='both', labelleft=False, labelright=False)
            # NOTE(review): indentation was ambiguous in the source; this line
            # is assumed to belong to the level > 2 branch of the *y* axis only
            # (it has no counterpart in the x branch, and it contradicts the
            # WARNING above) -- confirm against upstream history.
            axis.set_minor_formatter(mticker.NullFormatter())
def _dualx_scale(self):
    """
    Lock the child "dual" *x* axis limits to the parent.
    """
    # NOTE: We bypass autoscale_view because we set limits manually, and bypass
    # child.stale = True because that is done in call to set_xlim() below.
    # NOTE: We set the scale using private API to bypass application of
    # set_default_locators_and_formatters: only_if_default=True is critical
    # to prevent overriding user settings!
    # NOTE: Dual axis only needs to be constrained if the parent axis scale
    # and limits have changed, and limits are always applied before we reach
    # the child.draw() because always called after parent.draw()
    funcscale, parent, child = self._dualx_funcscale, self._altx_parent, self
    if funcscale is None or parent is None:
        return  # not a dual axis, or the forward function was never set
    olim = parent.get_xlim()
    scale = parent.xaxis._scale
    if (scale, *olim) == child._dualx_prevstate:
        return  # parent scale and limits unchanged since the last sync
    funcscale = pscale.FuncScale(funcscale, invert=True, parent_scale=scale)
    child.xaxis._scale = funcscale
    child._update_transScale()
    funcscale.set_default_locators_and_formatters(child.xaxis, only_if_default=True)
    # Transform the parent limits into the child's units.
    nlim = list(map(funcscale.functions[1], np.array(olim)))
    if np.sign(np.diff(olim)) != np.sign(np.diff(nlim)):
        nlim = nlim[::-1]  # if function flips limits, so will set_xlim!
    child.set_xlim(nlim, emit=False)
    # Cache the parent state so redundant re-syncs are skipped.
    child._dualx_prevstate = (scale, *olim)
def _dualy_scale(self):
    """
    Lock the child "dual" *y* axis limits to the parent.
    """
    # See _dualx_scale() comments
    funcscale, parent, child = self._dualy_funcscale, self._alty_parent, self
    if funcscale is None or parent is None:
        return  # not a dual axis, or the forward function was never set
    olim = parent.get_ylim()
    scale = parent.yaxis._scale
    if (scale, *olim) == child._dualy_prevstate:
        return  # parent scale and limits unchanged since the last sync
    funcscale = pscale.FuncScale(funcscale, invert=True, parent_scale=scale)
    child.yaxis._scale = funcscale
    child._update_transScale()
    funcscale.set_default_locators_and_formatters(child.yaxis, only_if_default=True)
    # Transform the parent limits into the child's units.
    nlim = list(map(funcscale.functions[1], np.array(olim)))
    if np.sign(np.diff(olim)) != np.sign(np.diff(nlim)):
        nlim = nlim[::-1]  # if the function flips limits, so will set_ylim
    child.set_ylim(nlim, emit=False)
    # Cache the parent state so redundant re-syncs are skipped.
    child._dualy_prevstate = (scale, *olim)
def _is_panel_group_member(self, other):
    """
    Return whether this axes and *other* belong to the same panel sharing stack.
    """
    # Direct parent/child relationships first.
    if self._panel_parent is other:
        return True  # other is the main subplot of this panel
    if other._panel_parent is self:
        return True  # this is the main subplot of that panel
    # Otherwise they are members only if both are panels hanging off
    # the same main subplot (siblings).
    return (
        other._panel_parent
        and self._panel_parent
        and other._panel_parent is self._panel_parent
    )
@staticmethod
def _parse_alt(x, kwargs):
    """
    Interpret keyword args passed to all "twin axis" methods so they
    can be passed to Axes.format.

    Parameters
    ----------
    x : str
        The axis letter, ``'x'`` or ``'y'``.
    kwargs : dict
        The user-supplied keyword arguments.

    Returns
    -------
    dict
        Keywords prefixed with the axis letter where needed, plus any
        dotless rc setting names passed through unchanged.

    Raises
    ------
    TypeError
        If a keyword is neither a recognized axis setting nor an rc name.
    """
    kw_bad, kw_out = {}, {}
    for key, value in kwargs.items():
        if key in _alt_kwargs:
            # Bare name like 'locator' -- prefix with the axis letter.
            kw_out[x + key] = value
        elif key[0] == x and key[1:] in _alt_kwargs:
            # NOTE: We permit both e.g. 'locator' and 'xlocator' because the
            # former is more elegant and consistent with e.g. colorbar()
            # syntax while the latter is easier to use when refactoring.
            kw_out[key] = value
        elif key in rcsetup._rc_nodots:
            # Dotless rc setting names pass through untouched.
            kw_out[key] = value
        else:
            kw_bad[key] = value
    if kw_bad:
        raise TypeError(f'Unexpected keyword argument(s): {kw_bad!r}')
    return kw_out
def _sharex_limits(self, sharex):
    """
    Safely share limits and tickers without resetting things.

    Parameters
    ----------
    sharex : CartesianAxes
        The "parent" axes with which x limits, scales, and tickers
        are shared.
    """
    # Copy non-default limits and scales. Either this axes or the input
    # axes could be a newly-created subplot while the other is a subplot
    # with possibly-modified user settings we are careful to preserve.
    for (ax1, ax2) in ((self, sharex), (sharex, self)):
        if ax1.get_xscale() == 'linear' and ax2.get_xscale() != 'linear':
            ax1.set_xscale(ax2.get_xscale())  # non-default scale
        if ax1.get_autoscalex_on() and not ax2.get_autoscalex_on():
            ax1.set_xlim(ax2.get_xlim())  # non-default limits
    # Copy non-default locators and formatters
    self._shared_x_axes.join(self, sharex)  # share limit/scale changes
    if sharex.xaxis.isDefault_majloc and not self.xaxis.isDefault_majloc:
        sharex.xaxis.set_major_locator(self.xaxis.get_major_locator())
    if sharex.xaxis.isDefault_minloc and not self.xaxis.isDefault_minloc:
        sharex.xaxis.set_minor_locator(self.xaxis.get_minor_locator())
    if sharex.xaxis.isDefault_majfmt and not self.xaxis.isDefault_majfmt:
        sharex.xaxis.set_major_formatter(self.xaxis.get_major_formatter())
    if sharex.xaxis.isDefault_minfmt and not self.xaxis.isDefault_minfmt:
        sharex.xaxis.set_minor_formatter(self.xaxis.get_minor_formatter())
    # Adopt the parent's Ticker objects so future locator/formatter
    # changes propagate to both axes.
    self.xaxis.major = sharex.xaxis.major
    self.xaxis.minor = sharex.xaxis.minor
def _sharey_limits(self, sharey):
    """
    Safely share limits and tickers without resetting things.

    Parameters
    ----------
    sharey : CartesianAxes
        The "parent" axes with which y limits, scales, and tickers
        are shared.
    """
    # NOTE: See _sharex_limits for notes
    for (ax1, ax2) in ((self, sharey), (sharey, self)):
        if ax1.get_yscale() == 'linear' and ax2.get_yscale() != 'linear':
            ax1.set_yscale(ax2.get_yscale())  # non-default scale
        if ax1.get_autoscaley_on() and not ax2.get_autoscaley_on():
            ax1.set_ylim(ax2.get_ylim())  # non-default limits
    self._shared_y_axes.join(self, sharey)  # share limit/scale changes
    if sharey.yaxis.isDefault_majloc and not self.yaxis.isDefault_majloc:
        sharey.yaxis.set_major_locator(self.yaxis.get_major_locator())
    if sharey.yaxis.isDefault_minloc and not self.yaxis.isDefault_minloc:
        sharey.yaxis.set_minor_locator(self.yaxis.get_minor_locator())
    if sharey.yaxis.isDefault_majfmt and not self.yaxis.isDefault_majfmt:
        sharey.yaxis.set_major_formatter(self.yaxis.get_major_formatter())
    if sharey.yaxis.isDefault_minfmt and not self.yaxis.isDefault_minfmt:
        sharey.yaxis.set_minor_formatter(self.yaxis.get_minor_formatter())
    # Adopt the parent's Ticker objects so future locator/formatter
    # changes propagate to both axes.
    self.yaxis.major = sharey.yaxis.major
    self.yaxis.minor = sharey.yaxis.minor
def _sharex_setup(self, sharex, *, labels=True, limits=True):
    """
    Configure shared axes accounting. Input is the 'parent' axes from which this
    one will draw its properties. Use keyword args to override settings.

    Parameters
    ----------
    sharex : CartesianAxes or None
        The parent axes.
    labels : bool, optional
        Whether to share future axis label changes.
    limits : bool, optional
        Whether to share limits, scales, and tickers.
    """
    # Share panels across *different* subplots
    super()._sharex_setup(sharex)
    # Get the axis sharing level
    level = (
        3 if self._panel_sharex_group and self._is_panel_group_member(sharex)
        else self.figure._sharex
    )
    if level not in range(5):  # must be internal error
        raise ValueError(f'Invalid sharing level sharex={level!r}.')
    if sharex in (None, self) or not isinstance(sharex, CartesianAxes):
        return  # nothing to share with
    # Share future axis label changes. Implemented in _apply_axis_sharing().
    # Matplotlib only uses these attributes in __init__() and cla() to share
    # tickers -- all other builtin sharing features derive from _shared_x_axes
    if level > 0 and labels:
        self._sharex = sharex
    # Share future axis tickers, limits, and scales
    # NOTE: Only difference between levels 2 and 3 is level 3 hides tick
    # labels. But this is done after the fact -- tickers are still shared.
    if level > 1 and limits:
        self._sharex_limits(sharex)
def _sharey_setup(self, sharey, *, labels=True, limits=True):
    """
    Configure shared axes accounting for panels. The input is the
    'parent' axes, from which this one will draw its properties.

    Parameters
    ----------
    sharey : CartesianAxes or None
        The parent axes.
    labels : bool, optional
        Whether to share future axis label changes.
    limits : bool, optional
        Whether to share limits, scales, and tickers.
    """
    # NOTE: See _sharex_setup for notes
    super()._sharey_setup(sharey)
    level = (
        3 if self._panel_sharey_group and self._is_panel_group_member(sharey)
        else self.figure._sharey
    )
    if level not in range(5):  # must be internal error
        raise ValueError(f'Invalid sharing level sharey={level!r}.')
    if sharey in (None, self) or not isinstance(sharey, CartesianAxes):
        return  # nothing to share with
    if level > 0 and labels:
        self._sharey = sharey
    if level > 1 and limits:
        self._sharey_limits(sharey)
def _update_bounds(self, x, fixticks=False):
    """
    Ensure there are no out-of-bounds labels. Mostly a brute-force version of
    `~matplotlib.axis.Axis.set_smart_bounds` (which I couldn't get to work).

    Parameters
    ----------
    x : str
        The axis letter, ``'x'`` or ``'y'``.
    fixticks : bool, optional
        Whether to convert the locators to fixed tick lists even when no
        spine bounds or cutoff scale require it.
    """
    # NOTE: Previously triggered this every time FixedFormatter was found
    # on axis but 1) that seems heavy-handed + strange and 2) internal
    # application of FixedFormatter by boxplot resulted in subsequent format()
    # successfully calling this and messing up the ticks for some reason.
    # So avoid using this when possible, and try to make behavior consistent
    # by cacheing the locators before we use them for ticks.
    axis = getattr(self, x + 'axis')
    sides = ('bottom', 'top') if x == 'x' else ('left', 'right')
    # BUG FIX: keep the actual bounds tuples (or None) rather than booleans.
    # The previous ``get_bounds() is not None`` made bounds True/False, so
    # the ``bounds[0] or bounds[1] or ...`` fallback below could bind
    # ``lim = True`` and crash on ``lim[0]``.
    bounds = tuple(self.spines[side].get_bounds() for side in sides)
    if fixticks or any(bounds) or axis.get_scale() == 'cutoff':
        # Major locator. Use the spine bounds when set, otherwise the
        # current view limits, to filter out-of-range tick locations.
        lim = bounds[0] or bounds[1] or getattr(self, 'get_' + x + 'lim')()
        locator = getattr(axis, '_major_locator_cached', None)
        if locator is None:
            locator = axis._major_locator_cached = axis.get_major_locator()
        # NOTE: renamed the comprehension variable -- it previously shadowed
        # the parameter ``x``.
        locator = constructor.Locator(
            [tick for tick in locator() if lim[0] <= tick <= lim[1]]
        )
        axis.set_major_locator(locator)
        # Minor locator
        locator = getattr(axis, '_minor_locator_cached', None)
        if locator is None:
            locator = axis._minor_locator_cached = axis.get_minor_locator()
        locator = constructor.Locator(
            [tick for tick in locator() if lim[0] <= tick <= lim[1]]
        )
        axis.set_minor_locator(locator)
def _update_formatter(
    self, x, formatter=None, *, formatter_kw=None,
    tickrange=None, wraprange=None,
):
    """
    Update the axis formatter. Passes `formatter` through `Formatter` with kwargs.

    Parameters
    ----------
    x : str
        The axis letter, ``'x'`` or ``'y'``.
    formatter : formatter-spec, optional
        Passed to `~proplot.constructor.Formatter`.
    formatter_kw : dict, optional
        Keyword arguments for the formatter class.
    tickrange, wraprange : optional
        Tick-range restriction and wrapping range forwarded to the
        proplot `AutoFormatter`.
    """
    # Test if this is date axes
    # See: https://matplotlib.org/api/units_api.html
    # And: https://matplotlib.org/api/dates_api.html
    axis = getattr(self, x + 'axis')
    date = isinstance(axis.converter, mdates.DateConverter)
    # Major formatter
    # NOTE: The default axis formatter accepts lots of keywords. So unlike
    # everywhere else that uses constructor functions we also allow only
    # formatter_kw input without formatter and use 'auto' as the default.
    formatter_kw = formatter_kw or {}
    formatter_kw = formatter_kw.copy()  # do not mutate the caller's dict
    if formatter is not None or tickrange is not None or wraprange is not None or formatter_kw:  # noqa: E501
        # Tick range
        formatter = _not_none(formatter, 'auto')
        if tickrange is not None or wraprange is not None:
            if formatter != 'auto':
                warnings._warn_proplot(
                    'The tickrange and autorange features require '
                    'proplot.AutoFormatter formatter. Overriding the input.'
                )
            if tickrange is not None:
                formatter_kw.setdefault('tickrange', tickrange)
            if wraprange is not None:
                formatter_kw.setdefault('wraprange', wraprange)
        # Set the formatter
        # Note some formatters require 'locator' as keyword arg
        if formatter in ('date', 'concise'):
            locator = axis.get_major_locator()
            formatter_kw.setdefault('locator', locator)
        formatter = constructor.Formatter(formatter, date=date, **formatter_kw)
        axis.set_major_formatter(formatter)
def _update_labels(self, x, *args, **kwargs):
    """
    Apply axis labels to the relevant shared axis. If spanning labels are toggled
    this keeps the labels synced for all subplots in the same row or column. Label
    positions will be adjusted at draw-time with figure._align_axislabels.
    """
    # NOTE: Critical to test whether arguments are None or else this
    # will set isDefault_label to False every time format() is called.
    # NOTE: This always updates the *current* labels and sharing is handled
    # later so that labels set with set_xlabel() and set_ylabel() are shared too.
    # See notes in _align_axis_labels() and _apply_axis_sharing().
    props = self._get_label_props(**kwargs)
    args_empty = all(arg is None for arg in args)
    props_empty = all(value is None for value in props.values())
    if args_empty and props_empty:
        # Nothing to apply (also covers the no-arguments case).
        return
    setter = getattr(self, 'set_' + x + 'label')
    setter(*args, **props)
def _update_locators(
    self, x, locator=None, minorlocator=None, *,
    tickminor=None, locator_kw=None, minorlocator_kw=None,
):
    """
    Update the locators. Requires `Locator` instances.

    Parameters
    ----------
    x : str
        The axis letter, ``'x'`` or ``'y'``.
    locator, minorlocator : locator-spec, optional
        Passed to `~proplot.constructor.Locator`.
    tickminor : bool, optional
        Whether minor ticks are enabled.
    locator_kw, minorlocator_kw : dict, optional
        Keyword arguments for the locator classes.
    """
    # Apply input major locator
    axis = getattr(self, x + 'axis')
    locator_kw = locator_kw or {}
    if locator is not None:
        locator = constructor.Locator(locator, **locator_kw)
        axis.set_major_locator(locator)
        if isinstance(locator, mticker.IndexLocator):
            tickminor = _not_none(tickminor, False)  # disable 'index' minor ticks
    # Apply input or default minor locator
    # NOTE: Parts of API (dualxy) rely on minor tick toggling preserving the
    # isDefault_minloc setting. In future should override mpl minorticks_on()
    # NOTE: Unlike matplotlib when "turning on" minor ticks we *always* use the
    # scale default, thanks to scale classes refactoring with _ScaleBase.
    isdefault = minorlocator is None
    minorlocator_kw = minorlocator_kw or {}
    if not isdefault:
        minorlocator = constructor.Locator(minorlocator, **minorlocator_kw)
    elif tickminor:
        minorlocator = getattr(axis._scale, '_default_minor_locator', None)
        if not minorlocator:
            minorlocator = constructor.Locator('minor')
    if minorlocator is not None:
        axis.set_minor_locator(minorlocator)
        axis.isDefault_minloc = isdefault
    # Disable minor ticks
    # NOTE: Generally if you *enable* minor ticks on a dual axis, want to
    # allow FuncScale updates to change the minor tick locators. If you
    # *disable* minor ticks, do not want FuncScale applications to turn them
    # on. So we allow below to set isDefault_minloc to False.
    if tickminor is not None and not tickminor:
        axis.set_minor_locator(constructor.Locator('null'))
def _update_limits(self, x, *, min_=None, max_=None, lim=None, reverse=None):
    """
    Update the axis limits.

    Parameters
    ----------
    x : str
        The axis letter, ``'x'`` or ``'y'``.
    min_, max_ : float, optional
        Individual limits. Take precedence over `lim`.
    lim : 2-tuple, optional
        Both limits at once.
    reverse : bool, optional
        Whether to reverse (or restore) the axis direction.
    """
    # Set limits for just one side or both at once
    axis = getattr(self, x + 'axis')
    if min_ is not None or max_ is not None:
        if lim is not None:
            warnings._warn_proplot(
                f'Overriding {x}lim={lim!r} '
                f'with {x}min={min_!r} and {x}max={max_!r}.'
            )
        lim = (min_, max_)
    if lim is not None:
        getattr(self, 'set_' + x + 'lim')(lim)
    # Reverse direction
    # NOTE: 3.1+ has axis.set_inverted(), below is from source code
    if reverse is not None:
        lo, hi = axis.get_view_interval()
        if reverse:
            lim = (max(lo, hi), min(lo, hi))
        else:
            lim = (min(lo, hi), max(lo, hi))
        axis.set_view_interval(*lim, ignore=True)
def _update_rotation(self, x, *, rotation=None):
    """
    Rotate the tick labels. Rotate 90 degrees by default for datetime *x* axes.

    Parameters
    ----------
    x : str
        The axis letter, ``'x'`` or ``'y'``.
    rotation : float or {'horizontal', 'vertical'}, optional
        The tick label rotation. ``None`` applies the automatic default.
    """
    # Apply rotation for datetime axes.
    # NOTE: Rotation is done *before* horizontal/vertical alignment. Cannot
    # change alignment with set_tick_params so we must apply to text objects.
    # Note fig.autofmt_date calls subplots_adjust, so we cannot use it.
    x = _not_none(x, 'x')
    current = '_' + x + 'axis_current_rotation'
    default = '_' + x + 'axis_isdefault_rotation'
    axis = getattr(self, x + 'axis')
    if rotation is not None:
        # Explicit user rotation permanently disables the automatic default.
        setattr(self, default, False)
    elif not getattr(self, default):
        return  # do not rotate
    elif x == 'x' and isinstance(axis.converter, mdates.DateConverter):
        rotation = rc['formatter.timerotation']
    else:
        rotation = 'horizontal'
    # Apply tick label rotation if necessary
    if rotation != getattr(self, current):
        rotation = {'horizontal': 0, 'vertical': 90}.get(rotation, rotation)
        kw = {'rotation': rotation}
        if rotation not in (0, 90, -90):
            # Tilted labels read better anchored at the end nearest the axis.
            kw['ha'] = 'right' if rotation > 0 else 'left'
        for label in axis.get_ticklabels():
            label.update(kw)
        setattr(self, current, rotation)
def _update_spines(self, x, *, loc=None, bounds=None):
    """
    Update the spine settings.

    Parameters
    ----------
    x : str
        The axis letter, ``'x'`` or ``'y'``.
    loc : str or spine-position-spec, optional
        The spine location: a side name, ``'both'``, ``'neither'``, or a
        special matplotlib spine position such as ``'zero'`` or ``'center'``.
    bounds : 2-tuple, optional
        The spine bounds passed to ``Spine.set_bounds``.
    """
    # Iterate over spines associated with this axis
    sides = ('bottom', 'top') if x == 'x' else ('left', 'right')
    for side in sides:
        # Change default spine location from 'both' to the first relevant
        # side if the user passes 'bounds'.
        spine = self.spines[side]
        if loc is None and bounds is not None:
            loc = _not_none(loc, sides[0])
        # Eliminate sides
        if loc == 'neither':
            spine.set_visible(False)
        elif loc == 'both':
            spine.set_visible(True)
        elif loc in sides:  # make relevant spine visible
            spine.set_visible(side == loc)
        # Special spine location, usually 'zero', 'center', or tuple with
        # (units, location) where 'units' can be 'axes', 'data', or 'outward'.
        # Matplotlib internally represents these with 'bottom' and 'left'.
        elif loc is not None:
            if side == sides[1]:
                spine.set_visible(False)
            else:
                spine.set_visible(True)
                try:
                    spine.set_position(loc)
                except ValueError:
                    raise ValueError(
                        f'Invalid {x} spine location {loc!r}. Options are: '
                        + ', '.join(map(repr, (*sides, 'both', 'neither'))) + '.'
                    )
        # Apply spine bounds
        if bounds is not None:
            spine.set_bounds(*bounds)
def _update_locs(self, x, *, tickloc=None, ticklabelloc=None, labelloc=None):
    """
    Update the tick, tick label, and axis label locations.

    Parameters
    ----------
    x : str
        The axis letter, ``'x'`` or ``'y'``.
    tickloc, ticklabelloc : str, optional
        A side name, ``'both'``, or ``'none'``/``'neither'``.
    labelloc : str, optional
        The axis label side. Inferred from the tick side when unambiguous.
    """
    # The tick and tick label sides for Cartesian axes
    kw = {}
    sides = ('bottom', 'top') if x == 'x' else ('left', 'right')
    sides_active = tuple(side for side in sides if self.spines[side].get_visible())
    # Translate 'both'/'none'/'neither' into tuples of sides; None means
    # "leave unchanged", and any other value is treated as a single side.
    sides_dict = {None: None, 'both': sides, 'none': (), 'neither': ()}
    # The tick side(s)
    ticklocs = sides_dict.get(tickloc, (tickloc,))
    if ticklocs is not None:
        kw.update({side: side in ticklocs for side in sides})
    # Never draw ticks on sides whose spine is hidden.
    kw.update({side: False for side in sides if side not in sides_active})
    # The tick label side(s). Make sure these only appear where ticks are
    ticklabellocs = sides_dict.get(ticklabelloc, (ticklabelloc,))
    if ticklabellocs is not None:
        kw.update({'label' + side: (side in ticklabellocs) for side in sides})
    kw.update(
        {
            'label' + side: False for side in sides
            if side not in sides_active
            or ticklocs is not None and side not in ticklocs
        }
    )
    # The axis label side(s). Infer it from the tick side when there is
    # exactly one active candidate.
    if labelloc is None:
        if ticklocs is not None:
            options = tuple(_ for _ in sides if _ in ticklocs and _ in sides_active)
            if len(options) == 1:
                labelloc = options[0]
    if labelloc is not None and labelloc not in sides:
        raise ValueError(
            f'Invalid label location {labelloc!r}. Options are '
            + ', '.join(map(repr, sides)) + '.'
        )
    # Apply the tick, tick label, and label locations
    self.tick_params(axis=x, which='both', **kw)
    if labelloc is not None:
        getattr(self, x + 'axis').set_label_position(labelloc)
@docstring._obfuscate_signature
@_snippet_manager
def format(
    self, *,
    aspect=None,
    xloc=None, yloc=None,
    xspineloc=None, yspineloc=None,
    xtickloc=None, ytickloc=None, fixticks=False,
    xlabelloc=None, ylabelloc=None,
    xticklabelloc=None, yticklabelloc=None,
    xtickdir=None, ytickdir=None,
    xgrid=None, ygrid=None,
    xgridminor=None, ygridminor=None,
    xtickminor=None, ytickminor=None,
    xticklabeldir=None, yticklabeldir=None,
    xticklabelpad=None, yticklabelpad=None,
    xtickrange=None, ytickrange=None,
    xwraprange=None, ywraprange=None,
    xreverse=None, yreverse=None,
    xlabel=None, ylabel=None,
    xlabelpad=None, ylabelpad=None,
    xlim=None, ylim=None,
    xmin=None, ymin=None,
    xmax=None, ymax=None,
    xscale=None, yscale=None,
    xrotation=None, yrotation=None,
    xformatter=None, yformatter=None,
    xticklabels=None, yticklabels=None,
    xticks=None, yticks=None,
    xlocator=None, ylocator=None,
    xminorticks=None, yminorticks=None,
    xminorlocator=None, yminorlocator=None,
    xbounds=None, ybounds=None,
    xmargin=None, ymargin=None,
    xticklen=None, yticklen=None,
    xlinewidth=None, ylinewidth=None,
    xcolor=None, ycolor=None,
    xgridcolor=None, ygridcolor=None,
    xlabel_kw=None, ylabel_kw=None,
    xscale_kw=None, yscale_kw=None,
    xlocator_kw=None, ylocator_kw=None,
    xformatter_kw=None, yformatter_kw=None,
    xminorlocator_kw=None, yminorlocator_kw=None,
    **kwargs
):
    """
    Modify the x and y axis labels, tick locations, tick labels, axis scales,
    spine settings, and more. Additional keyword arguments are passed to
    `Axes.format` and `~proplot.config.Configurator.context`.

    Parameters
    ----------
    aspect : {'auto', 'equal'}, optional
        The aspect ratio mode. See `~matplotlib.axes.Axes.set_aspect`
        for details.
    xlabel, ylabel : str, optional
        The x and y axis labels. Applied with `~matplotlib.axes.Axes.set_xlabel`
        and `~matplotlib.axes.Axes.set_ylabel`.
    xlabelpad, ylabelpad : unit-spec, optional
        The padding between the x and y axis bounding box and the
        x and y axis labels. Default is :rc:`label.pad`.
        %(units.pt)s
    xlabel_kw, ylabel_kw : dict-like, optional
        Additional settings used to update the axis labels with ``text.update()``.
    xlim, ylim : 2-tuple of floats or None, optional
        The x and y axis data limits. Applied with
        `~matplotlib.axes.Axes.set_xlim` and
        `~matplotlib.axes.Axes.set_ylim`.
    xmin, ymin : float, optional
        The x and y minimum data limits. Useful if you do not want
        to set the maximum limits.
    xmax, ymax : float, optional
        The x and y maximum data limits. Useful if you do not want
        to set the minimum limits.
    xreverse, yreverse : bool, optional
        Whether to "reverse" the x and y axis direction. Makes the x and
        y axes ascend left-to-right and top-to-bottom, respectively.
    xscale, yscale : scale-spec, optional
        The x and y axis scales. Passed to the `~proplot.scale.Scale` constructor.
        For example, ``xscale='log'`` applies logarithmic scaling, and
        ``xscale=('cutoff', 100, 2)`` applies a `~proplot.scale.CutoffScale`.
    xscale_kw, yscale_kw : dict-like, optional
        The x and y axis scale settings. Passed to `~proplot.scale.Scale`.
    xspineloc, yspineloc \
: {'both', 'bottom', 'top', 'left', 'right', 'neither', 'center', 'zero'}, optional
        The x and y axis spine locations.
    xloc, yloc : optional
        Aliases for `xspineloc`, `yspineloc`.
    xtickloc, ytickloc \
: {'both', 'bottom', 'top', 'left', 'right', 'neither'}, optional
        Which x and y axis spines should have major and minor tick marks.
    xtickminor, ytickminor, tickminor : bool, optional
        Whether to draw minor ticks on the x and y axes.
    xtickdir, ytickdir, tickdir : {'out', 'in', 'inout'}
        Direction that major and minor tick marks point for the x and y axis.
        Use `tickdir` to control both.
    xticklabeldir, yticklabeldir : {'in', 'out'}
        Whether to place x and y axis tick label text inside
        or outside the axes.
    xticklabelpad, yticklabelpad : unit-spec, optional
        The padding between the x and y axis ticks and
        tick labels. Default is :rcraw:`tick.labelpad`.
        %(units.pt)s
    xgrid, ygrid, grid : bool, optional
        Whether to draw major gridlines on the x and y axis.
        Use `grid` to toggle both.
    xgridminor, ygridminor, gridminor : bool, optional
        Whether to draw minor gridlines for the x and y axis.
        Use `gridminor` to toggle both.
    xlocator, ylocator : locator-spec, optional
        Used to determine the x and y axis tick mark positions. Passed
        to the `~proplot.constructor.Locator` constructor. Can be float,
        list of float, string, or `matplotlib.ticker.Locator` instance.
        Use ``[]``, ``'null'``, or ``'none'`` for no ticks.
    xticks, yticks : optional
        Aliases for `xlocator`, `ylocator`.
    xlocator_kw, ylocator_kw : dict-like, optional
        Keyword arguments passed to the `matplotlib.ticker.Locator` class.
    xminorlocator, yminorlocator : optional
        As for `xlocator`, `ylocator`, but for the minor ticks.
    xminorticks, yminorticks : optional
        Aliases for `xminorlocator`, `yminorlocator`.
    xminorlocator_kw, yminorlocator_kw
        As for `xlocator_kw`, `ylocator_kw`, but for the minor locator.
    xformatter, yformatter : formatter-spec, optional
        Used to determine the x and y axis tick label string format.
        Passed to the `~proplot.constructor.Formatter` constructor.
        Can be string, list of strings, or `matplotlib.ticker.Formatter`
        instance. Use ``[]``, ``'null'``, or ``'none'`` for no labels.
    xticklabels, yticklabels : optional
        Aliases for `xformatter`, `yformatter`.
    xformatter_kw, yformatter_kw : dict-like, optional
        Keyword arguments passed to the `matplotlib.ticker.Formatter` class.
    xrotation, yrotation : float, optional
        The rotation for x and y axis tick labels. Default is ``0``
        for normal axes, :rc:`formatter.timerotation` for time x axes.
    xbounds, ybounds : 2-tuple of float, optional
        The x and y axis data bounds within which to draw the spines.
        For example, the axis range ``(0, 4)`` with bounds ``(1, 4)``
        will prevent the spines from meeting at the origin.
    xtickrange, ytickrange : 2-tuple of float, optional
        The x and y axis data ranges within which major tick marks
        are labelled. For example, the tick range ``(-1, 1)`` with
        axis range ``(-5, 5)`` and a tick interval of 1 will only
        label the ticks marks at -1, 0, and 1. See
        `~proplot.ticker.AutoFormatter` for details.
    xwraprange, ywraprange : 2-tuple of float, optional
        The x and y axis data ranges with which major tick mark values are
        wrapped. For example, the wrap range ``(0, 3)`` causes the values 0
        through 9 to be formatted as 0, 1, 2, 0, 1, 2, 0, 1, 2, 0. See
        `~proplot.ticker.AutoFormatter` for details.
    xmargin, ymargin, margin : float, optional
        The default margin between plotted content and the x and y axis
        spines. Value is proportional to the width, height of the axes.
        Use this if you want whitespace between plotted content
        and the spines, but don't want to explicitly set `xlim` or `ylim`.
    xticklen, yticklen, ticklen : float or str, optional
        Tick lengths for the x and y axis. Units are interpreted by
        `~proplot.utils.units`, with "points" as the numeric unit.
        Default is :rc:`ticklen`. Minor tick lengths are scaled according
        to :rc:`tick.lenratio`. Use `ticklen` and `ticklenratio` to set
        both at once.
    xlinewidth, ylinewidth, linewidth : color-spec, optional
        Line width for the x and y axis spines and major ticks.
        Use `linewidth` to set both at once.
    xcolor, ycolor, color : color-spec, optional
        Color for the x and y axis spines, ticks, tick labels, and
        axis labels. Use `color` to set both at once.
    xgridcolor, ygridcolor, gridcolor : color-spec, optional
        Color for the x and y axis major and minor gridlines.
        Use `gridcolor` to set both at once.
    fixticks : bool, optional
        Whether to always transform the tick locators to a
        `~matplotlib.ticker.FixedLocator` instance. Default is ``False``.
        If your axis ticks are doing weird things (for example, ticks
        drawn outside of the axis spine), try setting this to ``True``.

    Other parameters
    ----------------
    %(axes.format)s
    %(figure.format)s
    %(axes.rc)s

    See also
    --------
    proplot.axes.Axes.format
    proplot.config.Configurator.context

    Note
    ----
    If you plot something with a `datetime64 \
<https://docs.scipy.org/doc/numpy/reference/arrays.datetime.html>`__,
    `pandas.Timestamp`, `pandas.DatetimeIndex`, `datetime.date`, `datetime.time`,
    or `datetime.datetime` array as the x or y axis coordinate, the axis ticks
    and tick labels will be automatically formatted as dates.
    """
    # Separate the rc settings from the remaining keywords, then apply
    # everything inside a temporary rc context so settings are scoped here.
    rc_kw, rc_mode, kwargs = _parse_format(**kwargs)
    with rc.context(rc_kw, mode=rc_mode):
        # No mutable default args
        xlabel_kw = xlabel_kw or {}
        ylabel_kw = ylabel_kw or {}
        xscale_kw = xscale_kw or {}
        yscale_kw = yscale_kw or {}
        xlocator_kw = xlocator_kw or {}
        ylocator_kw = ylocator_kw or {}
        xformatter_kw = xformatter_kw or {}
        yformatter_kw = yformatter_kw or {}
        xminorlocator_kw = xminorlocator_kw or {}
        yminorlocator_kw = yminorlocator_kw or {}

        # Flexible keyword args, declare defaults
        # NOTE: passing keyword pairs to _not_none lets it name the
        # conflicting arguments in its warning message.
        xtickdir = _not_none(xtickdir, rc.find('xtick.direction', context=True))
        ytickdir = _not_none(ytickdir, rc.find('ytick.direction', context=True))
        xformatter = _not_none(xformatter=xformatter, xticklabels=xticklabels)
        yformatter = _not_none(yformatter=yformatter, yticklabels=yticklabels)
        xlocator = _not_none(xlocator=xlocator, xticks=xticks)
        ylocator = _not_none(ylocator=ylocator, yticks=yticks)
        xtickminor = _not_none(xtickminor, rc.find('xtick.minor.visible', context=True))  # noqa: E501
        ytickminor = _not_none(ytickminor, rc.find('ytick.minor.visible', context=True))  # noqa: E501
        xminorlocator = _not_none(xminorlocator=xminorlocator, xminorticks=xminorticks)  # noqa: E501
        yminorlocator = _not_none(yminorlocator=yminorlocator, yminorticks=yminorticks)  # noqa: E501

        # Sensible defaults for spine, tick, tick label, and label locs
        # NOTE: Allow tick labels to be present without ticks! User may
        # want this sometimes! Same goes for spines!
        xspineloc = _not_none(xloc=xloc, xspineloc=xspineloc)
        yspineloc = _not_none(yloc=yloc, yspineloc=yspineloc)
        xtickloc = _not_none(xtickloc, xspineloc, self._get_loc('x', 'xtick'))
        ytickloc = _not_none(ytickloc, yspineloc, self._get_loc('y', 'ytick'))
        xspineloc = _not_none(xspineloc, self._get_loc('x', 'axes.spines'))
        yspineloc = _not_none(yspineloc, self._get_loc('y', 'axes.spines'))
        if xtickloc != 'both':
            xticklabelloc = _not_none(xticklabelloc, xtickloc)
            xlabelloc = _not_none(xlabelloc, xticklabelloc)
            if xlabelloc not in (None, 'bottom', 'top'):  # e.g. "both"
                xlabelloc = 'bottom'
        if ytickloc != 'both':
            yticklabelloc = _not_none(yticklabelloc, ytickloc)
            ylabelloc = _not_none(ylabelloc, yticklabelloc)
            if ylabelloc not in (None, 'left', 'right'):
                ylabelloc = 'left'

        # Loop over axes
        # Pair every x-axis setting with its y-axis counterpart so the
        # identical update sequence runs once per axis.
        for (
            x, label,
            labelpad, ticklabelpad,
            color, gridcolor,
            ticklen, linewidth,
            margin, bounds,
            tickloc, spineloc,
            ticklabelloc, labelloc,
            grid, gridminor,
            tickminor, minorlocator,
            min_, max_, lim,
            reverse, scale,
            locator, tickrange,
            wraprange,
            formatter, tickdir,
            ticklabeldir, rotation,
            label_kw, scale_kw,
            locator_kw, minorlocator_kw,
            formatter_kw
        ) in zip(
            ('x', 'y'), (xlabel, ylabel),
            (xlabelpad, ylabelpad), (xticklabelpad, yticklabelpad),
            (xcolor, ycolor), (xgridcolor, ygridcolor),
            (xticklen, yticklen), (xlinewidth, ylinewidth),
            (xmargin, ymargin), (xbounds, ybounds),
            (xtickloc, ytickloc), (xspineloc, yspineloc),
            (xticklabelloc, yticklabelloc), (xlabelloc, ylabelloc),
            (xgrid, ygrid), (xgridminor, ygridminor),
            (xtickminor, ytickminor), (xminorlocator, yminorlocator),
            (xmin, ymin), (xmax, ymax), (xlim, ylim),
            (xreverse, yreverse), (xscale, yscale),
            (xlocator, ylocator), (xtickrange, ytickrange),
            (xwraprange, ywraprange),
            (xformatter, yformatter), (xtickdir, ytickdir),
            (xticklabeldir, yticklabeldir), (xrotation, yrotation),
            (xlabel_kw, ylabel_kw), (xscale_kw, yscale_kw),
            (xlocator_kw, ylocator_kw),
            (xminorlocator_kw, yminorlocator_kw),
            (xformatter_kw, yformatter_kw),
        ):
            # Axis scale
            # WARNING: This relies on monkey patch of mscale.scale_factory
            # that allows it to accept a custom scale class!
            # WARNING: Changing axis scale also changes default locators
            # and formatters, and restricts possible range of axis limits,
            # so critical to do it first.
            if scale is not None:
                scale = constructor.Scale(scale, **scale_kw)
                getattr(self, 'set_' + x + 'scale')(scale)

            # Axis limits
            self._update_limits(
                x, min_=min_, max_=max_, lim=lim, reverse=reverse
            )
            if margin is not None:
                self.margins(**{x: margin})

            # Axis spine settings
            # NOTE: This sets spine-specific color and linewidth settings. For
            # non-specific settings _update_background is called in Axes.format()
            self._update_spines(x, loc=spineloc, bounds=bounds)
            self._update_background(x, edgecolor=color, linewidth=linewidth)

            # Axis tick settings
            self._update_locs(
                x, tickloc=tickloc, ticklabelloc=ticklabelloc, labelloc=labelloc
            )
            self._update_rotation(
                x, rotation=rotation
            )
            self._update_ticks(
                x, grid=grid, gridminor=gridminor,
                ticklen=ticklen, tickcolor=color, gridcolor=gridcolor,
                tickdir=tickdir, ticklabeldir=ticklabeldir,
                labelpad=ticklabelpad,
            )

            # Axis label settings
            # NOTE: This must come after set_label_position, or ha or va overrides
            # in label_kw are overwritten.
            self._update_labels(
                x, label, color=color, labelpad=labelpad, **label_kw
            )

            # Axis locator
            if minorlocator is True or minorlocator is False:  # must test identity
                warnings._warn_proplot(
                    f'You passed {x}minorticks={minorlocator}, but this '
                    'argument is used to specify tick *locations*. If '
                    'you just want to *toggle* minor ticks on and off, '
                    f'please use {x}tickminor=True or {x}tickminor=False.'
                )
                minorlocator = None
            self._update_locators(
                x, locator, minorlocator, tickminor=tickminor,
                locator_kw=locator_kw, minorlocator_kw=minorlocator_kw,
            )

            # Axis formatter
            self._update_formatter(
                x, formatter, formatter_kw=formatter_kw,
                tickrange=tickrange, wraprange=wraprange,
            )

            # Ensure ticks are within axis bounds
            self._update_bounds(x, fixticks=fixticks)

        # Parent format method
        if aspect is not None:
            self.set_aspect(aspect)
        super().format(rc_kw=rc_kw, rc_mode=rc_mode, **kwargs)
@_snippet_manager
def altx(self, **kwargs):
    """
    %(axes.altx)s
    """
    # NOTE: Works around a matplotlib quirk: an axes created with
    # sharey=self shares the same major/minor Tickers, and Axes.cla()
    # installs an AutoMinorLocator whenever ytick.minor.visible is True,
    # so the twin would clobber an inherited (e.g. log-scale) minor locator:
    # >>> import matplotlib.pyplot as plt
    # ... fig, ax = plt.subplots()
    # ... ax.set_yscale('log')
    # ... ax.twiny()
    # NOTE: The twin is registered as a *child* axes, which simplifies the
    # tight layout algorithm and supports an eventual "edge stack" of
    # arbitrarily many duplicate spines. Children drawn inside the parent
    # take the parent's zorder, so to recover matplotlib's newer-on-top
    # behavior we use the inset-axes zorder of 4 with a transparent patch.
    locator = self.yaxis.get_minor_locator()
    twin = self._make_twin_axes(
        sharey=self, number=False, autoshare=False, projection='proplot_cartesian'
    )
    # Configure the child axes
    twin._altx_parent = self
    twin.yaxis.set_minor_locator(locator)
    twin.yaxis.isDefault_minloc = True
    for name, spine in twin.spines.items():
        spine.set_visible(name == 'top')
    twin.xaxis.tick_top()
    twin.xaxis.set_label_position('top')
    twin.yaxis.set_visible(False)
    twin.patch.set_visible(False)
    twin.grid(False)
    twin.set_zorder(4)
    twin.set_autoscaley_on(self.get_autoscaley_on())
    # Configure the parent axes
    self.spines['top'].set_visible(False)
    self.spines['bottom'].set_visible(True)
    self.xaxis.tick_bottom()
    self.xaxis.set_label_position('bottom')
    # Register the twin as a child (for tight layout) but drop it from the
    # figure's axes stack, since it would otherwise be drawn twice
    self.add_child_axes(twin)
    self.figure._axstack.remove(twin)
    twin.format(**self._parse_alt('x', kwargs))
    return twin
@_snippet_manager
def alty(self, **kwargs):
    """
    %(axes.alty)s
    """
    # See altx() comments
    locator = self.xaxis.get_minor_locator()
    twin = self._make_twin_axes(
        sharex=self, number=False, autoshare=False, projection='proplot_cartesian'
    )
    # Configure the child axes
    twin._alty_parent = self
    twin.xaxis.set_minor_locator(locator)
    twin.xaxis.isDefault_minloc = True
    for name, spine in twin.spines.items():
        spine.set_visible(name == 'right')
    twin.yaxis.tick_right()
    twin.yaxis.set_label_position('right')
    twin.xaxis.set_visible(False)
    twin.patch.set_visible(False)
    twin.grid(False)
    twin.set_zorder(4)
    twin.set_autoscalex_on(self.get_autoscalex_on())
    # Configure the parent axes
    self.spines['right'].set_visible(False)
    self.spines['left'].set_visible(True)
    self.yaxis.tick_left()
    self.yaxis.set_label_position('left')
    # Register the twin as a child (for tight layout) but drop it from the
    # figure's axes stack, since it would otherwise be drawn twice
    self.add_child_axes(twin)
    self.figure._axstack.remove(twin)
    twin.format(**self._parse_alt('y', kwargs))
    return twin
@_snippet_manager
def dualx(self, funcscale, **kwargs):
    """
    %(axes.dualx)s
    """
    # NOTE: Matplotlib 3.1 introduced a built-in "secondary axis" feature,
    # but reusing altx() plus FuncScale is simpler and more robust (see
    # FuncScale) than maintaining a separate _SecondaryAxis class.
    child = self.altx(**kwargs)
    child._dualx_funcscale = funcscale
    child._dualx_scale()
    return child
@_snippet_manager
def dualy(self, funcscale, **kwargs):
    """
    %(axes.dualy)s
    """
    # See dualx comments
    child = self.alty(**kwargs)
    child._dualy_funcscale = funcscale
    child._dualy_scale()
    return child
@_snippet_manager
def twinx(self):
    """
    %(axes.twinx)s
    """
    # Matplotlib-compatible alias: a twin sharing the x axis is alty()
    twin = self.alty()
    return twin
@_snippet_manager
def twiny(self):
    """
    %(axes.twiny)s
    """
    # Matplotlib-compatible alias: a twin sharing the y axis is altx()
    twin = self.altx()
    return twin
def draw(self, renderer=None, *args, **kwargs):
    # Apply pending dual-axis scales, axis sharing, and x tick rotation
    # before delegating to the parent draw.
    # NOTE: In *principle* axis sharing application step goes here. But should
    # already be complete because auto_layout() (called by figure pre-processor)
    # has to run it before aligning labels. So this is harmless no-op.
    self._dualx_scale()
    self._dualy_scale()
    self._apply_axis_sharing()
    self._update_rotation('x')
    # Refresh the zoom indicator when these axes are an inset "zoom" view
    if self._inset_parent is not None and self._inset_zoom:
        self.indicate_inset_zoom()
    super().draw(renderer, *args, **kwargs)
def get_tightbbox(self, renderer, *args, **kwargs):
    # Perform the same post-processing steps as draw() so the reported
    # bounding box reflects the final axis state.
    self._dualx_scale()
    self._dualy_scale()
    self._apply_axis_sharing()
    self._update_rotation('x')
    if self._inset_parent is not None and self._inset_zoom:
        self.indicate_inset_zoom()
    return super().get_tightbbox(renderer, *args, **kwargs)
|
__author__ = 'Justin'

import os
import geojson
import networkx as nx
import DefaultRoadSpeed as DRS
from geopy.distance import vincenty as latlondist
# NOTE(review): geopy removed `vincenty` in 2.0 (replaced by `geodesic`);
# this script assumes a pre-2.0 geopy -- confirm the pinned version.

# DESCRIPTION:
# This script converts geojson road geometry files to a tractable networkx object (.gexf)
# Geojson map data such as node positions, edge connections, edge types, max speeds, and edge names are extracted
# The Geojson network is pruned to remove redundant network nodes.
#
# This reduced load does not include residential sections as part of the core network.
#
# INPUT:
# converted Open Street Map extract (.geojson format)
#
# OUTPUT:
# networkx object file (.gexf)
#
# NOTE(review): the graph calls below (add_edge with a positional attribute
# dict, neighbors()/in_edges() returning lists) follow the networkx 1.x
# API -- verify before running under networkx >= 2.0.

# Load geoJSON strings from file
cwd = os.getcwd()
filename = 'cstat_map.geojson'
string = os.path.abspath(os.path.join(cwd, '..', 'Project Data','Geojson',filename))
with open(string, 'r') as myfile:
    geoJSONstring=myfile.read().replace('\n', '')

# Load geoJSON strings into geojson objects
roaddata = geojson.loads(geoJSONstring)

# Extract nodes and edges
meterconv = 1609.344 # miles to meters
FullGraph = nx.DiGraph()  # directed: two-way roads get one edge per direction
restrictedTypes = ['service','track','rail','footway','path','steps',
                   'raceway','unknown','pedestrian','construction','road']
for feature in roaddata.features:
    # Check if feature is a road
    if(feature.geometry['type'] == "LineString"):
        # get road edge properties
        edgename = feature.properties.get("name",'unknown')
        edgetype = feature.properties.get("highway",'unknown')
        oneway = feature.properties.get("oneway",'unknown')
        speedstr = feature.properties.get("maxspeed",'unknown')
        # obtain default speed (mph) if unknown
        if(speedstr == 'unknown'):
            # NOTE(review): this passes the literal 'unknown' rather than the
            # road type -- DRS.getdefaultspeed(edgetype) looks intended, so
            # every unknown-speed road gets the same default; confirm against
            # DefaultRoadSpeed before changing.
            speed = DRS.getdefaultspeed(speedstr)
        else:
            speednums = [int(s) for s in speedstr.split() if s.isdigit()]
            speed = speednums[0]
        # check for restricted road types or odd cases (road is restricted but important)
        if((edgetype not in restrictedTypes) or (speed >= 35 )):
            # check for oneway vs. twoway streets (twoway streets need two directed edges)
            if(oneway != 'yes'):
                #add directed edges and associated nodes for feature element
                # (reverse direction; nodes are keyed by their stringified coordinate)
                for latlon in feature.geometry.coordinates:
                    FullGraph.add_node(str(latlon),lon =latlon[0] ,lat =latlon[1],usage = 0)
                for counter in range(len(feature.geometry.coordinates)-1,0,-1):
                    #find distance between node pair
                    distance = meterconv*latlondist(feature.geometry.coordinates[counter],feature.geometry.coordinates[counter-1]).miles
                    #add edge with edge properties
                    # basetime: free-flow travel time in seconds (miles / mph * 3600)
                    basetime = distance/meterconv/speed*3600.0
                    edgedict = {'weight':0,'type':edgetype,'distance':distance,'basetime':basetime,'name':edgename}
                    FullGraph.add_edge(str(feature.geometry.coordinates[counter]),str(feature.geometry.coordinates[counter-1]),edgedict)
            #add directed edges and associated nodes for feature element (opposite direction of edges within if statement)
            for latlon in feature.geometry.coordinates:
                FullGraph.add_node(str(latlon),lon =latlon[0] ,lat =latlon[1],usage = 0)
            for counter in range(0,len(feature.geometry.coordinates)-1):
                #find distance between node pair
                distance = meterconv*latlondist(feature.geometry.coordinates[counter],feature.geometry.coordinates[counter+1]).miles
                #add edge with distance weight
                basetime = distance/meterconv/speed*3600.0
                edgedict = {'weight':0,'type':edgetype,'distance':distance,'basetime':basetime,'name':edgename}
                FullGraph.add_edge(str(feature.geometry.coordinates[counter]),str(feature.geometry.coordinates[counter+1]),edgedict)
print('Number of FullGraph Edges',len(FullGraph.edges()))

# Remove Unnecessary nodes (exactly two neighbors or one neighbor and two edges)
# NOTE(review): `Intersections` is an *alias* of FullGraph, not a copy --
# the pruning below also mutates FullGraph. Iterating nodes() while
# removing nodes relies on networkx 1.x returning a list snapshot.
Intersections = FullGraph
count = 0
print('Number of FullGraph Nodes',len(FullGraph.nodes()))
for node in Intersections.nodes():
    neighbors = Intersections.neighbors(node)
    if(len(neighbors)==2):
        # Pass-through node on a two-way road: splice A<->node<->B into A<->B
        A = neighbors[0]
        B = neighbors[1]
        Aneighbors = Intersections.neighbors(A)
        Bneighbors = Intersections.neighbors(B)
        if((node in Aneighbors) and (node in Bneighbors)):
            basetimeA_node = Intersections[A][node]['basetime']
            basetimenode_A = Intersections[node][A]['basetime']
            basetimeB_node = Intersections[B][node]['basetime']
            basetimenode_B = Intersections[node][B]['basetime']
            distanceA_node = Intersections[A][node]['distance']
            distancenode_A = Intersections[node][A]['distance']
            distanceB_node = Intersections[B][node]['distance']
            distancenode_B = Intersections[node][B]['distance']
            # NOTE(review): despite the names, edgedataAB/edgedataBA are the
            # (A,node)/(B,node) attribute dicts reused as templates for the
            # spliced edges -- verify the intent.
            edgedataAB = Intersections.get_edge_data(A,node)
            edgedataBA = Intersections.get_edge_data(B,node)
            edgedataAB['basetime'] = basetimeA_node+basetimenode_B
            edgedataBA['basetime'] = basetimeB_node+basetimenode_A
            edgedataAB['distance'] = distanceA_node+distancenode_B
            edgedataBA['distance'] = distanceB_node+distancenode_A
            Intersections.add_edge(A,B,attr_dict=edgedataAB)
            Intersections.add_edge(B,A,attr_dict=edgedataBA)
            Intersections.remove_node(node)
        else:
            count += 1
    elif(len(neighbors)== 1 and Intersections.degree(node)==2):
        # Pass-through node on a one-way road: splice start->node->end
        end = neighbors[0]
        start = Intersections.in_edges(node)[0][0]
        basetimestart = Intersections[start][node]['basetime']
        basetimeend = Intersections[node][end]['basetime']
        distancestart = Intersections[start][node]['distance']
        distanceend = Intersections[node][end]['distance']
        edgedata = Intersections.get_edge_data(start,node)
        edgedata['basetime'] = basetimestart+basetimeend
        edgedata['distance'] = distancestart+distanceend
        Intersections.add_edge(start,end,edgedata)
        Intersections.remove_node(node)

# Count the residential edges remaining in the reduced graph
rescount = 0
for edge in Intersections.edges():
    edgedict = Intersections.get_edge_data(edge[0],edge[1])
    if(edgedict['type'] == 'residential'):
        rescount += 1
print('Number of ReducedGraph Edges',len(Intersections.edges()))
print('Number of ReducedGraph Residential Edges',rescount)

# Save Network Graph
cwd = os.getcwd()
filename = 'OSMNetwork.gexf'
string = os.path.abspath(os.path.join(cwd, '..', 'Project Data','Networks',filename))
nx.write_gexf(Intersections,string)

# Export Intersections Nodes (geoJSON format)
Features =[]
lons = nx.get_node_attributes(Intersections,'lon')
lats = nx.get_node_attributes(Intersections,'lat')
for point in Intersections.nodes():
    Features.append(geojson.Feature(geometry=geojson.Point((lons[point], lats[point]))))
Collection = geojson.FeatureCollection(Features)
dump = geojson.dumps(Collection)
filename = 'OSMIntersections.txt'
string = os.path.abspath(os.path.join(cwd, '..', 'Project Data','Display',filename))
text_file = open(string, "w")
text_file.write(dump)

# Export Edges (geoJSON format)
Features = []
for edge in Intersections.edges():
    pointa = (lons[edge[0]], lats[edge[0]])
    pointb = (lons[edge[1]], lats[edge[1]])
    Features.append(geojson.Feature(geometry=geojson.LineString([pointa,pointb])))
Collection = geojson.FeatureCollection(Features)
dump = geojson.dumps(Collection)
filename = 'OSMEdges.txt'
string = os.path.abspath(os.path.join(cwd, '..', 'Project Data','Display',filename))
text_file = open(string, "w")
text_file.write(dump)
text_file.close() |
#!/usr/bin/env python
""" Do a couple tasks needed for Debosscher paper tables (20101118)
"""
import os, sys
import pprint
import MySQLdb
import cPickle
import gzip
import analysis_deboss_tcp_source_compare
import glob
sys.path.append(os.path.abspath(os.environ.get("TCP_DIR") + \
'Algorithms'))
import simbad_id_lookup
### This is a list of science class associations which Josh discerned when looking at double classified sources:
# Keys are (presumably) HIP source numbers -- TODO confirm against the lookup
# code. Each entry records the primary TUTOR id, the adopted TCP science
# class, and (in 'comment') the alternate tutorid that caused the double
# classification. Python 2 long literals (148038L).
josh_classifs = {\
    5267:{'tutorid':148038L, 'tcp_class':'LBV', 'comment':'2nd tutorid=161329L'},
    19893:{'tutorid':148103L, 'tcp_class':'GDOR', 'comment':'2nd tutorid=148841L'},
    23428:{'tutorid':148137L, 'tcp_class':'LBV', 'comment':'2nd tutorid=161330L'},
    26403:{'tutorid':148174L, 'tcp_class':'HAEBE','comment':'2nd tutorid=161337L'},
    54413:{'tutorid':148375L, 'tcp_class':'HAEBE','comment':'2nd tutorid=161335L'},
    86624:{'tutorid':148583L, 'tcp_class':'LBV', 'comment':'2nd tutorid=161328L'},
    89956:{'tutorid':148614L, 'tcp_class':'LBV', 'comment':'2nd tutorid=161327L'},
    89963:{'tutorid':148615L, 'tcp_class':'LBV', 'comment':'2nd tutorid=161326L'}}

# 20101202: dstarr finds that some of the new-sources (not found in project=122 tutor) have double classes
# and a specific dict is needed in this case to disambiguate the sources for these classes:
# NOTE: class=='' means we skip this source
#dstarr_newsrc_classes = {30326:'',#'MIRA',# simbad found 'mira', debossfile list.dat: 'RVTAU MIRA'
#                         53461:''}#'BE'} #simbad finds Be*, debossfile list.dat: 'WR LBV'
dstarr_newsrc_classes = {26304:'CP', #HD 37151 #joey 20100211 email of 11 srcs sent to Josh
                         27400:'RRAB',
                         30326:'RVTAU',
                         33165:'WR', # HD 50896
                         53461:'WR', # HD 94910
                         54283:'PVSG', # HD 96548
                         75377:'PVSG'}#HD 136488
# SIMBAD variable-star object types, keyed by (presumably) HIP number --
# TODO confirm the key meaning against the SIMBAD lookup code. Translated to
# Debosscher class labels via `simbadclass_to_debclass` below.
simbad_classes = { \
    8:'Mira',
    320:'RRLyr',
    781:'Mira',
    1067:'PulsV*bCep',
    1162:'deltaCep',
    1222:'RRLyr',
    4541:'RRLyr',
    6115:'RRLyr',
    6325:'semi-regV*',
    6539:'PulsV*delSct',
    7149:'RRLyr',
    7398:'RRLyr',
    7588:'Be*',
    8210:'RotV*alf2CVn',
    9306:'Mira',
    9361:'RRLyr',
    11390:'PulsV*delSct',
    12113:'PulsV*delSct',
    12193:'Mira',
    12235:'EB*Algol',
    12387:'PulsV*bCep',
    12817:'deltaCep',
    13064:'semi-regV*',
    14856:'RRLyr',
    16126:'Mira',
    16826:'Be*',
    19762:'**',
    19978:'deltaCep',
    20045:'Mira',
    20922:'Be*',
    22024:'Be*',
    22127:'Mira',
    22256:'Mira',
    23165:'Mira',
    23602:'Rapid_Irreg_V*',
    23733:'RotV*alf2CVn',
    23972:'Be*',
    24105:'deltaCep',
    24126:'Mira',
    24281:'deltaCep',
    24471:'RRLyr',
    24500:'deltaCep',
    25281:'**',
    26064:'Be*',
    26594:'Be*',
    28945:'deltaCep',
    29441:'Mira',
    29655:'**',
    29771:'Be*',
    30326:'Mira',
    31137:'**',
    31400:'RRLyr',
    32292:'Be*',
    32516:'deltaCep',
    32675:'PulsV*WVir',
    32759:'Be*',
    34360:'**',
    34743:'RRLyr',
    34895:'deltaCep',
    35037:'Be*',
    35281:'RRLyr',
    35487:'EB*Algol',
    35795:'Be*',
    35951:'Be*',
    36088:'deltaCep',
    36394:'Mira',
    36500:'PulsV*bCep',
    36547:'semi-regV*',
    37207:'deltaCep',
    37440:'EB*Algol',
    37459:'Mira',
    38241:'deltaCep',
    38772:'Mira',
    39144:'deltaCep',
    39172:'Be*',
    39666:'deltaCep',
    40285:'EB*betLyr',
    42257:'deltaCep',
    42794:'EB*Algol',
    42799:'PulsV*bCep',
    43589:'Star',
    43778:'WR*',
    44093:'V*',
    44213:'Be*',
    45091:'V*',
    47522:'Be*',
    47886:'Mira',
    49751:'Mira',
    49934:'Be*',
    50676:'**',
    50697:'Mira',
    51576:'Be*',
    51894:'deltaCep',
    52308:'WR*',
    52742:'Be*',
    53461:'Be*',
    54066:'deltaCep',
    54891:'deltaCep',
    55726:'deltaCep',
    55825:'RRLyr',
    56327:'V*',
    56350:'RRLyr',
    56379:'Be*',
    56409:'RRLyr',
    56898:'PulsV*WVir',
    57009:'Mira',
    57498:'V*',
    57625:'RRLyr',
    57669:'Be*',
    58002:'V*',
    58520:'PulsV*delSct',
    59093:'PulsV*delSct',
    59173:'Be*',
    59196:'Be*',
    59232:'Be*',
    59995:'RRLyr',
    60189:'Be*',
    61009:'Mira',
    61029:'RRLyr',
    61281:'Be*',
    61286:'Mira',
    61809:'RRLyr',
    62956:'RotV*alf2CVn',
    63054:'RRLyr',
    64844:'PulsV*delSct',
    65063:'RRLyr',
    65445:'RRLyr',
    65531:'PulsV*WVir',
    65547:'RRLyr',
    66100:'Mira',
    66189:'deltaCep',
    66657:'PulsV*bCep',
    67359:'Mira',
    67472:'Be*',
    67653:'RRLyr',
    69346:'Mira',
    69759:'RRLyr',
    70590:'Mira',
    71352:'Be*',
    71995:'V*',
    72300:'Mira',
    72721:'Cepheid',
    74556:'RRLyr',
    75141:'PulsV*bCep',
    75170:'Mira',
    76013:'Be*',
    77663:'RRLyr',
    77913:'deltaCep',
    78207:'Be*',
    78317:'Orion_V*',
    78417:'RRLyr',
    78539:'RRLyr',
    78771:'deltaCep',
    80569:'Be*',
    83304:'Mira',
    83323:'Be*',
    83582:'Mira',
    85079:'Be*',
    85792:'Be*',
    86414:'PulsV*bCep',
    87314:'EB*Algol',
    89164:'PulsV*bCep',
    89290:'V*',
    89596:'deltaCep',
    90474:'Mira',
    91389:'**',
    92013:'deltaCep',
    92491:'deltaCep',
    92609:'Be*',
    92862:'semi-regV*',
    93476:'RRLyr',
    93990:'deltaCep',
    94289:'WR*',
    94706:'Mira',
    95032:'Mira',
    95118:'deltaCep',
    95929:'Be*',
    96031:'Mira',
    96458:'deltaCep',
    96580:'Mira',
    98212:'deltaCep',
    98217:'deltaCep',
    98265:'RRLyr',
    98546:'PulsV*WVir',
    98675:'Cepheid',
    99303:'Be*',
    100048:'Mira',
    100214:'WR*',
    102082:'Mira',
    102088:'WR*',
    103803:'V*',
    104015:'Mira',
    104986:'PulsV*WVir',
    105026:'RRLyr',
    105138:'Be*',
    106649:'RRLyr',
    106723:'Be*',
    107004:'**',
    107935:'RRLyr',
    108975:'Be*',
    110451:'Mira',
    110697:'Mira',
    110836:'WR*',
    111278:'gammaDor',
    111633:'WR*',
    112784:'Mira',
    113327:'Be*',
    113561:'semi-regV*',
    113652:'Mira',
    114114:'Mira',
    114995:'Orion_V*',
    116958:'RRLyr',
    117863:'semi-regV*'}
# Translation from SIMBAD object types to Debosscher science-class labels.
# An empty string means the SIMBAD type has no Debosscher counterpart
# (presumably those sources are skipped -- verify against the caller).
simbadclass_to_debclass = { \
    'Be*':'BE',
    'V*':'',
    'Mira':'MIRA',
    'deltaCep':'CLCEP',
    'RRLyr':'RRAB',
    'RotV*alf2CVn':'',
    'PulsV*WVir':'PTCEP',
    'PulsV*bCep':'BCEP',
    'Rapid_Irreg_V*':'',
    '**':'',
    'EB*Algol':'EB',
    'Star':'',
    'WR*':'WR',
    'semi-regV*':'SR',
    'PulsV*delSct':'DSCUT',
    'gammaDor':'GDOR',
    'Cepheid':'CLCEP',
    'Orion_V*':'TTAU',
    'EB*betLyr':'EB'}
### This was retrieved from previous SIMBAD database queries:
# Maps HD catalog identifiers to the corresponding HIP identifiers.
# NOTE: the original literal repeated 'HD 160529', 'HD 269006', and
# 'HD 35715' with identical values; a dict literal silently keeps only the
# last occurrence, so the duplicates were removed with no behavior change.
simbad_hd_hip_dict = { \
    'HD 2724':'HIP 2388',
    'HD 2842':'HIP 2510',
    'HD 4919':'HIP 3949',
    'HD 21071':'HIP 15988',
    'HD 24587':'HIP 18216',
    'HD 26326':'HIP 19398',
    'HD 28475':'HIP 20963',
    'HD 269006':'HIP 23428',
    'HD 34282':'HIP 24552',
    'HD 34798':'HIP 24825',
    'HD 34797':'HIP 24827',
    'HD 35715':'HIP 25473',
    'HD 37151':'HIP 26304',
    'HD 46328':'HIP 31125',
    'HD 50896':'HIP 33165',
    'HD 52918':'HIP 33971',
    'HD 59693':'HIP 36521',
    'HD 61068':'HIP 37036',
    'HD 63949':'HIP 38159',
    'HD 64365':'HIP 38370',
    'HD 64722':'HIP 38438',
    'HD 69715':'HIP 40791',
    'HD 77581':'HIP 44368',
    'HD 78616':'HIP 44790',
    'HD 81009':'HIP 45999',
    'HD 88824':'HIP 50070',
    'HD 90177':'HIP 50843',
    'HD 92207':'HIP 52004',
    'HD 92287':'HIP 52043',
    'HD 96008':'HIP 54060',
    'HD 96548':'HIP 54283',
    'HD 102567':'HIP 57569',
    'HD 107447':'HIP 60259',
    'HD 107805':'HIP 60455',
    'HD 108100':'HIP 60571',
    'HD 112044':'HIP 62986',
    'HD 112481':'HIP 63250',
    'HD 113904':'HIP 64094',
    'HD 123515':'HIP 69174',
    'HD 126341':'HIP 70574',
    'HD 136488':'HIP 75377',
    'HD 138003':'HIP 75641',
    'HD 142527':'HIP 78092',
    'HD 147010':'HIP 80024',
    'HD 147985':'HIP 80563',
    'HD 156385':'HIP 84757',
    'HD 160529':'HIP 86624',
    'HD 163296':'HIP 87819',
    'HD 164975':'HIP 88567',
    'HD 165763':'HIP 88856',
    'HD 170756':'HIP 90697',
    'HD 177863':'HIP 93887',
    'HD 179588':'HIP 94377',
    'HD 181558':'HIP 95159',
    'HD 206540':'HIP 107173',
    'HD 207223':'HIP 107558',
    'HD 210111':'HIP 109306',
    'HD 214441':'HIP 111833',
    'HD 223640':'HIP 117629',
    'HD 27290':'HIP 19893'}
class Deb_Paper_Analysis(analysis_deboss_tcp_source_compare.Analysis_Deboss_TCP_Source_Compare):
    """Bookkeeping/summary driver for the Debosscher-paper source comparison.

    Since this inherits Analysis_Deboss_TCP_Source_Compare() class, I'm assuming that
    __init__() is used and is used to load database connections (the code below relies
    on self.tcp_cursor, self.tutor_cursor, and self.pars being set up there).
    """
    def main(self):
        """Run the full analysis: build (or load a cached) Debosscher<->DotAstro
        lookup dict, attach per-source epoch counts from the TCP RDB, resolve
        Debosscher classes, then print per-class / per-survey summary tables.
        """
        # The Debosscher<->DotAstro lookup is expensive (many RDB queries), so it is
        # cached as a gzipped pickle and only regenerated when the cache is missing.
        pkl_fpath = '/home/pteluser/scratch/debosscher_paper_tools.pkl.gz'
        if not os.path.exists(pkl_fpath):
            debos_tcp_dict = self.get_deboss_dotastro_source_lookup__modified()
            fp = gzip.open(pkl_fpath,'wb')
            cPickle.dump(debos_tcp_dict, fp, 1) # ,1) means a binary pkl is used.
            fp.close()
        else:
            fp = gzip.open(pkl_fpath,'rb')
            debos_tcp_dict = cPickle.load(fp)
            fp.close()
        ### NOTE: it seems there are no sources with more than 1:1 srcid matchups (so don't need to do the following):
        #for tcp_srcid, deb_srcid_list in debos_tcp_dict['dotastro_srcid_to_debos_srcname'].iteritems():
        #    if len(deb_srcid_list) > 1:
        #        print tcp_srcid, deb_srcid_list
        ### Retrieve the n_epochs for each srcid from TCP RDB
        ### NOTE: it seems that all of the sources in debos_tcp_dict['dotastro_srcid_to_debos_srcname']
        ####      have more than 0 epochs in TCP
        debos_tcp_dict['srcid_nepochs'] = {}
        for tcp_srcid, deb_srcid_list in debos_tcp_dict['dotastro_srcid_to_debos_srcname'].iteritems():
            # TCP src_ids are the TUTOR ids offset by 100000000.
            select_str = "select nobjs from srcid_lookup where src_id = %d" % (tcp_srcid + 100000000)
            self.tcp_cursor.execute(select_str)
            results = self.tcp_cursor.fetchall()
            if len(results) == 0:
                print "NONE!!!:", tcp_srcid
            elif results[0][0] < 1:
                print "NO epochs in TCP", tcp_srcid, results[0], deb_srcid_list, debos_tcp_dict['dotastro_srcid_to_attribfiles'][tcp_srcid]
            else:
                debos_tcp_dict['srcid_nepochs'][tcp_srcid] = results[0][0]
        self.determine_deboss_classes_for_srcids(debos_tcp_dict)
        #20110102disable# final_assocations_dict = self.determine_which_joey_downloaded_hip_is_not_in_tutor(debos_tcp_dict)
        # Post-20110102 path: re-confirm Joey's IDs_deb.dat against TUTOR and
        # summarize those sources instead of the original debos_tcp_dict.
        new_debos_tcp_dict = self.confirm_idsdebdat_in_tutor()
        #self.count_sources(debos_tcp_dict)
        self.count_sources(new_debos_tcp_dict)
        if 0:
            # Disabled legacy path: regenerate and pickle the final association dict.
            final_assocations_dict = self.determine_which_joey_downloaded_hip_is_not_in_tutor(debos_tcp_dict)
            # # # TODO: need to add ogle accociations to final_assocations_dict
            #     -> ensure that classifications are correct.
            pkl_fpath = '/home/pteluser/scratch/debosscher_paper_tools__assocdict.pkl'
            if os.path.exists(pkl_fpath):
                os.system('rm ' + pkl_fpath)
            fp = open(pkl_fpath, 'w')
            cPickle.dump(final_assocations_dict, fp)
            fp.close()
            import pdb; pdb.set_trace()
        ### TODO: count sources we have per deboss class, compare with pars['deb_src_counts']
        ### TODO: use pars['debosscher_class_lookup'] and .... ms.text table to determine how many classes debosscher orig had
        ### TODO: count how many sources for each survey
        print
def determine_which_joey_downloaded_hip_is_not_in_tutor(self, debos_tcp_dict):
"""
It seems "HD xxxx" sources also may have HIPPARCOS numbers not listed in Debos datasets
"""
all_hipids_dict = {}
joey_hip_xmls_dirpath = '/home/pteluser/scratch/debos_newHIPP_data/xmls'
files = glob.glob("%s/*xml" % (joey_hip_xmls_dirpath))
joey_hip_ids = []
for fpath in files:
fname = fpath[fpath.rfind('/')+1:]
hip_str = fname[fname.find('HIP') + 3 :fname.rfind('.')]
joey_hip_ids.append(int(hip_str))
all_hipids_dict[int(hip_str)] = {'xml':fname}
# I actually want to do this in reverse: look for all HIP
tcpid_to_hipid = {}
for tcp_id, sublist in debos_tcp_dict['dotastro_srcid_to_debos_srcname'].iteritems():
hipid_str_list = sublist
if not debos_tcp_dict['tcpsrcid_surveys'].has_key(tcp_id):
hipid_str_list = []
for elem in sublist:
html_str = simbad_id_lookup.query_html(src_name=elem)
hip_ids = simbad_id_lookup.parse_html_for_ids(html_str, instr_identifier='HIP')
print "NOT in debos_tcp_dict['tcpsrcid_surveys'], adding:", hip_ids, 'tcp_id:', tcp_id, 'deb_id:', sublist
hipid_str_list.extend(hip_ids)
#import pdb; pdb.set_trace()
# TODO: want to store that this is a deboss source which is HIP, but no tcp_id
if len(hipid_str_list) == 0:
print '!!!', tcp_id, sublist
elif debos_tcp_dict['tcpsrcid_surveys'][tcp_id] != 'hip':
continue
assert(len(hipid_str_list) == 1)
if 'HD' in hipid_str_list[0]:
if simbad_hd_hip_dict.has_key(hipid_str_list[0]):
hipid_str_list = [simbad_hd_hip_dict[hipid_str_list[0]]]
else:
html_str = simbad_id_lookup.query_html(src_name=hipid_str_list[0])
hip_ids = simbad_id_lookup.parse_html_for_ids(html_str, instr_identifier='HIP')
print "HD with Simbad HIP: ", hip_ids, 'tcp_id:', tcp_id, 'deb_id:', sublist
hipid_str_list = hip_ids
### So now we can store hip_id:tcp_id
hipid_str = hipid_str_list[0][hipid_str_list[0].find('HIP ') + 4:]
tcpid_to_hipid[tcp_id] = int(hipid_str)
if not all_hipids_dict.has_key(int(hipid_str)):
#all_hipids_dict[int(hipid_str)] = ['NONE', tcp_id, sublist[0], debos_tcp_dict['tcpsrcid_classes'][tcp_id]]
all_hipids_dict[int(hipid_str)] = {'xml':'NO XML', 'tutorid':tcp_id, 'tutor_name':sublist[0], 'tcp_class':debos_tcp_dict['tcpsrcid_classes'][tcp_id]}
else:
#all_hipids_dict[int(hipid_str)].extend([tcp_id, sublist[0], debos_tcp_dict['tcpsrcid_classes'][tcp_id]])
if all_hipids_dict[int(hipid_str)].has_key('tutorid'):
#all_hipids_dict[int(hipid_str)].update({'extra':[tcp_id, sublist[0], debos_tcp_dict['tcpsrcid_classes'][tcp_id]], 'comment':"extra tutor srcid"})
all_hipids_dict[int(hipid_str)].update({'comment':"extra tutor srcid"})
else:
all_hipids_dict[int(hipid_str)].update({'tutorid':tcp_id, 'tutor_name':sublist[0], 'tcp_class':debos_tcp_dict['tcpsrcid_classes'][tcp_id]})
##### Some summary metrics:
#import pprint
#pprint.pprint(all_hipids_dict)
hids_sort = all_hipids_dict.keys()
hids_sort.sort()
#for hid in hids_sort:
# hlist = all_hipids_dict[hid]
# print hid, hlist
joey_hipids_not_in_tutor = []
for hip_id in joey_hip_ids:
if hip_id not in tcpid_to_hipid.values():
joey_hipids_not_in_tutor.append(hip_id)
print
print "num of hip_ids that joey found but which are not in TUTOR:%d" % (len(joey_hipids_not_in_tutor))
print "There are 845 HIP sources with TCP_ids, 1044 Joey HIP sources"
print "this means there should be 199 HIP sources which joey found which are not in TCP"
print "There are supposedly 272 joey sources which do not have TCP matches, using above compare algos"
print
##### Summary of final associations for each joey-xml file:
hids_sort = all_hipids_dict.keys()
hids_sort.sort()
final_hipids_dict = {}
(debos_classifs, position_dict) = self.get_deboss_listdat_classifs()
return_dict = {}
### Josh says remove one multiclass source
#remove_hipids = [104029, 25473, 34042, 36750, 39009, 57812, 58907]
remove_hipids = [104029, 25473, 34042, 36750, 39009, 57812, 58907, 26304, 33165, 34042, 36750, 39009, 53461, 57812, 58907, 75377, 104029]
for hipid in remove_hipids:
try:
hids_sort.remove(hipid)
except:
pass
for hid in hids_sort:
# # # # # # # # #
#if hid == 26403:
# import pdb; pdb.set_trace()
hdict = all_hipids_dict[hid]
if josh_classifs.has_key(hid):
hdict.update(josh_classifs[hid])
if hdict.has_key('tcp_class'):
if type(hdict['tcp_class']) == type([]):
#assert(len(hdict['tcp_class']) == 1)
if len(hdict['tcp_class']) == 2:
print "DOUBLE CLASS:", hdict
hdict['tcp_class'] = [hdict['tcp_class'][0]]
elif len(hdict['tcp_class']) == 0:
print "$$$ NO CLASS$$$:", hdict
else:
hdict['tcp_class'] = [hdict['tcp_class']]
else:
if not hid in simbad_classes.keys():
a_str = simbad_id_lookup.query_votable(src_name = "HIP %d" % (hid))
sci_class = simbad_id_lookup.parse_class(a_str)
else:
sci_class = simbad_classes[hid]
deb_sci_class = simbadclass_to_debclass[sci_class]
### ok, now check whether this corresponds to debos_classifs
test_srcname = "HIP %d" % (hid)
if dstarr_newsrc_classes.has_key(hid):
deb_listdat_class = dstarr_newsrc_classes[hid]
if deb_listdat_class == '':
continue
if position_dict.has_key(test_srcname):
radec_dict = position_dict[test_srcname]
else:
html_str = simbad_id_lookup.query_html(src_name=test_srcname)
hd_ids = simbad_id_lookup.parse_html_for_ids(html_str, instr_identifier='HD')
if debos_classifs.has_key(hd_ids[0]):
radec_dict = position_dict[hd_ids[0]]
else:
print hid, "4_NO_MATCH", test_srcname, deb_src_class, hd_ids
elif debos_classifs.has_key(test_srcname):
#print hid, '1_deb:', debos_classifs[test_srcname], 'mine:', deb_sci_class
deb_listdat_class = debos_classifs[test_srcname]
radec_dict = position_dict[test_srcname]
elif debos_classifs.has_key(hdict.get('tutor_name','xxx')):
#print hid, '2_deb:', debos_classifs[hdict['tutor_name']], 'mine:', deb_sci_class
deb_listdat_class = debos_classifs[hdict['tutor_name']]
radec_dict = position_dict[hdict['tutor_name']]
else:
html_str = simbad_id_lookup.query_html(src_name=test_srcname)
hd_ids = simbad_id_lookup.parse_html_for_ids(html_str, instr_identifier='HD')
if debos_classifs.has_key(hd_ids[0]):
#print hid, '3_deb:', debos_classifs[hd_ids[0]], 'mine:', deb_sci_class
deb_listdat_class = debos_classifs[hd_ids[0]]
radec_dict = position_dict[hd_ids[0]]
else:
print hid, "4_NO_MATCH", test_srcname, deb_src_class, hd_ids
if deb_listdat_class != deb_sci_class:
#print '!!!mismatch classes: ', hid, 'listdat:', deb_listdat_class, 'mine:', deb_sci_class
deb_sci_class = deb_listdat_class
hdict['tcp_class'] = deb_sci_class
hdict['comment'] = "%s class from SIMBAD=%s" % (hdict.get('comment',''), sci_class)
hdict.update(radec_dict)
#print hid, sci_class
print "HIP=%d\txml='%s'\tclass='%s'\tTutorID=%s\tcomment='%s'\tra=%4.4f\tdec=%4.4f" % ( \
hid,
hdict['xml'],
hdict['tcp_class'],
hdict.get('tutorid',"''"),
hdict.get('comment',''),
hdict.get('ra',0.0),
hdict.get('dec',0.0))
return_dict[hid] = hdict
return return_dict
#unmatched_deb_ids = []
#for sublist in debos_tcp_dict['dotastro_srcid_to_debos_srcname'].values():
# unmatched_deb_ids.extend(sublist)
"""
debid_joeyid_matches = {}
for jhip_id in joey_hip_ids:
# KLUDGEY due to inconsistant hip file names/strings:
for debid in unmatched_deb_ids:
if str(jhip_id) in debid:
debid_joeyid_matches[]
"""
def get_deboss_listdat_classifs(self):
    """ Joey used this file to get all classifications for all 1044 HIP sources.

    Parses the fixed-width list.dat: columns [0:22]=source name,
    [23:36]=RA (h m s), [37:50]=Dec (d m s), [50:63]=class name.

    Returns (classif_dict, position_dict) where
      classif_dict:  {source_name: class_name}
      position_dict: {source_name: {'ra': deg, 'dec': deg}}
    Only rows whose name contains 'HIP' or 'HD' are kept.
    """
    deb_all_class_fpath = '/home/pteluser/analysis/debos_newHIPP_data/list.dat'
    # # #TODO: need to make sure these above classes correlate
    classif_dict = {}
    position_dict = {}
    # FIX: use a context manager so the file handle is always closed.
    with open(deb_all_class_fpath) as fp:
        lines = fp.readlines()
    for line in lines:
        source_name = line[:22].strip()
        class_name = line[50:63].strip()
        # RA given as "h m s"; convert to decimal degrees (15 deg per hour).
        ra_str = line[23:36]
        ra_tup = ra_str.split()
        ra = 15 * (float(ra_tup[0]) + float(ra_tup[1])/60. + float(ra_tup[2])/3600.)
        # Dec given as "d m s"; carry the sign of the degrees field.
        dec_str = line[37:50]
        dec_tup = dec_str.split()
        dec_sign = 1.
        if float(dec_tup[0]) < 0:
            dec_sign = -1.
        dec = dec_sign * (abs(float(dec_tup[0])) + float(dec_tup[1])/60. + float(dec_tup[2])/3600.)
        if (('HIP' in source_name) or ('HD' in source_name)):
            classif_dict[source_name] = class_name
            position_dict[source_name] = {'ra':ra, 'dec':dec}
    return (classif_dict, position_dict)
def fill_debclass_dict(self, data_fpath):
    """Parse a Debosscher class file into {lc_filename: [class, ...]}.

    Each non-blank line is "<lc_filename> <class1> [<class2> ...]".
    Raises AssertionError if a filename occurs more than once.
    """
    debclass_dict = {}
    # FIX: context manager closes the file; blank lines no longer IndexError.
    with open(data_fpath) as fp:
        for line in fp:
            vals = line.split()
            if not vals:
                continue  # skip blank lines
            fname = vals[0]
            class_list = vals[1:]
            assert fname not in debclass_dict
            debclass_dict[fname] = class_list
    return debclass_dict
def get_somefeats_for_tutor_sourceid(self, src_id=None):
    """ parse existing xmls with extracted features, to determine the source period.

    Reads the feature XML for TUTOR source `src_id` (stored under the
    TCP-offset id, src_id + 100000000) and returns
    {'f1': <freq1_harmonics_freq_0 as float>, 'nepoch': <n lightcurve points>}.
    """
    # mlens3 is a project module; make it importable from the TCP tree.
    sys.path.append(os.path.abspath(os.environ.get("TCP_DIR") + 'Software/feature_extract/Code/extractors'))
    import mlens3
    d = mlens3.EventData("/home/pteluser/scratch/vosource_xml_writedir/%d.xml" % \
                                     (100000000 + src_id))
    # Use the first single-band filter (drop the combined 'multiband' entry).
    filt_list = d.feat_dict.keys()
    filt_list.remove('multiband')
    filt = filt_list[0]
    f1_str = d.feat_dict[filt]['freq1_harmonics_freq_0']['val']['_text']
    f1_flt = float(f1_str)
    return {'f1':f1_flt,
            'nepoch':len(d.data['ts'][filt][0]['val'])}
def count_sources(self, debos_tcp_dict):
""" Count the number of sources we use for each sci-class and for each survey
"""
class_count = {}
class_nepoch_lists = {}
class_period_lists = {}
surveys_for_class = {}
nepoch_list_per_survey = {'ogle':[], 'hip':[]}
i = 0
for src_id, class_list in debos_tcp_dict['tcpsrcid_classes'].iteritems():
print "count_sources(%d/%d)" % (i, len(debos_tcp_dict['tcpsrcid_classes']))
i+= 1
#pre20110101# for a_class in class_list:
#### So, post 20110101: we assume the first class is the chosen class
for a_class in class_list[:1]:
if not class_count.has_key(a_class):
class_count[a_class] = 0
class_nepoch_lists[a_class] = []
class_period_lists[a_class] = []
surveys_for_class[a_class] = {'ogle':0, 'hip':0}
class_count[a_class] += 1
surveys_for_class[a_class][debos_tcp_dict['tcpsrcid_surveys'][src_id]] += 1 # will count each class for double-clasified sources
#class_nepoch_lists[a_class].append(debos_tcp_dict['srcid_nepochs'][src_id])
feat_dict = self.get_somefeats_for_tutor_sourceid(src_id=src_id)
freq1 = feat_dict['f1']
nepochs = feat_dict['nepoch']
nepoch_list_per_survey[debos_tcp_dict['tcpsrcid_surveys'][src_id]].append(nepochs)
class_period_lists[a_class].append(freq1)
class_nepoch_lists[a_class].append(nepochs)
#try:
# class_period_lists[a_class].append(float(debos_tcp_dict['dotastro_srcid_to_debos_attribs'][src_id]['f1']))
#except:
# freq1 = self.get_somefeats_for_tutor_sourceid(src_id=src_id)
# class_period_lists[a_class].append(freq1)
# print "No srcid=% in debos_tcp_dict['dotastro_srcid_to_debos_attribs']{}" % (src_id)
survey_count = {'hip':0, 'ogle':0}
survey_nepoch_lists = {'hip':[], 'ogle':[]}
for src_id, survey in debos_tcp_dict['tcpsrcid_surveys'].iteritems():
survey_count[survey] += 1
survey_nepoch_lists[survey].append(debos_tcp_dict['srcid_nepochs'][src_id])
#class_keys = class_count.keys()
#class_keys.sort()
#class_keys = ['BCEP', 'CLCEP', 'CP', 'DMCEP', 'DSCUT', 'EA', 'EB', 'ELL', 'EW', 'GDOR', 'HAEBE', 'LBOO', 'LBV', 'MIRA', 'PTCEP', 'PVSG', 'RRAB', 'RRC', 'RRD', 'RVTAU', 'SPB', 'SR', 'SXPHE', 'TTAU', 'WR', 'XB']
class_keys = [ \
'MIRA',
'SR',
'RVTAU',
'CLCEP',
'PTCEP',
'DMCEP',
'RRAB',
'RRC',
'RRD',
'DSCUT',
'LBOO',
'BCEP',
'SPB',
'GDOR',
'BE',
'PVSG',
'CP',
'WR',
'TTAU',
'HAEBE',
'LBV', # s doradus
'ELL',
'EA', # Algol, beta persei
'EB', # beta lyrae
'EW', # w ursae major
]
print "class\t NLC\t %NLCdeb\tsurvey\t<Npts>\t min(f1)\t<f1>\t max(f1)"
for deb_class in class_keys:
count = class_count[deb_class]
survey_n_total = surveys_for_class[deb_class]['ogle'] + surveys_for_class[deb_class]['hip']
if ((surveys_for_class[deb_class]['ogle'] > 0) and
(surveys_for_class[deb_class]['hip'] > 0)):
survey_str = "%0.1fOGLE, %0.1HIP" % (100. * surveys_for_class[deb_class]['ogle'] / float(survey_n_total),
100. * surveys_for_class[deb_class]['hip'] / float(survey_n_total))
elif (surveys_for_class[deb_class]['ogle'] > 0):
survey_str = "OGLE"
elif (surveys_for_class[deb_class]['hip'] > 0):
survey_str = "HIPPARCOS"
else:
survey_str = "XXXXXXXXXXXXXXXXXXXX"
#print "%s\t %d\t %0.1f\t\t%d\t %0.4f\t\t%0.2f\t %0.4f" % ( \
print "%s\t &%d\t &%0.1f\t\t&%s&%d\t &%0.4f\t&%0.2f\t &%0.4f\\\\" % ( \
deb_class,
count,
count / float(self.pars['deb_src_counts'][deb_class]) * 100.,
survey_str,
int(sum(class_nepoch_lists[deb_class])/ \
float(len(class_nepoch_lists[deb_class]))),
min(class_period_lists[deb_class]),
sum(class_period_lists[deb_class])/ \
float(len(class_period_lists[deb_class])),
max(class_period_lists[deb_class]))
print
print "survey\t NLC\t %NLCdeb <Npts>"
survey_names = {'hip':'HIPPARCOS', 'ogle':'OGLE'}
nepochs_debos = {'hip':1044, 'ogle':527}
for survey in ['hip', 'ogle']:
count = survey_count[survey]
#nepoch_avg = sum(survey_nepoch_lists[survey]) / float(len(survey_nepoch_lists[survey]))
nepoch_avg = sum(nepoch_list_per_survey[survey]) / float(len(nepoch_list_per_survey[survey]))
#print "%s\t %d\t %0.1f\t %d" % ( \
print "%s\t &%d\t &%0.1f\t &%d\\\\" % ( \
survey_names[survey],
count,
count / float(nepochs_debos[survey]) * 100.,
int(nepoch_avg))
import pdb; pdb.set_trace()
def confirm_idsdebdat_in_tutor(self):
""" Check that the sources in 20110101 Joey's IDs_deb.dat are also in TUTOR RDB.
"""
out_dict = {'tcpsrcid_classes':{},
'tcpsrcid_surveys':{},
'srcid_nepochs':{}}
lines = open('/home/pteluser/scratch/IDs_deb.dat').readlines()
for raw_line in lines:
line = raw_line.strip()
if len(line) == 0:
continue
f_id = int(line)
if f_id < 140000:
survey = 'hip'
else:
survey = 'ogle'
if survey == 'hip':
#select_str = 'select source_id, source_name, class_short_name from sources JOIN classes using (class_id) where project_id=123 and source_name like "%' + str(f_id) + '%"'
select_str = 'select source_id, source_name, class_short_name from sources JOIN classes using (class_id) where project_id=123 and source_name="HIP%d"' % (f_id)
self.tutor_cursor.execute(select_str)
results = self.tutor_cursor.fetchall()
if len(results) > 1:
print "matches > 1:", f_id, survey
for res in results:
print ' ', res
elif len(results) == 1:
(source_id, source_name, class_short_name) = results[0]
else:
#print "matches ==0:", f_id, survey
select_str = 'select source_id, source_name, class_short_name from sources JOIN classes using (class_id) where project_id=123 and source_name="HD%d"' % (f_id)
self.tutor_cursor.execute(select_str)
results = self.tutor_cursor.fetchall()
if len(results) > 1:
print "matches > 1:", f_id, survey
for res in results:
print ' ', res
elif len(results) == 1:
(source_id, source_name, class_short_name) = results[0]
else:
f_hdname = 'HD ' + str(f_id)
f_hipname = 'HIP ' + str(f_id)
for hd_name, hip_name in simbad_hd_hip_dict.iteritems():
if f_hdname == hd_name:
# then we query using the hip_name since query using hd_name nowork
select_str = 'select source_id, source_name, class_short_name from sources JOIN classes using (class_id) where project_id=123 and source_name="%s"' % (hip_name.replace(' ',''))
self.tutor_cursor.execute(select_str)
results = self.tutor_cursor.fetchall()
if len(results) > 1:
import pdb; pdb.set_trace()
elif len(results) == 0:
import pdb; pdb.set_trace()
else:
(source_id, source_name, class_short_name) = results[0]
print results
import pdb; pdb.set_trace()
print
break
elif f_hipname == hip_name:
# then we query using the hd_name
select_str = 'select source_id, source_name, class_short_name from sources JOIN classes using (class_id) where project_id=123 and source_name="%s"' % (hd_name.replace(' ',''))
self.tutor_cursor.execute(select_str)
results = self.tutor_cursor.fetchall()
if len(results) > 1:
import pdb; pdb.set_trace()
elif len(results) == 0:
import pdb; pdb.set_trace()
else:
(source_id, source_name, class_short_name) = results[0]
break
elif survey == 'ogle':
select_str = 'select source_id, source_name, class_short_name from sources JOIN classes using (class_id) where project_id=122 and source_id=%d' % (f_id)
self.tutor_cursor.execute(select_str)
results = self.tutor_cursor.fetchall()
if len(results) > 1:
print "matches > 1:(in proj=122)", f_id, survey
for res in results:
print ' ', res
continue
elif len(results) == 0:
print "matches ==0:(in proj=122)", f_id, survey
continue
(source_id_122, source_name_122, class_short_name_122) = results[0]
select_str = 'select source_id, source_name, class_short_name from sources JOIN classes using (class_id) where project_id=123 and source_name="%s"' % (source_name_122)
self.tutor_cursor.execute(select_str)
results = self.tutor_cursor.fetchall()
if len(results) > 1:
print "source_name matches > 1:(in proj=123)", f_id, survey
for res in results:
print ' ', res
continue
elif len(results) == 0:
print "source_name matches ==0:(in proj=123)", f_id, survey
continue
(source_id, source_name, class_short_name) = results[0]
if class_short_name != class_short_name_122:
print "id_123=%d source_name=%s class_123=%s class_122=%s" % \
(source_id, source_name, class_short_name, class_short_name_122)
# KLUDGEY:
class_short_name_upper = class_short_name.upper()
matched_class = False
for deb_cname, tut_cname in self.pars['debosscher_class_lookup'].iteritems():
if class_short_name == tut_cname:
deb_class = deb_cname
matched_class = True
break
elif class_short_name_upper == tut_cname:
deb_class = deb_cname
matched_class = True
break
if matched_class == False:
print tcp_srcid, fname_list, class_short_name
import pdb; pdb.set_trace()
print
#print '#', f_id, source_id, source_name, class_short_name
print f_id, '\t', deb_cname, '\t', source_name
out_dict['tcpsrcid_classes'][source_id] = [deb_cname]
out_dict['tcpsrcid_surveys'][source_id] = survey
out_dict['srcid_nepochs'][source_id] = 1
return out_dict
def determine_deboss_classes_for_srcids(self, debos_tcp_dict):
""" For each tcp_srcid, determine the 1 or more debosscher classes taken from the Deboss datafiles.
"""
debclass_dict_ogle = self.fill_debclass_dict(self.pars['debclass_ogle_fpath'])
ogle_fnames = debclass_dict_ogle.keys()
debclass_dict_hip = self.fill_debclass_dict(self.pars['debclass_hip_fpath'])
hip_fnames = debclass_dict_hip.keys()
tcpsrcid_surveys = {}
tcpsrcid_classes = {}
for tcp_srcid, fname_list in debos_tcp_dict['dotastro_srcid_to_attribfiles'].iteritems():
select_str = "select class_short_name, source_name from sources JOIN classes using (class_id) where project_id=123 and source_id=%d" % (tcp_srcid)
self.tutor_cursor.execute(select_str)
results = self.tutor_cursor.fetchall()
if len(results) == 0:
print "NONE!!!:", tcp_srcid, "Data file not in ts-OGLE / ts-HIPPARCOS files:", fname, tcp_srcid
continue
class_short_name = results[0][0]
source_name = results[0][1]
if 'hip' in source_name.lower():
tcpsrcid_surveys[tcp_srcid] = 'hip'
elif 'ogle' in source_name.lower():
tcpsrcid_surveys[tcp_srcid] = 'ogle'
else:
hdname = source_name.replace('HD','HD ')
if hdname in simbad_hd_hip_dict.keys():
tcpsrcid_surveys[tcp_srcid] = 'hip'
else:
print '!!! survey not known!', tcp_srcid, source_name
# KLUDGEY:
class_short_name_upper = class_short_name.upper()
matched_class = False
for deb_cname, tut_cname in self.pars['debosscher_class_lookup'].iteritems():
if class_short_name == tut_cname:
tcpsrcid_classes[tcp_srcid] = [deb_cname]
matched_class = True
break
elif class_short_name_upper == tut_cname:
tcpsrcid_classes[tcp_srcid] = [deb_cname]
matched_class = True
break
if matched_class == False:
print tcp_srcid, fname_list, class_short_name
import pdb; pdb.set_trace()
print
debos_tcp_dict['tcpsrcid_classes'] = tcpsrcid_classes
debos_tcp_dict['tcpsrcid_surveys'] = tcpsrcid_surveys
def determine_deboss_classes_for_srcids__old(self, debos_tcp_dict):
""" For each tcp_srcid, determine the 1 or more debosscher classes taken from the Deboss datafiles.
"""
debclass_dict = {} # 'LC_fname':[<class1>, <class2>]
debclass_dict_ogle = self.fill_debclass_dict(self.pars['debclass_ogle_fpath'])
debclass_dict.update(debclass_dict_ogle)
ogle_fnames = debclass_dict_ogle.keys()
debclass_dict_hip = self.fill_debclass_dict(self.pars['debclass_hip_fpath'])
debclass_dict.update(debclass_dict_hip)
hip_fnames = debclass_dict_hip.keys()
tcpsrcid_surveys = {}
tcpsrcid_classes = {}
for tcp_srcid, fname_list in debos_tcp_dict['dotastro_srcid_to_attribfiles'].iteritems():
#if tcp_srcid == 163844:
# print 'yo' #164149:
tcpsrcid_classes[tcp_srcid] = []
sorted_fname_list = []
for fname in fname_list:
if 'hip' in fname:
sorted_fname_list.insert(0,fname)
else:
sorted_fname_list.append(fname)
for fname in sorted_fname_list:
if not debclass_dict.has_key(fname):
# NOTE: I checked a selection of these and indeed there dont seem to be these fnames or similar numbers in the ts-OGLE / ts-HIPPARCOS files:
#print "Data file not in ts-OGLE / ts-HIPPARCOS files:", fname, tcp_srcid
select_str = "select class_short_name, source_name from sources JOIN classes using (class_id) where project_id=123 and source_id=%d" % (tcp_srcid)
self.tutor_cursor.execute(select_str)
results = self.tutor_cursor.fetchall()
if len(results) == 0:
print "NONE!!!:", tcp_srcid, "Data file not in ts-OGLE / ts-HIPPARCOS files:", fname, tcp_srcid
continue
class_short_name = results[0][0]
source_name = results[0][1]
if not tcpsrcid_surveys.has_key(tcp_srcid):
if 'hip' in source_name.lower():
tcpsrcid_surveys[tcp_srcid] = 'hip'
else:
tcpsrcid_surveys[tcp_srcid] = 'ogle' # if below doesnt match, then we use this
for a_fname in hip_fnames:
if fname in a_fname:
print "! ! ! ! tcp_srcid=%d: source_name=%s not HIPlike, but fname=%s is in ts-HIPPARCOS" % (tcp_src_id, source_name, fname)
tcpsrcid_surveys[tcp_srcid] = 'hip'
break
# KLUDGEY:
for deb_cname, tut_cname in self.pars['debosscher_class_lookup'].iteritems():
if class_short_name == tut_cname:
tcpsrcid_classes[tcp_srcid].append(deb_cname)
break
continue
if fname in ogle_fnames:
assert(tcpsrcid_surveys.get(tcp_srcid, 'ogle') == 'ogle')
tcpsrcid_surveys[tcp_srcid] = 'ogle'
elif fname in hip_fnames:
assert(tcpsrcid_surveys.get(tcp_srcid, 'hip') == 'hip')
tcpsrcid_surveys[tcp_srcid] = 'hip'
for a_class in debclass_dict[fname]:
assert(not a_class in tcpsrcid_classes[tcp_srcid])
tcpsrcid_classes[tcp_srcid].extend(debclass_dict[fname])
### For each srcid:[filename], get the classes
### -> ensure that for a srcid, there are not more than the classes what are defined for a single data file
#for tcp_srcid, deb_class_list in tcpsrcid_classes.iteritems():
# if len(deb_class_list) <= 1:
# continue
# print tcp_srcid,
# for deb_class in deb_class_list:
# print deb_class,
# print ';',
# for deb_class in deb_class_list:
# print self.pars['debosscher_class_lookup'][deb_class],
# print '; Nepochs:', debos_tcp_dict['srcid_nepochs'][tcp_srcid], tcpsrcid_surveys[tcp_srcid]
debos_tcp_dict['tcpsrcid_classes'] = tcpsrcid_classes
debos_tcp_dict['tcpsrcid_surveys'] = tcpsrcid_surveys
def get_deboss_dotastro_source_lookup__modified(self):
""" Parsing Debosscher data files and querying DotAstro.org,
determine which sources match and get related lookup info for each source.
"""
##### This gets a LC "filename" and its associated features/attributes
deboss_attrib_dict = {}
lines = open(self.pars['deboss_src_attribs_fpath']).readlines()
for line in lines:
line_list = line.split()
fname = line_list[0]
deboss_attrib_dict[fname] = {}
for i, attrib in enumerate(self.pars['deboss_src_attribs_list'][1:]):
deboss_attrib_dict[fname][attrib] = line_list[i+1]
##### This gets a source_name and all of the >=1 related lightcurve filenames:
# - NOTE: right now we just associate a single lightcurve per source_name since I assume . . .
deboss_srcname_fname_lookup = {}
lines = open(self.pars['deboss_src_datafile_lookup_fpath']).readlines()
for line in lines:
source_name = line[:23].strip()
deboss_srcname_fname_lookup[source_name] = []
filename_list = line[63:].split()
for file_name in filename_list:
file_name_sripped = file_name.strip()
deboss_srcname_fname_lookup[source_name].append(file_name_sripped)
debos_srcname_to_dotastro_srcid = {}
dotastro_srcid_to_attribfiles = {}
dotastro_srcid_to_debos_srcname = {}
# # #import pdb; pdb.set_trace()
for source_name, fname_list in deboss_srcname_fname_lookup.iteritems():
#if 'HIP 54413' in source_name:
# print 'yo'
debos_srcname_to_dotastro_srcid[source_name] = []
fname_match_found = False
for filename in fname_list:
select_str = 'SELECT source_id, source_name, project_id, class_id, pclass_id FROM sources WHERE project_id=123 and source_name = "%s"' % (source_name)
print select_str
self.tutor_cursor.execute(select_str)
results = self.tutor_cursor.fetchall()
if len(results) != 0:
if results[0][0] not in debos_srcname_to_dotastro_srcid[source_name]:
debos_srcname_to_dotastro_srcid[source_name].append(results[0][0])
if not dotastro_srcid_to_attribfiles.has_key(results[0][0]):
dotastro_srcid_to_attribfiles[results[0][0]] = []
dotastro_srcid_to_attribfiles[results[0][0]].append(filename)
if not dotastro_srcid_to_debos_srcname.has_key(results[0][0]):
dotastro_srcid_to_debos_srcname[results[0][0]] = []
if not source_name in dotastro_srcid_to_debos_srcname[results[0][0]]:
dotastro_srcid_to_debos_srcname[results[0][0]].append(source_name)
continue
select_str = 'SELECT source_id, source_name, project_id, class_id, pclass_id FROM sources WHERE project_id=123 and source_name like "%s"' % (source_name.replace(' ','\_'))
self.tutor_cursor.execute(select_str)
results = self.tutor_cursor.fetchall()
if len(results) != 0:
if results[0][0] not in debos_srcname_to_dotastro_srcid[source_name]:
debos_srcname_to_dotastro_srcid[source_name].append(results[0][0])
if not dotastro_srcid_to_attribfiles.has_key(results[0][0]):
dotastro_srcid_to_attribfiles[results[0][0]] = []
dotastro_srcid_to_attribfiles[results[0][0]].append(filename)
if not dotastro_srcid_to_debos_srcname.has_key(results[0][0]):
dotastro_srcid_to_debos_srcname[results[0][0]] = []
if not source_name in dotastro_srcid_to_debos_srcname[results[0][0]]:
dotastro_srcid_to_debos_srcname[results[0][0]].append(source_name)
continue
select_str = 'SELECT source_id, source_name, project_id, class_id, pclass_id FROM sources WHERE project_id=123 and source_name = "%s"' % (filename)
self.tutor_cursor.execute(select_str)
results = self.tutor_cursor.fetchall()
if len(results) != 0:
if results[0][0] not in debos_srcname_to_dotastro_srcid[source_name]:
debos_srcname_to_dotastro_srcid[source_name].append(results[0][0])
if not dotastro_srcid_to_attribfiles.has_key(results[0][0]):
dotastro_srcid_to_attribfiles[results[0][0]] = []
dotastro_srcid_to_attribfiles[results[0][0]].append(filename)
if not dotastro_srcid_to_debos_srcname.has_key(results[0][0]):
dotastro_srcid_to_debos_srcname[results[0][0]] = []
if not source_name in dotastro_srcid_to_debos_srcname[results[0][0]]:
dotastro_srcid_to_debos_srcname[results[0][0]].append(source_name)
continue
select_str = 'SELECT source_id, source_name, project_id, class_id, pclass_id FROM sources WHERE project_id=123 and source_name = "%s"' % (source_name.replace(' ',''))
self.tutor_cursor.execute(select_str)
results = self.tutor_cursor.fetchall()
if len(results) != 0:
if results[0][0] not in debos_srcname_to_dotastro_srcid[source_name]:
debos_srcname_to_dotastro_srcid[source_name].append(results[0][0])
if not dotastro_srcid_to_attribfiles.has_key(results[0][0]):
dotastro_srcid_to_attribfiles[results[0][0]] = []
dotastro_srcid_to_attribfiles[results[0][0]].append(filename)
if not dotastro_srcid_to_debos_srcname.has_key(results[0][0]):
dotastro_srcid_to_debos_srcname[results[0][0]] = []
if not source_name in dotastro_srcid_to_debos_srcname[results[0][0]]:
dotastro_srcid_to_debos_srcname[results[0][0]].append(source_name)
###20100802 dstarr disables this since sometimes double classifications are given in Debos:
### and the other sci-class is entered as *_a into DotAstro: HIP54413, HIP54413_a
#continue
select_str = 'SELECT source_id, source_name, project_id, class_id, pclass_id FROM sources WHERE project_id=123 and source_name like "%s\_' % (source_name.replace(' ','')) + '%"'
self.tutor_cursor.execute(select_str)
results = self.tutor_cursor.fetchall()
if len(results) != 0:
if results[0][0] not in debos_srcname_to_dotastro_srcid[source_name]:
debos_srcname_to_dotastro_srcid[source_name].append(results[0][0])
if not dotastro_srcid_to_attribfiles.has_key(results[0][0]):
dotastro_srcid_to_attribfiles[results[0][0]] = []
dotastro_srcid_to_attribfiles[results[0][0]].append(filename)
if not dotastro_srcid_to_debos_srcname.has_key(results[0][0]):
dotastro_srcid_to_debos_srcname[results[0][0]] = []
if not source_name in dotastro_srcid_to_debos_srcname[results[0][0]]:
dotastro_srcid_to_debos_srcname[results[0][0]].append(source_name)
continue
#for debos_srcname, dotastro_srcid_list in debos_srcname_to_dotastro_srcid.iteritems():
# if len(dotastro_srcid_list) > 1:
# print "len(dotastro_srcid_list) > 1:", debos_srcname, dotastro_srcid_list
#for dotastro_srcid, debos_srcname in dotastro_srcid_to_debos_srcname.iteritems():
# print dotastro_srcid, debos_srcname
dotastro_srcid_to_debos_attribs = {}
for dotastro_srcid, attribfiles in dotastro_srcid_to_attribfiles.iteritems():
matches_found = 0
for attrib_file in attribfiles:
if deboss_attrib_dict.has_key(attrib_file):
if os.path.exists("/home/pteluser/analysis/debosscher_20100707/TS-HIPPARCOS/" + attrib_file):
matches_found += 1
# I've checked that this only occurs once per dotastro sourceid (no multi LCs in there):
dotastro_srcid_to_debos_attribs[dotastro_srcid] = deboss_attrib_dict[attrib_file]
elif os.path.exists("/home/pteluser/analysis/debosscher_20100707/TS-OGLE/" + attrib_file):
matches_found += 1
# I've checked that this only occurs once per dotastro sourceid (no multi LCs in there):
dotastro_srcid_to_debos_attribs[dotastro_srcid] = deboss_attrib_dict[attrib_file]
##### DEBUG PRINTING:
#if matches_found == 0:
# print ' NO ATTRIB FILE:', dotastro_srcid, attribfiles
#elif matches_found > 1:
# print 'MULTIPLE ATTRIBS:', dotastro_srcid, attribfiles
# for fname in attribfiles:
# pprint.pprint((fname, deboss_attrib_dict[fname]))
return {'dotastro_srcid_to_debos_attribs':dotastro_srcid_to_debos_attribs,
'dotastro_srcid_to_debos_srcname':dotastro_srcid_to_debos_srcname,
'dotastro_srcid_to_attribfiles':dotastro_srcid_to_attribfiles}
class tutor_db:
    """
    Thin wrapper that opens a connection and cursor to the TCP TUTOR
    MySQL database using the parameters hardcoded in ``self.pars``.

    NOTE(review): hostname/username/password are stored in source --
    consider moving them to a protected configuration file.
    """
    def __init__(self):
        # Connection parameters for the TUTOR database.
        self.pars ={'tcptutor_hostname':'192.168.1.103',
                    'tcptutor_username':'tutor', # guest
                    'tcptutor_password':'ilove2mass', #'iamaguest',
                    'tcptutor_database':'tutor',
                    'tcptutor_port':3306}
        # Open the connection and keep a reusable cursor on the instance.
        self.tutor_db = MySQLdb.connect(host=self.pars['tcptutor_hostname'], \
                                        user=self.pars['tcptutor_username'], \
                                        passwd=self.pars['tcptutor_password'],\
                                        db=self.pars['tcptutor_database'],\
                                        port=self.pars['tcptutor_port'])
        self.tutor_cursor = self.tutor_db.cursor()
if __name__ == '__main__':
    #db = tutor_db()
    # NOTE(review): DB hostnames, usernames and passwords are hardcoded in
    # the dicts below -- consider loading them from a protected config file.
    ### 20101118: This pars{} is taken from analysis_deboss_tcp_source_compare.py:
    pars_tutor = {'num_percent_epoch_error_iterations':2, # !!! NOTE: This must be the same in pairwise_classification.py:pars[]
        'subsample_percents_to_generate_xmls':[0.1], # This takes 22 cores ??? considering that tranx will do a second round (count=22 + 8) # # # #TODO: 16perc * 9sets is much more reasonable, memory/resource/ipython-node wise
        'tcp_hostname':'192.168.1.25',
        'tcp_username':'pteluser',
        'tcp_port': 3306,
        'tcp_database':'source_test_db',
        'tcptutor_hostname':'192.168.1.103',
        'tcptutor_username':'tutor',
        'tcptutor_password':'ilove2mass',
        'tcptutor_port': 3306, # 13306,
        'tcptutor_database':'tutor',
        'srcid_debos_attribs_pkl_fpath':'/home/pteluser/analysis/debosscher_20100707/srcid_debos_attribs.pkl.gz',
        #OBSOLETE#'arff_fpath':os.path.expandvars(os.path.expanduser("~/scratch/dotastro_ge1srcs_period_nonper__exclude_non_debosscher.arff")),
        'trainset_pruned_pklgz_fpath':"/home/pteluser/Dropbox/work/WEKAj48_dotastro_ge1srcs_period_nonper__exclude_non_debosscher/pairwise_trainset__debosscher_table3.pkl.gz",
        'deboss_src_datafile_lookup_fpath':'/home/pteluser/analysis/debosscher_20100707/list.dat', # essentially the same as debos_newHIPP_data/list.dat(19893.hip -> c-19893.hip)
        'deboss_src_attribs_fpath':'/home/pteluser/analysis/debosscher_20100707/defatts.dat',
        'deboss_src_attribs_list':['filename','f1','f2','f3','amp11','amp12','amp13','amp14','amp21','amp22','amp23','amp24','amp31','amp32','amp33','amp34','phi12','phi13','phi14','phi21','phi22','phi23','phi24','phi31','phi32','phi33','phi34','trend','varrat','varred','class'],
        }
    # 20101118: Take from pairwise_classification.py:
    pars = {'num_percent_epoch_error_iterations':2, # !!! NOTE: This must be the same in analysis_deboss_tcp_source_compare.py:pars[]
        'crossvalid_nfolds':10, # None == use n_folds equal to the minimum number of sources for a science class. If this number is > 10, then n_folds is set to 10
        'crossvalid_do_stratified':False, # False: randomly sample sources for each fold, True: exclude a fold group of sources which is not excluded in the other folds.
        'crossvalid_fold_percent':40., #NOTE: only valid if do_stratified=False #float x in x/y OR None==just use the percent 1/nfolds
        'tcp_hostname':'192.168.1.25',
        'tcp_username':'pteluser',
        'tcp_port': 3306,
        'tcp_database':'source_test_db',
        'dotastro_arff_fpath':os.path.expandvars('$HOME/scratch/train_output_20100517_dotastro_xml_with_features__default.arff'),#os.path.expandvars('$HOME/scratch/train_output_20100517_dotastro_xml_with_features.arff'),
        'arff_sciclass_dict_pkl_fpath':os.path.expandvars('$HOME/scratch/arff_sciclass_dict.pkl'),
        'trainset_pruned_pklgz_fpath':os.path.expandvars('$HOME/scratch/trainset_pruned.pkl.gz'),
        'pruned_classif_summary_stats_pkl_fpath': \
               os.path.expandvars('$HOME/scratch/pruned_classif_summary_stats.pkl'),
        'weka_pairwise_classifiers_pkl_fpath': \
               os.path.expandvars('$HOME/scratch/weka_pairwise_classifiers_pkl_fpath.pkl'),
        'pairwise_trainingset_dirpath':os.path.expandvars('$HOME/scratch/pairwise_trainingsets'),
        'pairwise_classifier_dirpath':os.path.expandvars('$HOME/scratch/pairwise_classifiers'),
        'pairwise_classifier_pklgz_dirpath':os.path.expandvars('$HOME/scratch/pairwise_classifiers'),
        'pairwise_scratch_dirpath':'/media/raid_0/pairwise_scratch',
        'classification_summary_pklgz_fpath':'',#os.path.expandvars('$HOME/scratch/pairwise_classifiers'),
        'confusion_stats_html_fpath':os.path.expandvars('$HOME/Dropbox/Public/work/pairwise_confusion_matrix.html'),
        'cyto_work_final_fpath':'/home/pteluser/Dropbox/work/',
        'cyto_network_fname':'pairwise_class.cyto.network',
        'cyto_nodeattrib_fname':'pairwise_class.cyto.nodeattrib',
        'pairwise_schema_name':'noprune', # represents how the class heirarchy pruning was done.
        't_sleep':0.2,
        'number_threads':13, # on transx : 10
        'min_num_sources_for_pairwise_class_inclusion':6,
        #'feat_dist_image_fpath':"/home/pteluser/Dropbox/Public/work/feat_distribution.png",#OBSOLETE
        #'feat_dist_image_url':"http://dl.dropbox.com/u/4221040/work/feat_distribution.png",#OBSOLETE
        'feat_dist_image_local_dirpath':'/media/raid_0/pairwise_scratch/pairwise_scp_data',#"/home/pteluser/scratch/pairwise_scp_data",
        'feat_dist_image_remote_scp_str':"pteluser@lyra.berkeley.edu:www/dstarr/pairwise_images/",
        'feat_dist_image_rooturl':"http://lyra.berkeley.edu/~jbloom/dstarr/pairwise_images",
        'feat_distrib_classes':{'target_class':'lboo',#adding anything here is OBSOLETE
                                'comparison_classes':['pvsg', 'gd', 'ds']},#adding anything here is OBSOLETE
        'plot_symb':['o','s','v','d','<'], # ,'+','x','.', ,'>','^'
        'feat_distrib_colors':['#000000',
                               '#ff3366',
                               '#660000',
                               '#aa0000',
                               '#ff0000',
                               '#ff6600',
                               '#996600',
                               '#cc9900',
                               '#ffff00',
                               '#ffcc33',
                               '#ffff99',
                               '#99ff99',
                               '#666600',
                               '#99cc00',
                               '#00cc00',
                               '#006600',
                               '#339966',
                               '#33ff99',
                               '#006666',
                               '#66ffff',
                               '#0066ff',
                               '#0000cc',
                               '#660099',
                               '#993366',
                               '#ff99ff',
                               '#440044'],
        #'feat_distrib_colors':['b','g','r','c','m','y','k','0.25','0.5','0.75', (0.5,0,0), (0,0.5,0), (0,0,0.5), (0.75,0,0), (0,0.75,0), (0,0,0.75), (0.25,0,0), (0,0.25,0), (0,0,0.25), '#eeefff', '#bbbfff', '#888fff', '#555fff', '#000fff', '#000aaa', '#fffaaa'],
        'taxonomy_prune_defs':{
            'terminating_classes':['be', 'bc', 'sreg', 'rr-lyr', 'c', 'bly', 'sne','nov']},
        'debosscher_confusion_table3_fpath':os.path.abspath(os.environ.get("TCP_DIR") + '/Data/debosscher_table3.html'),
        'debosscher_confusion_table4_fpath':os.path.abspath(os.environ.get("TCP_DIR") + '/Data/debosscher_table4.html'),
        # Maps Debosscher class abbreviations onto DotAstro / TUTOR class ids.
        'debosscher_class_lookup':{ \
            'BCEP':'bc',
            'BE':'be', #NO LCs # Pulsating Be-stars (57) : HIP, GENEVA
            'CLCEP':'dc',
            'CP':'CP',
            'CV':'cv', #NO LCs # Cataclysmic variables (3) : ULTRACAM
            'DAV':'pwd', #NO LCs # Pulsating DA white dwarfs (2) : WET
            'DBV':'pwd', #NO LCs # Pulsating DB white dwarfs (1) : WET / CFHT
            'DMCEP':'cm',
            'DSCUT':'ds',
            'EA':'alg',
            'EB':'bly',
            'ELL':'ell',
            'EW':'wu',
            'FUORI':'fuor', #NO LCs # FU-Ori stars (3) : ROTOR
            'GDOR':'gd',
            'GWVIR':'gw', #NO LCs # GW-Virginis stars (2) : CFHT
            'HAEBE':'haebe',
            'LBOO':'lboo',
            'LBV':'sdorad',
            'MIRA':'mira',
            'PTCEP':'piic',
            'PVSG':'pvsg', # Periodically variable supergiants (76) : HIP, GENEVA, ESO
            'ROAP':'rot', #NO LCs # Rapidly oscillationg Ap stars (4) : WET/ESO # 13587 is given class_id='rot' in Dotastro, but the dotastro projectclass is 'Rapidly Osc Ap stars'.
            'RRAB':'rr-ab',
            'RRC':'rr-c',
            'RRD':'rr-d',
            'RVTAU':'rv',
            'SDBV':'sdbv', #NO LCs # Pulsating subdwarf B stars (16) : ULTRACAM
            'SLR':'NOTMATCHED', # NOT in projid=123 # NOT MATCHED Solar-like oscillations in red giants (1) : MOST
            'SPB':'spb', # Slowly-pulsating B stars (47) : HIP / GENEVA, MOST
            'SR':'sreg',
            'SXPHE':'sx', ### NOT in current Debosscher confusion matrix
            'TTAU':'tt',
            'WR':'wr',
            'XB':'xrbin', ### NOT in current Debosscher confusion matrix
            },
        # Per-class source counts (matching the counts noted in the
        # class-lookup comments above).
        'deb_src_counts':{
            'PVSG': 76 ,
            'BE': 57 ,
            'BCEP': 58 ,
            'CLCEP': 195 ,
            'DMCEP': 95 ,
            'PTCEP': 24 ,
            'CP': 63 ,
            'DSCUT': 139 ,
            'LBOO': 13 ,
            'SXPHE': 7 ,
            'GDOR': 35 ,
            'LBV': 21 ,
            'MIRA': 144 ,
            'SR': 42 ,
            'RRAB': 129 ,
            'RRC': 29 ,
            'RRD': 57 ,
            'RVTAU': 13 ,
            'SPB': 47 ,
            'SLR': 1 ,
            'SDBV': 16 ,
            'DAV': 2 ,
            'DBV': 1 ,
            'GWVIR': 2 ,
            'ROAP': 4 ,
            'TTAU': 17 ,
            'HAEBE': 21 ,
            'FUORI': 3 ,
            'WR': 63 ,
            'XB': 9 ,
            'CV': 3 ,
            'EA': 169 ,
            'EB': 147 ,
            'EW': 59 ,
            'ELL': 16},
        'debclass_ogle_fpath':'/home/pteluser/analysis/debosscher_20100707/ts-OGLE',
        'debclass_hip_fpath':'/home/pteluser/analysis/debosscher_20100707/ts-HIPPARCOS',
        }
    # TUTOR-specific settings extend / override the pairwise pars.
    pars.update(pars_tutor)
    DebPaperAnalysis = Deb_Paper_Analysis(pars=pars)
    DebPaperAnalysis.main()
    #AnalysisDebossTcpSourceCompare = Analysis_Deboss_TCP_Source_Compare(pars=pars)
    #srcid_to_debos_attribs = self.get_deboss_dotastro_source_lookup()
|
import subprocess
def brightness(value):
    """
    Control screen brightness via ``xbacklight``.

    @value : int (or numeric str), a percentage in [0, 100]
    Returns 0 on success, 1 if the value is invalid.
    """
    # Explicit validation instead of ``assert`` (asserts are stripped under
    # ``python -O``), and catch ValueError/TypeError too: the original only
    # caught AssertionError, so brightness("abc") crashed inside int().
    try:
        level = int(value)
    except (TypeError, ValueError):
        return 1
    if not 0 <= level <= 100:
        return 1
    # subprocess argument lists must contain only strings; ``value`` may be
    # an int, so pass the normalized string form.
    subprocess.call(["xbacklight", "-set", str(level)], shell=False)
    return 0
def volume(value):
    """
    Control sound volume through ``pactl``.

    @value : int percentage, or the string "mute" to toggle sink muting
    Returns 0 on success, 1 when the value cannot be parsed as an int.
    """
    # The special token "mute" toggles the mute state of sink 0.
    if value == "mute":
        subprocess.call(["pactl", "set-sink-mute", "0", "toggle"], shell=False)
        return 0
    try:
        percent = int(value)
    except ValueError:
        return 1
    subprocess.call(
        ["pactl", "set-sink-volume", "0", str(percent) + "%"], shell=False
    )
    return 0
|
"""
@author: David
inspired by Telmo Menezes's work : telmomenezes.com
"""
import sys
import numpy as np
import network_evaluation as ne
from draw import genetic_algorithm as ga
np.seterr('ignore')
'''
This is the main file of the program :
it stores datas from the real network necessary to the chosen evaluation method
define the genetic algorithm and its grammar
and call it
'''
# @profile
def main():
    """
    Entry point: configure and run one genetic-algorithm experiment.

    Reads the network name from argv[1], extracts the statistics of the real
    network that the chosen evaluation method needs, builds an initial
    genome, then evolves it.
    """
    # --- experiment configuration -------------------------------------
    # evaluation_method: how generated networks are compared to the real one;
    # any combination of "(nodes)_(vertices)_(clustering)_(importance)_
    # (communities)_(distances)_(degrees)".
    evaluation_method = "communities_degrees_distances_clustering_importance"
    # tree_type: genome trees "with_constants" in the leaves, or "simple".
    tree_type = "with_constants"
    network = sys.argv[1]
    extension = ".gexf"
    # network_type: "(un)weighted_(un)directed"
    network_type = "undirected_unweighted"
    use_multiprocessing = True
    dynamic = False
    # --- input/output locations ---------------------------------------
    data_path = 'data/{}/'.format(network)
    results_path = 'results/{}/{}.xml'.format(network, evaluation_method)
    stats_path = 'results/{}/{}_stats.txt'.format(network, evaluation_method)
    # NOTE(review): this path formats ``network`` twice where the others use
    # ``evaluation_method`` -- looks intentional, but worth confirming.
    dot_path = 'results/{}/{}_trees.jpeg'.format(network, network)
    nb_generations = 31
    freq_stats = 30
    # Step 1: store the statistics of the real network required by the
    # evaluation method (numpy warnings are silenced at module level).
    ne.get_datas_from_real_network(
        data_path,
        results_path,
        name=network,
        evaluation_method=evaluation_method,
        network_type=network_type,
        dynamic=dynamic,
        extension=extension,
    )
    # Step 2: build the starting genome (a decision tree; max_depth and the
    # other genome options keep their library defaults here).
    genome = ga.new_genome(
        results_path,
        name=network,
        data_path=data_path,
        evaluation_method=evaluation_method,
        dynamic=dynamic,
        tree_type=tree_type,
        network_type=network_type,
        extension=extension,
    )
    # Step 3: run the evolution, printing statistics every ``freq_stats``
    # generations into ``stats_path``.
    ga.evolve(
        genome,
        stats_path=stats_path,
        dot_path=dot_path,
        nb_generations=nb_generations,
        freq_stats=freq_stats,
        multiprocessing=use_multiprocessing,
    )
if __name__ == "__main__":
    main()
|
# Comma-separated "NAME:TYPE" schema string (looks like a BigQuery legacy
# schema -- confirm against the pipeline that consumes it). Every column is
# typed STRING, and the string deliberately ends with a single trailing
# space, exactly as the original literal did.
_TABLE_COLUMNS = (
    'IDKEY',
    'FECHA',
    'CREDITO_NRO',
    'NIT',
    'FECHA_GESTION',
    'GRABADOR',
    'ESTADO_DE_COBRO',
    'TELEFONO',
    'ESTADO_CARTERA',
    'DIAS_DE_MORA',
    'NOTA',
)
TABLE_SCHEMA = ', '.join(col + ':STRING' for col in _TABLE_COLUMNS) + ' '
import numpy as np
from LSTM import LSTM
from BiLSTM import BiLSTM
def save_model_parameters_theano(model, folder, status):
    """
    Save a model's shared parameters to ``<folder>/<ClassName><status>.npz``.

    :param model: object exposing W/U/V/E/b/c parameters with ``get_value()``
                  (Theano shared-variable style)
    :param folder: destination directory (must already exist)
    :param status: suffix appended to the file name (e.g. an epoch tag)
    """
    outfile = folder + '/' + model.__class__.__name__ + status
    # np.savez appends the ".npz" extension to the path automatically.
    np.savez(outfile,
             W=model.W.get_value(),
             U=model.U.get_value(),
             V=model.V.get_value(),
             E=model.E.get_value(),
             b=model.b.get_value(),
             c=model.c.get_value(),
             )
    # Parenthesized print works on both Python 2 and 3; the original used a
    # Python 2-only print statement.
    print("Saved model parameters to %s." % outfile)
def load_model_parameters_theano(path, modelClass=LSTM):
    """
    Rebuild a model from an ``.npz`` archive written by
    ``save_model_parameters_theano``.

    :param path: path to the ``.npz`` file
    :param modelClass: class instantiated as ``modelClass(word_dim, hidden_dim)``
    :returns: a ``modelClass`` instance with W/U/V/E/b/c restored
    """
    npzfile = np.load(path)
    E = npzfile["E"]
    # NOTE(review): assumes the embedding matrix E is laid out
    # (hidden_dim, word_dim) -- confirm against the model definition.
    hidden_dim, word_dim = E.shape[0], E.shape[1]
    # Parenthesized print works on both Python 2 and 3 (original used the
    # Python 2-only print statement); also fixed the "model model" typo.
    print("Building model from %s with hidden_dim=%d word_dim=%d" % (path, hidden_dim, word_dim))
    model = modelClass(word_dim, hidden_dim)
    model.W.set_value(npzfile["W"])
    model.U.set_value(npzfile["U"])
    model.V.set_value(npzfile["V"])
    model.E.set_value(npzfile["E"])
    model.b.set_value(npzfile["b"])
    model.c.set_value(npzfile["c"])
    return model
|
from time import sleep
from random import random, randint
from selenium.webdriver import Firefox
def init_driver(logger, options):
    """
    Start Firefox, log in to the VFS Global visa site and open the booking
    dialog.

    :param logger: logger used for progress messages
    :param options: dict with 'login' and 'password' keys
    :returns: the logged-in selenium WebDriver

    Credentials are typed one character at a time with random pauses,
    presumably to mimic a human -- the absolute XPaths below are tied to the
    site's exact DOM and will break if the page layout changes.
    """
    driver = Firefox()
    driver.get("https://visa.vfsglobal.com/blr/en/pol/login")
    logger.info("open site")
    sleep(randint(5, 8) + random())
    # Dismiss the OneTrust cookie-consent banner before interacting.
    driver.find_element_by_css_selector("#onetrust-accept-btn-handler").click()
    logger.info("skip cookie message")
    user_name = driver.find_element_by_xpath(
        "/html/body/app-root/div/app-login/section/div/div/mat-card/form/div[1]/mat-form-field/div/div[1]/div[3]/input"
    )
    password = driver.find_element_by_xpath(
        "/html/body/app-root/div/app-login/section/div/div/mat-card/form/div[2]/mat-form-field/div/div[1]/div[3]/input"
    )
    submit = driver.find_element_by_xpath(
        "/html/body/app-root/div/app-login/section/div/div/mat-card/form/button"
    )
    sleep(randint(3, 5) + random())
    # Type each character separately with a random delay.
    for letter in options['login']:
        user_name.send_keys(letter)
        sleep(random())
    for letter in options['password']:
        password.send_keys(letter)
        sleep(random())
    logger.info("set login and password")
    submit.click()
    logger.info("click login button")
    sleep(randint(3, 5) + random())
    # From the dashboard, open the "book an appointment" modal.
    driver.find_element_by_xpath(
        "/html/body/app-root/div/app-dashboard/section/div/div[2]/button/span"
    ).click()
    logger.info("open modal window for booking")
    return driver
def get_centre_category_sub_category(driver):
    """
    Locate the three booking drop-downs (centre, category, sub-category).

    The three <mat-select> widgets on the eligibility-criteria form only
    differ by their ``div[N]`` index, so each XPath is built from a single
    template.
    """
    xpath_template = (
        "/html/body/app-root/div/app-eligibility-criteria/section/form/"
        "mat-card[1]/form/div[{}]/mat-form-field/div/div[1]/div[3]/mat-select"
    )
    centre, category, sub_category = (
        driver.find_element_by_xpath(xpath_template.format(index))
        for index in (1, 2, 3)
    )
    return centre, category, sub_category
def add_applicate(driver, data):
    """
    Fill in one applicant's details on the VFS booking form and save them.

    :param driver: logged-in WebDriver positioned on the applicant form
    :param data: dict with keys name, last_name, gender, date_born,
                 nationality, passport_number, expare_date, code_phone,
                 phone_number, email

    Text is typed one character at a time with random pauses, presumably to
    mimic a human. The absolute XPaths are tied to the site's exact DOM.
    """
    sleep(3 + random())
    # First name
    tmp = driver.find_element_by_xpath(
        "/html/body/app-root/div/app-applicant-details/section/"
        "mat-card[1]/form/app-dynamic-form/div/div/app-dynamic-control[2]"
        "/div/div/div/app-input-control/div/mat-form-field/div/div[1]/div[3]/input"
    )
    for letter in data['name']:
        tmp.send_keys(letter)
        sleep(random())
    # Last name
    tmp = driver.find_element_by_xpath(
        "/html/body/app-root/div/app-applicant-details/section/mat-card[1]"
        "/form/app-dynamic-form/div/div/app-dynamic-control[3]/div/div/div"
        "/app-input-control/div/mat-form-field/div/div[1]/div[3]/input"
    )
    for letter in data['last_name']:
        tmp.send_keys(letter)
        sleep(random())
    # Gender (drop-down, selected by typing the visible value)
    driver.find_element_by_xpath(
        "/html/body/app-root/div/app-applicant-details/section/mat-card[1]/"
        "form/app-dynamic-form/div/div/app-dynamic-control[4]/div/div[1]/div/"
        "app-dropdown/div/mat-form-field/div/div[1]/div[3]/mat-select"
    ).send_keys(data['gender'])
    sleep(random())
    # Date of birth
    tmp = driver.find_element_by_xpath(
        "/html/body/app-root/div/app-applicant-details/section/mat-card[1]/"
        "form/app-dynamic-form/div/div/app-dynamic-control[4]/div/div[2]/div/app-ngb-datepicker/div/div[2]/input"
    )
    for letter in data['date_born']:
        tmp.send_keys(letter)
        sleep(random())
    # Nationality (drop-down)
    driver.find_element_by_xpath(
        "/html/body/app-root/div/app-applicant-details/section/mat-card[1]/"
        "form/app-dynamic-form/div/div/app-dynamic-control[5]/div/div/div/app-dropdown"
        "/div/mat-form-field/div/div[1]/div[3]/mat-select"
    ).send_keys(data['nationality'])
    sleep(random())
    # Passport number
    tmp = driver.find_element_by_xpath(
        "/html/body/app-root/div/app-applicant-details/section/mat-card[1]/"
        "form/app-dynamic-form/div/div/app-dynamic-control[6]/div/div[1]/div/"
        "app-input-control/div/mat-form-field/div/div[1]/div[3]/input"
    )
    for letter in data['passport_number']:
        tmp.send_keys(letter)
        sleep(random())
    # Passport expiry date
    tmp = driver.find_element_by_xpath(
        "/html/body/app-root/div/app-applicant-details/section/mat-card[1]/"
        "form/app-dynamic-form/div/div/app-dynamic-control[6]/div/div[2]/div/app-ngb-datepicker/div/div[2]/input"
    )
    for letter in data['expare_date']:
        tmp.send_keys(letter)
        sleep(random())
    # Phone country code
    tmp = driver.find_element_by_xpath(
        "/html/body/app-root/div/app-applicant-details/section/mat-card[1]"
        "/form/app-dynamic-form/div/div/app-dynamic-control[9]/div/div/div[2]"
        "/div[1]/app-input-control/div/mat-form-field/div/div[1]/div[3]/input"
    )
    for letter in data['code_phone']:
        tmp.send_keys(letter)
        sleep(random())
    # Phone number
    tmp = driver.find_element_by_xpath(
        "/html/body/app-root/div/app-applicant-details/section/mat-card[1]/"
        "form/app-dynamic-form/div/div/app-dynamic-control[9]/div/div/div[2]"
        "/div[2]/app-input-control/div/mat-form-field/div/div[1]/div[3]/input"
    )
    for letter in data['phone_number']:
        tmp.send_keys(letter)
        sleep(random())
    # E-mail address
    tmp = driver.find_element_by_xpath(
        "/html/body/app-root/div/app-applicant-details/section/mat-card[1]/form"
        "/app-dynamic-form/div/div/app-dynamic-control[10]/div/div/div/app-input-control"
        "/div/mat-form-field/div/div[1]/div[3]/input"
    )
    for letter in data['email']:
        tmp.send_keys(letter)
        sleep(random())
    # Submit the applicant block.
    driver.find_element_by_xpath(
        "/html/body/app-root/div/app-applicant-details/section/mat-card[2]/app-dynamic-form"
        "/div/div/app-dynamic-control/div/div/div[2]/button"
    ).click()
from skimage import io
from skimage.transform import downscale_local_mean
from skimage.filters import threshold_sauvola as threshold
from skimage.segmentation import clear_border, random_walker
from skimage.measure import label, regionprops
from skimage.morphology import binary_opening, square, remove_small_objects
from skimage.color import gray2rgb
from skimage.draw import circle
import config
from mpyx.F import EZ, As, By, F, Seq, Data as Datagram
# from mpyx.F import Serial, Parallel, Broadcast, S, P, B
# from mpyx.F import Iter, Const, Print, Stamp, Map, Filter, Batch, Seq, Zip, Read, Write
# from mpyx.Vid import BG
from mpyx.Vid import FFmpeg
# from mpyx.Compress import VideoFile, VideoStream
from lib.Database import Database, DBWriter
from lib.Crop import Crop
from dateutil.parser import parse as dateparse
from base64 import b64encode
from itertools import repeat
import warnings
from PIL import Image
from io import BytesIO
from uuid import uuid4
# import matplotlib.pyplot as plt
import numpy as np
import traceback
import multiprocessing
import subprocess
import threading
import concurrent.futures
import shutil
import shlex
import pexpect
import queue
import asyncio
import fcntl
import tempfile
import time
import os
import sys
import cv2
# import matplotlib.pyplot as plt
async def main(args):
    """
    CLI entry point: expects at least <video_file> and <date>; optional
    name and notes are forwarded to detect_video. Prints usage when too
    few arguments are given.
    """
    if len(args) >= 2:
        experiment_id = await detect_video(*args)
        print(experiment_id)
    else:
        print("""path/to/video/file.avi 2017-10-31 Name-of_video "Notes. Notes." """)
async def detect_video(video_file, date, name="", notes=""):
    """
    Run the particle-detection pipeline over one video file.

    Creates a per-experiment data directory, wires the processing graph
    (decode -> background model -> foreground -> binarize -> region props ->
    crops -> CSV/DB writers) and runs it to completion. On failure the
    experiment directory is removed again.

    :param video_file: path to the source video
    :param date: experiment day (any dateutil-parsable string)
    :param name: human-readable experiment name
    :param notes: free-form notes stored with the experiment
    :returns: the new experiment's UUID
    """
    cpus = multiprocessing.cpu_count()  # NOTE(review): currently unused
    experiment_uuid = uuid4()
    experiment_day = dateparse(date)
    experiment_dir = os.path.join(config.experiment_dir, str(experiment_uuid))
    experiment = (experiment_uuid, experiment_day, name, "detection", notes)
    method = "detection"
    try:
        print("Creating data directory", experiment_dir)
        os.mkdir(experiment_dir)
        # Source frame dimensions the video is scaled to.
        w, h = 2336, 1729
        # Reads the source video, outputs frames
        # -ss 00:00:02.00 -t 00:00:00.50
        print("Launching Video Reader")
        video_reader = FFmpeg(
            video_file,
            "",
            (h, w, 1),
            " -vf scale={}:{}".format(w, h),
            [],
            False,
            FrameData,
        )
        print("Launching Database processor")
        csv_proc = CSVWriter(experiment_uuid, experiment_day, name, method, notes)
        db_proc = DBProcessor(experiment_uuid, experiment_day, name, method, notes)
        # Computes a background for a frame, outputs {"frame": frame, "bg": bg}
        print("Launching Background Modeler")
        bg_proc = BG(model="simpleMax", window_size=20, img_shape=(h, w, 1))
        # Utilities for viewing various stages of processing.
        raw_player = RawPlayer()
        bg_player = BGPlayer()
        fg_player = FGPlayer()
        mask_player = MaskPlayer()
        crop_player = CropPlayer()
        meta_player = MetaPlayer()
        # A utility to clean up datagram resources
        cleaner = Cleaner()
        # Number of processes to spawn for particular processors
        n_prop_procs = 10
        n_crop_procs = 4
        n_binary = 5
        n_crop_writer = 10
        n_csv_procs = 5
        # Main pipeline
        EZ(
            video_reader,
            Counter(),
            Entry(experiment_uuid),
            MagicPixel(),
            Rescaler(scale=1 / 1),
            bg_proc,
            FG(),
            Seq(
                As(n_binary, Binary, "legacyLabeled"),
                As(n_prop_procs, Properties),
                As(n_crop_procs, Crop_Processor),
                As(n_crop_writer, CropWriter, experiment_dir),
                As(
                    n_csv_procs,
                    CSVWriter,
                    experiment_uuid,
                    experiment_day,
                    name,
                    method,
                    notes,
                ),
            ),
            db_proc,
            Passthrough(),
            PerformanceMonitor(
                {
                    "Properties": n_prop_procs,
                    "Crop_Processor": n_crop_procs,
                    "Binary": n_binary,
                    "CropWriter": n_crop_writer,
                    "CSVWriter": n_csv_procs,
                }
            ),
            # raw_player,
            # bg_player,
            # fg_player,
            # mask_player,
            cleaner,
            # qsize=10,
        ).start().join()
    except Exception as e:
        print("Uh oh. Something went wrong")
        traceback.print_exc()
        # Roll back: discard any partially-written experiment data.
        if os.path.exists(experiment_dir):
            print("Removing files from", experiment_dir)
            shutil.rmtree(experiment_dir)
        else:
            pass
    finally:
        print("Fin.")
        return experiment_uuid
class Cleaner(F):
    """
    Terminal sink node: release each frame's stored resources.
    Intentionally does not call self.put() -- nothing runs downstream of it.
    """
    def do(self, frame):
        frame.clean()
class Filter(F):
    """
    Pass-through node that discards the named per-frame resources before
    forwarding the frame downstream.
    """
    def initialize(self, properties):
        # Names of the frame resources to erase on every frame.
        self.properties = properties
    def do(self, frame):
        for resource_name in self.properties:
            frame.erase(resource_name)
        self.put(frame)
class RawPlayer(F):
    """Debug viewer: show each raw frame in an OpenCV window (~24 fps)."""
    def do(self, frame):
        cv2.imshow("Raw Display", frame.load("raw"))
        self.put(frame)
        cv2.waitKey(1000 // 24)
class FGPlayer(F):
    """Debug viewer: show each foreground image in an OpenCV window (~24 fps)."""
    def do(self, frame):
        cv2.imshow("FG Display", frame.load("fg"))
        self.put(frame)
        cv2.waitKey(1000 // 24)
class MaskPlayer(F):
    """Debug viewer: show each binary mask in an OpenCV window (~24 fps)."""
    def do(self, frame):
        # Multiply by 1.0 so the boolean mask renders as a float image.
        cv2.imshow("Mask Display", 1.0 * frame.load("mask"))
        self.put(frame)
        cv2.waitKey(1000 // 24)
class BGPlayer(F):
    """Debug viewer: show each background model in an OpenCV window (~24 fps)."""
    def do(self, frame):
        cv2.imshow("BG Display", frame.load("bg"))
        self.put(frame)
        cv2.waitKey(1000 // 24)
class CropPlayer(F):
    """
    Debug viewer: tile all of a frame's crops into one mosaic image
    (``disp_w_n`` crops per row) and display it with OpenCV.
    """
    def do(self, frame):
        crops = frame.load("crops")
        crop_h = crops.shape[1]
        crop_w = crops.shape[2]
        crops_n = crops.shape[0]
        disp_w_n = 30  # crops per mosaic row
        disp_h_n = int(np.ceil(crops_n / disp_w_n))
        disp_w = int(disp_w_n * crop_w)
        disp_h = int(disp_h_n * crop_h)
        disp = np.zeros((disp_h, disp_w))
        for i in range(disp_h_n):
            for j in range(disp_w_n):
                # BUG FIX: the row-major cell index is i * <columns> + j.
                # The original used i * disp_h_n + j, which scrambles or
                # repeats crops whenever disp_h_n != disp_w_n (almost always,
                # since disp_w_n is fixed at 30).
                n = i * disp_w_n + j
                # Guard with >= (not ==): once past the last crop, every
                # remaining cell of the mosaic must stay empty.
                if n >= crops_n:
                    break
                disp[
                    i * crop_h : i * crop_h + crop_h, j * crop_w : j * crop_w + crop_w
                ] = crops[n].squeeze()
        cv2.imshow("Crop Display", disp)
        self.put(frame)
        cv2.waitKey(1000 // 24)
class MockPerformanceInfo(F):
    """
    Fake system-stats source for UI testing: writes synthetic Memory / CPU /
    Bandwidth curves (sinusoids of the item count) into meta["timings"].
    """
    def setup(self):
        self.count = 0
    def do(self, frame):
        # Sample all three curves at "time" count/10 and publish them.
        m, c, n = self.get_stats(self.count / 10.0)
        self.meta["timings"] = {"Memory": m, "CPU": c, "Bandwidth": n}
        self.count += 1
        self.put(frame)
    def get_memory(self, t):
        "Simulate a function that returns system memory"
        return 100 * (0.5 + 0.5 * np.sin(0.5 * np.pi * t))
    def get_cpu(self, t):
        "Simulate a function that returns cpu usage"
        return 100 * (0.5 + 0.5 * np.sin(0.2 * np.pi * (t - 0.25)))
    def get_net(self, t):
        "Simulate a function that returns network bandwidth"
        return 100 * (0.5 + 0.5 * np.sin(0.7 * np.pi * (t - 0.1)))
    def get_stats(self, t):
        # Bundle the three synthetic series sampled at time t.
        return self.get_memory(t), self.get_cpu(t), self.get_net(t)
class Passthrough(F):
    """
    Timing probe: records the wall-clock time elapsed since the previous
    item under meta["timings"]["Time per item"]. Note this includes any
    time spent waiting for the upstream node.
    """
    def setup(self):
        self.start = time.time()
    def do(self, frame):
        self.meta["timings"]["Time per item"] = time.time() - self.start
        self.start = time.time()
        self.put(frame)
import matplotlib.pyplot as plt
import numpy as np
import re
class PerformanceMonitor(F):
    """
    Collect and (optionally) plot per-node processing times.

    Reads meta["timings"] (populated by upstream nodes), normalizes each
    node's time by its degree of parallelism, keeps cumulative totals, and
    prints them at teardown. With visualize=True a live bar chart of
    per-node times is maintained.
    """
    def setup(self, parallelisms=None, visualize=True, log=False, verbose=True):
        self.visualize = visualize
        self.log = log
        self.fig = None
        self.ax = None
        self._visBars = None
        # Fixed: the original used a mutable default argument ({}).
        self.parallelisms = {} if parallelisms is None else parallelisms
        self.verbose = verbose
        self.total_times = {}
    def do(self, frame):
        # print("Meta", self.meta["timings"])
        self.timings = []
        self.labels = []
        for k, v in self.meta["timings"].items():
            # Processes are labelled eg. "MyProc-6"; strip the "-<n>" suffix.
            # (raw string: "\d" is an invalid escape in a plain literal)
            label = re.split(r"-\d+", k)[0]
            self.labels.append(label)
            if label not in self.total_times:
                self.total_times[label] = 0.0
            self.total_times[label] += v
            # Divide by the node's parallelism for effective per-item time.
            if label in self.parallelisms:
                timing = v / self.parallelisms[label]
            else:
                timing = v
            self.timings.append(timing)
        if self.verbose:
            print(
                "Completed frame",
                frame.number,
                "Num particles",
                len(frame.load("regionprops")),
            )
        if self.visualize:
            self._visualize()
        self.put(frame)
    def teardown(self):
        # Final summary of cumulative time spent per node type.
        print("PerformanceMonitor Total Times")
        for k, v in self.total_times.items():
            print(" `->", k, v, "s")
    def _visualize(self):
        # Lazily create the bar chart on first call; afterwards only update
        # the bar heights in place.
        if self.fig is None:
            self.fig, self.ax = plt.subplots()
            plt.show(block=False)
            ind = np.arange(1, len(self.labels) + 1)
            bars = plt.bar(ind, self.timings)
            self._visBars = {}
            for i in range(len(self.labels)):
                self._visBars[self.labels[i]] = bars[i]
            self.ax.set_title("Node Processing Time")
            self.ax.set_ylabel("Seconds")
            self.ax.set_xticks(ind)
            self.ax.set_xticklabels(self.labels)
        else:
            for i in range(len(self.labels)):
                bar = self._visBars[self.labels[i]]
                bar.set_height(self.timings[i])
        # ask the canvas to re-draw itself the next time it
        # has a chance.
        # For most of the GUI backends this adds an event to the queue
        # of the GUI frameworks event loop.
        self.fig.canvas.draw_idle()
        self.ax.set_xticklabels(self.labels)
        try:
            # make sure that the GUI framework has a chance to run its event loop
            # and clear any GUI events. This needs to be in a try/except block
            # because the default implementation of this method is to raise
            # NotImplementedError
            self.fig.canvas.flush_events()
        except NotImplementedError:
            pass
    def _log(self):
        # Placeholder: file logging is not implemented yet.
        pass
class MetaPlayer(F):
    """
    Print data related to the frame at current node: experiment UUID,
    segment UUID, global frame number and within-segment number.
    """
    def do(self, frame):
        print(
            "Experiment:",
            frame.experiment_uuid,
            ", Segment:",
            frame.segment_uuid,
            " Number:",
            frame.number,
            " Segment Number:",
            frame.segment_number,
        )
        self.put(frame)
class Counter(F):
    """
    Count the number of items that passed through this node.
    The tally is printed once at teardown.
    """
    def setup(self, name="Dracula"):
        # ``name`` distinguishes this counter in the printed summary.
        self.name = name
        self.count = 0
    def do(self, frame):
        self.count += 1
        self.put(frame)
    def teardown(self):
        print("Counter {} counted {}".format(self.name, self.count))
class Entry(F):
    """
    The first node after frame emitter.
    We use this node to stamp the frames with some meta data
    (experiment UUID, sequential frame number, fresh frame UUID)
    and convert the uint8 [0,255] frame to float64 [0,1]
    """
    def initialize(self, experiment_uuid):
        self.experiment_uuid = experiment_uuid
        self.count = 0  # sequential frame number within the experiment
    def do(self, frame):
        frame.experiment_uuid = self.experiment_uuid
        frame.number = self.count
        frame.uuid = uuid4()
        self.count += 1
        # Normalize uint8 pixel values into [0, 1] floats.
        frame.save("raw", frame.load("raw") / 255)
        # print("Frame", self.count, "entering...")
        self.put(frame)
class MagicPixel(F):
    """
    Detect segment boundaries using the Megaspeed camera
    pixel encoded information. The first row contains no image information, but
    does contain metadata.
    We use this meta data to detect when a "burst" starts/stops.
    """
    def setup(self):
        self.segment_number = 0
        self.magic_pixel = None  # last seen magic-pixel value
        self.segment_uuid = None
    def do(self, frame):
        # A jump larger than this in the magic pixel marks a new segment.
        magic_pixel_delta = 0.1
        raw = frame.load("raw")
        if config.use_magic_pixel_segmentation:
            # Pixel (row 0, col 4) carries the camera's encoded metadata.
            this_frame_magic_pixel = raw[0, 4, 0]
            if (
                self.magic_pixel is None
                or abs(this_frame_magic_pixel - self.magic_pixel) > magic_pixel_delta
            ):
                # print("Segment Boundry Detected")
                self.segment_uuid = uuid4()
                self.segment_number += 1
            self.magic_pixel = this_frame_magic_pixel
        # NOTE(review): with use_magic_pixel_segmentation disabled, frames
        # keep segment_uuid=None / segment_number=0.
        frame.segment_uuid = self.segment_uuid
        frame.segment_number = self.segment_number
        self.put(frame)
from skimage.transform import rescale
class Rescaler(F):
    """
    Rescale the frame by the desired scale factor.
    Currently rescales the "raw" frame; scale == 1 is a pass-through.
    """
    def setup(self, scale):
        self.scale = scale
    def do(self, frame):
        # the following error from skimage rescale on the frame.load return
        # - ValueError: buffer source array is read-only
        # frame.save("raw", rescale(frame.load("raw"), self.scale))
        if self.scale != 1:
            # Copy first: skimage's rescale rejects the read-only buffer
            # returned by frame.load (see note above).
            raw = np.copy(frame.load("raw"))
            rescaled = rescale(raw, self.scale)
            frame.save("raw", rescaled)
        self.put(frame)
import math
from collections import deque
class BG(F):
    """
    Compute a background.
    Buffers window_size frames so the model can warm up; each released frame
    carries the current background, and frames still queued at shutdown are
    flushed with the last background in teardown().
    """
    def setup(self, model="median", window_size=20, *args, env=None, **kwArgs):
        self.frame_que = deque()
        self.window_size = window_size
        # self.q_len = math.ceil(window_size / 2)
        # self.q_count = 0
        # Resolve the model name to a nested strategy class.
        # NOTE(review): no nested class named "median" is visible in this
        # chunk; the pipeline here uses model="simpleMax".
        self.model = getattr(self, model)(window_size=window_size, *args, **kwArgs)
    def do(self, frame):
        # import cv2
        # from uuid import uuid4
        self.frame_que.append(frame)
        # self.q_count += 1
        self.bg = self.model.process(frame.load("raw"))
        # cv2.imwrite('/home/mot/tmp/bg_'+str(uuid4())+'.png', self.bg)
        # Only release frames once the model has seen a full window.
        if len(self.frame_que) > self.window_size:
            # bg = self.que.popleft()
            frame = self.frame_que.popleft()
            frame.save("bg", self.bg)
            self.put(frame)
    def teardown(self):
        # Flush the frames still buffered at end-of-stream with the final bg.
        # NOTE(review): if the stream was shorter than window_size, self.bg
        # may still be None here.
        while len(self.frame_que) > 0:
            # self.q_count -= 1
            frame = self.frame_que.popleft()
            frame.save("bg", self.bg)
            self.put(frame)
    class simpleMax:
        """
        Computes the max over the first window_size frames.
        Good for approximating the lighting in a backlit scene.
        Returns None until window_size frames have been collected; the
        background is computed once and then reused for every later frame.
        """
        def __init__(self, window_size=20, img_shape=None):
            # print("simpleMax maxlen: "+str(math.ceil(window_size / 2)-5))
            self.window_size = window_size
            self.que = deque()
            self.bg = None
        def process(self, frame):
            # like erosion, but faster
            from skimage.morphology import erosion
            # parameter: minimum lighting (dynamic range), threshold below
            min_range = 20 / 255.0
            if len(self.que) < self.window_size:
                self.que.append(frame)
            elif len(self.que) == self.window_size:
                # print("computing bg...")
                if self.bg is None:
                    bg = np.max(self.que, axis=0)
                    bg[bg < min_range] = 0
                    bg = erosion(bg.squeeze(), square(8))
                    bg = np.expand_dims(bg, axis=-1)
                    self.bg = bg
            return self.bg
class FG(F):
    """
    Process the image to yield the scene foreground.
    The strategy is selected by name from the nested classes ("division").
    """
    def setup(self, model="division", *args, **kwargs):
        # Resolve the model name to one of the nested strategy classes.
        self.model = getattr(self, model)()
    def do(self, frame):
        # Compute and attach the foreground, then pass the frame downstream.
        frame.save("fg", self.model.process(frame))
        self.put(frame)
    class division:
        """
        In a backlit scene that roughly obeys Beer-Lambert laws, dividing the
        raw image by the background (backlit scene lighting) yields the
        transmittance, which for our application is useful.
        """
        def __init__(self):
            pass
        def process(self, frame):
            # Small epsilon avoids division by zero where the background is 0.
            eps = 0.0001
            raw = frame.load("raw")
            bg = frame.load("bg")
            # Use the already-loaded arrays; the original ignored these
            # locals and called frame.load twice more for the same data.
            div = raw / (bg + eps)
            # div[np.isnan(div)] = 1.0 # get rid of nan's from 0/0
            return np.clip(div, 0, 1)
from skimage.filters import threshold_sauvola
from skimage.morphology import binary_opening, remove_small_objects, square, erosion
from skimage.segmentation import clear_border
class Binary(F):
    """
    Performs thresholding on the foreground image, producing the "mask"
    resource consumed downstream.
    """
    def setup(self, model="simple", *args, **kwargs):
        # If your process needs to do any kind of setup once it has been forked,
        # or if it the first process in a workflow and expected to generate
        # values for the rest of the pipeline, that code should go here.
        # Resolve the model name to a nested strategy class.
        # NOTE(review): no nested class named "simple" is visible in this
        # chunk; the pipeline passes model="legacyLabeled".
        self.model = getattr(self, model)(*args, **kwargs)
    def do(self, frame):
        # The main workhorse of a process. Items will flow in here, potentially
        # be modified, mapped, reduced, or otherwise morgified, and output can
        # be then pushed downstream using the self.put() method.
        # Here, for example, any items are simply passed along.
        frame.save("mask", self.model.process(frame))
        self.put(frame)
    class legacyLabeled:
        # Fixed-threshold segmentation: keep pixels darker than ``threshold``,
        # then remove speckle noise (opening) and border-touching regions.
        def __init__(self, threshold=0.5):
            self.threshold = threshold
        def process(self, frame):
            # Take the center, removing edge artifacts
            # frame = frame[200:-200, 200:-200]
            sframe = frame.load("fg").squeeze()
            # Particles are darker than the backlit background.
            binary = sframe < self.threshold
            binary = binary_opening(binary, square(3))
            binary = clear_border(binary)
            # opened = binary_opening(binary, square(3))
            # cleared = clear_border(opened)
            return binary
from skimage.measure import label, regionprops
class Properties(F):
    """Labels the binary mask and attaches per-region measurements."""

    def do(self, frame):
        # Connected-component labelling of the mask, measured against the
        # (squeezed) foreground intensities.
        labelled = label(frame.load("mask"))
        props = regionprops(labelled, frame.load("fg").squeeze())
        frame.save("regionprops", props)
        # One fresh track uuid per detected region.
        frame.save("track_uuids", [uuid4() for _ in props])
        self.put(frame)
from lib.Crop import Crop
class Crop_Processor(F):
    """Extracts a fixed-size crop around each detected region centroid."""

    def do(self, frame):
        regionprops = frame.load("regionprops")
        cropper = Crop(frame.load("fg"))
        # regionprops centroids are (row, col); Crop expects (x, y).
        coords = [(p.centroid[1], p.centroid[0]) for p in regionprops]
        # FIX: removed the unused `bboxes` list that was computed here.
        crops = np.array(
            [cropper.crop(int(round(c[0])), int(round(c[1]))) for c in coords]
        )
        frame.save("crops", crops)
        self.put(frame)
class CropWriter(F):
    """Writes each region crop to disk as a JPEG under the experiment dir."""

    def initialize(self, experiment_dir):
        self.experiment_dir = experiment_dir

    def do(self, frame):
        crops = frame.load("crops")
        track_uuids = frame.load("track_uuids")
        # One sub-directory per frame, named by the frame uuid.
        frame_dir = os.path.join(self.experiment_dir, str(frame.uuid))
        os.makedirs(frame_dir, exist_ok=True)
        for track_uuid, crop in zip(track_uuids, crops):
            with warnings.catch_warnings():  # suppress warnings about low contrast images
                warnings.simplefilter("ignore")
                # FIX: the uint8 conversion was computed into `crop2` but then
                # ignored and recomputed inline in the imsave call; convert once
                # and use it.
                crop8 = (crop.squeeze() * 255).astype(np.uint8)
                io.imsave(
                    os.path.join(frame_dir, str(track_uuid) + ".jpg"),
                    crop8,
                    quality=90,
                )
        self.put(frame)
class FrameData(Datagram):
    """Datagram carrying per-frame identity/provenance through the pipeline."""

    def initialize(self):
        # All identifiers start unset; they are filled in by upstream stages.
        for attr in ("experiment_uuid", "segment_uuid", "segment_number",
                     "uuid", "number"):
            setattr(self, attr, None)
class CSVWriter(F):
    """Appends per-frame detection rows to staged CSV files under /tmp.

    The tab-delimited files are later bulk-loaded into the database by
    DBProcessor via COPY.
    """

    def initialize(
        self, experiment_uuid, experiment_day, name, method, notes, verbose=True
    ):
        self.verbose = verbose
        if self.verbose:
            print("Launching CSV processor")
        self.experiment_uuid = experiment_uuid
        self.experiment_day = experiment_day
        self.exp_name = name
        self.method = method
        self.notes = notes
        # Filename templates, formatted with the experiment uuid when used.
        self.csv_files = [
            "/tmp/{}_segment.csv",
            "/tmp/{}_frame.csv",
            "/tmp/{}_track.csv",
            "/tmp/{}_particle.csv",
        ]

    def do(self, frame):
        # Add frame, then its detections, then forward the frame.
        self.add_frame(frame)
        self.add_detections(frame)
        self.put(frame)

    def add_frame(self, frame):
        """Append one row describing this frame to the frame CSV."""
        # With magic-pixel segmentation, the row also carries the segment uuid.
        if config.use_magic_pixel_segmentation:
            data = (frame.uuid, frame.experiment_uuid, frame.segment_uuid, frame.number)
            s = "{}\t{}\t{}\t{}\n"
        else:
            data = (frame.uuid, frame.experiment_uuid, frame.number)
            s = "{}\t{}\t{}\n"
        with open("/tmp/{}_frame.csv".format(self.experiment_uuid), "a") as f:
            f.write(s.format(*data))

    def add_detections(self, frame):
        """Append one particle row and one track row per detected region."""
        regionprops = frame.load("regionprops")
        DEFAULT_CATEGORY = 1  # set to unknown for now
        particles = [
            (
                uuid4(),
                self.experiment_uuid,
                p.area,
                p.mean_intensity,
                p.perimeter,
                p.major_axis_length,
                p.minor_axis_length,
                p.orientation,
                p.solidity,
                p.eccentricity,
                DEFAULT_CATEGORY,
            )
            for p in regionprops  # FIX: dropped unused enumerate index
        ]
        # regionprops centroids/bboxes are (row, col); store as (x, y).
        coords = [(p.centroid[1], p.centroid[0]) for p in regionprops]
        bboxes = [((p.bbox[1], p.bbox[0]), (p.bbox[3], p.bbox[2])) for p in regionprops]
        track_uuids = frame.load("track_uuids")
        tracks = [
            (track_uuids[i], frame.uuid, particles[i][0], coords[i], bboxes[i])
            for i in range(len(regionprops))
        ]
        s = "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n"
        with open("/tmp/{}_particle.csv".format(self.experiment_uuid), "a") as f:
            for p in particles:
                # FIX: unpack the tuple instead of indexing p[0]..p[10] by hand.
                f.write(s.format(*p))
        s = "{}\t{}\t{}\t{}\t{}\n"
        with open("/tmp/{}_track.csv".format(self.experiment_uuid), "a") as f:
            for t in tracks:
                f.write(s.format(*t))
class DBProcessor(F):
    """Stages segment rows and, at teardown, bulk-loads all staged CSV files
    into the database inside a single transaction."""

    def initialize(
        self, experiment_uuid, experiment_day, name, method, notes, verbose=True
    ):
        self.verbose = verbose
        if self.verbose:
            print("Launching DB processor")
        self.experiment_uuid = experiment_uuid
        self.experiment_day = experiment_day
        self.exp_name = name
        self.method = method
        self.notes = notes
        # Filename templates for the staged CSVs (written by CSVWriter).
        self.csv_files = [
            "/tmp/{}_segment.csv",
            "/tmp/{}_frame.csv",
            "/tmp/{}_track.csv",
            "/tmp/{}_particle.csv",
        ]
        self.prev_segment_uuid = None

    def do(self, frame):
        # Record a segment row the first time each segment uuid is seen.
        if config.use_magic_pixel_segmentation:
            if frame.segment_uuid != self.prev_segment_uuid:
                self.prev_segment_uuid = frame.segment_uuid
                self.add_segment(frame)
        self.put(frame)

    def add_segment(self, frame):
        """Append one row describing this segment to the segment CSV."""
        data = (frame.segment_uuid, frame.experiment_uuid, frame.segment_number)
        s = "{}\t{}\t{}\n"
        with open("/tmp/{}_segment.csv".format(self.experiment_uuid), "a") as f:
            f.write(s.format(data[0], data[1], data[2]))

    async def copy_to_database(self):
        """Insert the experiment row, then COPY each staged CSV into its table."""
        if self.verbose:
            print("Copying to database.")
        # NOTE(review): the method column is hard-coded to
        # 'DetectionMpyxDatagram' rather than using self.method — confirm
        # whether that is intentional.
        await self.tx.execute(
            """
            INSERT INTO Experiment (experiment, day, name, method, notes)
            VALUES ($1, $2, $3, $4, $5)
            """,
            self.experiment_uuid,
            self.experiment_day,
            self.exp_name,
            "DetectionMpyxDatagram",
            self.notes,
        )
        if config.use_magic_pixel_segmentation:
            if self.verbose:
                print("Inserting segments into database.")
            await self.tx.execute(
                """
                COPY segment FROM '/tmp/{}_segment.csv' DELIMITER '\t' CSV;
                """.format(
                    self.experiment_uuid
                )
            )
        if config.use_magic_pixel_segmentation:
            if self.verbose:
                print("Inserting frames into database.")
            await self.tx.execute(
                """
                COPY frame FROM '/tmp/{}_frame.csv' DELIMITER '\t' CSV;
                """.format(
                    self.experiment_uuid
                )
            )
        else:
            if self.verbose:
                print("Inserting frames into database.")
            await self.tx.execute(
                """
                COPY frame (frame, experiment, number) FROM '/tmp/{}_frame.csv' DELIMITER '\t' CSV;
                """.format(
                    self.experiment_uuid
                )
            )
        if self.verbose:
            print("Inserting particles into database.")
        await self.tx.execute(
            """
            COPY particle (particle, experiment, area, intensity, perimeter, major, minor, orientation, solidity, eccentricity, category\n)FROM '/tmp/{}_particle.csv' DELIMITER '\t' CSV;
            """.format(
                self.experiment_uuid
            )
        )
        if self.verbose:
            print("Inserting tracks into database.")
        await self.tx.execute(
            """
            COPY track (track, frame, particle, location, bbox\n) FROM '/tmp/{}_track.csv' DELIMITER '\t' CSV;
            """.format(
                self.experiment_uuid
            )
        )

    def teardown(self):
        """Run the bulk load, committing on success and rolling back on error."""
        # BUG FIX: `async` became a reserved keyword in Python 3.7, so the
        # inherited coroutine runner can no longer be called as
        # `self.async(...)` (SyntaxError). Fetch the bound method via getattr.
        run_async = getattr(self, "async")
        self.tx, self.transaction = run_async(Database().transaction())
        try:
            run_async(self.copy_to_database())
        except Exception as e:
            print("rolling back database")
            log_err = e  # NOTE(review): consider logging the exception detail
            run_async(self.transaction.rollback())
        else:
            run_async(self.transaction.commit())

    def cleanupFS(self):
        """Remove the staged CSV files for this experiment, if present."""
        for f in self.csv_files:
            path = f.format(self.experiment_uuid)
            if os.path.isfile(path):
                os.remove(path)
|
# coding:utf8
# Use sklearn; (naive) Bayes assumes the features are mutually independent.
|
# Greet the user according to their study shift: M(orning), V (afternoon),
# N(ight). Any other value is rejected.
turno = input("Digite abreviado qual turno você estuda: ")
# FIX: the original used four independent `if`s plus a compound negated
# condition; an if/elif/else chain expresses the same mutually-exclusive
# choice directly.
if turno == "M":
    print("Bom Dia")
elif turno == "N":
    print("Boa Noite")
elif turno == "V":
    print("Boa Tarde")
else:
    print("Valor Invalido")
|
from yabadaba import databasemanager
from .IprPyDatabase import IprPyDatabase
# Extend the yabadaba MongoDatabase to include IprPyDatabase operations
class MongoDatabase(databasemanager.get_class('mongo'), IprPyDatabase):
    def check_records(self, record_style=None):
        """
        Counts the number of records of a given style in the database. If the
        records are calculation records, then it will also list the number of
        calculations with each status value.

        Parameters
        ----------
        record_style : str, optional
            The record style to check on. If not given, then the available record
            styles will be listed and the user prompted to pick one.
        """
        if record_style is None:
            record_style = self.select_record_style()

        if record_style is not None:
            # Display information about database records
            count = self.count_records(style=record_style)
            print(f'In {self}:')
            print(f'- {count} of style {record_style}', flush=True)

            # Count status values of calculations.
            # FIX: use startswith() rather than a hard-coded slice length.
            if record_style.startswith('calculation_'):
                count = self.count_records(style=record_style, status='finished')
                print(f" - {count} finished")
                count = self.count_records(style=record_style, status='not calculated')
                print(f" - {count} not finished")
                count = self.count_records(style=record_style, status='error')
                print(f" - {count} issued errors")
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 20 00:29:39 2019
@author: acrobat
"""
import os
import sys
import argparse
from datetime import datetime
import matplotlib.pyplot as plt
from torchvision import utils
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
#from dataset import *
from torch.autograd import Variable
from tensorboardX import SummaryWriter
from model import Resnet,BasicBlock
# On gradient updates: https://blog.csdn.net/byron123456sfsfsfa/article/details/90609758
def train(epoch):
    """Run one training epoch over cifar10_training_loader.

    Uses the module-level globals net, optimizer, loss_function, args and
    writer; logs the per-batch loss to TensorBoard.
    """
    net.train()  # training mode (affects batch-norm / dropout layers)
    for batch_index, (images, labels) in enumerate(cifar10_training_loader):
        # Move the batch to the GPU.
        images = Variable(images).cuda()
        labels = Variable(labels).cuda()
        optimizer.zero_grad()  # clear gradients from the previous step
        outputs = net(images)
        loss = loss_function(outputs, labels)
        loss.backward()   # backpropagate
        optimizer.step()  # apply the parameter update
        # Global iteration index across epochs (TensorBoard x-axis).
        n_iter = (epoch - 1) * len(cifar10_training_loader) + batch_index + 1
        # BUG FIX: the progress counter used args.b (a boolean flag) instead of
        # the batch size, so "trained_samples" never advanced past len(images).
        print('Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.4f}\tLR:{:0.6f}'.format(
            loss.item(),
            optimizer.param_groups[0]['lr'],
            epoch=epoch,
            trained_samples=batch_index * args.batchsize + len(images),
            total_samples=len(cifar10_training_loader.dataset)))
        writer.add_scalar('Train/loss', loss.item(), n_iter)
def eval_training(epoch):
    """Evaluate the network on the test set.

    Returns the accuracy (as a tensor) and logs average loss and accuracy to
    TensorBoard under the given epoch.
    """
    net.eval()  # evaluation mode: batch-norm/dropout behave differently than in training
    # NOTE(review): there is no torch.no_grad() here, so evaluation still
    # builds autograd graphs — confirm whether that is intended.
    test_loss = 0.0
    correct = 0.0
    for(images,lables) in cifar10_test_loader:
        images = Variable(images)
        lables = Variable(lables)
        images = images.cuda()
        lables = lables.cuda()
        outputs = net(images)
        loss = loss_function(outputs,lables)
        # loss.item() extracts the loss as a plain Python number.
        test_loss += loss.item()
        # Argmax over classes: `_` is the max score, `preds` the class index.
        _, preds = outputs.max(1)
        correct += preds.eq(lables).sum()
    print('Test set: Average loss: {:.4f}, Accuracy: {:.4f}'.format(
        test_loss / len(cifar10_test_loader.dataset),
        correct.float() / len(cifar10_test_loader.dataset)
    ))
    print()
    # add information to tensorboard: tag, y-value, x-coordinate (epoch)
    writer.add_scalar('Test/Average loss', test_loss / len(cifar10_test_loader.dataset), epoch)
    writer.add_scalar('Test/Accuracy', correct.float() / len(cifar10_test_loader.dataset), epoch)
    return correct.float() / len(cifar10_test_loader.dataset)
def get_training_dataloader(batch_size=16, shuffle=True):
    """Build the CIFAR-10 training DataLoader with standard augmentation."""
    augmentations = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.ToTensor(),
    ])
    dataset = torchvision.datasets.CIFAR10(
        root='../data', train=True, download=True, transform=augmentations)
    return DataLoader(dataset, shuffle=shuffle, batch_size=batch_size)
def get_test_dataloader(batch_size=16, shuffle=True):
    """Build the CIFAR-10 test DataLoader (no augmentation)."""
    to_tensor = transforms.Compose([transforms.ToTensor()])
    dataset = torchvision.datasets.CIFAR10(
        root='../data', train=False, download=True, transform=to_tensor)
    return DataLoader(dataset, shuffle=shuffle, batch_size=batch_size)
def visTensor(tensor, epoch, name, ch=0, allkernels=False, nrow=8, padding=1):
    """Save a grid visualization of a 4-D tensor to ./visTensor3.

    tensor: expected shape (n, c, w, h), e.g. (64, 3, 7, 7) for conv filters.
    If allkernels is True every channel becomes its own tile; otherwise only
    channel `ch` is shown when the tensor is not 3-channel.
    """
    n, c, w, h = tensor.shape
    if allkernels:
        tensor = tensor.view(n * c, -1, w, h)
    elif c != 3:
        # Keep a single channel and restore a unit channel dimension.
        tensor = tensor[:, ch, :, :].unsqueeze(dim=1)
    rows = np.min((tensor.shape[0] // nrow + 1, 64))
    grid = utils.make_grid(tensor, nrow=nrow, normalize=True, padding=padding)
    # savefig (rather than imsave) is used so the figure size controls the
    # output resolution.
    fig = plt.figure(figsize=(nrow, rows))
    plt.imshow(grid.numpy().transpose((1, 2, 0)), cmap=plt.get_cmap('gray_r'))
    plt.savefig(os.path.join('./visTensor3', 'epoch{}-{}.jpg'.format(epoch, name)))
    # BUG FIX: the figure was never closed, so repeated calls (this function
    # runs hundreds of times in the reconstruction loops) leaked figures and
    # memory.
    plt.close(fig)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-batchsize', type=int, default=128, help='batch size for dataloader')
    parser.add_argument('-lr', type=float, default=0.001, help='initial learning rate')
    # NOTE(review): argparse `type=bool` is a pitfall — bool("False") is True,
    # so ANY non-empty value passed on the command line enables these flags;
    # only the defaults behave as expected. Consider action='store_true' /
    # 'store_false' (that would change CLI usage, so it is only flagged here).
    parser.add_argument('-s', type=bool, default=True, help='whether shuffle the dataset')
    parser.add_argument('-a', type=bool, default=False, help='test the filter, reconstructed')
    parser.add_argument('-b', type=bool, default=False, help='test the acc')
    parser.add_argument('-c', type=bool, default=True, help='train')
    args = parser.parse_args()

    net = Resnet(BasicBlock)
    net = net.cuda()  # model and data both live on the GPU

    cifar10_training_loader = get_training_dataloader(
        batch_size=args.batchsize,
        shuffle=args.s)
    cifar10_test_loader = get_test_dataloader(
        batch_size=args.batchsize,
        shuffle=args.s)
    cifar10_image_loader = get_test_dataloader(
        batch_size=1,
        shuffle=args.s)

    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=args.lr)

    # datetime.now().isoformat() contains ':' which is illegal in Windows
    # folder names, so a fixed date string is used instead.
    TIME_NOW = '20191025'
    checkpoint_path = os.path.join('checkpoint', TIME_NOW)
    if not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)
    # {epoch} and {type} are filled in via .format() when saving below.
    checkpoint_path = os.path.join(checkpoint_path, '{epoch}-{type}.pth')

    writer = SummaryWriter()

    # -a: visualize filters / reconstructions from a saved checkpoint.
    a = args.a
    if a:
        net.load_state_dict(torch.load(r'C:\Users\acrobat\.spyder-py3\checkpoint\20191021\123-best.pth'), True)
        epoch = 123
        # Visualize the first-layer kernels.
        kernels = net.conv1[0].weight.cpu().data.clone()
        visTensor(kernels, epoch, 'conv1-0filter', ch=0, allkernels=False)
        for n_iter, (image, label) in enumerate(cifar10_image_loader):
            if n_iter > 0:
                break
            # Visualize the test image itself.
            visTensor(image, epoch, 'testimage', ch=0, allkernels=False)
            # Reconstruct from each of conv1's 64 filters, one at a time.
            for i in range(0, 64):
                kernels = net.get_reftconv1(image, i).data.clone()
                visTensor(kernels, i, 'conv1-reconstructed', ch=0, allkernels=False)
            # Reconstruct from each of conv5's 512 filters, one at a time.
            for i in range(0, 512):
                kernels = net.get_reftconv5(image, i).data.clone()
                visTensor(kernels, i, 'conv5-reconstructed', ch=0, allkernels=False)
            # Feed the image through conv1 to get its 64 feature maps.
            kernels = net.get_featureconv1(image).cpu().data.clone()
            visTensor(kernels, epoch, 'conv1-0feature', ch=0, allkernels=True)
        plt.axis('off')
        plt.ioff()
        plt.show()

    # -b: evaluate top-1 / top-5 accuracy from a saved checkpoint.
    b = args.b
    if b:
        # Raw path (r'...') avoids backslash escapes; loading the best
        # checkpoint allows resuming training or testing.
        net.load_state_dict(torch.load(r'C:\Users\acrobat\.spyder-py3\checkpoint\20191021\123-best.pth'), True)
        net.eval()
        correct_1 = 0.0
        correct_5 = 0.0
        total = 0
        for n_iter, (image, label) in enumerate(cifar10_test_loader):
            print("iteration: {}\ttotal {} iterations".format(n_iter + 1, len(cifar10_test_loader)))
            image = Variable(image).cuda()
            label = Variable(label).cuda()
            output = net(image)
            _, pred = output.topk(5, 1, largest=True, sorted=True)
            label = label.view(label.size(0), -1).expand_as(pred)
            correct = pred.eq(label).float()
            # compute top 5
            correct_5 += correct[:, :5].sum()
            # compute top1
            correct_1 += correct[:, :1].sum()
        print()
        print("Top 1 err: ", 1 - correct_1 / len(cifar10_test_loader.dataset))
        print("Top 5 err: ", 1 - correct_5 / len(cifar10_test_loader.dataset))
        print("Parameter numbers: {}".format(sum(p.numel() for p in net.parameters())))

    # -c: train, keeping the best checkpoint after epoch 60 plus periodic
    # "regular" snapshots every 20 epochs.
    # (FIX: a large block of dead commented-out visualization code that
    # duplicated the -a branch was removed here.)
    c = args.c
    if c:
        best_acc = 0.0
        for epoch in range(1, 200):
            train(epoch)
            acc = eval_training(epoch)
            if epoch > 60 and best_acc < acc:
                torch.save(net.state_dict(), checkpoint_path.format(epoch=epoch, type='best'))
                best_acc = acc
                continue
            if not epoch % 20:
                torch.save(net.state_dict(), checkpoint_path.format(epoch=epoch, type='regular'))

    writer.close()
|
import sys
sys.path.append("..")
from services.manipulation import *
class Format1:
    """
    Type 1 -> Text on the top of the image.
    Type 2 -> Text in the bottom of the image.
    Type 3 -> Text on top and bottom of the image.
    """
    def __init__(
        self,
        image_path,
        top_text=None,
        bottom_text=None,
        font_path='impact/impact.ttf',
        font_size=9,
    ):
        self.image_path = image_path
        self.top_text = top_text
        self.bottom_text = bottom_text
        self.font_path = font_path
        self.font_size = font_size

    def generate(self):
        """Render the configured text onto the image and return the result.

        The returned image's filename is rewritten to 'meme-<original name>'
        in the same directory.
        """
        img = Image.open(self.image_path)
        # BUG FIX: the original stored the text-rendered image in `image` but
        # then returned `img`, so the bottom text (and for types 2/3 all text)
        # was silently discarded. Thread one `result` through both steps and
        # return it; it also stays defined when no text was configured.
        result = img
        if self.top_text:
            result = text_on_top(self.top_text, result)
        if self.bottom_text:
            result = text_in_bottom(self.bottom_text, result)
        path, imagename = os.path.split(img.filename)
        result.filename = os.path.join(path, 'meme-' + imagename)
        return result
format1type1 = """
Type 1: __________________
| Text on top |
| |
| |
| |
|________________|
"""
format1type2 = """
Type 2: __________________
| |
| |
| |
| Text in bottom |
|________________|
"""
format1type3 = """
Type 3: __________________
| Text on top |
| |
| |
| Text in bottom |
|________________|
"""
# def generate(self):
# img = Image.open(self.image_path)
# draw = ImageDraw.Draw(img)
# (image_width, image_height) = img.size
# font = ImageFont.truetype(font=self.font_path,
# size=int(image_height
# * self.font_size) // 100)
# self.top_text = self.top_text.upper()
# (char_width, char_height) = font.getsize('A')
# chars_per_line = image_width // char_width
# top_lines = textwrap.wrap(self.top_text, width=chars_per_line)
# y = 10
#
# for line in top_lines:
# (line_width, line_height) = font.getsize(line)
# x = (image_width - line_width) / 2
# draw.text((x, y), line, fill='white', font=font)
# y += line_height
#
# img.save('meme-' + img.filename.split(os.sep)[-1])
# img.show()
|
#Copyright (c) 2019 Natan Nascimento Oliveira Matos <natanascimentom@outlook.com>
#Clock using Tkinter
from tkinter import *
from time import strftime
#For make the clock function
def clock():
    """Start updating the clock label once per second."""
    def tick():
        # Write the current time into the label, then re-arm the 1 s timer.
        main_structure["text"] = strftime("%H:%M:%S")
        root.after(1000, tick)
    tick()
# Build the interface: a single label showing the time.
root = Tk()
main_structure = Label(root, text="--:--:--")
main_structure.config(font=("Dubai Medium", "25", "bold", "roman"))
main_structure.pack()
# Start the periodic updates.
clock()
# Window title and size/placement ("WxH+X+Y").
root.title("Clock")
root.geometry("150x50+50+50")
# Without mainloop the program would stop here and the clock would never tick.
root.mainloop()
#!/usr/bin/env python3
"""
Analysis utilities for jet analysis with track dataframe.
Authors:
James Mulligan (james.mulligan@berkeley.edu)
Ezra Lesser (elesser@berkeley.edu)
"""
from __future__ import print_function
# General
import os
import sys
import math
# Data analysis and plotting
import uproot
import pandas
import numpy as np
import ROOT
# Fastjet via python (from external library heppy)
import fjcontrib
# Base class
from pyjetty.alice_analysis.process.base import common_utils
################################################################
class ProcessUtils(common_utils.CommonUtils):
    """Analysis utilities for jet analysis with a track dataframe."""

    def __init__(self, **kwargs):
        super(ProcessUtils, self).__init__(**kwargs)

    def is_det_jet_accepted(self, jet_det, min_leading_track_pT=None):
        """Check whether a det-level jet passes acceptance.

        Rejects any jet containing a track above 100 GeV. If
        min_leading_track_pT is given, additionally requires at least one
        constituent at or above that pT.
        """
        min_found = False
        for track in jet_det.constituents():
            if min_leading_track_pT and not min_found and track.pt() >= min_leading_track_pT:
                min_found = True
            if track.pt() > 100.:
                return False
        return min_found if min_leading_track_pT else True

    def is_truth_jet_accepted(self, jet_truth, min_leading_track_pT=None):
        """Check whether a truth-level jet passes acceptance.

        Same as is_det_jet_accepted but without the 100 GeV track veto.
        """
        # If there is no leading track pT cut, accept all truth jets.
        if not min_leading_track_pT:
            return True
        for track in jet_truth.constituents():
            if track.pt() >= min_leading_track_pT:
                return True
        return False

    def delta_R(self, jet, eta, phi):
        """Compute delta-R (eta-phi) between a PseudoJet and (eta, phi)."""
        delta_phi = np.abs(jet.phi() - phi)
        delta_eta = jet.eta() - eta
        if delta_phi > np.pi:
            delta_phi = 2*np.pi - delta_phi  # wrap the phi difference to [0, pi]
        deltaR = np.sqrt(delta_phi*delta_phi + delta_eta*delta_eta)
        return deltaR

    def get_leading_constituent(self, jet):
        """Return the highest-pT constituent of the jet (None if empty)."""
        leading_particle = None
        leading_particle_pt = 0.
        for particle in jet.constituents():
            if particle.pt() > leading_particle_pt:
                leading_particle = particle
                # BUG FIX: the running maximum was never updated, so the
                # function previously returned the *last* constituent with
                # positive pT rather than the leading one.
                leading_particle_pt = particle.pt()
        return leading_particle

    def leading_jet(self, jets):
        """Return the leading jet (or subjet) from a list."""
        leading_jet = None
        for jet in jets:
            if not leading_jet:
                leading_jet = jet
            if jet.pt() > leading_jet.pt():
                leading_jet = jet
        return leading_jet

    def groom(self, gshop, grooming_setting, jetR):
        """Perform grooming and return the Lund declustering object.

        (cpptools/src/fjcontrib/custom/GroomerShop.hh)

        Note that the GroomerShop returns a pointer to the LundDeclustering
        object -- it is a class member of the GroomerShop and only stays in
        scope as long as the GroomerShop does.
        """
        if 'sd' in grooming_setting:
            zcut = grooming_setting['sd'][0]
            beta = grooming_setting['sd'][1]
            return gshop.soft_drop(beta, zcut, jetR)
        elif 'dg' in grooming_setting:
            if len(gshop.jet().constituents()) < 2:
                return None  # need at least two constituents to decluster
            a = grooming_setting['dg'][0]
            if a == 'max_pt_softer':
                return gshop.max_pt_softer()
            elif a == 'max_z':
                return gshop.max_z()
            elif a == 'max_kt':
                return gshop.max_kt()
            # FIX: guard with isinstance before the string test; the original
            # called len(a) which raises TypeError when `a` is a numeric
            # dynamical-grooming parameter.
            elif isinstance(a, str) and a.startswith('late_kt'):
                return gshop.late_kt(float(a[8:]))
            elif a == 'max_kappa':
                return gshop.max_kappa()
            elif a == 'max_tf':
                return gshop.max_tf()
            elif a == 'min_tf':
                return gshop.min_tf()
            else:
                return gshop.dynamical(a)
        else:
            sys.exit('grooming_setting {} not recognized.'.format(grooming_setting))
|
import itertools
from sympy import sieve
from math import sqrt, ceil
import numpy as np
from datetime import datetime
# def timer(func):
# def wrapper(*args, **kwargs):
# start = datetime.now()
# result = list(func(*args, **kwargs))
# # result = func(*args, **kwargs)
# end = datetime.now()
# print(func.__qualname__, args, "took \t\t",
# (end-start).microseconds/1000, "milliseconds")
# return result
# return wrapper
def timer_noresult(func):
    """Wrap `func` so calling it returns its run time in milliseconds.

    The wrapped function's result is materialized with list() (so generator
    results are fully consumed) and then discarded.
    """
    from functools import wraps  # FIX: preserve the wrapped function's metadata

    @wraps(func)
    def wrapper(*args, **kwargs):
        start = datetime.now()
        list(func(*args, **kwargs))  # force full evaluation; result is discarded
        end = datetime.now()
        return (end - start).total_seconds() * 1000
    return wrapper
def sieveOfAtkin(end):
    """sieveOfAtkin(end): return a list of all the prime numbers <end
    using the Sieve of Atkin."""
    # Code by Steve Krenzel, <Sgk284@gmail.com>, improved
    # Code: https://web.archive.org/web/20080324064651/http://krenzel.info/?p=83
    # Info: http://en.wikipedia.org/wiki/Sieve_of_Atkin
    assert end > 0
    # Only odd numbers are stored: slot m represents the number 2*m + 1.
    lng = ((end-1) // 2)
    sieve = [False] * (lng + 1)

    # Case 4x^2 + y^2: toggle candidates n with n % 12 in {1, 5}.
    x_max, x2, xd = int(sqrt((end-1)/4.0)), 0, 4
    for xd in range(4, 8*x_max + 2, 8):
        x2 += xd
        y_max = int(sqrt(end-x2))
        n, n_diff = x2 + y_max*y_max, (y_max << 1) - 1
        if not (n & 1):
            n -= n_diff
            n_diff -= 2
        for d in range((n_diff - 1) << 1, -1, -8):
            m = n % 12
            if m == 1 or m == 5:
                m = n >> 1
                sieve[m] = not sieve[m]
            n -= d

    # Case 3x^2 + y^2: toggle candidates n with n % 12 == 7.
    x_max, x2, xd = int(sqrt((end-1) / 3.0)), 0, 3
    for xd in range(3, 6 * x_max + 2, 6):
        x2 += xd
        y_max = int(sqrt(end-x2))
        n, n_diff = x2 + y_max*y_max, (y_max << 1) - 1
        if not(n & 1):
            n -= n_diff
            n_diff -= 2
        for d in range((n_diff - 1) << 1, -1, -8):
            if n % 12 == 7:
                m = n >> 1
                sieve[m] = not sieve[m]
            n -= d

    # Case 3x^2 - y^2 (x > y): toggle candidates n with n % 12 == 11.
    x_max, y_min, x2, xd = int((2 + sqrt(4-8*(1-end)))/4), -1, 0, 3
    for x in range(1, x_max + 1):
        x2 += xd
        xd += 6
        if x2 >= end:
            y_min = (((int(ceil(sqrt(x2 - end))) - 1) << 1) - 2) << 1
        n, n_diff = ((x*x + x) << 1) - 1, (((x-1) << 1) - 2) << 1
        for d in range(n_diff, y_min, -8):
            if n % 12 == 11:
                m = n >> 1
                sieve[m] = not sieve[m]
            n += d

    # Collect results, eliminating squares of primes (composites that
    # survived the quadratic-form toggling).
    primes = [2, 3]
    if end <= 3:
        return primes[:max(0, end-2)]
    for n in range(5 >> 1, (int(sqrt(end))+1) >> 1):
        if sieve[n]:
            primes.append((n << 1) + 1)
            aux = (n << 1) + 1
            aux *= aux
            # Strike out odd multiples of the prime's square.
            for k in range(aux, end, 2 * aux):
                sieve[k >> 1] = False

    # Remaining odd survivors above sqrt(end) are prime.
    s = int(sqrt(end)) + 1
    if s % 2 == 0:
        s += 1
    primes.extend([i for i in range(s, end, 2) if sieve[i >> 1]])

    return primes
def eratosthenes2(n):
    """Yield the primes up to and including n via incremental sieving."""
    composites = set()
    for candidate in range(2, n + 1):
        if candidate in composites:
            continue
        yield candidate
        # Mark every multiple of the new prime, starting at its square.
        composites.update(range(candidate * candidate, n + 1, candidate))
# new
def primes(n):
    """ Returns a list of primes < n """
    # BUG FIX: previously returned [2] even for n <= 2 (there are no primes
    # below 2, and 2 itself is not < 2).
    if n <= 2:
        return []
    sieve = [True] * n
    for i in range(3, int(n**0.5)+1, 2):
        if sieve[i]:
            # Strike odd multiples of i starting at i*i.
            sieve[i*i::2*i] = [False]*((n-i*i-1)//(2*i)+1)
    return [2] + [i for i in range(3, n, 2) if sieve[i]]
def primes1(n):
    """ Returns a list of primes < n """
    # BUG FIX: previously returned [2] even for n <= 2.
    if n <= 2:
        return []
    # Half-size sieve: slot i represents the odd number 2*i + 1.
    sieve = [True] * (n//2)
    for i in range(3, int(n**0.5)+1, 2):
        if sieve[i//2]:
            sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)
    return [2] + [2*i+1 for i in range(1, n//2) if sieve[i]]
def primesfrom3to(n):
    """ Returns an array of primes, 3 <= p < n """
    # FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin bool dtype instead.
    sieve = np.ones(n//2, dtype=bool)
    for i in range(3, int(n**0.5)+1, 2):
        if sieve[i//2]:
            sieve[i*i//2::i] = False
    return 2*np.nonzero(sieve)[0][1::]+1
def primesfrom2to(n):
    """ Input n>=6, Returns an array of primes, 2 <= p < n """
    # FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin bool dtype instead.
    # Wheel-2,3 sieve: only residues 1 and 5 (mod 6) are stored.
    sieve = np.ones(n//3 + (n % 6 == 2), dtype=bool)
    for i in range(1, int(n**0.5)//3+1):
        if sieve[i]:
            k = 3*i+1 | 1  # candidate prime represented by slot i
            sieve[k*k//3::2*k] = False
            sieve[k*(k-2*(i & 1)+4)//3::2*k] = False
    return np.r_[2, 3, ((3*np.nonzero(sieve)[0][1:]+1) | 1)]
def symsieve(n):
    """Return the primes below n using sympy's cached sieve."""
    prime_iter = sieve.primerange(1, n)
    return list(prime_iter)
# Shorthand aliases used by rwh_primes2 below.
# NOTE(review): despite the name, izip is zip_longest (pads the shorter
# iterable with None), not plain zip.
izip = itertools.zip_longest
chain = itertools.chain.from_iterable
compress = itertools.compress
def rwh_primes2(n):
    """ Input n>=6, Returns a list of primes, 2 <= p < n """
    # Wheel-2,3 bytearray sieve: only numbers congruent to 1 or 5 (mod 6)
    # are stored; slot i represents 3*i+1 | 1.
    zero = bytearray([False])
    size = n//3 + (n % 6 == 2)
    sieve = bytearray([True]) * size
    sieve[0] = False  # slot 0 corresponds to 1, which is not prime
    for i in range(int(n**0.5)//3+1):
        if sieve[i]:
            k = 3*i+1 | 1  # the candidate prime represented by slot i
            start = (k*k+4*k-2*k*(i & 1))//3
            # Strike out multiples of k in both wheel residue classes.
            sieve[(k*k)//3::2*k] = zero*((size - (k*k)//3 - 1) // (2 * k) + 1)
            sieve[start::2*k] = zero*((size - start - 1) // (2 * k) + 1)
    ans = [2, 3]
    # Interleave the 6k+1 and 6k+5 progressions and keep surviving slots.
    # NOTE(review): izip here is zip_longest, which pads with None when the
    # two ranges differ in length; the sieve length appears to account for
    # this, but verify the n % 6 edge cases.
    poss = chain(izip(*[range(i, n, 6) for i in (1, 5)]))
    ans.extend(compress(poss, sieve))
    return ans
# to be tested
# n = int(input("input n:"))
# res1 = erato_test(n)
# res2 = atkin_test(n)
# res3 = primes_test(n)
# res4 = primes1_test(n)
# res5 = primesfrom3to_test(n)
# res6 = primesfrom2to_test(n)
# res7 = symsieve_test(n)
# res8 = rwh_primes2_test(n)
if __name__ == "__main__":
    import json

    # Wrap each sieve so that calling it returns elapsed milliseconds.
    erato_test = timer_noresult(eratosthenes2)
    atkin_test = timer_noresult(sieveOfAtkin)
    primes_test = timer_noresult(primes)
    primes1_test = timer_noresult(primes1)
    primesfrom3to_test = timer_noresult(primesfrom3to)
    primesfrom2to_test = timer_noresult(primesfrom2to)
    symsieve_test = timer_noresult(symsieve)
    rwh_primes2_test = timer_noresult(rwh_primes2)

    n_list = [2000, 4000, 6000, 8000, 10000,
              20000, 40000, 60000, 80000, 100000,
              200000, 400000, 600000, 800000, 1000000,
              2000000, 4000000, 6000000, 8000000, 10000000,
              15000000, 20000000, 25000000, 30000000
              ]
    functions = [erato_test, atkin_test, primes_test, primes1_test,
                 primesfrom3to_test, primesfrom2to_test, symsieve_test,
                 rwh_primes2_test]
    # FIX: corrected the "siemsieve" label typo; also removed an unused
    # `from collections import defaultdict` import.
    fun_names = ["erato", "atkin", "primes", "primes1", "primesfrom3to",
                 "primesfrom2to", "symsieve", "rwh_primes2"]

    results = {}
    # FIX: pair names with functions via zip instead of enumerate plus
    # parallel-list indexing.
    for name, funct in zip(fun_names, functions):
        print(name)
        timings = [funct(n) for n in n_list]
        results[name] = timings
        print(timings)

    with open("results.json", "w") as resf:
        json.dump(results, resf)
|
from uc.itm import UCWrappedProtocol, MSG
from math import ceil, floor
from uc.utils import wait_for, waits
from collections import defaultdict
from numpy.polynomial.polynomial import Polynomial
import logging
log = logging.getLogger(__name__)
class Syn_Bracha_Protocol(UCWrappedProtocol):
#def __init__(self, sid, pid, channels):
def __init__(self, k, bits, sid, pid, channels, pump, poly, importargs):
    """Bracha reliable-broadcast party in the synchronous wrapped-protocol model.

    The session id `sid` is a tuple (ssid, parties, delta).
    """
    self.ssid = sid[0]       # session sub-identifier
    self.parties = sid[1]    # pids of all participating parties
    self.delta = sid[2]      # channel delay bound
    self.n = len(self.parties)
    self.t = floor(self.n/3)  # tolerated number of byzantine parties
    UCWrappedProtocol.__init__(self, k, bits, sid, pid, channels, poly, pump, importargs)
    # Protocol state
    self.prepared_value = None            # value adopted from the dealer's VAL
    self.echoed = False
    self.readied = False
    self.committed = False
    self.num_echos = defaultdict(int)     # per-value ECHO counters
    self.num_readys = defaultdict(int)    # per-value READY counters
    self.halt = False                     # set once a value has been accepted
def except_me(self):
    """Return the pids of all parties other than this one."""
    others = []
    for party in self.parties:
        if party != self.pid:
            others.append(party)
    return others
def clock_round(self):
    """Ask the wrapper for the current synchronous round number."""
    self.write('p2w', ('clock-round',), 0)
    # The wrapper replies on the w2p channel with ('clock-round', rnd).
    rnd = wait_for(self.channels['w2p']).msg[1]
    return rnd
def send_msg(self, to, msg, imp):
    """Send `msg` to party `to` over F_chan, spending `imp` import tokens."""
    r = self.clock_round()
    # Channel session id encodes (ssid, from, to, round, delay bound).
    fchannelsid = (self.ssid, (self.sid,self.pid), (self.sid,to), r, self.delta)
    log.debug("\nsending import: {}".format(imp))
    self.write('p2f', ((fchannelsid,'F_chan'), ('send',msg)), imp)
    # The channel functionality must acknowledge the send.
    m = wait_for(self.channels['f2p'])
    assert m.msg[1] == 'OK', str(m)
def val_msg(self, sender, inp, imp):
# Only if you haven't already prepared a value should you accept a VAL
if not self.prepared_value and sender[1] == 1:
self.prepared_value = inp
msg = ('ECHO', self.prepared_value)
for pid in self.except_me():
self.tick(1)
self.send_msg( pid, ('ECHO', self.prepared_value), 3)
self.num_echos[inp] += 1
self.pump.write("dump")
def echo_msg(self, inp, imp):
n = len(self.parties)
self.num_echos[inp] += 1
log.debug('[{}] Num echos {}, required: {}'.format(self.pid, self.num_echos[inp], ceil(n+(n/3))/2))
if self.num_echos[inp] == ceil(n + (n/3))/2:
if inp == self.prepared_value:
self.num_readys[inp] += 1
# send out READY
for p in self.except_me():
self.tick(1)
self.send_msg( p, ('READY', self.prepared_value), 0)
self.pump.write("dump")
def ready_msg(self, inp, imp):
self.num_readys[inp] += 1
log.debug('[{}] Num readys {}'.format(self.pid, self.num_readys[inp]))
log.debug('required {}'.format(2*(self.n/3)+1))
if self.prepared_value and self.prepared_value == inp:
if self.num_readys[inp] == int(2*(self.n/3) + 1):
print('\033[92m [{}] Accepted input {}\033[0m'.format(self.pid, self.prepared_value))
self.write( 'p2z', self.prepared_value )
self.halt = True
return
self.pump.write("dump")
def p2p_msg(self, sender, msg, imp):
_,msg = msg
sid,pid = sender
ssid,fro,to,r,d = sid
if self.committed: self.pump.write("dump")# dump.dump()
elif msg[0] == 'VAL':
self.val_msg( fro, msg[1], imp)
elif msg[0] == 'ECHO':
self.echo_msg(msg[1], imp)
elif msg[0] == 'READY':
self.ready_msg(msg[1], imp)
else: print('Msg not recognized: {}'.format(msg)); self.pump.write("dump")#dump.dump()
def func_msg(self, d):
if self.halt: self.pump.write('dump'); return
msg = d.msg
imp = d.imp
sender,msg = msg
if sender[1] == 'F_chan':
self.p2p_msg(sender, msg, imp)
else:
#dump.dump()
self.pump.write("dump")
def wrapper_msg(self, msg):
self.pump.write("dump")
def adv_msg(self, msg):
self.pump.write("dump")
def env_input(self, inp):
if self.halt: self.pump.write('dump'); return
if self.pid == 1:
for p in self.parties:
self.send_msg( p, ('VAL', inp), 4*len(self.parties))
#self.pump.write("dump")
self.write('p2z', 'OK')
def env_msg(self, d):
if self.halt: self.pump.write('dump'); return
msg = d.msg
imp = d.imp
if msg[0] == 'input':
self.env_input(msg[1])
else:
self.pump.wite("done")
from uc.itm import ProtocolWrapper, WrappedProtocolWrapper
from uc.adversary import DummyWrappedAdversary
from uc.syn_ours import Syn_FWrapper, Syn_Channel
from uc.execuc import execWrappedUC
def env1(static, z2p, z2f, z2a, z2w, a2z, p2z, f2z, w2z, pump):
    """UC test environment: feed input 2 to party 1 and inspect leaks.

    The z2*/ *2z parameters are the environment's channels to/from the
    parties (p), functionalities (f), adversary (a) and wrapper (w).
    """
    delta = 3
    n = 3
    #sid = ('one', (1,2,3), delta)
    # Session id: (ssid, party ids, channel delay).
    sid = ('one', tuple(range(1,n+1)), delta)
    static.write( ('sid', sid) )
    # Hand input 2 to party 1 with an import budget of n*(4n+1).
    z2p.write( ((sid,1), ('input', 2)), n*(4*n + 1) )
    #wait_for(p2z)
    waits(pump, p2z)
    def channel_id(fro, to, r):
        # Build the (sid, 'F_chan') id of the round-r channel fro -> to.
        # NOTE(review): only used by the commented-out scenario below.
        s = ('one', (sid,fro), (sid,to), r, delta)
        return (s,'F_chan')
    # Ask the adversary to dump the wrapper's leak buffer.
    z2a.write( ('A2W', ('get-leaks',)) )
    msgs = waits(pump, a2z)
    print('\033[91m [Leaks] \033[0m', '\n'.join(str(m) for m in msgs.msg))
    ## Force first pop from queue to deliver a VAL to first person
    #for i in range(3):
    #    z2w.write( ('poll',), 1)
    #    #m = wait_for(a2z).msg; assert m == ('poll',), str(m)
    #    m = waits(pump, a2z).msg; assert m == ('poll',), str(m)
    #z2w.write( ('poll',), 1 )
    ##wait_for(a2z)
    #waits(pump, a2z)
    #
    #    # Get leaks from the channels fro=1, r=2, to=*
    #    # Expect ECHO messages to all
    #    (_sid,tag),msg = z_get_leaks( z2a, a2z, channel_id(1,2,2)).msg
    #    print('fro={}, to={}, msg={}'.format(_sid[1][1],_sid[2][1],msg))
    #    (_sid,tag),msg = z_get_leaks( z2a, a2z, channel_id(1,3,2)).msg
    #    print('fro={}, to={}, msg={}'.format(_sid[1][1],_sid[2][1],msg))
    #    (_sid,tag),msg = z_get_leaks( z2a, a2z, channel_id(1,1,2)).msg
    #    print('fro={}, to={}, msg={}'.format(_sid[1][1],_sid[2][1],msg))
    #
    #    # Deliver the second VAL message
    #    for i in range(2):
    #        z2w.write( ('poll',), 1)
    #        #m = wait_for(a2z).msg; assert m == ('poll',), str(m)
    #        m = waits(pump, a2z).msg; assert m == ('poll',), str(m)
    #    z2w.write( ('poll',), 1 )
    #    #wait_for(a2z)
    #    waits(pump, a2z)
    #    print('*****')
    #
    #    # Adversary execs the final VAL deliver
    #    z2a.write( ('A2W', ('exec', 2, 0)) )
    #    wait_for(a2z)
    #
    #    print('\n\033[1mBy this time all parties should have received a VAL message\033[0m\n')
    #
    #    z2a.write( ('A2W', ('delay', 5)) )
    #    wait_for(a2z)
    #
    #    for i in range(9):
    #        z2w.write( ('poll',) )
    #        wait_for(a2z)
    #    z2w.write( ('poll',) )
    #    wait_for(a2z)
    #    for _ in range(2):
    #        z2w.write( ('poll',) )
    #        wait_for(a2z)
    #    z2w.write( ('poll',) )
    #    wait_for(a2z)
    #    for _ in range(2):
    #        z2w.write( ('poll',) )
    #        wait_for(a2z)
    #    z2w.write( ('poll',) )
    #    wait_for(a2z)
    #    for _ in range(2):
    #        z2w.write( ('poll',) )
    #        wait_for(a2z)
    #
    #    for _ in range(10):
    #        z2w.write( ('poll',) )
    #        wait_for(a2z)
if __name__ == '__main__':
    # Run env1 against the Bracha protocol with a dummy wrapped adversary
    # and F_chan channels under the synchronous wrapper.
    execWrappedUC(env1, [('F_chan',Syn_Channel)], WrappedProtocolWrapper, Syn_FWrapper, Syn_Bracha_Protocol, DummyWrappedAdversary)
|
#!/bin/python3
import math
import os
import random
import re
import sys
class Stack:
    """Minimal stack of opening brackets used by isBalanced."""

    def __init__(self):
        self.bracket_list = []

    def push(self, item):
        """Record an opening bracket."""
        self.bracket_list.append(item)

    def pop(self, item):
        """Try to match closing bracket *item* against the top of the stack.

        Returns 0 on a successful match (the opener is removed), 1 on a
        mismatch or when the stack is empty.
        """
        if not self.bracket_list:
            return 1
        openers = {')': '(', ']': '[', '}': '{'}
        if openers.get(item) == self.bracket_list[-1]:
            self.bracket_list.pop()
            return 0
        return 1

    def isEmpty(self):
        """True when every pushed opener has been matched."""
        return not self.bracket_list
# Complete the isBalanced function below.
def isBalanced(s):
    """Return 'YES' if every bracket in *s* is matched and properly nested,
    otherwise 'NO'.

    Uses a plain list as the stack (idiomatic; also removes the dependency
    on the helper Stack class).  Any character that is neither an opener
    nor a closer matching the top of the stack yields 'NO', matching the
    original behaviour.
    """
    pairs = {')': '(', ']': '[', '}': '{'}
    stack = []
    for ch in s:
        if ch in '([{':
            stack.append(ch)
        elif not stack or stack.pop() != pairs.get(ch):
            # Closing bracket with no opener, or a mismatched/unknown char.
            return 'NO'
    # Leftover openers mean the string is unbalanced.
    return 'YES' if not stack else 'NO'
if __name__ == '__main__':
    # HackerRank harness: read t queries and write one YES/NO verdict per
    # line to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    t = int(input())
    for t_itr in range(t):
        s = input()
        result = isBalanced(s)
        fptr.write(result + '\n')
    fptr.close()
|
"""
Created by hzwangjian1
On 2017-08-31
"""
import traceback
def read_write(input_path, output_path, label):
    """Copy the first two tab-separated fields of each input line to
    *output_path*, appending *label* as a third column.

    Bug fixes versus the original:
    - The original advanced the read cursor *inside* the ``try`` block, so
      a malformed line (fewer than two fields) raised before ``readline``
      ran again and the loop re-processed the same line forever.  Iterating
      over the file handle makes progress unconditional.
    - The line's trailing newline is stripped before splitting, so the
      label is no longer appended after an embedded newline.
    Malformed lines are logged via traceback and skipped.
    """
    with open(output_path, 'w') as whandler:
        with open(input_path, 'r') as rhandler:
            for line in rhandler:
                try:
                    sub_strs = line.rstrip('\n').split("\t")
                    whandler.write(sub_strs[0] + "\t" + sub_strs[1] + "\t" + label + "\n")
                except Exception:
                    traceback.print_exc()
if __name__ == '__main__':
    # Label the two hard-coded corpora: '0' = true negatives, '1' = true positives.
    read_write('E://temp/docduplicate/image/TrueNegative','E://temp/docduplicate/image/TrueNegative.format','0')
    read_write('E://temp/docduplicate/image/TruePositive','E://temp/docduplicate/image/TruePositive.format','1')
read_write('E://temp/docduplicate/image/TruePositive','E://temp/docduplicate/image/TruePositive.format','1') |
import random
# Number-guessing game: two rounds of difficulty — guess in 1..99, then 1..49.
r = [99, 49]
for j in r:
    # Pick 10 distinct secret numbers in 1..j; the player must find each one.
    l = random.sample(range(1, j+1), 10)
    for i in l:
        n = 0
        # Keep prompting until the guess equals the secret number.
        while i != n:
            n = int(input("Enter an integer from 1 to %s: " % j))
            if n < i:
                print("guess is low")
            elif n > i:
                print("guess is high")
        print("you guessed it!")
|
#!/usr/bin/python3
'''
for-loop demo: iterating, mutating in place, appending, and range().
'''
exampleList = [1,2,314,413,6,8,9,10,20]
for value in exampleList:
    print(value)
    print("continue")
print(exampleList)

# Increment every element in place by index.
length = len(exampleList)
for idx in range(length):
    exampleList[idx] += 1
print(exampleList)

# Start from an empty list and grow it.
emptyList = []
emptyList.append(1)
emptyList.append(12)
print("emptyList:",emptyList)

# out of for loop
print("over")

# range built-in function: prints 1..10 inclusive.
for x in range(1,11):
    print(x)
|
import load
import args
import data
import model
import batch
import optimizer
import numpy as np
import os
def train_gaussian_naive_bayes(
        train_x,
        train_y,
        prefix,
        validation):
    """Fit a Gaussian naive Bayes model, log accuracy, and save the model.

    Args:
        train_x: feature matrix, shape (num_samples, num_features).
        train_y: labels aligned with train_x.
        prefix: name used for the model folder and the log file.
        validation: when True, hold out a validation split and report it.
    """
    # Make sure the save paths are valid (makedirs is idempotent with exist_ok).
    os.makedirs('models', exist_ok=True)
    save_model_prefix = os.path.join('models', prefix)
    os.makedirs(save_model_prefix, exist_ok=True)
    os.makedirs('logs', exist_ok=True)
    # Context manager guarantees the log file is closed even if fitting fails
    # (the original leaked the handle on any exception).
    with open(os.path.join('logs', prefix + '.log'), 'w') as log_f:
        if validation:
            log_f.write('accuracy,validation\n')
        else:
            log_f.write('accuracy\n')
        feature_num = train_x.shape[1]
        if validation:
            data_processor = data.DataProcessor()
            train_x, train_y, valid_x, valid_y = \
                data_processor.cut_validation(train_x, train_y)
        trainer = model.GaussianNaiveBayes()
        trainer.fit(train_x, train_y, feature_num)
        pred_y = trainer.predict(train_x)
        accuracy = trainer.count_accuracy(pred_y, train_y)
        if validation:
            valid_pred = trainer.predict(valid_x)
            valid_accuracy = trainer.count_accuracy(valid_pred, valid_y)
            message = 'accuracy:%.3f, validation:%.3f'\
                % (accuracy, valid_accuracy)
            log_f.write('%.3f,%.3f\n'\
                % (accuracy, valid_accuracy))
        else:
            message = 'accuracy:%.3f' % (accuracy)
            log_f.write('%.3f\n'% (accuracy))
        print(message)
        save_model_path = os.path.join(
            save_model_prefix, 'model.npy')
        trainer.save_model(save_model_path)
def train(
        train_x,
        train_y,
        batch_size,
        total_epoch,
        learning_rate,
        save_intervals,
        prefix,
        optim_name,
        validation):
    """Train a logistic-regression model with minibatch SGD.

    Args:
        train_x: feature matrix, shape (num_samples, num_features).
        train_y: labels aligned with train_x.
        batch_size: minibatch size.
        total_epoch: number of passes over the training data.
        learning_rate: step size handed to the optimizer.
        save_intervals: save a checkpoint every this many epochs.
        prefix: name used for the model folder and the log file.
        optim_name: optimizer class name looked up on the optimizer module.
        validation: when True, hold out a validation split and report it.
    """
    # Resolve the optimizer first so a typo does not leave an empty log file.
    try:
        optim_object = getattr(optimizer, optim_name)
    except AttributeError:
        print('Optimizer not found.')
        exit()
    # Make sure the save paths are valid (makedirs is idempotent with exist_ok).
    os.makedirs('models', exist_ok=True)
    save_model_prefix = os.path.join('models', prefix)
    os.makedirs(save_model_prefix, exist_ok=True)
    os.makedirs('logs', exist_ok=True)
    # Context manager guarantees the log file is closed even if training fails
    # (the original leaked the handle on any exception).
    with open(os.path.join('logs', prefix + '.log'), 'w') as log_f:
        if validation:
            log_f.write('epoch,loss,accuracy,validation\n')
        else:
            log_f.write('epoch,loss,accuracy\n')
        feature_num = train_x.shape[1]
        if validation:
            data_processor = data.DataProcessor()
            train_x, train_y, valid_x, valid_y = \
                data_processor.cut_validation(train_x, train_y)
        total_data = train_x.shape[0]
        optim = optim_object(learning_rate)
        trainer = model.LogisticRegression(
            feature_num,
            optim,
            train=True)
        batcher = batch.Batcher(train_x, train_y, batch_size)
        for epoch in range(total_epoch):
            total_loss = 0.0
            batcher.new_epoch()
            total_accuracy = 0.0
            for step, (x, y) in enumerate(batcher):
                pred = trainer.forward(x)
                loss = trainer.count_loss(pred, y)
                trainer.backward(x, y, pred)
                total_loss += np.sum(loss)
                total_accuracy += trainer.count_accuracy(pred, y)
            total_loss /= total_data
            accuracy = total_accuracy * batch_size / total_data
            if validation:
                # (Removed a stray debug print of the raw predictions.)
                valid_pred = trainer.forward(valid_x)
                valid_accuracy = trainer.count_accuracy(valid_pred, valid_y)
                message = 'epoch:%3d, loss:%.3f, accuracy:%.3f, validation:%.3f'\
                    % (epoch, total_loss, accuracy, valid_accuracy)
                log_f.write('%d,%.3f,%.3f,%.3f\n'\
                    % (epoch, total_loss, accuracy, valid_accuracy))
            else:
                message = 'epoch:%3d, loss:%.3f, accuracy:%.3f' % \
                    (epoch, total_loss, accuracy)
                log_f.write('%d,%.3f,%.3f\n'\
                    % (epoch, total_loss, accuracy))
            print(message)
            # Periodic checkpointing.
            if (epoch + 1) % save_intervals == 0:
                save_model_path = os.path.join(
                    save_model_prefix, 'model_e%d.npy' % (epoch+1))
                trainer.save_model(save_model_path)
if __name__ == '__main__':
    # NOTE(review): rebinding `args` shadows the imported args module; it
    # works because get_args() is only called once, but a rename would be safer.
    args = args.get_args()
    np.random.seed(args.seed)
    data_loader = load.DataLoader()
    train_x, train_y = data_loader.read_training_data(
        args.train_x_filename, args.train_y_filename, args.test_x_filename)
    # Dispatch on the requested model type.
    if args.model == 'GaussianNaiveBayes':
        train_gaussian_naive_bayes(
            train_x,
            train_y,
            args.prefix,
            args.validation)
    else:
        train(
            train_x,
            train_y,
            args.batch_size,
            args.epoches,
            args.learning_rate,
            args.save_intervals,
            args.prefix,
            args.optimizer,
            args.validation)
|
import numpy as np
import imutils
import cv2
import argparse
# NOTE(review): argparse is imported but never used here.

# Load the demo image and show it; waitKey(0) blocks until a keypress.
image = cv2.imread("image/picasso.jpg")
cv2.imshow("Original",image)
cv2.waitKey(0)
# 2x3 affine translation matrix [[1,0,tx],[0,1,ty]]: 25 px right, 50 px down.
M = np.float32([[1,0,25],[0,1,50]])
shifted = cv2.warpAffine(image,M,(image.shape[1],image.shape[0]))
cv2.imshow("Shifted Down and Right",shifted)
cv2.waitKey(0)
# Negative offsets shift left/up.
M = np.float32([[1,0,-50],[0,1,-90]])
shifted = cv2.warpAffine(image,M,(image.shape[1],image.shape[0]))
cv2.imshow("Shifted Up and Left",shifted)
cv2.waitKey(0)
shifted = imutils.translate(image,0,100)
cv2.imshow("Shifted Down",shifted) # The imutils library greatly shortens the operation we would normally do with numpy.
cv2.waitKey()
|
from django.shortcuts import render, get_object_or_404
from .models import Product
from cart.models import Cart
def index(request):
    """Product list page, with the cart item count for the badge."""
    # count() issues SELECT COUNT(*) instead of fetching every cart row
    # just to call len() on it.
    cart_count = Cart.objects.count()
    products = Product.objects.all()
    return render(request, 'products/index.html', {'products': products, 'cart_count': cart_count})
def details(request, id):
    """Detail page for one product; 404 when the id does not exist."""
    # count() issues SELECT COUNT(*) instead of fetching every cart row
    # just to call len() on it.
    cart_count = Cart.objects.count()
    product = get_object_or_404(Product, pk=id)
    return render(request, 'products/details.html', {'product': product, 'cart_count': cart_count})
def base(request):
    """Base template view: exposes the cart contents and their count."""
    # The template needs the full queryset, so evaluating it via len() is free.
    items = Cart.objects.all()
    return render(request, 'products/base.html', {'cart': items, 'cart_count': len(items)})
|
import requests
from semantic_version import Version
from semver import satisfies
# TODO: AWS Lambda default endpoint timeout is smaller than 30 seconds
# Per-request timeout (seconds) and retry budget for the CNPM mirror.
EXTERNAL_SERVICE_TIMEOUT = 3
EXTERNAL_SERVICE_MAXRETRY = 5
# Taobao CNPM node mirror: release index JSON, and the tarball URL template
# where {0} is the version and {1} the platform string.
NODE_INDEX = 'http://npm.taobao.org/mirrors/node/index.json'
NODE_ADDR = 'http://npm.taobao.org/mirrors/node/v{0}/node-v{0}-{1}.tar.gz'
def cnpm2data(platform, nodesemver):
    """Fetch node releases from the CNPM mirror for *platform*.

    Returns a list of dicts with 'number' (normalized version) and 'url'
    (download address); entries without a valid url are dropped.  When
    *nodesemver* is given, the list is filtered to that semver range.
    Raises Exception after EXTERNAL_SERVICE_MAXRETRY failed attempts.
    """
    # Get node version and bin from CNPM
    retry = 0
    while True:
        # BUG FIX: errormsg must be reset on every attempt — previously a
        # stale message from a failed attempt prevented `if not errormsg`
        # from ever breaking, so even a successful retry ended in raise.
        errormsg = ''
        # Get response
        try:
            rv = requests.get(NODE_INDEX, timeout=EXTERNAL_SERVICE_TIMEOUT)
        except Exception as e:
            # Setup errormsg if exception
            errormsg = str(e)
        else:
            # Setup errormsg if status code is not 200
            if rv.status_code != 200:
                errormsg = 'CNPM returns {} status code'.format(rv.status_code)
        # Break if response is OK
        if not errormsg:
            break
        # Retry if response is not OK
        retry += 1
        if retry > EXTERNAL_SERVICE_MAXRETRY:
            raise Exception('CNPM service error: {}'.format(errormsg))
    # Process response
    response, data = rv.json(), []
    for package in response:
        entry = dict()
        entry['number'] = _process_version(package['version'])
        entry['url'] = _process_url(
            files=package['files'], version=entry['number'], platform=platform
        )
        # Only output if url is valid
        if entry['url']:
            data.append(entry)
    # Filter by nodesemver range
    if nodesemver:
        data = _filter_by_semver(data, nodesemver)
    return data
def _process_version(version):
    """Normalize a release tag like 'v8.1.0' to a validated '8.1.0'."""
    # Drop the conventional single leading 'v'.
    stripped = version[1:] if version.startswith('v') else version
    # Round-trip through Version to validate semantic-version syntax.
    return str(Version(stripped))
def _process_url(files, version, platform):
"""
:param files should be list
"""
# Convert platform to appropriate key
if platform == 'linux-x64':
target = 'linux-x64'
elif platform == 'darwin-x64':
# osx-x64-tar is in tar.gz or tar.xz format
# osx-x64-pkg is in pkg format
target = 'osx-x64-tar'
if target in files:
return NODE_ADDR.format(version, platform)
return ''
def _filter_by_semver(data, nodesemver):
    """Keep only entries whose version number satisfies the semver range."""
    return [entry for entry in data if satisfies(entry['number'], nodesemver)]
|
#!/usr/bin/env python
import configparser as _configparser
from os import getenv as _getenv
from os import path as _path
config_dir = _path.dirname(__file__)
# DBENV selects the per-environment config subfolder; unset or empty
# falls back to 'development'.
env = _getenv('DBENV', 'development') or 'development'
Config = _configparser.ConfigParser()
Config.read(_path.join(config_dir, env, 'config.ini'))
|
#Menu creation and coding to call each option
import create_attributes
import player_actions
import game_actions
# Short aliases for the gameplay modules used throughout this script.
ca=create_attributes
pa=player_actions
ga=game_actions
# Menus are dicts so option numbers map straight to their labels; the
# ": :" key renders as a banner row when the menu is displayed.
main_menu={": :":"MAIN MENU : : :",1:"New Game",2:"Resume Game",3:"View Character Attributes",4:"View Class Attributes",5:"View Species Attributes",6:"Exit"}
game_menu={": :":"GAME MENU : : :",1:"Move",2:"Attack",3:"Talk",4:"Take",5:"Give",6:"Save Game",7:"Main Menu"}
hidden_menu={": :":"Congrats! Welcome to the HIDDEN MENU : : :",1:"Create Species",2:"Create Class",3:"Update Species",4:"Update Class",5:"Delete Species",6:"Delete Class",7:"Create Map",8:"Update Monster Spots"}
#Displays specified menu
def display_menu(menu):
    """Print every menu entry as 'key : label', then a blank gap."""
    for key, label in menu.items():
        print(str(key), ":", label)
    print("\n")
# Show all three menus at startup.
# NOTE(review): printing the hidden menu here looks like debug output — confirm.
display_menu(main_menu)
display_menu(hidden_menu)
display_menu(game_menu)
def menu_action(menu, option):
    """Dispatch *option* for the named *menu*.

    Only hidden-menu options 1 and 2 are wired up so far; the remaining
    handlers (ga.new_game, ga.resume_game, ga.view_atts, pa.move_player,
    pa.attack, pa.talk, pa.take, pa.give, ca.savelog, ga.return_main)
    are still to be connected.
    """
    print(menu, option)
    # Only the hidden menu has live actions right now.
    if menu.upper() != "HIDDEN":
        return
    if option == 1:
        ca.create_log("Species")
    elif option == 2:
        ca.create_log("Classes")
|
#https://www.hackerrank.com/challenges/arrays-ds/problem
#DAY 1
# Read the element count and the space-separated array, then print the
# elements in reverse order on one line.
n = input()
n = int(n)
arr = input().split()
# Walk indices n-1 .. 0; end=" " keeps everything on a single line.
for i in range(n-1, -1, -1):
    print(arr[i], end=" ")
|
def sumDigit(n):
    """Return the sum of the decimal digits of non-negative integer *n*."""
    total = 0
    while n > 0:
        # divmod peels off the last digit and shrinks n in one step.
        n, digit = divmod(n, 10)
        total += digit
    return total
# For each of t queries: append the smallest final digit to n*10 so that
# the digit sum becomes a multiple of 10 (a check-digit construction).
t = int(input())
while(t):
    t -= 1
    n = int(input())
    n *= 10
    while (sumDigit(n) % 10 != 0):
        n += 1
    print(n)
|
import persistence
import logging.config
import os
import json
from protocol_support import *

# Configure logging from the file shipped next to this module.
logging.config.fileConfig(os.path.join(os.path.dirname(__file__), "logger.config"))

# Broadcast template announcing a new arrival to a room.
ROOM_MESSAGE_NEW_USER_CONNECTED = 'SERVER: {0} is connected.\n'
def kick_user_from_room(target_u_thread, room_pool):
    """Remove a user from their current room.

    Kicking from the default (Main) room terminates the connection;
    otherwise the user is demoted back into the default room.
    """
    default_room = target_u_thread.room_pool.default_room
    was_in_default = target_u_thread.room == default_room
    room_pool.remove_users_from_room([target_u_thread], target_u_thread.room)
    if was_in_default:
        target_u_thread.send_message('You has been kicked from Main room. Connection is lost.\n')
        target_u_thread.stop_thread()
        return
    room_pool.put_users_threads_into_room([target_u_thread], default_room)
    target_u_thread.send_message('You has been kicked to Main room.\n')
def handle_command(server_thread_obj, message):
    '''
    main action-hub, parses user required actions and fulfills requests. Function relies on :param server_thread_obj passed in,
    through which it has access to current user who makes request and also to room and room supporting system (RoomPool)
    It is intended that only currently running server-threads (client handlers) will use this function, passing themselves as parameter.
    :param server_thread_obj: the client-handler thread making the request
    :param message: pre-parsed wire message in "<command> <payload...>" form
    :return: response text for the requesting client, or None for unknown commands
    '''
    def ping():
        # Pure liveness check.
        return SUCCESS
    def login():
        # Authenticate (or auto-register) a user and drop them into the default room.
        log = logging.getLogger('server.protocol')
        username = command_content[0].split(PROTOCOL_DATA_SPLITTER)[0]
        password = command_content[0].split(PROTOCOL_DATA_SPLITTER)[1]
        user = persistence.find_user_by_login(username)
        if server_thread_obj.room_pool.user_is_logged_in(username):
            log.debug('Attempt of login when already online as {0}'.format(username))
            return "ATTEMPT_OF_DOUBLE_LOGIN"
        if user and user.password != password:
            log.debug('INVALID_CREDENTIALS for user: {0}'.format(username))
            return "INVALID_CREDENTIALS"
        if not user:
            # Unknown login: auto-create the account (anonymous when no password).
            user = persistence.User(login=username, password=None)
            if password:
                user.password = password
            persistence.create_update_user(user)
            user = persistence.find_user_by_login(username)
        # NOTE(review): `and` binds tighter than `or`, so this reads as
        # (login==username and password is None) or (password==password);
        # the right-hand side is trivially true — confirm intended logic.
        if user.login == username \
            and user.password == None or user.password == password:
            server_thread_obj.user = user
            server_thread_obj.send_message(SUCCESS)
            server_thread_obj.room_pool.put_users_threads_into_room([server_thread_obj],
                server_thread_obj.room_pool.default_room)
            log.info('user "{0}" logged IN. {1}'.format(user.login, ('' if password else 'ANONYMOUSLY')))
            server_thread_obj.room.messages_queue.put(ROOM_MESSAGE_NEW_USER_CONNECTED.format(user.login))
            return USER_MESSAGE_ROOM_WELCOME.format(server_thread_obj.room.name, user.login, 'public')
        else:
            log.debug('INVALID_CREDENTIALS from user {0}'.format(username))
            return 'INVALID_CREDENTIALS'
    def logout():
        # Detach the thread from its room and clear its identity.
        log = logging.getLogger('server.protocol')
        server_thread_obj.room_pool.remove_users_from_room([server_thread_obj], server_thread_obj.room)
        log.debug('user "{0}" logged OUT.'.format(server_thread_obj.user.login))
        server_thread_obj.user = None
        server_thread_obj.room = None
        return ''
    def enter_room():
        # Move the user into another room, enforcing visibility and password.
        log = logging.getLogger('server.protocol')
        data = command_content[0].split(PROTOCOL_DATA_SPLITTER)
        room = persistence.find_room_by_id(data[0])
        # if room is not public
        if not room.open_for_all and not server_thread_obj.user.password:
            log.error('UNAUTHORIZED ACCESS TO NON_PUBLIC ROOM DETECTED. User: {0} Room: {1}'.format(
                server_thread_obj.user.login, room.room_id))
            return RESPONSE_MARKER_DATA + 'UNAUTHORIZED ACCESS TO NON_PUBLIC ROOM DETECTED'
        # if room has a password
        if room.password and (len(data) < 2 or room.password != data[1]):
            return RESPONSE_MARKER_DATA + 'INVALID ROOM PASSWORD'
        server_thread_obj.room_pool.remove_users_from_room([server_thread_obj], server_thread_obj.room)
        server_thread_obj.room_pool.put_users_threads_into_room([server_thread_obj], room)
        log.debug('User {0} is entering room {1}'.format(server_thread_obj.user.login, room.room_id))
        server_thread_obj.send_message(RESPONSE_MARKER_DATA + SUCCESS)
        server_thread_obj.room.messages_queue.put(ROOM_MESSAGE_NEW_USER_CONNECTED.format(server_thread_obj.user.login))
        return USER_MESSAGE_ROOM_WELCOME.format(server_thread_obj.room.name, server_thread_obj.user.login,
            'public' if room.open_for_all else 'private')
    def get_rooms():
        # JSON listing of all rooms with visibility/password flags.
        log = logging.getLogger('server.protocol')
        rooms = persistence.find_all_rooms()
        log.debug('get_rooms() performed for ' + str(server_thread_obj.client_address))
        return RESPONSE_MARKER_DATA + json.dumps(
            [{'room_id': r.room_id, 'name': r.name, 'is_open': True if r.open_for_all == 1 else False,
              'has_password': True if r.password else False} for r in rooms])
    def create_room():
        # Create a room (payload: openness|name|password) with the caller as admin.
        log = logging.getLogger('server.protocol')
        data = command_content[0].split(PROTOCOL_DATA_SPLITTER)
        new_room = persistence.Room(data[1], None if data[2] == 'NONE' else data[2], data[0] == '0')
        new_room.admins.append(server_thread_obj.user)
        room_id = persistence.create_update_room(new_room)
        server_thread_obj.user = persistence.find_user_by_id(server_thread_obj.user.user_id)
        log.debug('new room has been created: {0} "{1}"'.format(room_id, data[1]))
        return USER_MESSAGE_ROOM_CREATED.format(data[1])
    def make_admin():
        # Promote a registered (non-anonymous) user; only admins may promote.
        log = logging.getLogger('server.protocol')
        error = None
        if server_thread_obj.user in server_thread_obj.room.admins:
            target_u_thread = [u for u in server_thread_obj.room.connected_user_threads if u.user.login == command_content[0]].pop()
            if target_u_thread.user.password:
                # changes to in-memory data
                target_u_thread.user.admin_in_rooms.append(server_thread_obj.room)
                server_thread_obj.room.admins.append(target_u_thread.user)
                # changes to persisted data
                persistence.make_an_admin(target_u_thread.user.user_id, target_u_thread.room.room_id)
                target_u_thread.room.user_list_changes_queue.put(persistence.Room.USER_LIST_CHANGED_TOKEN)
                server_thread_obj.room.messages_queue.put('User {0} is now admin in room "{1}"\n'.format(target_u_thread.user.login, target_u_thread.room.name))
                # NOTE(review): stray '"' in this template and an unused second
                # format argument — looks copy/pasted from the room message above.
                return 'You have made {0} admin."\n'.format(target_u_thread.user.login, target_u_thread.room.name)
            else:
                error = 'ERROR: admin "{0}" tries to make unauthorized user "{1}" admin. from room {2}'
        else:
            error = 'ERROR: Non admin "{0}" tries to make "{1}" admin. from room {2}'
        log.error(error.format(server_thread_obj.user.login, command_content[0], server_thread_obj.room.room_id))
        return error.format(server_thread_obj.user.login, command_content[0], server_thread_obj.room.room_id)
    def kick_user():
        # Admin-only kick; admins cannot kick other admins.
        log = logging.getLogger('server.protocol')
        error = None
        if server_thread_obj.user in server_thread_obj.room.admins:
            target_u_thread = [u for u in server_thread_obj.room.connected_user_threads if u.user.login == command_content[0]].pop()
            if target_u_thread.user not in server_thread_obj.room.admins:
                kick_user_from_room(target_u_thread, server_thread_obj.room_pool)
                server_thread_obj.room.messages_queue.put('User {0} has been kicked.\n'.format(target_u_thread.user.login))
                return 'You have kicked user {0}.\n'.format(command_content[0])
            else:
                error = 'ERROR: admin "{0}" tries to kick another admin "{1}" from room {2}'
        else:
            error = 'ERROR: Non admin "{0}" tries to kick "{1}" or trying to kick another admin from room {2}'
        log.error(error.format(server_thread_obj.user.login, command_content[0], server_thread_obj.room.room_id))
        return error.format(server_thread_obj.user.login, command_content[0], server_thread_obj.room.room_id)
    def vote_for_user_kick():
        # Shared handler for initiating a kick vote and casting YES/NO votes;
        # the closed-over `command` distinguishes the three cases.
        log = logging.getLogger('server.protocol')
        target_u_thread = [u for u in server_thread_obj.room.connected_user_threads if u.user.login == command_content[0]].pop()
        user_to_kick = target_u_thread.user.login
        if target_u_thread.user in server_thread_obj.room.admins:
            log.error('ERROR: user "{0}" tries to vote-kick admin "{1}" from room {2}. That must not be possible'.format(server_thread_obj.user.login, user_to_kick,
                server_thread_obj.room.room_id))
            return 'You are trying to vote-kick admin. It is either bug or you are hacker!'
        if command == COMMAND_VOTE_FOR_KICK:
            # Initiating the vote counts as an implicit YES from the initiator.
            server_thread_obj.room.vote_for_kick(user_to_kick, COMMAND_VOTE_FOR_KICK)
            server_thread_obj.room.vote_for_kick(user_to_kick, COMMAND_VOTE_FOR_KICK_VOTE_YES)
            log.debug('User kick vote initiated for {0} by {1} in room {2}'.format(server_thread_obj.user.login, user_to_kick, server_thread_obj.room.room_id))
            return 'You initiated kick-vote for user {0}\n'.format(user_to_kick)
        elif command == COMMAND_VOTE_FOR_KICK_VOTE_YES:
            server_thread_obj.room.vote_for_kick(user_to_kick, COMMAND_VOTE_FOR_KICK_VOTE_YES)
            log.debug('User {0} voted YES for kick {1} in room {2}'.format(server_thread_obj.user.login, user_to_kick, server_thread_obj.room.room_id))
            return 'You voted YES for kick user {0}\n'.format(user_to_kick)
        elif command == COMMAND_VOTE_FOR_KICK_VOTE_NO:
            server_thread_obj.room.vote_for_kick(user_to_kick, COMMAND_VOTE_FOR_KICK_VOTE_NO)
            log.debug('User {0} voted NO for kick {1} in room {2}'.format(server_thread_obj.user.login, user_to_kick, server_thread_obj.room.room_id))
            return 'You voted NO for kick user {0}\n'.format(user_to_kick)
    # Command token -> handler closure.
    COMMANDS = {
        COMMAND_PING: ping,
        COMMAND_LOGIN: login,
        COMMAND_LOGOUT: logout,
        COMMAND_ENTER_ROOM: enter_room,
        COMMAND_GET_ROOMS: get_rooms,
        COMMAND_CREATE_ROOM: create_room,
        COMMAND_MAKE_ADMIN: make_admin,
        COMMAND_KICK: kick_user,
        COMMAND_VOTE_FOR_KICK: vote_for_user_kick,
        COMMAND_VOTE_FOR_KICK_VOTE_YES: vote_for_user_kick,
        COMMAND_VOTE_FOR_KICK_VOTE_NO: vote_for_user_kick
    }
    # First token is the command; the rest is handler payload (closures read these).
    command = message.split(' ')[0]
    command_content = message.split(' ')[1:]
    if command not in COMMANDS:
        return None
    return COMMANDS.get(command)()
def parse_message(message):
    """Convert a raw wire message into the '<command> <body>' form that
    handle_command() expects."""
    header = message.split(PROTOCOL_CREDS)[0]
    command = header[len(PROTOCOL_HEAD):]
    if command == COMMAND_LOGIN:
        # Login carries its credentials between CREDS and BODY markers.
        body = message.split(PROTOCOL_CREDS)[1].split(PROTOCOL_BODY)[0]
    else:
        # Everything else: take the body and strip the trailing END marker.
        tail = message.split(PROTOCOL_BODY)[1]
        body = tail[:len(tail) - len(PROTOCOL_END)]
    return command + ' ' + body
# Package metadata (annotated assignments per PEP 526).
__author__: str = "Eric Mesa"
__version__: str = "0.8.1"
__license__: str = "GNU GPL v3.0"
__copyright__: str = "(c) 2020 Eric Mesa"
__email__: str = "eric@ericmesa.com"
|
#!/usr/bin/env python3
import logging
def main(parsed_args):
    """Build a Dash docset (poco.docset) from the Poco HTML documentation.

    When no local docs folder is given, the latest -all-doc zip is scraped
    from pocoproject.org, downloaded and unpacked first.
    """
    # Local import keeps module import (and --help) cheap.
    import os, os.path, re, shutil, sqlite3, subprocess, sys, textwrap, urllib.parse, urllib.request
    if not parsed_args.poco_html_docs_folder:
        # Discover the newest documentation archive on the website.
        logging.info("Checking pocoproject.org/documentation ...")
        website_index_html = urllib.request.urlopen("https://pocoproject.org/documentation/").read().decode("utf-8")
        match = re.search(r'''href="(.*/poco-.*-all-doc\.zip)"''', website_index_html)
        zip_url = match.group(1)
        parsed_zip_url = urllib.parse.urlparse(zip_url)
        zip_filename = os.path.basename(parsed_zip_url.path)
        folder_name = os.path.splitext(zip_filename)[0]
        if os.path.exists(folder_name):
            logging.info("Folder already downloaded and unpacked: %s", folder_name)
        else:
            logging.info("Downloading %s", zip_filename)
            urllib.request.urlretrieve(zip_url, zip_filename)
            logging.info("Unpacking")
            shutil.rmtree(folder_name, ignore_errors=True)
            subprocess.check_call(["unzip", zip_filename])
        parsed_args.poco_html_docs_folder = folder_name
    # `libxml2` currently has a bug which prevent installing it on macOS
    # A working workaround:
    #
    #     STATIC_DEPS=true LIBXML2_VERSION=2.9.2 pip3 install lxml
    import lxml.html
    docset_name = "poco.docset"
    # Rebuild the docset folder from scratch.
    if os.path.exists(docset_name):
        shutil.rmtree(docset_name)
    os.mkdir(docset_name)
    contents_path = os.path.join(docset_name, "Contents")
    os.mkdir(contents_path)
    # Info.plist
    #
    # Dash uses 'DocSetPlatformFamily' as search prefix ("poco:")
    info_path = os.path.join(contents_path, "Info.plist")
    logging.debug("%s", info_path)
    open(info_path, "w").write(textwrap.dedent("""\
        <?xml version="1.0" encoding="UTF-8"?>
        <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
        <plist version="1.0">
        <dict>
        <key>CFBundleIdentifier</key>
        <string>org.pocoproject.poco-dash-docset</string>
        <key>CFBundleName</key>
        <string>Poco</string>
        <key>DashDocSetFallbackURL</key>
        <string>http://pocoproject.org/docs/</string>
        <key>DashDocSetFamily</key>
        <string>dashtoc</string>
        <key>DocSetPlatformFamily</key>
        <string>poco</string>
        <key>dashIndexFilePath</key>
        <string>index.html</string>
        <key>isDashDocset</key>
        <true/>
        <key>isJavaScriptEnabled</key>
        <true/>
        </dict>
        </plist>
        """))
    res_path = os.path.join(contents_path, "Resources")
    os.mkdir(res_path)
    doc_path = os.path.join(res_path, "Documents")
    os.mkdir(doc_path)
    # Dash's search index is an sqlite database with one searchIndex table.
    conn = sqlite3.connect(os.path.join(res_path, 'docSet.dsidx'))
    cur = conn.cursor()
    cur.execute('CREATE TABLE searchIndex (id integer primary key, name text, type text, path text)')
    cur.execute('CREATE UNIQUE INDEX anchor ON searchIndex (name, type, path);')
    # Mirror the HTML tree into the docset, rewriting pages and indexing
    # packages, classes, members and guides along the way.
    for src_path, dirs, files in os.walk(parsed_args.poco_html_docs_folder):
        dst_dir = os.path.relpath(src_path, parsed_args.poco_html_docs_folder)
        dst_path = os.path.normpath(os.path.join(doc_path, dst_dir))
        if dst_dir != '.':
            os.mkdir(dst_path)
        for fn in files:
            if fn == 'navigation.html':
                continue
            srcfile = os.path.join(src_path, fn)
            dstfile = os.path.join(dst_path, fn)
            logging.debug("%s", dstfile)
            if fn.endswith('.html'):
                et = lxml.html.parse(srcfile)
                # Strip site chrome that makes no sense inside Dash.
                et.find('//div[@id="navigation"]').drop_tree()
                et.find('//script[@src="js/iframeResizer.min.js"]').drop_tree()
                lxml.html.etree.SubElement(et.find('//head'), "link", {"rel":"stylesheet","href":"css/styles-override.css","type":"text/css"})
                titletext = et.find('//title').text
                if titletext == 'Package Index':
                    libname = et.find('//h1[@class="category"]').text.replace("Library ","")
                    pkgname = et.find('//h1[@class="title"]').text.replace("Package ", "")
                    logging.info(' Package: %s.%s', libname, pkgname)
                    cur.execute('INSERT INTO searchIndex(type, name, path) values(?, ?, ?)', ['Package', libname+'.'+pkgname, fn])
                elif titletext.startswith('Class '):
                    classname = et.find('//h1[@class="symbol"]').text.replace("class ", "")
                    logging.info(' Class: %s', classname)
                    cur.execute('INSERT OR IGNORE INTO searchIndex(type, name, path) values(?, ?, ?)', ['Class', classname, fn])
                    content_el = et.find('//div[@class="body"]/div[@id="content"]')
                    member_type = None
                    # Walk the flat content: h2 headers set the member section,
                    # h3/p.decl entries describe the individual members.
                    for el in content_el:
                        if el.tag == "h2":
                            if el.text == "Types":
                                member_type = "Type"
                            elif el.text == "Enumerations":
                                member_type = "Enum"
                            elif el.text == "Constructors":
                                member_type = "Constructor"
                            elif el.text == "Destructor":
                                member_type = None
                            elif el.text == "Member Functions":
                                member_type = "Method"
                            elif el.text == "Variables":
                                member_type = "Variable"
                            elif el.text == "Nested Classes":
                                member_type = "Class"
                            else:
                                member_type = None
                        tag_type = member_type
                        tag_name = None
                        tag_indexed = (member_type != "Constructor")
                        tag_href = None
                        if member_type and (el.tag == "h3"):
                            anc = el.find('./a[@id]')
                            if anc is not None:
                                tag_name = anc.text
                                if member_type == "Method":
                                    if (el.find('./img[@alt="static"]') is not None):
                                        tag_type = "Subroutine" # to have 'S' icon on static methods
                                    elif (el.find('./img[@alt="protected"]') is not None):
                                        tag_type = "Procedure" # to have 'P' icon on protected methods
                                    elif re.match(r'''^operator\W''', tag_name):
                                        tag_type = "Operator"
                                elif member_type == "Variable":
                                    saved_name = tag_name
                                    tag_name = None # wait for p.decl
                            else:
                                anc = el.find('./a[@href]')
                                if anc is not None:
                                    if anc.text.startswith("class "):
                                        tag_name = anc.text[6:]
                                        tag_type = "Class"
                                    elif anc.text.startswith("struct "):
                                        tag_name = anc.text[7:]
                                        tag_type = "Struct"
                                    else:
                                        tag_name = anc.text
                                    tag_href = anc.get("href")
                        if (member_type == "Enum") and (el.tag == "p") and (el.get("class") == "decl"):
                            anc = el.find('./a[@id]')
                            if anc is not None:
                                tag_name = anc.text
                                tag_type = "Constant"
                        if (member_type == "Variable") and (el.tag == "p") and (el.get("class") == "decl"):
                            tag_name = saved_name
                            decltext = "".join(el.itertext())
                            if re.search(r'''^(static\s|const\s)+''', decltext):
                                tag_type = "Constant"
                            else:
                                tag_type = "Variable"
                        # insert
                        if tag_name:
                            logging.info("    %s: %s", tag_type, tag_name)
                            url_anchor = "//apple_ref/cpp/"+tag_type+"/"+tag_name
                            el.insert(0, lxml.html.etree.Element("a", {"name":url_anchor,"class":"dashAnchor"}))
                            if tag_indexed:
                                cur.execute('INSERT OR IGNORE INTO searchIndex(type, name, path) values(?, ?, ?)', [tag_type, tag_name, (tag_href if tag_href else fn + '#' + url_anchor)])
                elif fn == "index.html":
                    # Index guides (5-digit prefixed pages) and namespaces.
                    for el in et.findall('//a[@href]'):
                        href = el.get("href")
                        if "http://" in href:
                            continue
                        if re.search(r'''^[0-9]{5}''', href) or ("Guide" in href):
                            cur.execute('INSERT INTO searchIndex(type, name, path) values(?, ?, ?)', ['Guide', el.text, href])
                        elif href.startswith("Poco.") and (el.text == "Poco" or el.text.startswith("Poco::")):
                            cur.execute('INSERT INTO searchIndex(type, name, path) values(?, ?, ?)', ['Namespace', el.text, href])
                open(dstfile,"wb").write(lxml.html.tostring(et))
            else:
                # Non-HTML assets are copied verbatim.
                shutil.copyfile(srcfile, dstfile)
    # Undo the layout offset left by the removed navigation sidebar.
    open(os.path.join(doc_path, "css", "styles-override.css"),"w").write(textwrap.dedent("""\
        #content
        {
        left : 0;
        border-left : none;
        }
        """))
    conn.commit()
if __name__ == "__main__":
    import argparse

    cli = argparse.ArgumentParser()
    cli.add_argument("poco_html_docs_folder", nargs='?')
    cli.add_argument("-v", "--verbose", action="count", default=0,
                     help="increase verbosity, more than once to be more verbose")
    args = cli.parse_args()
    # Map the -v count onto a log level, capping at DEBUG.
    verbosity_levels = (logging.WARNING, logging.INFO, logging.DEBUG)
    chosen_level = verbosity_levels[min(len(verbosity_levels) - 1, args.verbose)]
    logging.basicConfig(level=chosen_level, format="%(message)s")
    main(args)
|
import pytest
from pyfa_ng_backend import create_app
@pytest.fixture
def app():
    """Provide a fresh application instance for each test."""
    return create_app()
@pytest.fixture
def full_fit():
    """Return a dict describing a fully fitted ship for the backend tests."""
    def mod(type_id, state, charge=None):
        # Build one module entry; 'charge' is only present when given.
        entry = {'id': type_id, 'state': state}
        if charge is not None:
            entry['charge'] = charge
        return entry

    return {
        'ship': 32311,
        'high_slots': [
            mod(2929, 'online', 12779),
            mod(2929, 'offline', 12779),
            mod(2929, 'overload', 12779),
        ],
        'mid_slots': [
            mod(5945, 'overload'),
            mod(4833, 'active', 32014),
            mod(9622, 'offline'),
            mod(5443, 'overload'),
            mod(2281, 'overload'),
        ],
        'low_slots': [
            mod(2048, 'online'),
            mod(519, 'active'),
            mod(519, 'active'),
            mod(4405, 'active'),
            mod(4405, 'active'),
            mod(4405, 'offline'),
        ],
        'rigs': [26082, 26088, 26088],
        'drones': ([mod(2446, 'active') for _ in range(5)]
                   + [mod(2446, 'offline') for _ in range(3)]),
        'implants': [13219, 10228, 24663, 13244],
    }
|
def es8(n):
    """Return True when position ``n`` is winning in the subtraction game
    where a move removes 1, 3 or 4 items: a position is winning when some
    move leaves the opponent in a losing position.

    :param n: pile size (non-negative int)
    :return: True if the player to move can force a win
    """
    if n < 5:  # Base cases: below 5 only piles of 1 and 3 are losing.
        return n != 1 and n != 3
    # winning[i] is True when the position with i items is winning.
    winning = [False] * (n + 1)
    winning[0] = winning[2] = winning[4] = True
    for i in range(5, n + 1):
        # Winning iff removing 1, 3 or 4 reaches a losing position.
        winning[i] = (not winning[i - 1]
                      or not winning[i - 3]
                      or not winning[i - 4])
    return winning[n]
|
from functools import total_ordering
from amath.Computation.num_properties import digits, digitsafterdecimal
from amath.Computation.relationship import gcd
from ..Errors import Indeterminate
@total_ordering
class _Fraction(object):
    __slots__ = ['numerator', 'denominator', 'whole']
    # NOTE(review): the string below is *not* the class docstring -- it
    # follows __slots__, so it is a no-op expression statement and
    # __doc__ remains None.
    """
    Fraction Class. Used to create a data type
    """
    def __init__(self, n, d):
        """
        Fraction initialization

        :type d: int
        :type n: int
        :param n: numerator
        :param d: denomenator
        :return:
        :raises Indeterminate: for the indeterminate 0/0 case

        Create a Fraction

        >>> Fraction(5,2)
        5/2
        >>> Fraction(-5,2)
        -5/2
        >>> Fraction(5,-2)
        -5/2
        >>> Fraction(4,10)
        2/5
        >>> Fraction(0,2)
        0/2
        >>> Fraction(1,0)
        ComplexInfinity
        """
        # Reduce by the gcd; for 0/0 a ZeroDivisionError surfaces here
        # (presumably from inside gcd) and is mapped to Indeterminate.
        try:
            self.numerator = n // gcd(abs(n), abs(d))
        except ZeroDivisionError:
            raise Indeterminate("Indeterminate expression 0/0 encountered")
        # NOTE(review): gcd is recomputed here with the same arguments.
        self.denominator = d // gcd(abs(n), abs(d))
        self.whole = 0
        # Coerce both parts to int unless they are complex numbers.
        if type(self.denominator) is not complex:
            self.denominator = int(self.denominator)
        if type(self.numerator) is not complex:
            self.numerator = int(self.numerator)
        if (type(self.numerator) is not complex) and (type(self.denominator) is not complex):
            # Normalize so only the numerator carries the sign.
            if self.denominator < 0:
                self.denominator = abs(self.denominator)
                self.numerator *= -1
            # NOTE(review): the doctest above shows Fraction(1,0) printing
            # ComplexInfinity, but a zero denominator raises here -- confirm.
            if self.denominator == 0:
                raise Indeterminate
            self.whole = self.numerator // self.denominator
    def __add__(self, other):
        """
        Adds to values

        :param other: Fraction, int or float addend
        :return: new Fraction holding the sum

        >>> Fraction(1,4) + Fraction(2,4)
        3/4
        >>> Fraction(1,2) + Fraction(3,4)
        5/4
        >>> Fraction(1,2) + 2
        5/2
        >>> Fraction(1,2) + 2.5
        3/1
        """
        # Floats are converted to fractions; ints work unchanged since they
        # expose .numerator/.denominator attributes.
        ax = other
        if type(other) is float:
            ax = dectofr(other)
        return Fraction(self.numerator * ax.denominator + self.denominator * ax.numerator,
                        self.denominator * ax.denominator)
    __radd__ = __add__
    def __sub__(self, other):
        # type: (object) -> Fraction
        """
        Subtract a value from Fraction

        :param other: Fraction, int or float subtrahend
        :return: new Fraction holding the difference

        >>> Fraction(3, 4) - Fraction(1, 4)
        1/2
        >>> Fraction(7, 4) - Fraction(3 ,4)
        1/1
        >>> Fraction(6, 4) - 2
        -1/2
        >>> Fraction(11, 2) - 3.5
        2/1
        """
        dx = other
        if type(other) is float:
            dx = dectofr(other)
        return Fraction(self.numerator * dx.denominator - self.denominator * dx.numerator,
                        self.denominator * dx.denominator)
    def __rsub__(self, other):
        # Reflected subtraction: other - self.
        dx = other
        if type(other) is float:
            dx = dectofr(other)
        return Fraction(dx.numerator * self.denominator - dx.denominator * self.numerator,
                        dx.denominator * self.denominator)
    def __mul__(self, other):
        """
        Multiplication

        :param other: value convertible to float
        :return: new Fraction product, or the float infinity operand itself

        >>> Fraction(1,2) * Fraction(5,4)
        5/8
        >>> Fraction(1,2) * 4
        2/1
        >>> Fraction(1,3) * 2.5
        5/6
        """
        # NOTE(review): the operand is routed through float() and back via
        # dectofr, which loses exactness for non-terminating fractions
        # (e.g. 1/3) -- confirm this is acceptable.
        try:
            other = float(other)
        except ValueError:
            return NotImplemented
        except TypeError:
            return NotImplemented
        if other == float("inf") or other == float("-inf"):
            return other
        mx = dectofr(other)
        return Fraction(self.numerator * mx.numerator, self.denominator * mx.denominator)
    __rmul__ = __mul__
    def __truediv__(self, other):
        # self / other, by cross-multiplying with the reciprocal.
        dx = other
        if type(other) is float:
            dx = dectofr(other)
        return Fraction(self.numerator * dx.denominator, self.denominator * dx.numerator)
    def __rtruediv__(self, other):
        # Reflected division: other / self.
        dx = other
        if type(other) is float:
            dx = dectofr(other)
        return Fraction(dx.numerator * self.denominator, dx.denominator * self.numerator)
    def __floordiv__(self, other):
        """
        Division

        :param other:
        :return:

        Uses truediv -- note this does NOT floor the result.

        >>> Fraction(1,2) / Fraction(3,4)
        2/3
        >>> Fraction(1,2) / 2
        1/4
        >>> Fraction(1,4) / 0.5
        1/2
        """
        return self.__truediv__(other)
    def __rfloordiv__(self, other):
        """
        Division

        :param other:
        :return:

        Uses truediv -- note this does NOT floor the result.

        >>> Fraction(1,2) / Fraction(3,4)
        2/3
        >>> Fraction(1,2) / 2
        1/4
        >>> Fraction(1,4) / 0.5
        1/2
        """
        return self.__rtruediv__(other)
    def __pow__(self, power, modulo=None):
        # Raise numerator and denominator separately; (a/b)**p == a**p / b**p.
        y = pow(self.numerator, power)
        z = pow(self.denominator, power)
        if modulo is not None:
            return Fraction(y, z) % modulo
        return Fraction(y, z)
    def __rpow__(self, other, modulo=None):
        # other ** (n/d) == (d-th root of other) ** n.
        from amath.Computation.power import root
        return pow(root(other, self.denominator), self.numerator)
    def __str__(self):
        return "%s/%s" % (self.numerator, self.denominator)
    # def __cmp__(self, other):
    #     """
    #     compare two values
    #     :param other:
    #     :return:
    #
    #     >>> Fraction(1,2) < Fraction(2,3)
    #     True
    #     >>> Fraction(2,3) == Fraction(4,6)
    #     True
    #     >>> Fraction(1,3) < 1
    #     True
    #     >>> Fraction(5,2) > 2.5
    #     False
    #     """
    #     if type(other) is float:
    #         other = dectofr(other)
    #     a = Fraction(self.numerator * other.denominator, self.denominator * other.denominator)
    #     b = Fraction(other.numerator * self.denominator, self.denominator * other.denominator)
    #     if a.onum > b.onum:
    #         return 1
    #     elif a.onum is b.onum:
    #         return 0
    #     else:
    #         return -1
    def __hash__(self):
        # Hash by float value so equal-valued fractions hash alike
        # (limited by float precision).
        return hash(self.numerator / self.denominator)
    def __eq__(self, other):
        if isinstance(other, Fraction):
            return True if self.numerator == other.numerator and self.denominator == other.denominator else False
        return True if self.numerator / self.denominator == other else False
    def __lt__(self, other):
        # total_ordering derives the remaining comparisons from __eq__/__lt__.
        if isinstance(other, Fraction):
            return True if self - other < 0 else False
        return True if self.numerator / self.denominator < other else False
    def __nonzero__(self):
        """
        Non Zero (Python 2 truthiness protocol; Python 3 uses __bool__)

        :return: True unless the fraction equals zero
        """
        if self != 0:
            return True
        else:
            return False
    def __repr__(self):
        try:
            return self.__str__()
        except AttributeError:
            return str(None)
    def digits(self):
        # Calls the module-level digits() helper on the decimal value; the
        # method name only shadows it at class scope, not inside this body.
        x = frtodec(self)
        return digits(x)
    def is_int(self):
        # A reduced fraction is integral exactly when the denominator is 1.
        if self.denominator == 1:
            return True
        else:
            return False
    def __trunc__(self):
        # NOTE(review): int(Fraction) relies on the __trunc__ fallback,
        # which int() no longer uses as of Python 3.11+ -- confirm the
        # targeted Python versions.
        return self.whole
    def __float__(self):
        """
        Convert to float

        :return: the float value numerator / denominator

        >>> float(Fraction(1,2))
        0.5
        >>> float(Fraction(1,25))
        0.04
        >>> float(Fraction(5,2))
        2.5
        """
        return frtodec(self)
    def __mod__(self, other):
        """
        Modulus

        :param other: divisor
        :return: remainder of self divided by other

        >>> Fraction(1,2) % 2
        1/2
        >>> Fraction(1,2) % Fraction(1,3)
        1/6
        """
        # a mod b == a - b * trunc(a / b)
        z = int(self / other)
        a = self - (other * z)
        return a
    def __abs__(self):
        if self.numerator < 0:
            return Fraction(-self.numerator, self.denominator)
        else:
            return self
    def __neg__(self):
        return Fraction(-self.numerator, self.denominator)
    def __pos__(self):
        return Fraction(self.numerator, self.denominator)
    def __coerce__(self, other):
        # Python 2 coercion protocol; ignored by Python 3.
        try:
            other = float(other)
        except:
            return NotImplemented
        x = dectofr(other)
        return self, x
Fraction = type("Fraction", (_Fraction, object), {}) # Generate our type
def dectofr(x):
    """
    Converts decimals to fractions

    :param x: decimal to convert (float, int, or numeric string)
    :return: Fraction

    >>> dectofr(2.5)
    5/2
    >>> dectofr(0.25)
    1/4
    >>> dectofr(2.1)
    21/10
    >>> dectofr('1.12')
    28/25
    >>> dectofr("1.13")
    113/100
    Does work for int
    >>> dectofr(5)
    5/1
    """
    # Strip the decimal point to obtain the numerator, and scale the
    # denominator by one power of ten per fractional digit.
    numerator_text = str(x).replace(".", "")
    denominator = 10 ** digitsafterdecimal(x)
    return Fraction(int(numerator_text), int(denominator))
def frtodec(x):
    """
    Converts Fraction to decimal

    :param x: Fraction to be converted
    :return: Decimal
    :raises TypeError: when x is not a Fraction

    >>> frtodec(Fraction(1,2))
    0.5
    >>> frtodec(Fraction(1,3))
    0.3333333333333333
    """
    if isinstance(x, Fraction):
        return x.numerator / x.denominator
    raise TypeError("Argument must be a fraction")
def strtofr(x):
    """Parse a string into a Fraction.

    Accepts either a plain decimal string ("1.12") or a slash-separated
    ratio ("3/4", "3.5/2").

    :param x: string to parse
    :return: Fraction
    :raises ValueError: when x is neither a number nor an 'a/b' expression
    """
    try:
        return dectofr(float(x))
    except ValueError:
        n, d = x.split('/')
        # Convert each side through dectofr and divide, instead of feeding
        # raw floats into Fraction's integer-based initializer (which
        # floor-divides by gcd and int()-converts, mangling float parts
        # such as "3.5/2").
        return dectofr(float(n)) / dectofr(float(d))
|
import boto3
import os
REGION = os.getenv('AWS_REGION', 'us-west-2')
client = boto3.client('cloudwatch', REGION)
def divide_chunks(l, n):
    """Yield successive slices of ``l`` of length ``n`` (last may be shorter)."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
with open('bad-alarms.csv') as infile:
    alarm_names = [line.strip() for line in infile]
# DeleteAlarms accepts a limited batch of names per request, so submit in chunks.
for batch in divide_chunks(alarm_names, 100):
    print(client.delete_alarms(AlarmNames=batch))
|
from sage.misc.latex import latex
from riordan_utils import *
def enhanced_latex(order, row_template):
    """Build the (coefficient_handler, row_handler) callback pair used when
    rendering a Riordan matrix as LaTeX.

    :param order: matrix order (kept for interface compatibility; unused here)
    :param row_template: format template that joins one row's coefficients
    :return: tuple (coefficient_handler, row_handler)
    """
    def coefficient_handler(row_index, col_index, coeff):
        # Blank out zero entries above the diagonal so the triangular shape
        # shows.  Fixed: use equality, not `is 0` -- identity comparison
        # against an int literal is unreliable (SyntaxWarning in CPython 3.8+)
        # and never matches non-builtin integer types such as Sage Integers.
        return '' if coeff == 0 and col_index > row_index else str(coeff)

    def row_handler(row_index, coefficients):
        return row_template.format(*coefficients)

    return coefficient_handler, row_handler
def latex_triangle(d, h):
    """Render the Riordan array (d, h) as a LaTeX array of order 100.

    :param d: first Riordan component (callable on the series variable)
    :param h: second Riordan component (callable on the series variable)
    :return: pair (pascal_matrix, latex_equation_string)

    NOTE(review): `t` below is read from the enclosing (Sage) global scope --
    presumably the symbolic series variable; confirm it is defined wherever
    this is called.
    """
    order = 100
    # One '{i}' placeholder per column, joined by LaTeX column separators.
    row_template = r' & '.join(['{{{0}}}'.format(i) for i in range(order)]) + r' & '
    def make_latex_code(s):
        # Wrap the formatted rows in a bracketed LaTeX array, appending a
        # row of \vdots and a trailing \ddots to suggest the infinite matrix.
        return (r'\left[ \begin{array}'
                + '{{{0}}}'.format('c' * (order+1)) + s + ' \\\\ '
                + row_template.format(*[r'\vdots' for i in range(order)]) + r'\ddots'
                + r'\end{array} \right]')
    # The handler is the (coefficient_handler, row_handler) pair built by
    # enhanced_latex.
    latex_handler = enhanced_latex(order, row_template)
    pascal_matrix, _, coefficients_per_rows = Riordan_matrix_latex_code (
        array=(d,h), order=order, on_computed_coefficient=latex_handler)
    matrix_rows = [coefficients_per_rows[row_index] for row_index in range(order)]
    return pascal_matrix, r'\begin{{equation}} \left( {0}, {1} \right) = {2} \end{{equation}}'.format(
        latex(d(t)), latex(h(t)), make_latex_code(' \\\\ '.join(matrix_rows)))
def build_tex_files_about_colouring(
        Riordan_array, results,
        colouring, partitioning,
        path_prefix, matrix_cutter=lambda m: m[:10, :10]):
    """Assemble the TeX artifacts describing a coloured Riordan triangle.

    :param Riordan_array: array under study; provides ``.name``,
        ``.formal_def()`` and ``.additional_caption_text``
    :param results: pair (array_as_expanded_matrix, tikz_nodes)
    :param colouring: colouring scheme; provides ``.str_for(...)``
    :param partitioning: partitioning scheme; provides ``.str_for(...)``
    :param path_prefix: prefix prepended to the rendered triangle PDF path
    :param matrix_cutter: trims the matrix emitted in the include-matrix file
    :return: dict mapping filename -> {'content': str, 'typesettable': bool}
    """
    # preparing dictionary for collecting results
    tex_files = {}
    # unpacking results
    array_as_expanded_matrix, tikz_nodes = results
    def characteristic_prefix_template(suffix=''):
        # Lower-cased, dash-joined filename stem unique to the
        # (array, colouring, partitioning) combination.
        return r"{array_name}-{colouring_scheme}-{partitioning}{suffix}".format(
            array_name="-".join(Riordan_array.name.split(" ")),
            colouring_scheme=colouring.str_for(filename=True),
            partitioning=partitioning.str_for(filename=True),
            suffix='-' + suffix if suffix else '').lower()
    def ends_with_extension(filename, file_extension='tex'):
        return filename + (r'.' + file_extension if file_extension else '')
    def make_dict_value(**kwds):
        # the following assert ensures that the keys
        # we need for typesetting are in the *automatically*
        # built dictionary, collecting keywords arguments.
        assert 'content' in kwds and 'typesettable' in kwds
        return kwds
    # save tikz lines to a dedicated file
    tikz_lines_input_filename = ends_with_extension(
        characteristic_prefix_template("tikz-nodes"))
    tex_files[tikz_lines_input_filename] = make_dict_value(
        content=tikz_nodes, typesettable=False)
    # instantiate the template file in order to build the coloured triangle
    colouring_triangle_filename = characteristic_prefix_template("triangle")
    tex_files[ends_with_extension(colouring_triangle_filename)] = make_dict_value(
        content=substitute_from_filename(
            template_filename="../templates/coloured.tex",
            tikz_lines_input_filename=tikz_lines_input_filename),
        typesettable=True)
    # instantiate the template file for include figure tex generation
    include_figure_filename = characteristic_prefix_template("include-figure")
    colouring_triangle_pdf_filename = "{path_prefix}{triangle_filename}.pdf".format(
        path_prefix=path_prefix, triangle_filename=colouring_triangle_filename)
    caption = r'''
    {array_name} triangle, formally:
    \begin{{displaymath}}
    {formal_def}
    \end{{displaymath}} % \newline % new line no more necessary
    {colouring}, {partitioning}
    {additional_caption_text}'''.format(
        array_name=Riordan_array.name,
        formal_def=Riordan_array.formal_def(),
        colouring=colouring.str_for(summary=True),
        partitioning=partitioning.str_for(summary=True),
        additional_caption_text=(r'\newline Additional notes: ' +
                                 Riordan_array.additional_caption_text
                                 if Riordan_array.additional_caption_text else ''))
    tex_files[ends_with_extension(include_figure_filename)] = make_dict_value(
        content=substitute_from_filename(
            template_filename="../templates/include-figure.tex",
            triangle_filename=colouring_triangle_pdf_filename,
            caption=caption,
            label=colouring_triangle_filename),
        typesettable=False)
    # A small excerpt of the expanded matrix, for inline display.
    tex_files[ends_with_extension(
        characteristic_prefix_template("include-matrix"))] = make_dict_value(
        content=r'\begin{displaymath}'
                + latex(matrix_cutter(array_as_expanded_matrix))
                + r'\end{displaymath}',
        typesettable=False)
    return tex_files
|
import json
from chalice.config import Config
from chalice.local import LocalGateway
from app import app
class ResquesterUtil:
    """Helper for driving the Chalice app through its local gateway in tests."""

    @staticmethod
    def do_request(method, path, body, headers):
        """Serialize ``body`` as JSON and dispatch the request to the local app."""
        gateway = LocalGateway(app, Config(chalice_stage='homolog'))
        payload = json.dumps(body)
        return gateway.handle_request(method=method, path=path,
                                      headers=headers, body=payload)
|
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'testKlasyfikatora.ui'
##
## Created by: Qt User Interface Compiler version 5.14.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import (QCoreApplication, QDate, QDateTime, QMetaObject,
QObject, QPoint, QRect, QSize, QTime, QUrl, Qt)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont,
QFontDatabase, QIcon, QKeySequence, QLinearGradient, QPalette, QPainter,
QPixmap, QRadialGradient)
from PySide2.QtWidgets import *
class Ui_TestKlasyfikatoraForm(object):
    """UI scaffolding generated by the Qt UI compiler from testKlasyfikatora.ui.

    Do not hand-edit the widget-building logic: it is regenerated from the
    .ui file (see the generated-file warning at the top of this module).
    """
    def setupUi(self, TestKlasyfikatoraForm):
        # Build all widgets/layouts and attach them to the given form widget.
        if not TestKlasyfikatoraForm.objectName():
            TestKlasyfikatoraForm.setObjectName(u"TestKlasyfikatoraForm")
        TestKlasyfikatoraForm.resize(894, 638)
        self.gridLayout = QGridLayout(TestKlasyfikatoraForm)
        self.gridLayout.setObjectName(u"gridLayout")
        self.verticalLayout = QVBoxLayout()
        self.verticalLayout.setObjectName(u"verticalLayout")
        self.horizontalLayout_2 = QHBoxLayout()
        self.horizontalLayout_2.setObjectName(u"horizontalLayout_2")
        self.horizontalLayout = QHBoxLayout()
        self.horizontalLayout.setObjectName(u"horizontalLayout")
        self.label = QLabel(TestKlasyfikatoraForm)
        self.label.setObjectName(u"label")
        self.horizontalLayout.addWidget(self.label)
        # Classifier-selection combo box.
        self.comboBoxKlasyfikatory = QComboBox(TestKlasyfikatoraForm)
        self.comboBoxKlasyfikatory.setObjectName(u"comboBoxKlasyfikatory")
        self.comboBoxKlasyfikatory.setMinimumSize(QSize(200, 0))
        self.horizontalLayout.addWidget(self.comboBoxKlasyfikatory)
        self.horizontalLayout_2.addLayout(self.horizontalLayout)
        # Action buttons: run test, change classifier settings, show table.
        self.btnTestuj = QPushButton(TestKlasyfikatoraForm)
        self.btnTestuj.setObjectName(u"btnTestuj")
        self.horizontalLayout_2.addWidget(self.btnTestuj)
        self.btnZmienUstawieniaKlasyfikatora = QPushButton(TestKlasyfikatoraForm)
        self.btnZmienUstawieniaKlasyfikatora.setObjectName(u"btnZmienUstawieniaKlasyfikatora")
        self.horizontalLayout_2.addWidget(self.btnZmienUstawieniaKlasyfikatora)
        self.btnPokazTabele = QPushButton(TestKlasyfikatoraForm)
        self.btnPokazTabele.setObjectName(u"btnPokazTabele")
        self.horizontalLayout_2.addWidget(self.btnPokazTabele)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        # Results text area.
        self.txtWyniki = QTextEdit(TestKlasyfikatoraForm)
        self.txtWyniki.setObjectName(u"txtWyniki")
        self.verticalLayout.addWidget(self.txtWyniki)
        self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
        self.retranslateUi(TestKlasyfikatoraForm)
        QMetaObject.connectSlotsByName(TestKlasyfikatoraForm)
    # setupUi

    def retranslateUi(self, TestKlasyfikatoraForm):
        # Assign all user-visible (Polish) strings.
        TestKlasyfikatoraForm.setWindowTitle(QCoreApplication.translate("TestKlasyfikatoraForm", u"Test Leave Out klasyfikatora", None))
        self.label.setText(QCoreApplication.translate("TestKlasyfikatoraForm", u"Kt\u00f3ry klasyfikator testowa\u0107:", None))
        self.btnTestuj.setText(QCoreApplication.translate("TestKlasyfikatoraForm", u"Testuj", None))
        self.btnZmienUstawieniaKlasyfikatora.setText(QCoreApplication.translate("TestKlasyfikatoraForm", u"Zmie\u0144 ustawienia klasyfikatora", None))
        self.btnPokazTabele.setText(QCoreApplication.translate("TestKlasyfikatoraForm", u"Poka\u017c tabel\u0119 bazow\u0105", None))
    # retranslateUi
|
"""
Mailgun object storage
"""
from __future__ import absolute_import, division, unicode_literals
import attr
import time
@attr.s
class Message(object):
    """
    A :obj:`Message` is a representation of an email in Mailgun.
    It can produce JSON-serializable objects for various pieces of
    state that are required for API responses.
    """
    # Core message fields captured at creation time.
    message_id = attr.ib()
    to = attr.ib()
    msg_from = attr.ib()
    subject = attr.ib()
    body = attr.ib()
    custom_headers = attr.ib(default=attr.Factory(dict))
    # Canned event payload shared by every instance.  NOTE(review):
    # "timestamp" is evaluated once at class-definition time, so all
    # messages report the same timestamp -- confirm this is intended.
    static_defaults = {
        "tags": [],
        "delivery-status": {
            "message": "",
            "code": 0,
            "description": None,
            "session-seconds": 1.114408016204834
        },
        "envelope": {
            "transport": "smtp",
            "sending-ip": "127.0.0.1",
        },
        "recipient-domain": "mailgun.com",
        "id": "mimic-LCZuENBlS0iWjs-yBpNJaQ",
        "campaigns": [],
        "user-variables": {},
        "flags": {
            "is-routed": None,
            "is-authenticated": True,
            "is-system-test": False,
            "is-test-mode": False
        },
        "log-level": "info",
        "timestamp": time.time(),
        "message": {
            "headers": {},
            "attachments": [],
            "recipients": [],
            "size": 0
        },
        "recipient": None,
        "event": "delivered"
    }
    def generate_events(self):
        """
        Long-form JSON-serializable object representation of this message, as
        returned by a GET on this individual message.
        """
        # Shallow copy of the class-level defaults; the update() below
        # replaces the "envelope" and "message" sub-dicts wholesale (the
        # static "attachments"/"size" sub-keys are dropped, not merged).
        template = self.static_defaults.copy()
        template.update({
            "envelope": {
                "sender": self.msg_from,
                "targets": self.to
            },
            "message": {
                "headers": {
                    "to": self.to,
                    "message-id": self.message_id,
                    "from": self.msg_from,
                    "subject": self.subject
                },
                "recipients": [self.to],
                "recipient": self.to
            }
        })
        return template
@attr.s
class MessageStore(object):
    """
    A collection of message objects.
    """
    message_store = attr.ib(default=attr.Factory(list))

    def add_to_message_store(self, **attributes):
        """
        Create a new Message object and add it to the
        :obj:`message_store`.
        """
        self.message_store.append(Message(**attributes))
        return

    def list_messages(self, filter_by=None):
        """
        List all the messages.

        :param str filter_by: supports filtering the list by
            `to` addresses only currently.
        """
        candidates = self.message_store
        if filter_by:
            candidates = [m for m in candidates if m.to in filter_by]
        events = [m.generate_events() for m in candidates]
        fake_link = "http://i-am-a-fake-link-to-nothing"
        return {
            "items": events,
            "paging": {
                "next": fake_link,
                "last": fake_link,
                "first": fake_link + "=",
                "previous": fake_link + "==",
            }}

    def filter_message_by_to_address(self, to_address):
        """
        Retrieve a :obj:`Message` object by its `to` address
        (None when no message matches).
        """
        return next(
            (msg for msg in self.message_store if msg.to == to_address),
            None)
|
#!/usr/local/bin/python
# Filename: demo.py
# More info: http://www.zhihu.com/question/20899988
# -*- coding:utf-8 -*-
import urllib
import urllib2
import re
#page = 1
# Scrape the latest CCTV Xinwen Lianbo headline list.
# NOTE: Python 2 code (urllib2, print statements, `except E, e` syntax).
url = 'http://cctv.cntv.cn/lm/xinwenlianbo'
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = { 'User-Agent' : user_agent }
# First request: fetch the programme index page and extract the newest
# date-stamped page URL.
try:
    request = urllib2.Request(url, headers = headers)
    response = urllib2.urlopen(request)
    content = response.read().decode('utf-8')
    pattern = re.compile("dateurl='(.*?)'",re.S)
    dateurl = re.findall(pattern, content)
    realurl = "http://cctv.cntv.cn/lm/xinwenlianbo/"+dateurl[0]+".shtml"
except urllib2.URLError, e:
    if hasattr(e, "code"):
        print e.code
    if hasattr(e, "reason"):
        print e.reason
# Second request: scrape each headline from the day's page and print it
# as a markdown-style link.  NOTE(review): if the first request failed,
# `realurl` is undefined here and raises NameError (not caught below).
try:
    request = urllib2.Request(realurl, headers = headers)
    response = urllib2.urlopen(request)
    content = response.read().decode('utf-8')
    pattern = re.compile('<li><a href="http://news.cntv.cn/(.*?)".*?">(.*?)</a>',re.S)
    items = re.findall(pattern, content)
    for item in items:
        # Adjacent string literals concatenate: '- [>](<url>) <title>'.
        xinwen = '- '+'[>]('"http://news.cntv.cn/" + item[0]+') '+item[1]
        xinwen = xinwen.encode('utf-8')
        print xinwen
except urllib2.URLError, e:
    if hasattr(e, "code"):
        print e.code
    if hasattr(e, "reason"):
        print e.reason
|
import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense, SimpleRNN
input_word = 'abcde'
# Vocabulary: letter -> integer id, and id -> one-hot row of length 5.
w_to_id = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4}
id_to_onehot = {0: [1., 0., 0., 0., 0.], 1: [0., 1., 0., 0., 0.], 2: [0., 0., 1., 0., 0.], 3: [0., 0., 0., 1., 0.],
                4: [0., 0., 0., 0., 1.]}
# Each sample is 4 consecutive letters (one-hot encoded); the label is the
# letter that follows, wrapping around within 'abcde'.
x_train = [
    [id_to_onehot[w_to_id['a']], id_to_onehot[w_to_id['b']], id_to_onehot[w_to_id['c']], id_to_onehot[w_to_id['d']]],
    [id_to_onehot[w_to_id['b']], id_to_onehot[w_to_id['c']], id_to_onehot[w_to_id['d']], id_to_onehot[w_to_id['e']]],
    [id_to_onehot[w_to_id['c']], id_to_onehot[w_to_id['d']], id_to_onehot[w_to_id['e']], id_to_onehot[w_to_id['a']]],
    [id_to_onehot[w_to_id['d']], id_to_onehot[w_to_id['e']], id_to_onehot[w_to_id['a']], id_to_onehot[w_to_id['b']]],
    [id_to_onehot[w_to_id['e']], id_to_onehot[w_to_id['a']], id_to_onehot[w_to_id['b']], id_to_onehot[w_to_id['c']]]
]
y_train = [w_to_id['e'], w_to_id['a'], w_to_id['b'], w_to_id['c'], w_to_id['d']]
# Re-seeding with the same value before each shuffle keeps x_train and
# y_train aligned after shuffling.
np.random.seed(7)
np.random.shuffle(x_train)
np.random.seed(7)
np.random.shuffle(y_train)
tf.random.set_seed(7)
# RNN input shape: (num_samples, timesteps=4, features=5).
x_train = np.reshape(x_train, (len(x_train), 4, 5))
y_train = np.array(y_train)
# SimpleRNN(3) -> softmax over the 5 letters.
model = tf.keras.models.Sequential([
    SimpleRNN(3),
    Dense(5, activation='softmax')
])
model.compile(optimizer=tf.keras.optimizers.Adam(0.01),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])
model_save_path = "./rnn_4pre1_checkpoint/rnn_4pre1.ckpt"
# Resume from the checkpoint when one already exists.
if os.path.exists(model_save_path + '.index'):
    print("-------------------load model--------------------")
    model.load_weights(model_save_path)
# Save only the best (lowest-loss) weights during training.
cp_callbacks = tf.keras.callbacks.ModelCheckpoint(filepath=model_save_path,
                                                  save_weights_only=True,
                                                  save_best_only=True,
                                                  monitor='loss')
history = model.fit(x_train, y_train, batch_size=32, epochs=100, callbacks=[cp_callbacks])
model.summary()
# Dump all trainable weights to a text file for inspection.
with open("./rnn_4pre1_weights.txt", 'w') as f:
    for v in model.trainable_variables:
        f.write(str(v.name) + '\n')
        f.write(str(v.shape) + '\n')
        f.write(str(v.numpy()) + '\n')
# Plot the training curves.
acc = history.history['sparse_categorical_accuracy']
loss = history.history['loss']
plt.plot(acc, label='training accuracy')
plt.plot(loss, label='training loss')
plt.title('training accuracy and loss')
plt.legend()
plt.show()
# Interactive prediction loop: read 4 letters, predict the following one.
preNum = int(input('input the number of test alphabet:'))
for i in range(preNum):
    alphabet1 = input('input test alphabet')
    alphabet = [id_to_onehot[w_to_id[a]] for a in alphabet1]
    alphabet = np.reshape(alphabet, (1, 4, 5))
    result = model.predict(alphabet)
    pred = tf.argmax(result, axis=1)
    pred = int(pred)
    tf.print(alphabet1 + "->" + input_word[pred])
|
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import pytest
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class JunitTestsConcurrencyIntegrationTest(PantsRunIntegrationTest):
  """Integration tests for the junit_tests concurrency settings.

  Every test invokes `./pants test` on a fixture target in a fresh temporary
  workdir and asserts on the junit summary line printed to stdout.
  """

  def _run_pants_test(self, extra_args):
    """Run the `test` goal with the given extra args in a temporary workdir."""
    with self.temporary_workdir() as workdir:
      return self.run_pants_with_workdir(['test'] + extra_args, workdir)

  def test_parallel_target(self):
    """Checks the 'concurrency=parallel_classes' setting in the junit_tests() target"""
    pants_run = self._run_pants_test([
      'testprojects/tests/java/org/pantsbuild/testproject/parallel'
    ])
    self.assert_success(pants_run)
    self.assertIn("OK (2 tests)", pants_run.stdout_data)

  def test_parallel_cmdline(self):
    """Checks the --test-junit-default-concurrency=PARALLEL_CLASSES option"""
    pants_run = self._run_pants_test([
      '--test-junit-default-concurrency=PARALLEL_CLASSES',
      '--test-junit-parallel-threads=2',
      'testprojects/tests/java/org/pantsbuild/testproject/parallel:cmdline'
    ])
    self.assert_success(pants_run)
    self.assertIn("OK (2 tests)", pants_run.stdout_data)

  def test_concurrency_serial_default(self):
    """Checks the --test-junit-default-concurrency=SERIAL option"""
    # NB(zundel): the timeout for each test in ParallelMethodsDefaultParallel tests is
    # currently set to 3 seconds making this test take about 2 seconds to run due
    # to (1 timeout failure)
    pants_run = self._run_pants_test([
      '--test-junit-default-concurrency=SERIAL',
      '--test-junit-parallel-threads=2',
      'testprojects/tests/java/org/pantsbuild/testproject/parallel:cmdline'
    ])
    self.assert_failure(pants_run)
    # Its not deterministic which test will fail, but one of them should timeout
    self.assertIn("Tests run: 2, Failures: 1", pants_run.stdout_data)

  def test_parallel_annotated_test_parallel(self):
    """Checks the @TestParallel annotation."""
    pants_run = self._run_pants_test([
      '--test-junit-default-concurrency=SERIAL',
      'testprojects/tests/java/org/pantsbuild/testproject/parallel:annotated-parallel'
    ])
    self.assert_success(pants_run)
    self.assertIn("OK (2 tests)", pants_run.stdout_data)

  def test_parallel_annotated_test_serial(self):
    """Checks the @TestSerial annotation."""
    pants_run = self._run_pants_test([
      '--test-junit-default-concurrency=PARALLEL_CLASSES',
      '--test-junit-parallel-threads=2',
      'testprojects/tests/java/org/pantsbuild/testproject/parallel:annotated-serial'
    ])
    self.assert_success(pants_run)
    self.assertIn("OK (2 tests)", pants_run.stdout_data)

  @pytest.mark.xfail
  def test_concurrency_annotated_test_serial_parallel_methods(self):
    """Checks the @TestSerial annotation with --test-junit-default-concurrency=PARALLEL_BOTH."""
    pants_run = self._run_pants_test([
      '--test-junit-default-concurrency=PARALLEL_BOTH',
      '--test-junit-parallel-threads=2',
      'testprojects/tests/java/org/pantsbuild/testproject/parallel:annotated-serial'
    ])
    self.assert_success(pants_run)
    self.assertIn("OK (2 tests)", pants_run.stdout_data)

  def test_parallel_methods(self):
    """Checks the concurrency='parallel_methods' setting."""
    pants_run = self._run_pants_test([
      '--test-junit-default-concurrency=SERIAL',
      'testprojects/tests/java/org/pantsbuild/testproject/parallelmethods'
    ])
    self.assert_success(pants_run)
    self.assertIn("OK (4 tests)", pants_run.stdout_data)

  def test_parallel_methods_cmdline(self):
    """Checks the --test-junit-parallel-methods setting."""
    pants_run = self._run_pants_test([
      '--test-junit-default-concurrency=PARALLEL_BOTH',
      '--test-junit-parallel-threads=4',
      'testprojects/tests/java/org/pantsbuild/testproject/parallelmethods:cmdline'
    ])
    self.assert_success(pants_run)
    self.assertIn("OK (4 tests)", pants_run.stdout_data)

  def test_parallel_methods_serial_default(self):
    """Checks the --no-test-junit-default-parallel setting."""
    # NB(zundel): the timeout for each test in ParallelMethodsDefaultParallel tests is
    # currently set to 3 seconds making this test take about 9 seconds to run due
    # to (3 timeout failures)
    pants_run = self._run_pants_test([
      '--test-junit-default-concurrency=SERIAL',
      '--test-junit-parallel-threads=4',
      'testprojects/tests/java/org/pantsbuild/testproject/parallelmethods:cmdline'
    ])
    self.assert_failure(pants_run)
    # Its not deterministic which test will fail, but 3/4 of them should timeout
    self.assertIn("Tests run: 4, Failures: 3", pants_run.stdout_data)
|
import os
import sys, inspect
import tempfile
import pytest
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../')
sys.path.insert(0, myPath + '/../light_controller')
from light_controller.server import app
@pytest.fixture
def client():
    """Yield a Flask test client for the light-controller app (TESTING on)."""
    app.config['TESTING'] = True
    yield app.test_client()
|
def find_spaceship(astromap):
    """Return [x, y] of the 'X' marker, counting rows from the bottom of the
    map, or the lost-forever message when no 'X' is present."""
    rows = astromap.split('\n')
    for y, line in enumerate(reversed(rows)):
        x = line.find('X')
        if x != -1:
            return [x, y]
    return 'Spaceship lost forever.'
|
# 1. Strings: sequence operations, other string methods, pattern matching
S = 'abcd'
print(len(S))
S[0]       # 'a'
S[1]       # 'b'
S[-1]      # 'd'
S[1:2]     # 'b'
S[1:]      # 'bcd'
S[:2]      # 'ab'
S[:-1]     # 'abc'
S[:]       # 'abcd'
S + 'efg'  # 'abcdefg'
S * 3      # 'abcdabcdabcd'
# Type-specific methods
# find() returns -1 when the substring is absent
S.find('b')  # 1
# replace()
S.replace('a', 'XYZ')
# upper(): uppercase copy
S.upper()
# isalpha(): test the string's content, True/False
S.isalpha()
# rstrip(): strip trailing whitespace
S.rstrip()
# built-in help
help(S.replace)
# Other character-handling details
s = 'A\nb\tc'
len(s)     # 5
ord('\n')  # 10
# the r prefix disables escape processing
print(r'//t//t//n')
# Pattern matching:
# match 'hello', then zero or more spaces/tabs, capture everything up to
# a trailing 'world' into group 1.
import re
match = re.match('hello[ \t]*(.*)world', 'hello python world')
match.group(1)  # 'python '
match = re.match('/(.*)/(.*)/(.*)', '/user/home/lumjack')
match.groups()  # ('user', 'home', 'lumjack')
# 2. Lists (basic list operations)
# Sequence operations
l = [123, 'spam', 1.22]
# list length
len(l)
# first element
l[0]
# everything before the last element
l[:-1]
# concatenation builds a new list; the original is unchanged
l + [1, 2, 3]
# Type-specific operations that mutate the list in place:
# remove the element at position 2 (indices start at 0)
l.pop(2)
# append an element at the end
l.append('4897')
# insert at an arbitrary position
# l.insert(1, 22)
# remove an element by value
# l.remove(2)
# sorting
m = [9, 5, 7, 4, 6]
m.sort()
# reverse in place
m.reverse()
# nesting
n = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
print(n[1])
print(n[1][2])
# list comprehensions
col2 = [row[1] for row in n]
# col2 == [2, 5, 8]: a matrix-style column extraction
[row[1] + 1 for row in n]
[row[1] for row in n if row[1] % 2 == 0]
diag = [n[i][i] for i in [0, 1, 2]]
# fixed: a list cannot be concatenated to a str -- convert it first
print('diag:' + str([n[i][i] for i in [0, 1, 2]]))
doubles = [c * 2 for c in 'spam']
# generator of per-row sums
g = (sum(row) for row in n)
next(g)  # 6
next(g)  # 15
# per-row totals
list(map(sum, n))
print(list(map(sum, n)))  # [6, 15, 24]
{sum(row) for row in n}
{i: sum(n[i]) for i in range(3)}
# lists, sets and dicts can all be built with comprehensions
def fact(n):
    """Return n! (factorial) computed recursively.

    :param n: non-negative integer
    :return: factorial of n
    """
    if n <= 1:  # also covers 0! == 1; the original base case recursed forever on 0
        return 1
    return n * fact(n - 1)
# Demonstrate the recursive factorial and regex group extraction.
print(fact(5))
import re
m1=re.match('/(.*)/(.*)/(.*)','/user/home/lumber')
l=m1.groups()
print(l)
m=re.match('hello[ \t]*(.*)world' ,'hello super world')
l2=m.group(1)
print(l2)
# File handling: write two lines, then read the file back.
f = open('hello.txt','w')
f.write('hello\n')
f.write('world\n')
f.close()
t= open('hello.txt')
text=t.read()
t.close()
print(text)
st=text.split()
print(st)
# Introspection on the file object with dir() and help().
detail=dir(f)
print(detail)
he=help(f.seek)
print(he)  # help() prints its text and returns None
# Sets: unordered collections of unique elements.
x = set('spam')
y = {'h','a','m'}
print(x&y)  # intersection
print(x|y)  # union
print(x-y)  # difference
# Build a set with a set comprehension.
t3 = {i ** 2 for i in[1,2,3,4]}
print(t3)
# Exact decimal type: decimal.
import decimal
print(1/3)
# Note: Decimal(1/3) converts an already-imprecise binary float.
d = decimal.Decimal(1/3)
print(d)
# Control the precision (number of significant digits).
decimal.getcontext().prec = 2
d2 = decimal.Decimal('1.00') / decimal.Decimal('3.00')
print(d2)
# Rational numbers: fractions (rebinds f, previously a file object).
from fractions import Fraction
f = Fraction(2,3)
print(f)
f1 = f + 1
print(f1)
# type() returns the type object itself.
print(type(l))
print(type(type(l)))
if type(l)== type(()):
    print('yes')
# comparison against the type name directly
if type(l) == list:
    print('yes')
if isinstance(l,list):
    print('yes')
# Taking part of a list or tuple.
L=['pengrong','super','bilaisheng','chenzhongyi','changjie','lily']
# the clumsy way: element by element
[L[0],L[1]]
# slicing
L[0:2]
# iterating a dict yields its keys
d = {'a':1,'b':2,'c':3}
for key in d:
    print(key)
# a string is iterable character by character
for ch in 'ABC':
    print(ch)
# BUG FIX: importing Iterable from `collections` was deprecated in
# Python 3.3 and removed in Python 3.10; it lives in collections.abc.
from collections.abc import Iterable
# is the object iterable?  (ints and floats are not)
isinstance('abc',Iterable)
# loop with indices via enumerate
for i,value in enumerate(['A',"b","c"]):
    print(i,value)
# tuple unpacking in a for loop
for x,y in [(1,2),(2,3),(3,4)]:
    print(x,y)
# 1: building lists with range and comprehensions
l1=list(range(1,7))
print(l1)
l2=[]
for x in range(1,11):
    l2.append(x*x)
print(l2)
# the shorter comprehension form of the loop above
l3 = [x*x for x in range(1,11)]
print(l3)
# full cross product of the two strings
l4 = [m + n for m in 'ABC' for n in 'abc']
print(l4)
# the os module can list files and directories
import os
l5=[d for d in os.listdir('.')]
print(l5)
d = {'1':'a','2':'b','3':'c'}
for k, v in d.items():
    print(k,v)
# a comprehension can build a list from dict items
l6 = [k + '=' + v for k,v in d.items()]
# lower-case every element of the list
L = ['H','B','C']
# BUG FIX: the original called L.lower() (lists have no .lower(), so this
# raised AttributeError); the loop variable s is the string to lower-case.
l7 = [s.lower() for s in L]
print(l7)
import string
# while-loop implementation of the 9x9 multiplication table
n = 9
j = 0
i = 0
while i < n:
    i = i + 1
    while j < i:
        j = j + 1
        print("%d * %d =%d" % (j, i, i * j), end="\t")
        # reset the inner counter at the end of each row
        if(j==i):
            j=0
            break
    print(" ")
# 3. Print the 9x9 multiplication table with for loops, and the 26
# lower-case letters together with their ASCII codes.
for i in string.ascii_lowercase:
    print(str( i ) +r' '+ str(ord(i)))
for i in range(1,10):
    for j in range(1,i+1):
        print("%d * %d = %d" % (j,i,j*i),end="\t")
    print(" ")
|
#! /usr/bin/python3
import unittest
import auto
class TestJakis(unittest.TestCase):
    """Unit tests for the functions f1-f5 of the auto module."""
    def test_f0(self):
        self.assertTrue(True)
    # Renamed from test_f1__1 (double underscore was a typo).
    def test_f1_1(self):
        w = auto.f1(0)
        self.assertEqual(w,0)
    def test_f1_2(self):
        c = auto.f1(1)
        self.assertEqual(c,1)
    def test_f1_3(self):
        b = auto.f1(2)
        self.assertEqual(b,4)
    def test_f1_4(self):
        d = auto.f1(2,1)
        self.assertEqual(d,5)
    # BUG FIX: this method was also named test_f1_4, which silently
    # shadowed the previous definition so the f1(2,1) case never ran;
    # renamed to test_f1_5 so both tests are collected.
    def test_f1_5(self):
        d = auto.f1(2,3)
        self.assertEqual(d,7)
    def test_f2_1(self):
        w = auto.f2('ala')
        self.assertEqual(w,'a')
    def test_f2_2(self):
        w = auto.f2([1,2,3])
        self.assertEqual(w,1)
    def test_f2_3(self):
        w = auto.f2([])
        self.assertEqual(w,'BUUM')
    def test_f3_1(self):
        w = auto.f3(1)
        self.assertEqual(w,'jeden')
    def test_f3_2(self):
        w = auto.f3(2)
        self.assertEqual(w,'dwa')
    def test_f3_3(self):
        w = auto.f3(3)
        self.assertEqual(w,'trzy')
    def test_f3_4(self):
        w = auto.f3(6)
        self.assertEqual(w,'other')
    def test_f4_1(self):
        w = auto.f4('ala')
        self.assertEqual(w,'ala ma kota')
    def test_f4_2(self):
        w = auto.f4('kot')
        self.assertEqual(w,'kot ma kota')
    def test_f4_3(self):
        w = auto.f4('kot', 'psa')
        self.assertEqual(w,'kot ma kota i psa')
    def test_f4_4(self):
        w = auto.f4('kot', 'mysz')
        self.assertEqual(w,'kot ma kota i mysz')
    def test_f5_1(self):
        w = auto.f5(0)
        self.assertEqual(w,[])
    def test_f5_2(self):
        w = auto.f5(1)
        self.assertEqual(w,[0])
    def test_f5_3(self):
        w = auto.f5(2)
        self.assertEqual(w,[0,1])
    def test_f5_4(self):
        w = auto.f5(7)
        self.assertEqual(w,[0,1,2,3,4,5,6])
    def test_f5_5(self):
        w = auto.f5(7,2)
        self.assertEqual(w,[0,2,4,6])
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
# Generated by Django 2.0.3 on 2018-11-05 07:35
from django.db import migrations
class Migration(migrations.Migration):
    # Schema cleanup after 0001_initial: drops the 'pizzaToppings' field
    # from the Pizza model and the 'subExtra' field from the Sub model.
    dependencies = [
        ('orders', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='pizza',
            name='pizzaToppings',
        ),
        migrations.RemoveField(
            model_name='sub',
            name='subExtra',
        ),
    ]
|
import numpy as np
from torch.utils.data import Dataset
import os
from torchvision import transforms
from dataloaders import custom_transforms as tr
from abc import ABC, abstractmethod
import cv2
from PIL import Image, ImageFile
import scipy.io as scio
ImageFile.LOAD_TRUNCATED_IMAGES = True
def pil_loader(filename, label=False):
    """Load an image/annotation file into a numpy array (or a dict for .mat).

    Supported extensions (case-insensitive):
      * .png/.jpeg/.ppm/.jpg -- decoded with PIL; images come back as
        uint8 BGR arrays, labels as uint8 single-channel ('L'/'P') arrays
      * .mat -- the dict produced by scipy.io.loadmat
      * .npy -- the object returned by numpy.load (allow_pickle=True)

    Args:
        filename: path of the file to read.
        label: when True, treat the file as a label map and keep/convert
            it to a single channel instead of converting to RGB.

    Raises:
        NotImplementedError: for any other extension.
    """
    ext = (os.path.splitext(filename)[-1]).lower()
    # membership test replaces the chained `or` comparisons
    if ext in ('.png', '.jpeg', '.ppm', '.jpg'):
        img = Image.open(filename)
        if not label:
            img = img.convert('RGB')
            img = np.array(img).astype(dtype=np.uint8)
            img = img[:,:,::-1] # reverse channels: RGB -> BGR (OpenCV convention)
        else:
            # keep palette/grayscale label maps as a single channel
            if img.mode != 'L' and img.mode != 'P':
                img = img.convert('L')
            img = np.array(img).astype(dtype=np.uint8)
    elif ext == '.mat':
        img = scio.loadmat(filename)
    elif ext == '.npy':
        img = np.load(filename, allow_pickle=True)
    else:
        raise NotImplementedError('Unsupported file type %s'%ext)
    return img
class BaseDataset(Dataset,ABC):
    """Abstract base class for datasets: stores the config object and
    provides the shared train/validation transform pipelines."""
    def __init__(self, args):
        super().__init__()
        # args appears to be a dict-like config with attribute access
        # (it supports both .input_size and .get('aug', True), e.g. an
        # EasyDict-style object) -- TODO confirm against callers.
        self.args = args
        # label value ignored during training/evaluation
        self.ignore_index = 255
    @abstractmethod
    def __getitem__(self, index):
        pass
    @abstractmethod
    def __len__(self):
        return 0
    @abstractmethod
    def __str__(self):
        pass
    @staticmethod
    def modify_commandline_options(parser,istrain=False):
        """Add new dataset-specific options, and rewrite default values for existing options.
        Parameters:
        parser -- original option parser
        is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
        Returns:
        the modified parser.
        """
        return parser
    def transform_train(self):
        """Build the training pipeline: resize, optional augmentations
        (flip/rotate/crop), normalize, convert to tensor."""
        temp = []
        temp.append(tr.Resize(self.args.input_size))
        # augmentations are on by default; disable with args['aug'] = False
        if self.args.get('aug', True):
            print('\nWith augmentations.')
            temp.append(tr.RandomHorizontalFlip())
            temp.append(tr.RandomRotate(15))
            temp.append(tr.RandomCrop(self.args.input_size))
        else:
            print('\nWithout augmentations.')
        temp.append(tr.Normalize(self.args.norm_params.mean, self.args.norm_params.std))
        temp.append(tr.ToTensor())
        composed_transforms = transforms.Compose(temp)
        return composed_transforms
    def transform_validation(self):
        """Build the validation pipeline: resize, normalize, to tensor
        (no random augmentations)."""
        temp = []
        temp.append(tr.Resize(self.args.input_size))
        # temp.append(tr.RandomCrop(self.args.input_size))
        temp.append(tr.Normalize(self.args.norm_params.mean, self.args.norm_params.std))
        temp.append(tr.ToTensor())
        composed_transforms = transforms.Compose(temp)
        return composed_transforms
|
#To identify the integer
num=input("Enter number: ")
if num<0:
print "It is negative"
elif num>0:
print"It is positive"
elif num==0:
print"It is zero"
else:
print"MIND----->BOOM!",exit()
#To check if it's a prime number
prime=True
if num==1:
print "It is neither prime nor composite"
else:
for i in range(2,num):
if (num%i==0):
print "It's a prime number."
else:
print "It's not a prime number.",exit()
#To Check if it's a palindrome
n=num
r=0
while n!=0:
d=n%10.0
r=r*10+d
n=n/10
if n==r:
print "It's a palindrome."
else:
print"It's not a palindrome."
#To check whether it is divisible by 3
if num%3==0:
print "It is divisible by 3"
else:
print "It is not divisble by 3"
#To check if the given number is an armstrong number
sum=0
temp=num
while temp>0:
digit=temp%10
sum+=digit**3
temp//=10
if arm==sum:
print num,"is an armstrong number"
else:
print num, "is not an armstrong number"
|
def howMany(aDict):
    '''
    aDict: A dictionary, where all the values are lists.
    returns: int, how many values are in the dictionary.
    '''
    # Each key maps to exactly one value, so the number of values equals
    # the number of keys -- len() gives that directly.  (The original
    # counted in a loop and leaked a Python 2 `print e` debug statement.)
    return len(aDict)
# Sample input: three single-entry lists keyed by initial letter.
animals = { 'a': ['aardvark'], 'b': ['baboon'], 'c': ['coati']}
print howMany(animals) |
from django.forms import ModelForm
from .models import Guest
class GuestForm(ModelForm):
    """ModelForm for creating/updating a Guest record."""
    class Meta:
        model = Guest
        # Only the guest-editable fields are exposed on the form.
        fields = (
            'first_name',
            'last_name',
            'baby_menu',
            'baby_chair',
            'diet_restrictions',
        )
|
from datetime import date
from django.test import TestCase
from elections.models import (
ElectedRole,
Election,
ElectionSubType,
ElectionType,
)
from organisations.models import Organisation
from organisations.tests.factories import (
DivisionGeographyFactory,
OrganisationDivisionFactory,
OrganisationDivisionSetFactory,
OrganisationGeographyFactory,
)
from .base_tests import BaseElectionCreatorMixIn, FuzzyInt
class TestCreateIds(BaseElectionCreatorMixIn, TestCase):
    """End-to-end tests for election id/title creation.

    Each test feeds a wizard-style data dict to create_ids() and checks
    that the Election records created have the expected election_id
    strings, titles, and group relationships.
    """
    def run_test_with_data(
        self, all_data, expected_ids, expected_titles, **kwargs
    ):
        """Run create_ids() and assert the resulting Election records
        match the expected ids/titles exactly."""
        self.create_ids(all_data, **kwargs)
        assert Election.private_objects.count() == len(expected_ids)
        # ensure the records created match the expected ids
        for expected_id in expected_ids:
            assert Election.private_objects.filter(
                election_id=expected_id
            ).exists()
        # ensure the records created match the expected titles
        for expected_title in expected_titles:
            assert Election.private_objects.filter(
                election_title=expected_title
            ).exists()
        # ensure group relationships have been saved correctly
        for election in Election.private_objects.all():
            if election.group_type != "election":
                assert isinstance(election.group_id, int)
    def test_group_id(self):
        """Base data alone creates just the election-group id."""
        self.run_test_with_data(
            self.base_data, ["local." + self.date_str], ["Local elections"]
        )
    def test_creates_div_data_ids(self):
        """One contested division yields group, organisation and ballot ids."""
        self.assertEqual(Election.private_objects.count(), 0)
        all_data = self.base_data
        all_data.update({self.make_div_id(): "contested"})
        expected_ids = [
            "local." + self.date_str,
            "local.test." + self.date_str,
            "local.test.test-div." + self.date_str,
        ]
        expected_titles = [
            "Local elections",
            "Test Council local elections",
            "Test Council local elections Test Div 1",
        ]
        self.run_test_with_data(all_data, expected_ids, expected_titles)
    def test_creates_div_data_ids_two_divs(self):
        """Two contested divisions each get their own ballot id."""
        all_data = self.base_data
        all_data.update(
            {
                self.make_div_id(): "contested",
                self.make_div_id(div=self.org_div_2): "contested",
            }
        )
        expected_ids = [
            "local." + self.date_str,
            "local.test." + self.date_str,
            "local.test.test-div." + self.date_str,
            "local.test.test-div-2." + self.date_str,
        ]
        expected_titles = [
            "Local elections",
            "Test Council local elections",
            "Test Council local elections Test Div 1",
            "Test Council local elections Test Div 2",
        ]
        self.run_test_with_data(all_data, expected_ids, expected_titles)
    def test_creates_ids_two_orgs(self):
        """Two organisations produce parallel organisation/ballot ids."""
        org2 = Organisation.objects.create(
            official_identifier="TEST2",
            organisation_type="local-authority",
            official_name="Test Council 2",
            slug="test2",
            territory_code="ENG",
            election_name="Test Council 2 local elections",
            start_date=date(2016, 10, 1),
        )
        ElectedRole.objects.create(
            election_type=self.election_type1,
            organisation=org2,
            elected_title="Local Councillor",
            elected_role_name="Councillor for Test Council 2",
        )
        div_set2 = OrganisationDivisionSetFactory(organisation=org2)
        div3 = OrganisationDivisionFactory(
            divisionset=div_set2, name="Test Div 3", slug="test-div-3"
        )
        all_data = self.base_data
        all_data["election_organisation"] = [self.org1, org2]
        all_data.update(
            {
                self.make_div_id(): "contested",
                self.make_div_id(org=org2, div=div3): "contested",
            }
        )
        expected_ids = [
            "local." + self.date_str,
            "local.test." + self.date_str,
            "local.test2." + self.date_str,
            "local.test.test-div." + self.date_str,
            "local.test2.test-div-3." + self.date_str,
        ]
        expected_titles = [
            "Local elections",
            "Test Council local elections",
            "Test Council 2 local elections",
            "Test Council local elections Test Div 1",
            "Test Council 2 local elections Test Div 3",
        ]
        self.run_test_with_data(all_data, expected_ids, expected_titles)
    def test_creates_div_data_ids_blank_divs(self):
        """A division with an empty value is skipped (no ballot created)."""
        all_data = self.base_data
        all_data.update(
            {
                self.make_div_id(): "contested",
                self.make_div_id(div=self.org_div_2): "",
            }
        )
        expected_ids = [
            "local." + self.date_str,
            "local.test." + self.date_str,
            "local.test.test-div." + self.date_str,
        ]
        expected_titles = [
            "Local elections",
            "Test Council local elections",
            "Test Council local elections Test Div 1",
        ]
        self.run_test_with_data(all_data, expected_ids, expected_titles)
    def test_creates_by_election(self):
        """By-elections get a '.by.' id segment and a title suffix."""
        all_data = self.base_data
        all_data.update(
            {
                self.make_div_id(): "by_election",
                self.make_div_id(div=self.org_div_2): "by_election",
            }
        )
        expected_ids = [
            "local." + self.date_str,
            "local.test." + self.date_str,
            "local.test.test-div.by." + self.date_str,
            "local.test.test-div-2.by." + self.date_str,
        ]
        expected_titles = [
            "Local elections",
            "Test Council local elections",
            "Test Council local elections Test Div 1 by-election",
            "Test Council local elections Test Div 2 by-election",
        ]
        self.run_test_with_data(all_data, expected_ids, expected_titles)
        for election in Election.private_objects.filter(group_type=None):
            assert "by-election" in election.election_title
    def test_creates_mayor_id(self):
        """Mayoral elections: organisation-level id is itself the ballot."""
        mayor_org = Organisation.objects.create(
            official_identifier="MAYORTEST1",
            organisation_type="combined-authority",
            official_name="Test authority",
            slug="test-ca",
            territory_code="ENG",
            election_name="Test Council Mayoral elections",
            start_date=date(2016, 10, 1),
        )
        mayor_election_type = ElectionType.objects.get(election_type="mayor")
        ElectedRole.objects.create(
            election_type=mayor_election_type,
            organisation=mayor_org,
            elected_title="Mayor",
            elected_role_name="Mayor of Foo Town",
        )
        all_data = {
            "election_organisation": [mayor_org],
            "election_type": mayor_election_type,
            "date": self.date,
        }
        expected_ids = [
            "mayor." + self.date_str,
            "mayor.test-ca." + self.date_str,
        ]
        expected_titles = ["Mayoral elections", "Mayor of Foo Town"]
        self.run_test_with_data(all_data, expected_ids, expected_titles)
        ballot = Election.private_objects.get(
            election_id="mayor.test-ca." + self.date_str
        )
        self.assertIsNone(ballot.group_type)
    def test_creates_parl_id(self):
        """Parliamentary elections create only the top-level group id."""
        parl_org = Organisation.objects.create(
            official_identifier="parl",
            organisation_type="parl",
            official_name="Parl",
            slug="parl",
            territory_code="ENG",
            election_name="General Election",
            start_date=date(2016, 10, 1),
        )
        parl_election_type = ElectionType.objects.get(election_type="parl")
        ElectedRole.objects.create(
            election_type=parl_election_type,
            organisation=parl_org,
            elected_title="Member of Parliament",
            elected_role_name="Member of Parliament",
        )
        all_data = {
            "election_organisation": [parl_org],
            "election_type": parl_election_type,
            "date": self.date,
        }
        expected_ids = ["parl." + self.date_str]
        expected_titles = ["UK Parliament elections"]
        self.run_test_with_data(all_data, expected_ids, expected_titles)
    def test_creates_naw_id(self):
        """NAW elections: constituency/region subtypes each get their own
        subtype group id, and by-elections keep the '.by.' segment."""
        naw_org = Organisation.objects.create(
            official_identifier="naw",
            organisation_type="naw",
            official_name="naw",
            slug="naw",
            territory_code="WLS",
            election_name="National Assembly for Wales elections",
            start_date=date(2016, 10, 1),
        )
        naw_election_type = ElectionType.objects.get(election_type="naw")
        naw_election_sub_type_c = ElectionSubType.objects.get(
            election_subtype="c", election_type=naw_election_type
        )
        naw_election_sub_type_r = ElectionSubType.objects.get(
            election_subtype="r", election_type=naw_election_type
        )
        ElectedRole.objects.create(
            election_type=naw_election_type,
            organisation=naw_org,
            elected_title="Assembly Member",
            elected_role_name="Assembly Member for Foo",
        )
        naw_div_set = OrganisationDivisionSetFactory(organisation=naw_org)
        org_div_3 = OrganisationDivisionFactory(
            divisionset=naw_div_set,
            name="Test Div 3",
            slug="test-div-3",
            division_election_sub_type="c",
        )
        org_div_4 = OrganisationDivisionFactory(
            divisionset=naw_div_set,
            name="Test Div 4",
            slug="test-div-4",
            division_election_sub_type="c",
        )
        org_div_5 = OrganisationDivisionFactory(
            divisionset=naw_div_set,
            name="Test Div 5",
            slug="test-div-5",
            division_election_sub_type="r",
        )
        all_data = {
            "election_organisation": [naw_org],
            "election_type": naw_election_type,
            "election_subtype": [
                naw_election_sub_type_c,
                naw_election_sub_type_r,
            ],
            "date": self.date,
        }
        all_data.update(
            {
                self.make_div_id(
                    org=naw_org, div=org_div_3, subtype="c"
                ): "contested",  # contested seat
                self.make_div_id(
                    org=naw_org, div=org_div_4, subtype="c"
                ): "by_election",  # by election
                self.make_div_id(
                    org=naw_org, div=org_div_5, subtype="r"
                ): "contested",
            }
        )
        expected_ids = [
            "naw." + self.date_str,
            "naw.c." + self.date_str,
            "naw.r." + self.date_str,
            "naw.c.test-div-3." + self.date_str,  # no 'by' suffix
            "naw.c.test-div-4.by." + self.date_str,  # 'by' suffix
            "naw.r.test-div-5." + self.date_str,
        ]
        expected_titles = [
            "National Assembly for Wales elections",
            "National Assembly for Wales elections (Constituencies)",
            "National Assembly for Wales elections (Regions)",
            "National Assembly for Wales elections (Constituencies) Test Div 3",
            "National Assembly for Wales elections (Constituencies) Test Div 4 by-election",
            "National Assembly for Wales elections (Regions) Test Div 5",
        ]
        self.run_test_with_data(
            all_data,
            expected_ids,
            expected_titles,
            subtypes=[naw_election_sub_type_c, naw_election_sub_type_r],
        )
    def test_election_with_organisation_geography(self):
        """Only the organisation-level group receives the org geography,
        and lat/lng lookup resolves to that group."""
        all_data = self.base_data
        OrganisationGeographyFactory(
            organisation=all_data["election_organisation"][0],
            geography=self.test_polygon,
        )
        all_data.update(
            {
                self.make_div_id(): "contested",
                self.make_div_id(div=self.org_div_2): "contested",
            }
        )
        expected_ids = [
            "local." + self.date_str,
            "local.test." + self.date_str,
            "local.test.test-div." + self.date_str,
            "local.test.test-div-2." + self.date_str,
        ]
        expected_titles = [
            "Local elections",
            "Test Council local elections",
            "Test Council local elections Test Div 1",
            "Test Council local elections Test Div 2",
        ]
        self.run_test_with_data(all_data, expected_ids, expected_titles)
        for election in Election.private_objects.all():
            if election.group_type == "organisation":
                self.assertTrue(election.geography is not None)
            else:
                self.assertTrue(election.geography is None)
        result = Election.private_objects.for_lat_lng(
            51.50124158773981, -0.13715744018554688
        )
        self.assertEqual(1, len(result))
        self.assertEqual("local.test." + self.date_str, result[0].election_id)
    def test_election_with_division_geography(self):
        """Only the ballot for the division with geography receives it,
        and lat/lng lookup resolves to that ballot."""
        all_data = self.base_data
        DivisionGeographyFactory(
            division=self.org_div_2, geography=self.test_polygon
        )
        all_data.update(
            {
                self.make_div_id(): "contested",
                self.make_div_id(div=self.org_div_2): "contested",
            }
        )
        expected_ids = [
            "local." + self.date_str,
            "local.test." + self.date_str,
            "local.test.test-div." + self.date_str,
            "local.test.test-div-2." + self.date_str,
        ]
        expected_titles = [
            "Local elections",
            "Test Council local elections",
            "Test Council local elections Test Div 1",
            "Test Council local elections Test Div 2",
        ]
        self.run_test_with_data(all_data, expected_ids, expected_titles)
        for election in Election.private_objects.all():
            if election.election_id == "local.test.test-div-2." + self.date_str:
                self.assertTrue(election.geography is not None)
            else:
                self.assertTrue(election.geography is None)
        result = Election.private_objects.for_lat_lng(
            51.50124158773981, -0.13715744018554688
        )
        self.assertEqual(1, len(result))
        self.assertEqual(
            "local.test.test-div-2." + self.date_str, result[0].election_id
        )
    def test_gla_a_is_ballot(self):
        """GLA additional-member ('a') subtype produces a ballot-level id
        (group_type None) within a bounded number of queries."""
        election_type = ElectionType.objects.get(election_type="gla")
        gla = Organisation.objects.create(
            official_identifier="gla",
            organisation_type="gla",
            official_name="Greater London Authority",
            slug="gla",
            territory_code="ENG",
            election_name="London Assembly election",
            start_date=date(2016, 10, 1),
        )
        gla.election_types.add(election_type)
        all_data = {
            "election_organisation": [gla],
            "election_subtype": [
                election_type.subtype.get(election_subtype="a")
            ],
            "election_type": election_type,
            "date": self.date,
            gla.pk: None,
            "{}_no_divs".format(gla.pk): "",
        }
        expected_ids = [
            "gla." + self.date_str,
            "gla.a." + self.date_str,
        ]
        expected_titles = [
            "Greater London Assembly elections",
            "Greater London Assembly elections (Additional)",
        ]
        with self.assertNumQueries(FuzzyInt(24, 25)):
            self.run_test_with_data(
                all_data,
                expected_ids,
                expected_titles,
                subtypes=True,
            )
        ballot = Election.private_objects.get(election_id__startswith="gla.a.")
        self.assertIsNone(ballot.group_type)
|
import re, calendar, time
from datetime import datetime, date, timedelta
from genshi.builder import tag
from trac.core import *
from trac.web import IRequestHandler
from trac.web.chrome import INavigationContributor, ITemplateProvider
from trac.util.datefmt import to_datetime, utc
class TicketCalendarPlugin(Component):
    """Trac plugin that renders tickets and milestones on a monthly
    calendar.

    Tickets are selected by their 'due_assign'/'due_close' custom fields
    (stored as 'YYYY/MM/DD' strings) falling inside the displayed month,
    optionally filtered by owner and milestone.
    """
    implements(INavigationContributor, IRequestHandler, ITemplateProvider)

    # INavigationContributor methods
    def get_active_navigation_item(self, req):
        return 'ticketcalendar'

    def get_navigation_items(self, req):
        # Only users allowed to view tickets see the nav entry.
        if req.perm.has_permission('TICKET_VIEW'):
            yield ('mainnav', 'ticketcalendar',
                   tag.a('Calendar', href=req.href.ticketcalendar()))

    # IRequestHandler methods
    def match_request(self, req):
        return re.match(r'/ticketcalendar(?:_trac)?(?:/.*)?$', req.path_info)

    def calendarRange(self, y, m):
        """Return (firstDay, lastDay) spanning month m of year y, extended
        to whole Sunday-to-Saturday weeks."""
        w, mdays = calendar.monthrange(y, m)
        # monthrange() weekdays start on Monday; shift so weeks start Sunday
        w = (w + 1) % 7
        firstDay = date(y, m, 1) - timedelta(days=w)
        lastDay = date(y, m, mdays)
        w = (lastDay.weekday() + 1) % 7
        lastDay = lastDay + timedelta(days=(6 - w))
        return firstDay, lastDay

    def dateToString(self, dt):
        """Format a date as zero-padded 'YYYY/MM/DD' (the format used by
        the due_assign/due_close custom fields)."""
        # strftime replaces the manual zero-padding of the original
        return dt.strftime("%Y/%m/%d")

    def _parse_due_date(self, value):
        """Parse a 'YYYY/MM/DD' custom-field value into a date, or None
        when the value is missing or malformed."""
        try:
            t = time.strptime(value, "%Y/%m/%d")
            return date(t[0], t[1], t[2])
        # BUG FIX: the original used the Python 2 form
        # `except ValueError, TypeError:`, which catches ONLY ValueError
        # and binds it to the name TypeError -- a None value passed to
        # strptime would have escaped uncaught.
        except (ValueError, TypeError):
            return None

    def process_request(self, req):
        """Collect the month's tickets and milestones and hand them to the
        calendar.html template."""
        ymonth = req.args.get('month')
        yyear = req.args.get('year')
        show_my_ticket = req.args.get('show_my_ticket')
        selected_milestone = req.args.get('selected_milestone')
        cday = date.today()
        # both year and month must be supplied to override "today"
        if not (not ymonth or not yyear):
            cday = date(int(yyear), int(ymonth), 1)
        # compute next month
        nm = cday.month + 1
        ny = cday.year
        if nm > 12:
            ny = ny + 1
            nm = 1
        nmonth = datetime(ny, nm, 1)
        # compute previous month
        pm = cday.month - 1
        py = cday.year
        if pm < 1:
            py = py - 1
            pm = 12
        pmonth = date(py, pm, 1)
        first, last = self.calendarRange(cday.year, cday.month)
        # process tickets whose due dates fall inside the displayed range
        db = self.env.get_db_cnx()
        cursor = db.cursor()
        # SECURITY FIX: owner/milestone/date values are passed as bind
        # parameters instead of being interpolated into the SQL string --
        # req.authname and selected_milestone come from the request, so the
        # original string concatenation was an SQL injection hole.
        sql = ("SELECT id, type, summary, owner, description, status, a.value, c.value from ticket t "
               "JOIN ticket_custom a ON a.ticket = t.id AND a.name = 'due_assign' "
               "JOIN ticket_custom c ON c.ticket = t.id AND c.name = 'due_close' "
               "WHERE ((a.value > %s AND a.value < %s ) "
               " OR (c.value > %s AND c.value < %s))")
        args = [self.dateToString(first), self.dateToString(last),
                self.dateToString(first), self.dateToString(last)]
        if show_my_ticket == "on":
            sql += " AND owner = %s"
            args.append(req.authname)
        if selected_milestone != None and selected_milestone != "":
            sql += " AND milestone = %s"
            args.append(selected_milestone)
        self.log.debug(sql)
        cursor.execute(sql, args)
        tickets = []
        for id, type, summary, owner, description, status, due_assign, due_close in cursor:
            due_assign_date = self._parse_due_date(due_assign)
            due_close_date = self._parse_due_date(due_close)
            ticket = {'id': id, 'type': type, 'summary': summary, 'owner': owner, 'description': description, 'status': status, 'due_assign': due_assign_date, 'due_close': due_close_date}
            tickets.append(ticket)
        # get roadmap milestones that have a due date set
        sql = ("SELECT name, due, completed, description from milestone")
        self.log.debug(sql)
        cursor.execute(sql)
        # the leading "" entry is kept for the template's milestone filter
        milestones = [""]
        for name, due, completed, description in cursor:
            if due != 0:
                due_time = to_datetime(due, utc)
                due_date = date(due_time.year, due_time.month, due_time.day)
                milestone = {'name': name, 'due': due_date, 'completed': completed != 0, 'description': description}
                milestones.append(milestone)
        data = {'current': cday, 'prev': pmonth, 'next': nmonth, 'first': first, 'last': last, 'tickets': tickets, 'milestones': milestones,
                'show_my_ticket': show_my_ticket, 'selected_milestone': selected_milestone}
        return 'calendar.html', data, None

    # ITemplateProvider methods
    def get_templates_dirs(self):
        from pkg_resources import resource_filename
        return [resource_filename(__name__, 'templates')]

    def get_htdocs_dirs(self):
        from pkg_resources import resource_filename
        return [('tc', resource_filename(__name__, 'htdocs'))]
|
#!/usr/bin/python
# -*- coding: cp936 -*-
import sqlite3
import csv
import xlrd
import xlwt
def getSheet3FromSQLite():
    """Export the joined newreg/simtrade/marketdep/marketper data from the
    SQLite database into ../output/expandedSheet3.xls.

    Columns where a LEFT JOIN produced no match (Python None) are written
    as empty cells; everything else is written as str(value).
    """
    def write_cell(sheet, r, c, value):
        # Write str(value), or an empty cell when the joined value is NULL.
        # (This helper replaces twelve identical if/else blocks.)
        if str(value).strip() == 'None':
            sheet.write(r, c, '')
        else:
            sheet.write(r, c, str(value))

    # Open the database connection and the workbooks involved.
    with sqlite3.connect('C:\sqlite\db\hxdata.db') as db:
        workbooksrc = xlrd.open_workbook('D:\DataTool\dataTool.xls')
        # NOTE(review): src is never used below -- presumably kept so a
        # missing source workbook fails fast; confirm before removing.
        src = workbooksrc.sheet_by_name('Sheet3')
        workbookdes = xlwt.Workbook()
        dst = workbookdes.add_sheet('sheet3')
        sqStatement = "SELECT newreg.createtime, newreg.usrmobile, simtrade.tradedays, \
newreg.departmentid, marketdep.depname, newreg.marketcode, \
marketper.marketname, marketper.markettype, marketper.marketmobile,\
newreg.refid, newreg.refnickname, newreg.refrealname, newreg.refphone, newreg.pageindex\
FROM newreg \
LEFT JOIN simtrade \
ON newreg.usrmobile = simtrade.usrmobile \
LEFT JOIN marketdep \
ON newreg.departmentid = marketdep.depid \
LEFT JOIN marketper \
ON newreg.marketcode = marketper.marketcode \
ORDER BY newreg.createtime; "
        # header row (column letters A..N)
        headers = ['注册时间', '用户手机号', '本月模拟交易天数', '营销营业部代码',
                   '营销营业部名称', '营销人员编码', '营销人员名称', '营销人员类别',
                   '营销人员手机号', '推荐人id', '推荐人昵称', '推荐人姓名',
                   '推荐人电话', '海报ID']
        for col, title in enumerate(headers):
            dst.write(0, col, title)
        row = 1
        for createtime, usrmobile, tradedays, departmentid, departmentname, marketcode, marketname, markettype, marketmobile,\
            refid, refnickname, refrealname, refphone, pageindex in db.execute(sqStatement):
            # registration date only (drop the time part)
            dst.write(row, 0, str(createtime).split(' ')[0])
            dst.write(row, 1, str(usrmobile))
            # remaining columns may be NULL because of the LEFT JOINs
            for col, value in enumerate((tradedays, departmentid, departmentname,
                                         marketcode, marketname, markettype,
                                         marketmobile, refid, refnickname,
                                         refrealname, refphone, pageindex),
                                        start=2):
                write_cell(dst, row, col, value)
            row = row + 1
    workbookdes.save('../output/expandedSheet3.xls')
# csv.writer(dst).writerows(db.execute(sqStatement))
# Run the export when the script is executed (or imported).
getSheet3FromSQLite()
|
# Kickstarter discovery URL; the page number is appended when paging.
PAGE_URL = "https://www.kickstarter.com/discover/advanced?category_id=16&woe_id=0&sort=magic&seed=2569455&page="
# CSS class that marks a project link element on a results page.
CELL_URL_STYLE = "soft-black.mb3"
# Default number of project links to collect.
DEFAULT_NUM = 300
def get_links_from_page(driver, link_style=CELL_URL_STYLE):
    """Get all project links from a single webpage of results.

    (The original described this in a bare module-level string above the
    def; it is now a real docstring.)

    Args:
        driver: selenium webdriver currently on a results page.
        link_style: class name identifying the project link elements.

    Returns:
        list of href strings, one per project element found.
    """
    return [project.get_attribute('href')
            for project in driver.find_elements_by_class_name(link_style)]
"""
Get num_of_projects projects
"""
def get_project_links(driver, num_of_projects=DEFAULT_NUM):
project_links = []
page_num = 1
while len(project_links) < num_of_projects:
driver.get(PAGE_URL + str(page_num))
project_links += get_links_from_page(driver)
page_num += 1
return project_links[:num_of_projects] |
# Read an integer and report its parity; zero is reported as "No aplica".
numero = int(input("Ingrese un numero: "))
mensaje = "No aplica" if numero == 0 else ("Es par" if numero % 2 == 0 else "Es impar")
print(mensaje)
|
from typing import List
import pymorphy2
from nltk import ToktokTokenizer
from kts_linguistics.chars import RUSSIAN_ALPHABET
from kts_linguistics.corpora.corpora import Corpora
from kts_linguistics.phonetics.phonetize import phonetize_word, phonetize
def normalize_corpora(corpora: Corpora) -> Corpora:
    """Build a new Corpora whose words are replaced by their pymorphy2
    normal forms, preserving the original counts."""
    analyzer = pymorphy2.MorphAnalyzer()
    result = Corpora()
    for token, freq in corpora.words_with_counts():
        lemma = analyzer.parse(token)[0].normal_form
        result.increment_popularity(lemma, freq)
    return result
def phonetize_corpora(corpora: Corpora) -> Corpora:
    """Build a new Corpora keyed by the phonetic form of each word,
    preserving the original counts."""
    result = Corpora()
    for token, freq in corpora.words_with_counts():
        result.increment_popularity(phonetize_word(token), freq)
    return result
def normalize_sentences(sentences: List[str]) -> List[str]:
    """Normalize each sentence: lower-case it, replace every non-Russian
    character with a space, then lemmatize each token with pymorphy2."""
    tokenizer = ToktokTokenizer()
    analyzer = pymorphy2.MorphAnalyzer()

    def normalize_line(raw: str) -> str:
        lowered = raw.lower()
        cleaned = ''.join(ch if ch in RUSSIAN_ALPHABET else ' ' for ch in lowered)
        lemmas = (analyzer.parse(tok)[0].normal_form for tok in tokenizer.tokenize(cleaned))
        return ' '.join(lemmas)

    return [normalize_line(line) for line in sentences]
def phonetize_sentences(sentences: List[str]) -> List[str]:
    """Apply phonetize() to every sentence, preserving order."""
    return list(map(phonetize, sentences))
|
import abc
import random
from copy import deepcopy
from library import POST
from library import Mining
class POST_UDCC(POST):
    """POST variant that enforces a user-distribution cardinality
    constraint (UDCC): at most `mur` users may be assigned to any role.

    mine() clones over-subscribed roles and spreads the overflow users
    across the copies.
    """
    def __init__(self, state, mur, reduce=False):
        super().__init__(state)
        self._ur = dict() # key: role - values: users assigned to key
        self._mur = mur # maximum users per role
        # when True, drop redundant and unused roles before enforcing UDCC
        self._reduce = reduce
        # working copies so the original assignments stay untouched
        self._ua = deepcopy(self._orig_ua)
        self._pa = deepcopy(self._orig_pa)
    def _update_ur(self):
        """Rebuild the role -> users map from the current user-assignment."""
        for user, roles in self._ua.items():
            for r in roles:
                if r not in self._ur:
                    self._ur[r] = [user]
                else:
                    self._ur[r].append(user)
    def redundant_roles(self):
        """Return {user: roles} where each listed role's permission set is
        contained in another role's set held by the same user, so dropping
        it loses no permissions."""
        self._redundant = dict()
        for user, roles in self._ua.items():
            # sort the user's roles by permission-set size so subset checks
            # only need to look at later (larger or equal) entries
            to_check = sorted([(r, self._pa[r]) for r in roles], key=lambda t: len(t[1]))
            # print(user, to_check)
            for i in range(len(to_check) - 1):
                for j in range(i + 1, len(to_check)):
                    if to_check[i][1] <= to_check[j][1]:
                        if user in self._redundant:
                            self._redundant[user].add(to_check[i][0])
                        else:
                            self._redundant[user] = {to_check[i][0]}
        return self._redundant
    def remove_redundant_roles(self):
        """Drop every role flagged by redundant_roles() from its user."""
        for user, roles in self._redundant.items():
            if not (roles <= self._ua[user]):
                print('ERROR!!!!')
            self._ua[user] = self._ua[user] - roles
    def unused_roles(self):
        """Return the roles in PA that no user is assigned to (an empty
        dict -- falsy -- when every role is in use)."""
        all_roles = set()
        for roles in self._ua.values():
            all_roles.update(roles)
        return set(self._pa.keys()) - all_roles if set(self._pa.keys()) != all_roles else {}
    def remove_unused_roles(self, to_remove):
        """Delete the given roles from the permission-assignment."""
        for role in to_remove:
            del self._pa[role]
    def mine(self):
        """Enforce the UDCC: for every role with more than mur users,
        remove the role from the overflow users and reassign them to fresh
        clones of the role, each clone taking at most mur users."""
        if self._reduce: # first remove redundant roles then remove, if any, unused roles
            if self.redundant_roles():
                self.remove_redundant_roles()
            if u_r := self.unused_roles():
                self.remove_unused_roles(u_r)
        self._update_ur()
        # next fresh role id -- assumes role keys are comparable/numeric
        # (TODO confirm against the library's role-id convention)
        nr = max(self._pa.keys())
        for role, users in self._ur.items():
            if len(users) > self._mur:
                i_u = 0 # number of users for which we modified the role assignments
                for u in users[self._mur:]:
                    self._ua[u].remove(role)
                    # start a new clone every mur reassigned users
                    if i_u % self._mur == 0:
                        nr += 1
                        self._pa[nr] = deepcopy(self._pa[role])
                    i_u += 1
                    self._ua[u].add(nr)
class STRICT_UDCC(Mining):
    """Role-mining heuristic enforcing a strict user-distribution cardinality
    constraint (UDCC): no role may be assigned to more than `mur` users.

    Roles that hit the limit become "forbidden" and are split into sub-roles;
    permissions that cannot be covered this way fall back to a direct
    user-to-permission assignment (DUPA).
    """

    def __init__(self, dataset, mur, access_matrix='upa', criterion='min', num_iter=10):
        super().__init__(dataset)
        self._mur = len(self._users) if mur == 0 else mur  # maximum users per role (0 = unbounded)
        self._num_iter = num_iter  # number of attempts at generating a random role pair (see _split)
        self._au = dict()  # key: role id - value: number of users assigned to it
        self._forbidden_roles = list()  # roles already assigned to mur users
        self._dupa = dict()  # direct user-to-permission assignment
        # use the original UPA or the entries left uncovered in UPA
        self._matrix = self._upa if access_matrix == 'upa' else self._unc_upa
        # select the minimum weight row (criterion ='min') or the maximum weight row
        self._selection = min if criterion == 'min' else max

    def _pick_role(self):
        """Select a candidate (user, role) pair according to the configured
        criterion; when the induced role is forbidden, return the user plus
        the two sub-roles produced by _split (either may be None)."""
        u, prms = self._selection([(u, self._matrix[u]) for u in self._unc_users],
                                  key=lambda t: len(t[1]))
        prms = self._unc_upa[u]
        if prms not in self._forbidden_roles:
            to_return = [u, prms]  # return user and role
        else:  # split the role as it already reached the UDCC constraint (i.e., mur)
            # Cannot split a role with a single permission; it will be handled by DUPA
            if len(prms) == 1:
                roles = [None, None]
            else:
                roles = self._split(prms)
            to_return = [u, roles[0], roles[1]]
        return to_return

    def _split(self, prms):
        """Try to express the forbidden role `prms` as the union of two roles
        that still satisfy the UDCC; return [None, None] on failure."""
        # any considered role is a proper subset of prms
        all_contained_roles = [(role, self._au[r]) for r, role in self._pa.items()
                               if role < prms and self._au[r] < self._mur]
        # first check pairs of existing roles satisfying the UDCC constraint
        to_check = list()
        for i in range(len(all_contained_roles) - 1):
            for j in range(i + 1, len(all_contained_roles)):
                if all_contained_roles[i][0].union(all_contained_roles[j][0]) == prms:
                    to_check.append((all_contained_roles[i][0], all_contained_roles[j][0],
                                     all_contained_roles[i][1] + all_contained_roles[j][1]))
        # If no pair of existing roles covers prms, consider any contained role
        # in prms and its complement with respect to prms. Consider the complement
        # only if it is not a mined role (i.e., it does not appear in PA)
        if not to_check:
            for (role, nau) in all_contained_roles:
                if prms - role not in self._pa.values():
                    to_check.append((role, prms - role, nau))
        if to_check:
            # If some role pair has been found, take the one with the least
            # total number of already-assigned users
            to_return = min(to_check, key=lambda t: t[2])[:2]  # keep only the two roles
        else:
            # try num_iter times to generate two random new roles covering prms
            i = 0
            while i < self._num_iter:
                i += 1
                np = random.randint(1, len(prms) - 1)  # len(prms) - 1 avoids regenerating prms itself
                r1 = set(random.sample(list(prms), np))
                r2 = prms - r1
                if r1 in self._pa.values() or r2 in self._pa.values():
                    continue  # if either r1 or r2 has already been mined, try again
                else:
                    to_return = [r1, r2]
                    break
            else:  # If role generation fails num_iter times, give up and handle prms by DUPA
                to_return = [None, None]  # no roles found
        return to_return

    def _update_ua_pa(self, u_to_add, prms):
        """Assign the role induced by `prms` (creating it if new) to u_to_add
        and then to other compatible uncovered users, up to mur in total.
        Returns the set of users that received the role."""
        usrs = set()
        # Look for the role's index, if any
        idx = 0
        if in_pa := [r for (r, role) in self._pa.items() if role == prms]:
            idx = in_pa[0]
        # If the role induced by prms is new, add it to PA
        if not idx:
            self._k += 1
            idx = self._k
            self._pa[idx] = deepcopy(prms)
            self._au[idx] = 0
        # users possessing all permissions in prms, some of which are not covered yet
        user_to_consider = [usr for usr in self._unc_users if prms.issubset(self._upa[usr]) and
                            prms.intersection(self._unc_upa[usr])]
        # Process u_to_add first so it is guaranteed a slot in the role
        user_to_consider.remove(u_to_add)
        user_to_consider.insert(0, u_to_add)
        for u in user_to_consider:
            if self._au[idx] < self._mur:
                usrs.add(u)
                self._au[idx] += 1
                if u in self._ua:
                    self._ua[u].add(idx)
                else:
                    self._ua[u] = {idx}
                # If the role at idx has reached the maximum number of allowed
                # users (mur), mark it forbidden and stop assigning it
                if self._au[idx] == self._mur:
                    self._forbidden_roles.append(self._pa[idx])
                    break
            else:
                break
        return usrs  # users that have been assigned the role induced by prms

    def mine(self):
        """Cover all users' permissions with UDCC-compliant roles, falling
        back to DUPA when a role cannot be split any further."""
        while self._unc_users:
            result = self._pick_role()  # result = [user, r1, r2] (r2 might be absent)
            u = result[0]
            if result[1] is not None:  # assign roles to u and to at most mur other users
                for role in result[1:]:
                    users = self._update_ua_pa(u, role)
                    self._update_unc(users, role)
            else:  # assign uncovered permissions through DUPA
                self._dupa[u] = deepcopy(self._unc_upa[u])
                self._update_unc({u}, self._unc_upa[u])

    def check_solution(self):
        """Verify that UA/PA plus DUPA reproduce UPA exactly; print any
        discrepancy found and return True iff the solution is complete."""
        covered = True
        if self._users != set(self._upa.keys()):
            print('ERROR: skipped user in UA')
            print(set(self._upa.keys()).symmetric_difference(self._users))
            covered = False
        for u in self._users:
            if u not in self._ua:
                if u not in self._dupa:
                    print('ERROR: skipped user', u)
                    covered = False
                # BUG FIX: this comparison used to run unconditionally and
                # raised KeyError when u was missing from DUPA as well.
                elif self._dupa[u] != self._upa[u]:
                    print('ERROR: wrong DUPA assignment')
                    covered = False
            else:
                perms = set()
                for r in self._ua[u]:
                    perms.update(self._pa[r])
                # BUG FIX: this used to read the module-level `state` object
                # (state._dupa) instead of this instance's own DUPA.
                if u in self._dupa:
                    perms.update(self._dupa[u])
                if perms != self._upa[u]:
                    print('uncovered permissions for user', u, 'uncovered permissions', self._upa[u] - perms)
                    covered = False
        return covered

    def get_dupa(self):
        """Return the total number of directly assigned permissions."""
        return sum(len(permissions) for permissions in self._dupa.values())

    def verify_dupa_covering(self):
        """Warn when a user's DUPA permissions could instead be covered by
        already-mined roles that still have spare capacity."""
        for u in self._dupa:
            prms = deepcopy(self._dupa[u])
            for i, r in self._pa.items():
                if r <= self._dupa[u] and self._au[i] < self._mur:
                    prms = prms - r
            if not prms:
                print('ATTENTION!!!')
                print(' permissions assigned to user ', u, ' by DUPA can be covered by mined roles')
class STRICT_UDCC_REDUCE(STRICT_UDCC, POST_UDCC):
    # Runs the strict UDCC miner, then applies the redundant/unused-role
    # cleanup and prints the cost metrics before and after reduction.
    def mine(self):
        # STRICT_UDCC.mine (first in the MRO) performs the actual mining.
        super().mine()
        wsc, nr, ua, pa = self.get_wsc()
        dupa = self.get_dupa()
        # Metrics row (LaTeX-table style): #roles, wsc, |UA|, |PA|, |DUPA|.
        # NOTE(review): wsc presumably "weighted structural complexity" -- confirm.
        print(f'{nr:>5} & {wsc:>7} & {ua:>7} & {pa:>7} & {dupa:>5}')
        if self.redundant_roles():
            print('redundant roles')
            self.remove_redundant_roles()
        if u_r := self.unused_roles():
            self.remove_unused_roles(u_r)
        wsc, nr, ua, pa = self.get_wsc()
        dupa = self.get_dupa()
        print(f'{nr:>5} & {wsc:>7} & {ua:>7} & {pa:>7} & {dupa:>5}')
# abstract class
class UDCC(Mining, abc.ABC):
    """Base class for UDCC-constrained role-mining heuristics.

    Subclasses implement _pick_role; mine() repeatedly picks a role,
    records it in UA/PA and updates the uncovered-entries bookkeeping.
    """

    def __init__(self, dataset, mur=0):
        super().__init__(dataset)
        # maximum users per role; mur == 0 means "no limit" (all users)
        self._mur = mur if mur else len(self._users)

    @abc.abstractmethod
    def _pick_role(self):
        pass

    def _update_ua_pa(self, usrs, prms):
        """Register `prms` as a fresh role and assign it to every user in usrs."""
        self._k += 1
        self._pa[self._k] = prms
        for usr in usrs:
            self._ua.setdefault(usr, set()).add(self._k)

    def _update_unc(self, usrs, prms):
        """Remove the newly covered (user, permission) entries from the
        uncovered UPA/PUA views, dropping fully covered users/permissions."""
        for usr in usrs:
            remaining = self._unc_upa[usr] - prms
            self._unc_upa[usr] = remaining
            if not remaining:
                del self._unc_upa[usr]
                self._unc_users.remove(usr)
        for prm in prms:
            if prm not in self._unc_pua:
                continue
            left = self._unc_pua[prm] - usrs
            self._unc_pua[prm] = left
            if not left and prm in self._unc_permissions:
                del self._unc_pua[prm]
                self._unc_permissions.remove(prm)

    def mine(self):
        """Keep picking roles until every user is fully covered."""
        while self._unc_users:
            usrs, prms = self._pick_role()
            if usrs:
                self._update_ua_pa(usrs, prms)
                self._update_unc(usrs, prms)
class UDCC_1(UDCC):
    def _pick_role(self):
        """Pick the role induced by the uncovered user with the fewest
        uncovered permissions; assign it to at most mur users, preferring
        those with the most uncovered permissions."""
        _user, prms = min(self._unc_upa.items(), key=lambda kv: len(kv[1]))
        candidates = sorted(
            (usr for usr in self._unc_users if prms <= self._unc_upa[usr]),
            key=lambda usr: len(self._unc_upa[usr]),
            reverse=True,
        )
        # slicing past the end is harmless, so no explicit length check needed
        return set(candidates[:self._mur]), prms
class UDCC_2(UDCC):
    def _pick_role(self):
        """Pick a role seeded either by the user with the fewest uncovered
        permissions or by the permission with the fewest uncovered users,
        whichever side has the smaller minimum."""
        u, u_prms = min(self._unc_upa.items(), key=lambda t: len(t[1]))
        p, p_usrs = min(self._unc_pua.items(), key=lambda t: len(t[1]))
        # BUG FIX: the original compared the two sets themselves with `<=`
        # (a subset test between a permission set and a user set); the
        # key=len selection above shows a size comparison was intended.
        if len(u_prms) <= len(p_usrs):
            return self._pick_role_u(u)
        return self._pick_role_p(p)

    def _pick_role_u(self, u):  # the selected node is a user
        """Role = u's uncovered permissions, assigned to up to mur users
        whose uncovered permissions contain it."""
        prms = self._unc_upa[u]
        matching = [usr for usr in self._unc_users if prms <= self._unc_upa[usr]]
        return set(matching[:self._mur]), prms

    def _pick_role_p(self, p):  # the selected node is a permission
        """Pick up to mur uncovered users of p; role = all uncovered
        permissions shared by every one of them (w.r.t. the full PUA)."""
        all_usrs = list(self._unc_pua[p])
        usrs = set(all_usrs[:self._mur])
        prms = {prm for prm in self._unc_permissions if usrs <= self._pua[prm]}
        return usrs, prms
class UDCC_RM_1(UDCC):
    def _pick_role(self):
        """Seed the role from the full UPA row of the uncovered user with
        the smallest permission set; cap the assignment at mur users while
        always keeping the seed user."""
        u, prms = min(((usr, self._upa[usr]) for usr in self._unc_users),
                      key=lambda pair: len(pair[1]))
        matching = {usr for usr in self._unc_users if prms <= self._upa[usr]}
        if len(matching) > self._mur:
            # keep the seed user plus the first mur-1 of the others
            matching.remove(u)
            matching = set(list(matching)[:self._mur - 1])
            matching.add(u)
        return matching, prms
class UDCC_RM_2(UDCC):
    def _pick_role(self):
        """Seed the role from the *uncovered* permissions of the user with
        the smallest uncovered set; cap at mur users, keeping the seed user."""
        u, prms = min([(u, self._unc_upa[u]) for u in self._unc_users], key=lambda t: len(t[1]))
        # users whose full permission set contains the seed role
        # (the original computed this identical set twice in a row)
        all_usrs = {usr for usr in self._unc_users if prms <= self._upa[usr]}
        if len(all_usrs) <= self._mur:
            usrs = all_usrs
        else:
            # keep the seed user plus the first mur-1 of the others
            all_usrs.remove(u)
            usrs = set(list(all_usrs)[:self._mur - 1])
            usrs.add(u)
        return usrs, prms
if __name__ == '__main__':
    # Demo run (removed a stray dead `pass` that preceded the live code).
    # 1) Strict UDCC mining on the small 'hc' dataset.
    dataset = 'hc'
    mur = 4
    dataset_name = 'datasets/' + dataset + '.txt'
    state = STRICT_UDCC(dataset_name, mur, access_matrix='unc_upa', criterion='min')
    state.mine()
    wsc, nr, ua, pa = state.get_wsc()
    print('wsc', wsc, '#roles:', nr, '|ua|:', ua, '|pa|:', pa, '|dupa|:', state.get_dupa())
    print('dupa:', state._dupa)
    print('covered:', state.check_solution())
    # 2) Mining plus role reduction on the larger dataset.
    dataset = 'americas_large'
    mur = 50
    dataset_name = 'datasets/' + dataset + '.txt'
    state = STRICT_UDCC_REDUCE(dataset_name, mur, access_matrix='unc_upa', criterion='min')
    state.mine()
    print('covered:', state.check_solution())
|
import smtplib
from email.message import EmailMessage
from pathlib import Path  # this is similar to os.path -- allows us to access files
from string import Template  # string Template allows you to substitute variables inside of texts

# Load the HTML body as a template so `$name` placeholders can be filled in.
# html = Path('index.html').read_text()
html = Template(Path('index.html').read_text())

# Build the message: headers plus the substituted HTML body.
email = EmailMessage()  # this is our email object
email['from'] = 'Cordawg'
email['to'] = 'cory.send.it@gmail.com'
email['subject'] = 'Better update this subject line'
email.set_content(html.substitute({'name': 'Cordawg'}), 'html')

# Send via Gmail SMTP with STARTTLS on port 587.
# NOTE(review): credentials are hard-coded in source -- move them to an
# environment variable or secrets store and rotate this password.
with smtplib.SMTP(host='smtp.gmail.com', port=587) as smtp:
    smtp.ehlo()
    smtp.starttls()
    smtp.login('cory.send.it@gmail.com', 'bonfrerebehavior')
    smtp.send_message(email)
    print('this email is sent')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2016-04-23, 2:50 PM
# @Author : ZHZ
# @Description : split the dataset into windows of num_days days (default 14)
import pandas as pd
import numpy as np
import datetime

# Aggregation window size in days.
num_days = 14
# Reference date: day offsets below are counted from 2014-10-09.
days_20141009 = datetime.datetime(2014, 10, 9)
#filtered_outlier_if = pd.read_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/FilteredData/filtered_outlier_if.csv");
filtered_outlier_if = pd.read_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/OutputData/1_if1.csv");
# `date` is an integer like 20141009; decode year/month/day (Python 2
# integer division) and store the offset from the reference date.
filtered_outlier_if['days_20141009'] = filtered_outlier_if['date'].\
    map(lambda x:(datetime.datetime(x / 10000, x / 100 % 100, x % 100) - days_20141009).days)
#filtered_outlier_isf = pd.read_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/FilteredData/filtered_outlier_isf.csv");
filtered_outlier_isf = pd.read_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/OutputData/2_isf2.csv");
filtered_outlier_isf['days_20141009'] = filtered_outlier_isf['date'].\
    map(lambda x:(datetime.datetime(x / 10000, x / 100 % 100, x % 100) - days_20141009).days)
def countByDays_if(dataframe, start_day, end_day):
    """Aggregate the rows whose day offset lies in [start_day, end_day]
    into a single dict: id-like columns are averaged, traffic/sales
    counters are summed, date ranges become "min_max" strings.
    Returns None when start_day > end_day."""
    if start_day > end_day:
        return
    window = dataframe[(dataframe['days_20141009'] >= start_day) &
                       (dataframe['days_20141009'] <= end_day)]
    # identifier columns: constant per item, so the mean recovers the value
    mean_cols = ['item_id', 'cate_id', 'cate_level_id', 'brand_id', 'supplier_id']
    # counter columns: accumulated over the whole window
    sum_cols = ['pv_ipv', 'pv_uv', 'cart_ipv', 'cart_uv', 'collect_uv',
                'num_gmv', 'amt_gmv', 'qty_gmv', 'unum_gmv',
                'amt_alipay', 'num_alipay', 'qty_alipay', 'unum_alipay',
                'ztc_pv_ipv', 'tbk_pv_ipv', 'ss_pv_ipv', 'jhs_pv_ipv',
                'ztc_pv_uv', 'tbk_pv_uv', 'ss_pv_uv', 'jhs_pv_uv',
                'num_alipay_njhs', 'amt_alipay_njhs', 'qty_alipay_njhs',
                'unum_alipay_njhs']
    temp = {}
    temp['date'] = str(window['date'].min()) + "_" + str(window['date'].max())
    for col in mean_cols:
        temp[col] = window[col].mean()
    for col in sum_cols:
        temp[col] = window[col].sum()
    temp['days_20141009'] = str(window['days_20141009'].min()) + "_" + str(window['days_20141009'].max())
    return temp
def TransferDataByDays_if():
for i,father_kid_item in filtered_outlier_if.groupby([filtered_outlier_if['cate_level_id'],
filtered_outlier_if['cate_id'],
filtered_outlier_if['item_id']]):
new_father_kid_item_data = []
first_day = father_kid_item.days_20141009.min()
last_day = father_kid_item.days_20141009.max()
flag_day = first_day-1
#print first_day,last_day
father_kid_item = father_kid_item.sort_values('days_20141009')
#print father_kid_item[father_kid_item['days_20141009']==last_day]
while(flag_day<=last_day):
flag_day = flag_day + num_days
if (flag_day>=last_day):
temp = countByDays_if(father_kid_item, flag_day - num_days + 1, last_day)
else:
temp = countByDays_if(father_kid_item, flag_day - num_days + 1, flag_day)
if temp ==None:
print "这里有个None"
else:
new_father_kid_item_data.append(temp)
new_father_kid_item = pd.DataFrame(new_father_kid_item_data,columns=[
"date","item_id","cate_id","cate_level_id","brand_id","supplier_id","pv_ipv,pv_uv","cart_ipv,cart_uv",
"collect_uv","num_gmv","amt_gmv","qty_gmv","unum_gmv","amt_alipay","num_alipay","qty_alipay","unum_alipay",
"ztc_pv_ipv","tbk_pv_ipv","ss_pv_ipv","jhs_pv_ipv","ztc_pv_uv","tbk_pv_uv","ss_pv_uv","jhs_pv_uv","num_alipay_njhs",
"amt_alipay_njhs","qty_alipay_njhs","unum_alipay_njhs"]
).to_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/"
"Data/FilteredData/item_all/"+str(i)+"_"+str(num_days)+"_all.csv",index = None,columns=None)
def TransferDataByDays_isf():
for i,father_kid_item in filtered_outlier_isf.groupby([filtered_outlier_isf['cate_level_id'],
filtered_outlier_isf['cate_id'],
filtered_outlier_isf['item_id'],
filtered_outlier_isf['store_code']]):
new_father_kid_item_data = []
first_day = father_kid_item.days_20141009.min()
last_day = father_kid_item.days_20141009.max()
flag_day = first_day-1
#print first_day,last_day
father_kid_item = father_kid_item.sort_values('days_20141009')
#print father_kid_item[father_kid_item['days_20141009']==last_day]
while(flag_day<=last_day):
flag_day = flag_day + num_days
if (flag_day>=last_day):
temp = countByDays_if(father_kid_item, flag_day - num_days + 1, last_day)
else:
temp = countByDays_if(father_kid_item, flag_day - num_days + 1, flag_day)
if temp ==None:
print "这里有个None"
else:
new_father_kid_item_data.append(temp)
new_father_kid_item = pd.DataFrame(new_father_kid_item_data,columns=[
"date","item_id","cate_id","cate_level_id","brand_id","supplier_id","pv_ipv,pv_uv","cart_ipv,cart_uv",
"collect_uv","num_gmv","amt_gmv","qty_gmv","unum_gmv","amt_alipay","num_alipay","qty_alipay","unum_alipay",
"ztc_pv_ipv","tbk_pv_ipv","ss_pv_ipv","jhs_pv_ipv","ztc_pv_uv","tbk_pv_uv","ss_pv_uv","jhs_pv_uv","num_alipay_njhs",
"amt_alipay_njhs","qty_alipay_njhs","unum_alipay_njhs"]
).to_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/"
"Data/FilteredData/item_kid/"+str(i)+"_"+str(num_days)+"_"+str(i[3])+".csv",index = None,columns=None)
# Run both transformations when the module is executed.
TransferDataByDays_if()
TransferDataByDays_isf()
|
# -*- coding:utf-8 -*-
'''
Created on 2016-04-01
@author: huke
'''
def listComprehensions(L=[]):
    """Print the input list, then print the lowercased form of its string
    elements (non-string elements are skipped)."""
    print(L)
    lowered = [item.lower() for item in L if isinstance(item, str)]
    print(lowered)
if __name__ == '__main__':
    # Demo: a mixed list of strings and non-strings.
    sample = ['Hello', 'World', 18, 'Apple', None]
    listComprehensions(sample)
|
def binary_search(givenList, value):
    """Iterative binary search over a sorted list.

    Returns an index of `value` in `givenList`, or None when absent.
    """
    low = 0
    high = len(givenList) - 1
    while low <= high:
        midpoint = (low + high) // 2
        if givenList[midpoint] == value:
            return midpoint
        elif givenList[midpoint] < value:
            low = midpoint + 1
        else:
            high = midpoint - 1
    # Loop exit already implies low > high (value not present); the original
    # re-tested that condition redundantly before returning None.
    return None
def binary_search_recursive(givenList, first_element_index, last_element_index, value):
    """Recursive binary search over givenList[first..last] (inclusive).

    Returns an index of `value`, or None when the range is empty/absent.
    """
    if last_element_index < first_element_index:
        return None
    mid = first_element_index + (last_element_index - first_element_index) // 2
    current = givenList[mid]
    if current == value:
        return mid
    if current > value:
        return binary_search_recursive(givenList, first_element_index, mid - 1, value)
    return binary_search_recursive(givenList, mid + 1, last_element_index, value)
|
from django.db import models
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser
)
from django.contrib.auth.models import Group
from .managers import CustomUserManager
class CustomUser(AbstractBaseUser):
    """User model that logs in with an email address instead of a username."""
    email = models.EmailField(
        verbose_name='email address',
        max_length=255,
        unique=True,
    )
    # Optional display name.
    alias = models.CharField(max_length=50, blank=True)
    # New accounts start inactive (presumably until confirmation -- confirm).
    is_active = models.BooleanField(default=False)
    is_admin = models.BooleanField(default=False)
    # Optional single group membership; deleting the group deletes the user
    # rows via CASCADE.
    group = models.ForeignKey(Group, on_delete=models.CASCADE, null=True)

    objects = CustomUserManager()

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []

    def __str__(self):
        return self.email

    def has_perm(self, perm, obj=None):
        "Does the user have a specific permission?"
        # Simplest possible answer: Yes, always
        return True

    def has_module_perms(self, app_label):
        "Does the user have permissions to view the app `app_label`?"
        # Simplest possible answer: Yes, always
        return True

    @property
    def is_staff(self):
        "Is the user a member of staff?"
        # Simplest possible answer: All admins are staff
        return self.is_admin
|
import csv
import sys
class CSV_Tool:
    """Interactive query tool over a student-records CSV file.

    Instantiating the class immediately enters the menu loop; choice 6
    exits the process via sys.exit.
    """

    def __init__(self, source_file):
        self.source_file = source_file
        # Column indices of the CSV columns.
        self.headers = {
            'roll': 0, 'name': 1, 'dob': 2,
            'city': 3, 'state': 4, 'cgpa': 5}
        # Dispatch table: menu choice -> handler method.
        self.operations = dict()
        self.operations[1] = self.choice_1  # Function object for the choice_1 method
        self.operations[2] = self.choice_2
        self.operations[3] = self.choice_3
        self.operations[4] = self.choice_4
        self.operations[5] = self.choice_5
        self.operations[6] = self.choice_6
        while True:
            choice = self.menu()
            self.operations[choice]()  # Make a call to the mapped function

    def menu(self):
        """Show the menu and return a validated choice in 1..6."""
        print ("Choose one from the following actions: ")
        print ("\t 1. List all the students")
        print ("\t 2. Display all students from a particular state:")
        print ("\t 3. Display all students from a particular state and born in a particular month")
        print ("\t 4. Display all students with a given CGPA")
        print ("\t 5. Display all students whose name starts with a particular letter and a particular year of birth")
        print ("\t 6. Exit the application")
        try:
            choice = int (input("\nEnter your choice: "))
        except ValueError:
            # Invoked when user input was not convertible into an integer.
            # BUG FIX: the retry paths called the bare name `menu()` (NameError
            # at runtime); they must call self.menu().
            print ("\n\tYour input ain't an integer, Try Again ....\n\n")
            return self.menu()
        else:
            if choice < 1 or choice > 6:
                # Choice was out of bounds
                print ("\n\tYour choice is out of bounds. Do Try again ....\n\t")
                return self.menu()
            return choice

    def choice_1(self):
        """List the names of all students."""
        with open (self.source_file, "rt") as file:
            reader = csv.reader (file)
            print ("\n\nName of Students: ")
            next(reader)  # skip the header row
            for row in reader:
                # Name of students is the 2nd column
                print ("\t" + row[1])
            print ("\n")

    def choice_2(self):
        """List students from a given state (case-insensitive)."""
        arg = input ("\t Enter the state: ")
        with open (self.source_file, "rt") as file:
            reader = csv.reader (file)
            print ("\n\n Students from a particular state are: ")
            next(reader)  # skip the header row
            for row in reader:
                if row[self.headers['state']].lower() == arg.lower():
                    print (row[self.headers['name']])

    def choice_3(self):
        """List students from a given state born in a given month."""
        state = input ("Enter the state: ")
        while True:
            month = input ("Enter the month (like Jan, Feb, etc.): ")
            months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
            if month.lower() not in months:
                print ("You entered incorrect month, Do Try Again ....\n")
            else:
                # Successful value for month obtained
                break
        with open (self.source_file, "rt") as file:
            reader = csv.reader (file)
            print ("Students from a particular state and born in a particular month: ")
            next(reader)
            for row in reader:
                # dob is expected as day-month-year separated by '-'
                if ( row[self.headers['state']].lower() == state.lower()
                        and (row[self.headers['dob']].split ('-')[1]).lower() == month.lower() ):
                    print ("\t" + row[self.headers['name']])
            print ()

    def choice_4(self):
        """List students with an exactly matching CGPA."""
        while True:
            try:
                cgpa = float (input ("\n\tEnter cgpa: "))
            except ValueError:
                print ("\tYou entered a non-float value, Try Again ....\n")
            else:
                if cgpa < 0 or cgpa > 10:
                    print ("\tIncorrect CGPA, should be +ve and less than 10, Try Again ....\n")
                else:
                    # We got the desired CGPA
                    break
        with open (self.source_file, "rt") as file:
            reader = csv.reader (file)
            next(reader)
            print ("Students with given CGPA are: ")
            for row in reader:
                if float(row[self.headers['cgpa']]) == cgpa:
                    print (row[self.headers['name']])
            print ("\n")

    def choice_5(self):
        """List students whose name starts with a letter and whose birth
        year matches (year restricted to 1995..2001)."""
        letter = input ("Enter letter with which name should start: ")
        while True:
            try:
                year = int (input ("Enter the year of birth: "))
            except ValueError:
                print ("\tYou entered a non-Integer value, Try Again...\n")
            else:
                # Validate the year; should be b/w 1995 and 2001 for B.Tech 6th sem
                if year < 1995 or year > 2001:
                    print ("\tYear of birth is not b/w 1995 and 2001, So better Try Again ....\n")
                else:
                    break
        with open (self.source_file, "rt") as file:
            reader = csv.reader (file)
            next(reader)
            for row in reader:
                if ( (row[self.headers['name']].lower()).startswith (letter.lower())
                        and int ((row[self.headers['dob']]).split ('-')[-1]) == year ):
                    print (row[self.headers['name']])

    def choice_6(self):
        """Exit the application."""
        print ("\n\t Thank You for using the tool")
        sys.exit (0)
# Launch the interactive tool against the semester's CSV export
# (constructing CSV_Tool enters the menu loop immediately).
app = CSV_Tool ("ECE-VI.csv")
# app.choice_1(self)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from appium_auto.three.page.base_page import BasePage
# from appium_auto.three.page.memberinvitepage import MemberInvitePage
class AddressListPage(BasePage):
    """Page object for the address-list screen."""

    def click_addmember(self):
        # Imported locally to avoid a circular import with memberinvitepage
        # (that module imports pages from this package too).
        from appium_auto.three.page.memberinvitepage import MemberInvitePage
        # NOTE(review): despite the name, no tap/click is performed here --
        # presumably BasePage or the caller drives the UI action; confirm.
        return MemberInvitePage(self._driver)
|
def merge(u, v):
    """Merge two sorted lists into one sorted list.

    On equal elements the item from `v` is taken first, matching the
    original comparison `u[i] < v[j]`.
    """
    result = []
    i, j = 0, 0
    while i < len(u) and j < len(v):
        if u[i] < v[j]:
            result.append(u[i])
            i += 1
        else:
            result.append(v[j])
            j += 1
    # one of these extends is a no-op; the other drains the remainder
    result.extend(u[i:])
    result.extend(v[j:])
    assert len(result) == len(u) + len(v)
    return result
def merge_sort(v):
    """Sort a list with top-down merge sort; returns a new list."""
    if len(v) < 2:
        return v
    mid = len(v) // 2
    # sort each half independently, then merge the two sorted runs
    return merge(merge_sort(v[:mid]), merge_sort(v[mid:]))
# Quick demo of merge_sort.
v = [9, 6, 7, 1, 8]
print(merge_sort(v))
|
import pygame
import sys
import random
import math
from rules import get_legal_moves, in_check

# Window / rendering setup: 640x640 window, so each of the 8x8 squares is 80px.
pygame.init()
pygame.display.set_caption("Chess")
screenx = 640
screeny = 640
screen = pygame.display.set_mode((screenx, screeny))
bgcolor = (250, 250, 250)
clock = pygame.time.Clock()

# Board palette plus translucent overlay colors (RGBA with alpha).
DARK_SQUARE = (120, 60, 40)
LIGHT_SQUARE = (200, 180, 120)
HIGHLIGHT_SQUARE = (240, 240, 120, 180)
CHECK_HIGHLIGHT = (250, 100, 100, 100)
MOVE_INDICATOR = (100, 200, 150, 100)

# Font used for the on-square debug indices drawn in draw_board.
pygame.font.init()
font = pygame.font.SysFont("Arial", 20, True)
class Pieces:
    """Loads and stores the piece sprites, keyed by color+type (e.g. 'wK')."""

    def __init__(self):
        self.types = {}

    def load_pieces(self):
        # One sprite per piece letter, white then black, read from
        # piece_sprites/<color><letter>.png
        for ptype in ['R', 'N', 'B', 'Q', 'K', 'P']:
            for color in ('w', 'b'):
                self.types[color + ptype] = pygame.image.load(
                    'piece_sprites/' + color + ptype + '.png')
class Chessboard:
    """Game state plus rendering for an 8x8 board stored as a flat list of
    64 strings ('' for empty, otherwise color+piece like 'wK')."""

    def __init__(self, area, pieces):
        self.area = area  # surface the board is drawn onto
        self.board = ['' for i in range(64)]
        self.pieces = pieces  # Pieces sprite store
        self.screen_x, self.screen_y = pygame.display.get_window_size()
        self.square = pygame.Surface((self.screen_x/8, self.screen_y/8))
        self.color_to_move = ''  # 'w' or 'b' once a position is loaded
        self.highlight_squares = []  # the last move's from/to squares
        self.dotted_squares = []  # legal-move indicators while dragging
        self.en_passant = None  # square index capturable en passant, if any
        self.castle_available = ''  # FEN-style castling rights, e.g. 'KQkq'
        self.incheck_squares = []  # king square(s) tinted when in check

    def load_position(self, position='rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq'): #'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1'
        """Populate the board from a truncated FEN string: piece placement,
        side to move and castling rights only (no move counters)."""
        board_index = 0
        for fen_index in range(len(position)):
            if position[fen_index].isnumeric():
                # a digit encodes that many consecutive empty squares
                for l in range(int(position[fen_index])):
                    self.board[board_index] = ''
                    board_index += 1
            elif position[fen_index] == '/':
                continue
            elif position[fen_index] == ' ':
                # after the placement field: side to move, then castling rights
                self.color_to_move = position[fen_index+1]
                self.castle_available = position[fen_index+3:]
                break
            else:
                # lowercase letters are black pieces, uppercase are white
                if position[fen_index].islower():
                    self.board[board_index] = 'b' + position[fen_index].upper()
                    board_index += 1
                else:
                    self.board[board_index] = 'w' + position[fen_index]
                    board_index += 1

    def draw_board(self):
        """Draw the checkered squares plus the highlight/indicator overlays,
        and update the window caption with the side to move."""
        for y in range(8):
            for x in range(8):
                # alternate square colors by rank/file parity
                if y % 2:
                    if x % 2:
                        self.square.fill(LIGHT_SQUARE)
                    else:
                        self.square.fill(DARK_SQUARE)
                else:
                    if x % 2:
                        self.square.fill(DARK_SQUARE)
                    else:
                        self.square.fill(LIGHT_SQUARE)
                # showing square index, remove after development
                index = font.render(str(y*8 + x), True, (0, 0, 0))
                self.square.blit(index, (0, 0))
                self.area.blit(self.square, (self.screen_x/8 * x, self.screen_y/8 * y))
                # translucent overlays: last move, legal targets, check tint
                if 8*y+x in self.highlight_squares:
                    transparent_square = pygame.Surface((80,80)).convert_alpha()
                    transparent_square.fill(HIGHLIGHT_SQUARE)
                    self.area.blit(transparent_square, (self.screen_x/8 * x, self.screen_y/8 * y))
                if 8*y+x in self.dotted_squares:
                    transparent_square = pygame.Surface((80,80)).convert_alpha()
                    transparent_square.fill(MOVE_INDICATOR)
                    self.area.blit(transparent_square, (self.screen_x/8 * x, self.screen_y/8 * y))
                if 8*y+x in self.incheck_squares:
                    transparent_square = pygame.Surface((80,80)).convert_alpha()
                    transparent_square.fill(CHECK_HIGHLIGHT)
                    self.area.blit(transparent_square, (self.screen_x/8 * x, self.screen_y/8 * y))
        if self.color_to_move == 'w':
            pygame.display.set_caption("Chess - White to Move")
        elif self.color_to_move == 'b':
            pygame.display.set_caption("Chess - Black to Move")

    def draw_pieces(self):
        """Blit every piece sprite at its square's pixel position."""
        for square_index in range(len(self.board)):
            if self.board[square_index]:
                x, y = index_to_coordinates(square_index)
                self.area.blit(self.pieces.types[self.board[square_index]], (x*self.screen_x/8, y*self.screen_y/8))

    def switch_turns(self):
        """Flip the side to move."""
        if self.color_to_move == 'w':
            self.color_to_move = 'b'
        elif self.color_to_move == 'b':
            self.color_to_move = 'w'
def index_to_coordinates(index):
    """Convert a 0..63 board index to (x, y) board coordinates."""
    y, x = divmod(index, 8)
    return (x, y)
def coordinates_to_index(x, y):
    """Convert (x, y) board coordinates back to a 0..63 board index."""
    return x + 8 * y
pieces = Pieces()
pieces.load_pieces()
chessgame = Chessboard(screen, pieces)
chessgame.load_position()
dragging = False

# Main event loop: pick a piece up on mouse-down, preview its legal
# moves, and commit (or revert) the move on mouse-up.
while True:
    screen.fill(bgcolor)
    chessgame.draw_board()
    chessgame.draw_pieces()
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
        if event.type == pygame.MOUSEBUTTONDOWN:
            if dragging == False:
                mousex, mousey = pygame.mouse.get_pos()
                old_square_index = coordinates_to_index(mousex//80, mousey//80)
                if chessgame.board[old_square_index]:
                    if chessgame.board[old_square_index][0] == chessgame.color_to_move:
                        legal_moves = get_legal_moves(chessgame.board, old_square_index, chessgame.castle_available, chessgame.en_passant)
                        # keep only moves that do not leave our own king in check
                        true_legal_moves = []
                        for move in legal_moves:
                            test_position = chessgame.board.copy()
                            test_position[move] = test_position[old_square_index]
                            test_position[old_square_index] = ''
                            if not in_check(test_position, chessgame.color_to_move, '', chessgame.en_passant):
                                true_legal_moves.append(move)
                        legal_moves = true_legal_moves
                        chessgame.dotted_squares = legal_moves
                        # lift the piece off the board while it is dragged
                        dragging_piece = chessgame.board[old_square_index]
                        chessgame.board[old_square_index] = ''
                        dragging = True
        if event.type == pygame.MOUSEBUTTONUP:
            chessgame.dotted_squares = []
            if dragging == True:
                mousex, mousey = pygame.mouse.get_pos()
                new_square_index = coordinates_to_index(mousex//80, mousey//80)
                if new_square_index in legal_moves:
                    chessgame.highlight_squares = [old_square_index, new_square_index]
                    chessgame.board[new_square_index] = dragging_piece
                    dragging = False
                    # PAWN PROMOTION (auto-queen on the back ranks)
                    for piece_index in range(8):
                        if chessgame.board[piece_index] == 'wP':
                            chessgame.board[piece_index] = 'wQ'
                        if chessgame.board[63 - piece_index] == 'bP':
                            chessgame.board[63 - piece_index] = 'bQ'
                    # PAWN EN PASSANT: remove the captured pawn behind the target
                    if dragging_piece == 'wP' and new_square_index == chessgame.en_passant:
                        chessgame.board[new_square_index + 8] = ''
                    if dragging_piece == 'bP' and new_square_index == chessgame.en_passant:
                        chessgame.board[new_square_index - 8] = ''
                    chessgame.en_passant = None
                    # a double pawn push opens the en-passant square behind it
                    if dragging_piece == 'bP' and new_square_index - old_square_index == 16:
                        chessgame.en_passant = new_square_index - 8
                    if dragging_piece == 'wP' and new_square_index - old_square_index == -16:
                        chessgame.en_passant = new_square_index + 8
                    # KING CASTLE: also relocate the rook next to the king
                    if dragging_piece[1] == 'K' and new_square_index - old_square_index == 2:
                        chessgame.board[new_square_index-1] = dragging_piece[0] + 'R'
                        chessgame.board[new_square_index+1] = ''
                    if dragging_piece[1] == 'K' and new_square_index - old_square_index == -2:
                        chessgame.board[new_square_index+1] = dragging_piece[0] + 'R'
                        chessgame.board[new_square_index-2] = ''
                    # any king move revokes both of that side's castling rights
                    if dragging_piece == 'wK':
                        chessgame.castle_available = chessgame.castle_available.replace('K', '')
                        chessgame.castle_available = chessgame.castle_available.replace('Q', '')
                    if dragging_piece == 'bK':
                        chessgame.castle_available = chessgame.castle_available.replace('k', '')
                        chessgame.castle_available = chessgame.castle_available.replace('q', '')
                    # a rook moving from -- or any piece landing on -- a corner
                    # square revokes the corresponding castling right
                    if dragging_piece[1] == 'R':
                        if old_square_index == 0:
                            chessgame.castle_available = chessgame.castle_available.replace('q', '')
                        if old_square_index == 7:
                            chessgame.castle_available = chessgame.castle_available.replace('k', '')
                        if old_square_index == 56:
                            chessgame.castle_available = chessgame.castle_available.replace('Q', '')
                        if old_square_index == 63:
                            chessgame.castle_available = chessgame.castle_available.replace('K', '')
                    else:
                        if new_square_index == 0:
                            chessgame.castle_available = chessgame.castle_available.replace('q', '')
                        if new_square_index == 7:
                            chessgame.castle_available = chessgame.castle_available.replace('k', '')
                        if new_square_index == 56:
                            chessgame.castle_available = chessgame.castle_available.replace('Q', '')
                        if new_square_index == 63:
                            chessgame.castle_available = chessgame.castle_available.replace('K', '')
                    chessgame.switch_turns()
                    # tint the king square if the side to move is now in check
                    chessgame.incheck_squares = []
                    if in_check(chessgame.board, chessgame.color_to_move, chessgame.castle_available, chessgame.en_passant):
                        for square_index in range(len(chessgame.board)):
                            if chessgame.board[square_index] == chessgame.color_to_move + 'K':
                                chessgame.incheck_squares.append(square_index)
                else:
                    # illegal destination: put the piece back where it was
                    chessgame.board[old_square_index] = dragging_piece
                    dragging = False
    # draw the dragged piece under the cursor while the button is held
    mouse = pygame.mouse.get_pressed()
    if mouse[0]:
        if dragging == True:
            mousex, mousey = pygame.mouse.get_pos()
            screen.blit(pieces.types[dragging_piece], (mousex-40, mousey-40))
    pygame.display.update()
    msElapsed = clock.tick(30)
#!/usr/bin/python3
# Handle parameters sent from PHP: argv[1:] holds the words of a movie
# title, which we re-join into a single title string.
import sys

# Idiomatic replacement for the original index-based while/append loop.
title = " ".join(sys.argv[1:])
#print (title)
#Access to mySQL search database
import mysql.connector
# NOTE(review): placeholder credentials -- load the real ones from config
# or environment variables instead of source.
mydb = mysql.connector.connect(
    host="localhost",
    user="username",
    passwd="password",
    database="table"
)

#Convert SQL to Pandas Data structure
import pandas as pd
import numpy as np
# Only rows with both a non-empty title and overview are usable for TF-IDF.
df = pd.read_sql('SELECT Title, Overview, Popularity FROM movie2 WHERE Overview <> "" and Title <> ""', con=mydb)
#print (df.shape)
#print (df['Title'].head())

#Build Tf-idf matrix for overview of each movie by TfIdfVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(stop_words='english')
tfidf_matrix = tfidf.fit_transform(df['Overview'])
#print (tfidf_matrix.shape)

#Compute the cosine similarity matrix
from sklearn.metrics.pairwise import linear_kernel
cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)

#Construct a reverse map of indices and movie titles
indices = pd.Series(df.index, index=df['Title']).drop_duplicates()
#Function that takes in movie title as input and outputs most similar movies
def get_recommendations(title, cosine_sim=cosine_sim):
    """Return the titles of the ten movies whose overviews are most similar to *title*."""
    row = indices[title]
    # Rank every movie by similarity to the requested one, best first.
    ranked = sorted(enumerate(cosine_sim[row]), key=lambda pair: pair[1], reverse=True)
    # Skip rank 0 (the movie itself) and keep the next ten row indices.
    top_rows = [movie_index for movie_index, _ in ranked[1:11]]
    return df['Title'].iloc[top_rows]
# Print the ten recommendations as one numbered line.
# FIX: replaces ten near-identical movie1..movie10 assignments with a loop.
movieRec = get_recommendations(title)
numbered = " ".join(
    "{}. {}".format(rank, movie) for rank, movie in enumerate(movieRec.iloc[:10], start=1)
)
print('You may also like: ' + numbered)
|
# Generated by Django 2.2.6 on 2019-12-07 18:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the nullable integer field ``pole_lt_8m``
    to ``surveyqty`` and to its history-tracking twin ``historicalsurveyqty``."""

    dependencies = [
        ('work', '0061_historicaldprqty_historicallog_historicalprogressqty_historicalprogressqtyextra_historicalshiftedqty'),
    ]

    operations = [
        migrations.AddField(
            model_name='historicalsurveyqty',
            name='pole_lt_8m',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='surveyqty',
            name='pole_lt_8m',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
|
# Directory for the temporary downloaded image.
# BUG FIX: the original literal was missing its closing quote (SyntaxError).
tempPath = '/directory/to/folder'
# Add the temp folder directory here please merci thank you
# make sure it doesn't end with a /
tempPath += '/e6tagimage'
import json
import requests
import random
import urllib.request
import PySimpleGUI as sg
import os
from PIL import Image
import tkinter
def download_progress_hook(count, blockSize, totalSize):
    """urlretrieve reporthook: refresh the PySimpleGUI one-line progress meter.

    count is the number of blocks transferred so far, blockSize the block size
    in bytes, totalSize the full download size in bytes.
    """
    sg.OneLineProgressMeter('Image Loading Now...', count*blockSize, totalSize, 'key')
# --- interactive "guess the tag" game loop ---
# A hidden Tk root is used only to query the screen size for window placement.
root = tkinter.Tk()
root.withdraw()
WIDTH, HEIGHT = root.winfo_screenwidth(), root.winfo_screenheight()
print('N.B. If the program skips some letters while revealing them, it\'s because the file associated to them is either a gif, video, or some flash file thing')
#sg.theme(random.choice(sg.ListOfLookAndFeelValues()))
#print('N.B. Only General Tags will be included')
headers = {"User-agent" : "TagHangman/1.0 (By WibbleTime on e926)"}
again = True
while again == True:
    # Pick one random post and one random general tag from it: the mystery tag.
    e621String = "https://e621.net/posts.json?tags=order:random&limit=1"
    response = requests.get(e621String, headers = headers)
    response = response.json()
    text = json.dumps(response, indent = 4)
    tag_list = response['posts'][0]['tags']['general']
    chosen_tag = random.choice(tag_list)
    #print(chosen_tag)
    #input()
    # Fetch up to len(chosen_tag)-1 posts carrying that tag; each image shown
    # reveals one more letter of the tag.
    e621String2 = "https://e926.net/posts.json?tags=order:random {0}&limit={1}".format(chosen_tag, str(len(chosen_tag)-1))
    response2 = requests.get(e621String2, headers = headers)
    response2 = response2.json()
    text2 = json.dumps(response2, indent = 4)
    #print(text2)
    file_list = []
    for i in range(len(response2['posts'])):
        tag_url = response2['posts'][i]['sample']['url']
        file_list.append(tag_url)
    #input()
    opener = urllib.request.build_opener()
    opener.addheaders = [("User-agent", "MyProject/1.0 (By WibbleTime on e621)")]
    urllib.request.install_opener(opener)
    get_correct = False
    for n in range(len(file_list)):
        sg.theme(random.choice(sg.ListOfLookAndFeelValues()))
        # Formats Tk's PhotoImage cannot display; such posts are skipped.
        cannot_convert_list = [
            'swf',
            'webm',
            'gif'
        ]
        OGextension = response2['posts'][n]['file']['ext']
        if file_list[n] is None or OGextension in cannot_convert_list:
            continue
        actualfile = urllib.request.urlretrieve(file_list[n], ((tempPath)), reporthook=download_progress_hook)
        # Scale the download so it fits on screen, then re-save as PNG (the
        # format sg.Image displays).
        im = Image.open(tempPath)
        width, height = im.size
        buffer = 0.6
        ratio = height/width
        if ratio > HEIGHT/WIDTH:
            newW = round(width / height * HEIGHT*buffer)
            newH = round(height / height * HEIGHT*buffer)
        else:
            newW = round(width / width * WIDTH*buffer)
            newH = round(height / width * WIDTH*buffer)
        im = im.resize(((newW,newH)))
        im.save(tempPath + '.png')
        im_new = Image.open(tempPath + '.png')
        width_pic, height_pic = im.size
        #print('done!')
        layout = [
            [sg.Text("+".join(response2['posts'][n]['tags']['artist'])), sg.Text(response2['posts'][n]['id'])],
            [sg.Image(tempPath + '.png', key='-IMAGE-')]
        ]
        # Reveal the first n+1 letters, mask the rest with '?'.
        clue = chosen_tag[:n+1-len(chosen_tag)] + '?'*(len(chosen_tag)-n-1)
        #print(HEIGHT, WIDTH, height_pic, width_pic)
        answer_layout = [
            [sg.Text(clue, font='ANY 20')],
            [sg.Input(font='ANY 20', key='-GUESS-'), sg.Submit()]
        ]
        window = sg.Window('Image', layout, location = ((WIDTH/2)-width_pic/2,0))
        window.read(timeout=1)
        answer_window = sg.Window('Answer', answer_layout, location =(150, (HEIGHT+height_pic*buffer)/2))
        event, values = answer_window.read()
        if values['-GUESS-'].lower() == chosen_tag:
            # Score: the fewer images it took, the closer to 100.
            score = round((len(chosen_tag)-(n+1))/len(chosen_tag)*100)
            get_correct = True
            window.close()
            answer_window.close()
            correct_layout = [
                [sg.Text('Well done, you got it right!', font = 'ANY 20')],
                [sg.Text('The mystery tag was ' + chosen_tag + '!', font = 'ANY 20')],
                [sg.Text('Your score is ' + str(score) + ' out of 100!', font = 'ANY 20')]
            ]
            correct_window = sg.Window('Correct!', correct_layout, return_keyboard_events = True)
            correct_window.read()
            correct_window.close()
            break
        window.close()
        answer_window.close()
    window.close()
    answer_window.close()
    if get_correct == False:
        incorrect_layout = [
            [sg.Text('Better luck next time,', font = 'ANY 20')],
            [sg.Text('The mystery tag was ' + chosen_tag + '!', font = 'ANY 20')],
            [sg.Text('Your score is 0 out of 100 :(', font = 'ANY 20')]
        ]
        incorrect_window = sg.Window('Sadly!', incorrect_layout, return_keyboard_events = True)
        incorrect_window.read()
        incorrect_window.close()
    again_layout = [
        [sg.Text('Play again?', font = 'ANY 20')],
        [sg.Yes(), sg.No()]
    ]
    again_window = sg.Window('Again?', again_layout, return_keyboard_events = True)
    event, values = again_window.read()
    # BUG FIX: the original list was missing a comma between 'Y' and 'y:29',
    # which silently concatenated them into the single string 'Yy:29', so
    # neither a plain 'Y' event nor 'y:29' ever matched.
    if event in ['Yes', 'y', 'Y', 'y:29', 'Y:29', 'Enter', 'Return', 'Return:36']:
        again = True
    elif event in ['No', 'n', 'N', 'n:57', 'N:57', 'Escape:9', 'Escape'] :
        again = False
    again_window.close()
|
# Tutorial exercise: randInt is redefined step by step; each later `def`
# shadows the previous one.  NOTE(review): the first two versions accept but
# ignore `min`, and every version returns a float despite the "Int" name —
# presumably intentional at this stage of the exercise.
import random

# --- step 1: scale by max only (`min` is ignored here) ---
def randInt(min=0, max=100):
    num = random.random()*max
    return num
print(randInt())
#print(randInt(max=50))

import random

# --- step 2: still ignores `min`; exercise call uses max=50 ---
def randInt(min=0, max=100):
    num = random.random()*max
    return num
print(randInt(max=50))
#print(randInt(min=50))

import random

# --- step 3: now scales into the [min, max) range ---
def randInt(min=0, max=100):
    num = random.random()*(max-min)+min
    return num
print(randInt(min=50))
#print(randInt(min=50, max=500))

import random

# --- step 4: both bounds supplied by the caller ---
def randInt(min=0, max=100):
    num = random.random()*(max-min)+min
    return num
print(randInt(min=50, max=500))
from django.db import models
# Create your models here.
class Company(models.Model):
    """A company that can post vacancies."""
    name = models.CharField(max_length=200)
    description = models.TextField(default='')
    city = models.CharField(max_length=300)
    address = models.TextField(default='')

    class Meta:
        verbose_name = 'Company'
        verbose_name_plural = 'Companies'
        # ordering = ('name',)

    def to_json(self):
        """Serialise the company to a plain dict for JSON responses."""
        return {field: getattr(self, field)
                for field in ('id', 'name', 'description', 'city', 'address')}

    def __str__(self):
        return '{}: {}'.format(self.id, self.name)
class Vacancy(models.Model):
    """A job opening, optionally linked to a Company (reverse name ``vacancies``)."""
    name = models.CharField(max_length=200)
    description = models.TextField(default='')
    salary = models.FloatField(default=0.0)
    # null=True allows a vacancy without a company; CASCADE deletes vacancies
    # together with their company.
    company = models.ForeignKey(Company, on_delete=models.CASCADE, null=True,
                                related_name='vacancies')
    # tags = models.ManyToManyField(Tag)

    class Meta:
        verbose_name = 'Vacancy'
        verbose_name_plural = 'Vacancies'

    def __str__(self):
        return f'{self.id}: {self.name} | {self.salary}'

    def to_json(self):
        """Serialise the vacancy to a plain dict (the company is not included)."""
        return {
            'id': self.id,
            'name': self.name,
            'description': self.description,
            'salary': self.salary
        }
class NewProduct(models.Model):
    """Product record (note the abbreviated field names: nam/pric/coun/is_activ)."""
    nam = models.CharField(max_length=2500)
    pric = models.FloatField(default=0.0)
    # descriptio = models.TextField
    coun = models.IntegerField(default=0)
    is_activ = models.BooleanField(default=False)

    def to_json(self):
        """Serialise the product to a plain dict."""
        payload = {}
        for field in ('nam', 'pric', 'coun', 'is_activ'):
            payload[field] = getattr(self, field)
        return payload
class NewCategories(models.Model):
    """Category record with a single ``nam`` field."""
    nam = models.CharField(max_length=2500)

    def to_Json(self):
        """Serialise the category to a plain dict.

        Kept under its original, inconsistently-cased name for any existing
        callers; prefer :meth:`to_json`, which matches the other models here.
        """
        return {
            'nam': self.nam
        }

    # Lowercase alias consistent with Company/Vacancy/NewProduct.to_json.
    to_json = to_Json
|
from classes.model.package import Package
import json
from pathlib import Path
from typing import Any, IO, List
class JsonParser:
    """Static helpers for reading composer.json / composer.lock style files."""

    @staticmethod
    def get_names(path: Path) -> List[str]:
        """Return every package name listed under "require" and "require-dev"."""
        names: List[str] = []
        data: Any = JsonParser.get_data(path)
        groups: List[str] = ["require", "require-dev"]
        for group in groups:
            if group in data:
                for name in data[group]:
                    names.append(name)
        return names

    @staticmethod
    def get_packages(path: Path) -> List[Package]:
        """Return a Package for every entry under "packages" and "packages-dev"."""
        packages: List[Package] = []
        data: Any = JsonParser.get_data(path)
        packages_data: List[Any] = []
        groups: List[str] = ["packages", "packages-dev"]
        for group in groups:
            if group in data:
                packages_data += data[group]
        for package in packages_data:
            packages.append(Package(package["name"], package))
        return packages

    @staticmethod
    def get_data(path: Path) -> Any:
        """Parse *path* as UTF-8 JSON and return the decoded object.

        FIX: uses a context manager and ``json.load`` so the file handle is
        released even if parsing raises (the original leaked the handle when
        ``json.loads`` failed).
        """
        with path.open(encoding="utf8") as stream:
            return json.load(stream)
|
from django.shortcuts import render
from django.http import HttpResponse
from django.http import JsonResponse
from main.models import *
import json
from json import JSONEncoder
from django.core.serializers import serialize
from rest_framework import serializers
from django.conf import settings
from django.http import FileResponse
from django.views.decorators.csrf import csrf_exempt
# Public base URL of this deployment (an ngrok tunnel); image URLs are built
# from it in every view below.
host = "https://9e97e1fd0d9a.ngrok.io/"
@csrf_exempt
def getFollowers(request):
    """Return all followers as JSON, with image names expanded to absolute URLs."""
    print("GetFollowers")
    all_pro = Follower.objects.all().values('follower_id', 'name', 'image')
    url = host + "image/"
    for temp in all_pro:
        # Stored value is a bare file name; prefix the image-serving endpoint.
        # (FIX: removed the original no-op `temp['image'] = temp['image']` line.)
        temp['image'] = url + temp['image']
    return JsonResponse(list(all_pro), safe=False)
@csrf_exempt
def getFollowerById(request, follower_id):
    """Return the follower matching *follower_id* as a (usually one-element) JSON list."""
    print("GetFollowerById")
    all_pro = Follower.objects.filter(follower_id=follower_id).all().values('follower_id', 'name', 'image')
    url = host + "image/"
    for temp in all_pro:
        # Expand the stored file name into an absolute image URL.
        # (FIX: removed the original no-op `temp['image'] = temp['image']` line.)
        temp['image'] = url + temp['image']
    return JsonResponse(list(all_pro), safe=False)
@csrf_exempt
def seasons(request):
    """Return the fixed season-key -> display-name mapping as JSON."""
    print("GetSeasons")
    season_names = {key: key.capitalize() for key in ('summer', 'spring', 'fall', 'winter')}
    return JsonResponse(season_names)
@csrf_exempt  # NOTE(review): not a view; the decorator is harmless but unnecessary here
def helper(fid):
    """Return one follower dict (image expanded to an absolute URL) for *fid*."""
    all_pro = Follower.objects.filter(follower_id=fid).all().values('follower_id', 'name', 'image')
    url = host + "image/"
    for temp in all_pro:
        # (FIX: removed the original no-op `temp['image'] = temp['image']` line.)
        temp['image'] = url + temp['image']
    return all_pro[0]
@csrf_exempt
def getTrips(request):
    """Return all trips as JSON, most-followed first, each with its follower list."""
    print("GetTrips")
    url = host + "image/"
    all_pro = Trip.objects.all().values('trip_id', 'season', 'name', 'image', 'location', 'year', 'duration')
    for temp in all_pro:
        # (FIX: removed the original no-op `temp['image'] = temp['image']` line.)
        temp['image'] = url + temp['image']
        t_followers = TripFollower.objects.filter(trip_id = temp['trip_id']).all().values('follower')
        res = []
        for qwe in t_followers:
            res.append(helper(qwe['follower']))
        temp['follower'] = list(res)
    # BUG FIX: the original sorted on the follower lists themselves; comparing
    # two lists of dicts raises TypeError (dicts are unorderable).  Sorting by
    # the follower *count* is evidently what was intended.
    return JsonResponse(sorted(list(all_pro), key=lambda x: len(x['follower']), reverse=True), safe=False)
@csrf_exempt
def getTripById(request, trip_id):
    """Return the trip matching *trip_id* (one-element JSON list) with its follower list."""
    print("GetTripById")
    url = host + "image/"
    all_pro = Trip.objects.filter(trip_id=trip_id).all().values('trip_id', 'season', 'name', 'image', 'location', 'year', 'duration')
    for temp in all_pro:
        # (FIX: removed the original no-op `temp['image'] = temp['image']` line.)
        temp['image'] = url + temp['image']
        t_followers = TripFollower.objects.filter(trip_id = temp['trip_id']).all().values('follower')
        res = []
        for qwe in t_followers:
            res.append(helper(qwe['follower']))
        temp['follower'] = list(res)
    return JsonResponse(list(all_pro), safe=False)
@csrf_exempt
def getImage(request, image_name):
    """Stream the media file *image_name* from MEDIA_ROOT.

    NOTE(review): *image_name* is interpolated into a filesystem path without
    sanitisation — a path-traversal risk if it can contain '..'; confirm the
    URLconf restricts its pattern.
    """
    print("GetImage")
    print(settings.MEDIA_ROOT)
    url = settings.MEDIA_ROOT + "/" + image_name
    print(url)
    # image_data = open(url, 'rb')
    # mbytes = image_data.read()
    # content = mbytes
    return FileResponse(open(url, 'rb'))
@csrf_exempt
def postFollower(request):
    """Create a Follower from multipart POST data (``name`` field + ``image`` file)."""
    print("PostFollower")
    fname = request.POST.get('name', None)
    fimage = request.FILES["image"]
    Follower(name=fname, image=fimage).save()
    # An empty JSON list signals success, mirroring the other POST endpoints.
    return JsonResponse(list(), safe=False)
@csrf_exempt
def postTrip(request):
    """Create a Trip from multipart POST data (text fields + ``image`` file)."""
    print("PostTrip")
    tname = request.POST.get('name', None)
    tseason = request.POST.get('season', None)
    timage = request.FILES["image"]
    tlocation = request.POST.get('location', None)
    tyear = request.POST.get('year', None)
    tduration = request.POST.get('duration', None)
    Trip(name = tname, season = tseason, image = timage, location = tlocation, year = tyear, duration = tduration).save()
    # An empty JSON list signals success, mirroring the other POST endpoints.
    return JsonResponse(list(), safe=False)
@csrf_exempt
def postTripFollow(request):
    """Link an existing follower (``fid``) to an existing trip (``tid``)."""
    print("Follow Trip")
    fid = request.POST.get('fid', None)
    tid = request.POST.get('tid', None)
    fff = Follower.objects.filter(follower_id = fid).first()
    ttt = Trip.objects.filter(trip_id = tid).first()
    # NOTE(review): .first() returns None for an unknown id — confirm that
    # TripFollower tolerates null references or add validation here.
    TripFollower(follower = fff, trip = ttt).save()
    return JsonResponse(list(), safe=False)
|
from turtle import *
import turtle
import random
class Ball(Turtle):
def __init__(self,x,y,dx,dy,radius,colour):
Turtle.__init__(self)
self.x=x
self.y=y
self.penup()
self.setposition(x,y) #each time we use this we relocate
self.dx = dx
self.dy = dy
self.radius = radius
self.shape("circle")
self.shapesize(radius/10)
r = random.randint(0,255)
g = random.randint(0,255)
b = random.randint(0,255)
self.color(r,g,b)
def move(self,screen_width,screen_height):
current_x=self.x
current_y=self.y
new_x=current_x+self.dx
new_y=current_y+self.dy
right_side_ball=new_x+self.radius
left_side_ball=new_x-self.radius
up_side_ball=new_y+self.radius
down_side_ball=new_y+self.radius
self.x, self.y = new_x,new_y
if right_side_ball >= screen_width:
self.dx-=self.dx
if left_side_ball <= screen_width:
self.dx-=self.dx
if up_side_ball >= screen_height:
self.dy-=self.dy
if down_side_ball <= screen_height:
self.dy-=self.dy
|
def port_channel(ip_address,dev=0):
    """Collect port-channel membership from a Cisco device over SSH.

    Returns a dict keyed by port-channel name, e.g.
    ``{"Po1": {"protocol": "LACP" or "NONE", "ports": ["1/0/1", ...]}}``.
    Retries up to 3 times on generic failures; returns None when SSH is not
    enabled or every retry fails.
    """
    import time
    import ssh
    if dev != 0:
        print("[[DEV:] Getting port-channel information]")
    for retries in range(0,3):
        try:
            show_port_channel_sum = ssh.connect_silent('show etherchannel summary',"show port-channel summary",ip_address=ip_address,dev=dev)
            # Parse the output line by line; data rows contain at least one
            # digit and a "(" (the flag suffix).  Collect all tokens flat.
            full_list = []
            for line in show_port_channel_sum.split('\n'):
                if len([character for character in line if character.isdigit()]) > 0 and "(" in line:
                    segmented_line = (' '.join(line.split(")")).split())
                    full_list += segmented_line
            # Re-split the flat token list into per-channel sub-lists, running
            # from one numeral-only token to just before the next one.
            # NOTE(review): full_list.index() finds the FIRST occurrence, so a
            # duplicated numeric token would mis-delimit — verify on real output.
            delimiters = []
            for number in [item for item in full_list if item.isdigit()]:
                delimiters.append(full_list.index(number))
            delimiters.append(len(full_list))
            delimited_list = [full_list[delimiters[n]:delimiters[n + 1]] for n in range(0, len(delimiters) - 1)]
            # removes some unwanted items
            for junk in range(0, len(delimited_list)):
                del delimited_list[junk][1]
                # Nexus "show port-channel summary" rows carry one extra column.
                if "show port-channel summary" in show_port_channel_sum:
                    del delimited_list[junk][1]
            # Split each row into name/protocol plus its member-port tokens,
            # stripping port names down to digits and '/' separators.
            list_of_port_lists = [delimited_list[n][2:len(delimited_list[n])] for n in range(0, len(delimited_list))]
            formatted_ports = [
                [''.join([character for character in port if character.isdigit() or character == "/"]) for port in port_lists]
                for port_lists in list_of_port_lists]
            list_of_portchannels = [delimited_list[n][0:2] for n in range(0, len(delimited_list))]
            for n in range(0, len(formatted_ports)):
                list_of_portchannels[n].append(formatted_ports[n])
            # Re-format the per-channel triples into a JSON-shaped dict.
            list_of_portchannels_json = {}
            for n in range(0,len(list_of_portchannels)):
                if list_of_portchannels[n][0] not in list_of_portchannels_json:
                    list_of_portchannels_json[list_of_portchannels[n][0]] = {}
                if list_of_portchannels[n][1] == "-":
                    list_of_portchannels_json[list_of_portchannels[n][0]]["protocol"] = "NONE"
                else:
                    list_of_portchannels_json[list_of_portchannels[n][0]]["protocol"] = list_of_portchannels[n][1]
                list_of_portchannels_json[list_of_portchannels[n][0]]["ports"] = list_of_portchannels[n][2]
            return list_of_portchannels_json
        except ssh.SSHnotEnabled:
            print ("[[DEV:] Future: Raise error for different module or pass to Telnet")
            break
        except Exception:
            # Deliberate best-effort: log, wait, and retry.
            print ("[[DEV:] General exception triggered in cisco.port_channel")
            time.sleep(3)
            continue
def hostname(ip_address, dev=0):
    """Fetch a device's hostname over SSH, retrying up to three times.

    Returns None when SSH is unavailable or every attempt fails.
    """
    import time
    import ssh
    if dev != 0:
        print("[[DEV:] Getting hostname]")
    attempts_left = 3
    while attempts_left > 0:
        attempts_left -= 1
        try:
            return ssh.hostname_silent(ip_address=ip_address, dev=dev)
        except ssh.SSHnotEnabled:
            print ("[[DEV:] Future: Raise error for different module or pass to Telnet")
            break
        except Exception:
            # Best-effort retry after a short pause.
            print ("[[DEV:] General exception triggered in cisco.hostname")
            time.sleep(3)
def cdp_neighbor(ip_address, dev=0):
    """Parse `show cdp neighbors detail` into a dict keyed by neighbor hostname.

    Each value carries 'ip', 'model', 'device_type', plus one
    local-interface -> remote-interface entry per neighbor record.
    Retries up to 3 times; returns None if SSH is disabled or all retries fail.
    """
    import time
    import ssh
    if dev != 0:
        print("[[DEV:] Getting CDP neighbor information]")
    for retries in range(0, 3):
        try:
            show_cdp_ne_de = ssh.connect_silent('show cdp neighbors detail', ip_address=ip_address, dev=dev)
            split_cdp = show_cdp_ne_de.split('\n')
            network_devices = {}
            for line in split_cdp:
                # A dashed separator marks the start of a new neighbor entry.
                if '----------------' in line:
                    hostname = ''
                if 'Device ID:' in line:
                    (junk, hostname) = line.split('Device ID:')
                    hostname = hostname.strip()
                    # Trim the domain suffix from fully-qualified names.
                    if '.' in hostname:
                        hostname = hostname[0:hostname.find('.')]
                    if not hostname in network_devices:
                        network_devices[hostname] = {}
                if 'IP address:' in line:
                    (junk, ip) = line.split('IP address:')
                    ip = ip.strip()
                    network_devices[hostname]['ip'] = ip
                elif 'IPv4 Address: ' in line:
                    # Alternate wording of the same field on some platforms.
                    (junk, ip) = line.split('IPv4 Address:')
                    ip = ip.strip()
                    network_devices[hostname]['ip'] = ip
                if 'Platform:' in line:
                    # e.g. "Platform: WS-C2960X,  Capabilities: Switch IGMP"
                    (platform, capabilities) = line.split(',')
                    (junk, model) = platform.split("Platform:")
                    model = model.strip()
                    network_devices[hostname]['model'] = model
                    (junk, capabilities) = capabilities.split("Capabilities: ")
                    if 'Router' in capabilities:
                        device_type = 'router'
                    elif 'Switch' in capabilities:
                        device_type = 'switch'
                    elif 'Phone' in capabilities:
                        device_type = 'phone'
                    elif 'Trans-Bridge' in capabilities:
                        device_type = 'wireless access point'
                    else:
                        device_type = 'unknown'
                    network_devices[hostname]['model'] = model
                    network_devices[hostname]['device_type'] = device_type
                if "outgoing port" in line:
                    # "Interface: ... ,  Port ID (outgoing port): ..."
                    # NOTE(review): on Cisco output "Interface:" is normally the
                    # LOCAL port and "Port ID" the neighbor's — the remote/local
                    # naming here may be swapped; confirm against live output.
                    (remote, local) = line.split(",")
                    (junk, local) = local.split(":")
                    (junk, remote) = remote.split(":")
                    # Reduce interface names to digits and '/' (e.g. "1/0/1").
                    if "/" in local:
                        local = (''.join([character for character in local if character.isdigit() or character == "/"]))
                    if "/" in remote:
                        remote = (''.join([character for character in remote if character.isdigit() or character == "/"]))
                    network_devices[hostname][local] = remote
            return network_devices
        except ssh.SSHnotEnabled:
            print("[[DEV:] Future: Raise error for different module or pass to Telnet")
            break
        except Exception:
            # Deliberate best-effort: log, wait, and retry.
            print("[[DEV:] General exception triggered in cisco.cdp_neighbor")
            time.sleep(3)
            continue
|
import xlrd
# Build a KML document of Chylismia collection localities from collections.xlsx.
kml = '<?xml version="1.0" encoding="UTF-8"?>\n \
<kml xmlns="http://www.opengis.net/kml/2.2" xmlns:gx="http://www.google.com/kml/ext/2.2" xmlns:kml="http://www.opengis.net/kml/2.2" xmlns:atom="http://www.w3.org/2005/Atom">\n \
<Document>'
workbook = xlrd.open_workbook('collections.xlsx')
worksheet = workbook.sheet_by_name('Sheet1')
old_species = ''
for row in range(worksheet.nrows):
    # Columns: 0=collection id, 6=genus, 7=species, 8=subspecies,
    # 9=latitude, 10=longitude.  Row 0 is the header.
    if row != 0 and worksheet.cell_value(row, 6) == 'Chylismia':
        collection_id = worksheet.cell_value(row, 0)
        species = worksheet.cell_value(row, 6) + ' ' + worksheet.cell_value(row, 7) + ' ssp. ' + worksheet.cell_value(row, 8)
        latitude = worksheet.cell_value(row, 9)
        longitude = worksheet.cell_value(row, 10)
        # Rows arrive grouped by species; open a new <Folder> at each change.
        if species != old_species:
            if old_species != '':
                kml = kml + '</Folder>\n'
            kml = kml + '<Folder><name>' + species + '</name>\n'
        kml = kml + '<Placemark>\n<name>' + str(collection_id) + '</name>\n'
        kml = kml + '<description>' + species + '</description>\n'
        # Longitudes appear to be stored as positive west-of-Greenwich values,
        # hence the leading '-' — TODO confirm the spreadsheet convention.
        kml = kml + '<Point><coordinates>-' + str(longitude) + ',' + str(latitude) + ',0</coordinates></Point>\n</Placemark>\n'
        old_species = species
kml = kml + '</Folder>\n</Document></kml>'
# FIX: write via a context manager so the file is flushed and closed even on
# error (the original never closed the handle).
with open("chylismia_collections.kml", "w") as kml_file:
    kml_file.write(kml)
# Legacy Python-2 xlrd iteration example, kept for reference.
"""
# iterate over rows
num_rows = worksheet.nrows - 1
num_cells = worksheet.ncols - 1
curr_row = -1
while curr_row < num_rows:
    curr_row += 1
    row = worksheet.row(curr_row)
    print 'Row:', curr_row
    curr_cell = -1
    # iterate through cells in row
    while curr_cell < num_cells:
        curr_cell += 1
        # Cell Types: 0=Empty, 1=Text, 2=Number, 3=Date, 4=Boolean, 5=Error, 6=Blank
        cell_type = worksheet.cell_type(curr_row, curr_cell)
        cell_value = worksheet.cell_value(curr_row, curr_cell)
        print ' ', cell_type, ':', cell_value
"""
|
import unittest
from katas.beta.no_duplicates_here import list_de_dup
class ListDeDuplicateTestCase(unittest.TestCase):
    """Tests for list_de_dup (kata): deduplicate a mixed-type list.

    The expected outputs show the result is also sorted string-wise and that
    None entries are dropped entirely.
    """

    def test_equals(self):
        self.assertEqual(list_de_dup(['g', 3, 'a', 'a']), [3, 'a', 'g'])

    def test_equals_2(self):
        self.assertEqual(list_de_dup([1, 2, 3, 4, 1, 2, 3, 4]), [1, 2, 3, 4])

    def test_equals_3(self):
        self.assertEqual(list_de_dup([
            'code', 'wars', 'ain\'t', None, None, 'code', 'wars', 'ain\'t',
            'the', 'same', 'as', 'the', 'rest']),
            ['ain\'t', 'as', 'code', 'rest', 'same', 'the', 'wars'])
|
import tkinter
import tadretangulo
import time
def movimentar(retangulo, cv, dx, dy, cor):
    """Draw *retangulo* on canvas *cv*, translate it in place by (dx, dy),
    draw it at the new position, then refresh the canvas.

    NOTE(review): the pre-move draw appears intended to overpaint the old
    position; confirm against tadretangulo.move's semantics.
    """
    cv.create_rectangle(retangulo, fill= cor,outline="black")
    tadretangulo.move(retangulo, dx, dy)
    #~ cv.update()
    cv.create_rectangle(retangulo, fill=cor,outline="black")
    cv.update()
    #~ return None
#
def main():
    """Animate two rectangles bouncing inside an 800x600 canvas and paint
    their intersection in black when they overlap."""
    w = 800
    h = 600
    dx = 10; da = 25
    dy = 10; db = 25
    x1, y1, x2, y2 = 10, 10, 100, 100
    xa, ya, xb, yb = 500, 100, 700, 300
    retangulo1 = tadretangulo.criarVtx(x1, y1, x2, y2)
    retangulo2 = tadretangulo.criarVtx(xa, ya, xb, yb)
    raiz = tkinter.Tk()
    canvas = tkinter.Canvas(raiz, width = w, height = h, cursor="X_cursor", bg="white")
    canvas.pack()
    while True:
        movimentar(retangulo1, canvas, dx, dy, "")
        movimentar(retangulo2, canvas, da, db, "")
        inte1 = tadretangulo.intersec(retangulo1, retangulo2)
        inte2 = tadretangulo.intersec(retangulo2, retangulo1)
        if inte1 != None:
            canvas.create_rectangle(inte1[0], inte1[3], fill="black", outline="black")
            canvas.update()
        elif inte2 != None:
            # BUG FIX: this branch previously drew from `inte1`, which is None
            # whenever this branch runs (copy-paste error); use `inte2`.
            canvas.create_rectangle(inte2[0], inte2[3], fill="black", outline="black")
            canvas.update()
        #
        canvas.update()
        time.sleep(0.10)
        # Bounce rectangle 2 off the canvas edges (vertex [3] appears to be
        # the bottom-right corner and [0] the top-left, per criarVtx's
        # argument order — confirm against tadretangulo).
        if retangulo2[3][1] >= h - 3:
            db *= -1
        if retangulo2[3][0] >= w - 3:
            da *= -1
        if retangulo2[0][1] == 0:
            db *= -1
        if retangulo2[0][0] == 0:
            da *= -1
        #
        # Same edge checks for rectangle 1.
        if retangulo1[3][1] >= h - 3:
            dy *= -1
        if retangulo1[3][0] >= w - 3:
            dx *= -1
        if retangulo1[0][1] == 0:
            dy *= -1
        if retangulo1[0][0] == 0:
            dx *= -1
        #
        canvas.delete("all")
    #
    raiz.mainloop()
    return 0

if __name__ == '__main__':
    main()
|
import bisect
def partition(array, a, b):
    """Lomuto-style partition of array[a:b) around the pivot array[a].

    Rearranges the slice in place so every element <= pivot precedes it and
    returns the pivot's final index.
    """
    pivot = array[a]
    boundary = a
    for scan in range(a + 1, b):
        if array[scan] <= pivot:
            boundary += 1
            array[scan], array[boundary] = array[boundary], array[scan]
    array[a], array[boundary] = array[boundary], array[a]
    return boundary


def quick_sort(array, a=0, b=None):
    """Sort array[a:b) in place (b defaults to len(array)).

    Recurses only into the larger side's complement and loops over the rest,
    keeping the recursion depth bounded.
    """
    if b is None:
        b = len(array)
    while a < b:
        m = partition(array, a, b)
        left_size = (m - 1) - a
        right_size = b - (m + 1)
        if left_size > right_size:
            quick_sort(array, m, b)
            b = m
        else:
            quick_sort(array, a, m)
            a = m + 1
def main():
    """Read n intervals and m query points from stdin; print, for each point,
    how many intervals contain it.

    Coverage count for a point p = (#left endpoints <= p) - (#right endpoints
    < p), computed with binary searches over the two independently sorted
    endpoint lists.
    """
    n, m = map(int, input().split())
    intervals = []
    for i in range(n):
        intervals.append([int(i) for i in input().split()])
    l_limits = [i[0] for i in intervals]
    r_limits = [i[1] for i in intervals]
    quick_sort(l_limits)
    quick_sort(r_limits)
    points = [int(i) for i in input().split()]
    res = []
    for point in points:
        interv_count = bisect.bisect_right(l_limits, point) - bisect.bisect_left(r_limits, point)
        res.append(interv_count)
    print(" ".join(str(i) for i in res))

if __name__ == "__main__":
    main()
#!/usr/local/bin/python3
# -*- conding: utf-8 -*-
from ..utils import db
class Auth(db.Model):
    """Account credentials row for the ``auth`` table."""
    __tablename__ = 'auth'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    username = db.Column(db.String(255))
    password = db.Column(db.String(255))
    is_active = db.Column(db.Boolean, default=True)
    create_time = db.Column(db.DateTime, server_default=db.func.now())
    modify_time = db.Column(db.DateTime, server_default=db.func.now())
    last_time = db.Column(db.DateTime, default=None)
    last_ip = db.Column(db.String(255), default=None)

    def __init__(self, username, password, is_active=True):
        self.username = username
        self.password = password
        self.is_active = is_active

    def __repr__(self):
        return '<Auth %s>' % self.username

    def to_dict(self):
        """Return a JSON-friendly dict of this row: the password is stripped,
        datetimes are formatted, and is_active becomes a yes/no label."""
        payload = dict(self.__dict__)
        payload.pop('_sa_instance_state')
        payload.pop('password')
        for key in ('create_time', 'modify_time', 'last_time'):
            if payload[key]:
                payload[key] = payload[key].strftime('%Y-%m-%d %H:%M:%S')
        payload['is_active'] = '是' if payload['is_active'] == 1 else '否'
        return payload
|
import argparse
import matplotlib.pyplot as plt
import pandas as pd
from PROJECT import *
def show(data):
    """Scatter-plot the rows of *data*: column 1 on the x axis, column 0 on y
    (presumably longitude/latitude, per the commented axis limits)."""
    plt.figure(figsize=(8, 6))
    col = [0, 0, 0, 1]  # opaque black markers (RGBA)
    plt.plot(data[:, 1], data[:, 0], 'o', markerfacecolor=tuple(col), markeredgecolor=tuple(col), markersize=4)
    # plt.xlim(116.28, 116.33)
    # plt.ylim(39.98, 40.02)
    plt.title('Original data')
    plt.show()
def main():
    """CLI entry point: load the CSV named by -f/--filename (converting its
    date_time column via parse_dates), drop that column, and plot the rest."""
    parser = argparse.ArgumentParser(description='Show original data')
    parser.add_argument('-f', '--filename', help='Name of the file', required=True)
    args = parser.parse_args()
    filename = args.filename
    df = pd.read_csv(filename, converters={'date_time': parse_dates})
    # FIX: the positional `axis` argument of DataFrame.drop was deprecated in
    # pandas 1.0 and removed in 2.0; the keyword form is equivalent.
    df = df.drop(columns='date_time')
    show(df.values)

if __name__ == '__main__':
    main()
|
class Load_synops():
    """Parse a SYNOP text file into ``self.synops``: a map from each line's
    first 12 characters to its 4th-from-last character.

    Lines whose first three characters are '333' (a SYNOP section marker) are
    skipped.  All positions are taken after stripping whitespace.
    """

    def __init__(self, fname):
        self.synops = {}
        with open(fname, 'r') as data_file:
            for raw_line in data_file:
                stripped = raw_line.strip()
                if self.line_valid(stripped):
                    self.process_line(stripped)

    def line_valid(self, line):
        """A line is usable unless it starts with the '333' marker."""
        return line[:3] != '333'

    def process_line(self, line):
        # Key: 12-character prefix; value: single character 4 from the end.
        self.synops[line[:12]] = line[-4]

#l = Load_synops('SYNOPs/BA_Koliba_SYNOP_2014-2016.txt')
class Solution:
    def twoSum(self, nums: 'list[int]', target: int) -> 'list[int]':
        """Return indices [i, j] (i != j) with nums[i] + nums[j] == target,
        or None when no such pair exists.

        FIX: the original annotations used bare ``List``, which raises
        NameError at definition time since ``typing.List`` is never imported
        in this file; string annotations avoid any runtime dependency.
        """
        # Map each value to its last index; duplicates still work because a
        # partner equal to nums[i] keeps its *other* index in the map.
        num_map = {}
        for index, value in enumerate(nums):
            num_map[value] = index
        for i in range(len(nums)):
            complement = target - nums[i]
            if complement in num_map and i != num_map[complement]:
                return [i, num_map[complement]]
from GameManager import GameManager
import socket
# Python 2 game client: connect to the local server and alternate turns until
# the game ends.
client = socket.socket()
client.connect(('127.0.0.1', 8820))
gameManager = GameManager('x', 'o')
board = GameManager.StartingPosition
print "Connected - Waiting for move"
while( True ):
    # On the first iteration the board is untouched, so the server moves
    # first; afterwards we always send our move before waiting again.
    if (board != GameManager.StartingPosition):
        client.send(board)
    print "Waiting for move"
    board = client.recv(1024)
    if (gameManager.IsGameFinished(board)):
        gameManager.PrintGameResult(board)
        break
    board = gameManager.MakeTurn(board)
    if (gameManager.IsGameFinished(board)):
        # Our move ended the game: report it and send the final board.
        gameManager.PrintGameResult(board)
        client.send(board)
        break
client.close()
|
""" File: fug.py
Author: Abraham Aruguete
Purpose: To create a grid drawing such that it emulates pixels on a screen
or something. Graphics. Woo.
"""
import sys
def terminal():
    """Read drawing commands from stdin and dispatch them to the grid helpers.

    Supported commands: init, print_raw, print, set, horiz_line, vert_line,
    filled_rect, hollow_rect.  ``init`` must come first (tracked by
    init_Flag) and may be used only once; '#' lines are comments.
    """
    # current grid for most of the functions
    currentGrid = []
    commandList = []
    init_Flag = False
    for line in sys.stdin:
        # NOTE(review): lines from sys.stdin keep their trailing newline, so
        # this equality can never be true; an empty file simply yields no
        # iterations and this branch is dead.
        if line == "":
            print("Input error: Empty File")
            sys.exit()
        if line.lstrip().startswith("#"):
            continue
        line = line.strip()
        try:
            if ("init" in line) and (init_Flag == False):
                commandList = line.split()
                commandList[1] = int(commandList[1])
                commandList[2] = int(commandList[2])
                #TODO: the actual commands within the command list
                currentGrid = init(commandList[1], commandList[2])
                init_Flag = True
                continue
            # "print_raw" must be tested before "print": dispatch is by
            # substring, so the order of these branches matters.
            elif ("print_raw" in line) and (init_Flag == True):
                print(currentGrid)
                continue
            elif ("print" in line) and (init_Flag == True):
                print_interpreted(currentGrid)
                continue
            elif ("set" in line) and (init_Flag == True):
                commandList = line.split()
                commandList[1] = int(commandList[1])
                commandList[2] = int(commandList[2])
                commandList[3] = int(commandList[3])
                currentGrid = set_color(commandList[1], \
                    commandList[2], commandList[3], currentGrid)
                continue
            elif ("horiz_line" in line) and (init_Flag == True):
                commandList = line.split()
                commandList[1] = int(commandList[1])
                commandList[2] = int(commandList[2])
                commandList[3] = int(commandList[3])
                commandList[4] = int(commandList[4])
                commandList[5] = int(commandList[5])
                currentGrid = horiz_line(commandList[1], commandList[2], \
                    commandList[3], commandList[4], commandList[5], currentGrid)
                continue
            elif ("vert_line" in line) and (init_Flag == True):
                commandList = line.split()
                commandList[1] = int(commandList[1])
                commandList[2] = int(commandList[2])
                commandList[3] = int(commandList[3])
                commandList[4] = int(commandList[4])
                commandList[5] = int(commandList[5])
                currentGrid = vert_line(commandList[1], commandList[2],\
                    commandList[3], commandList[4], commandList[5], currentGrid)
                continue
            elif ("filled_rect" in line) and (init_Flag == True):
                commandList = line.split()
                commandList[1] = int(commandList[1])
                commandList[2] = int(commandList[2])
                commandList[3] = int(commandList[3])
                commandList[4] = int(commandList[4])
                commandList[5] = int(commandList[5])
                currentGrid = filled_rectangle(commandList[1], commandList[2],\
                    commandList[3], commandList[4], commandList[5], currentGrid)
                continue
            elif ("hollow_rect" in line) and (init_Flag == True):
                commandList = line.split()
                commandList[1] = int(commandList[1])
                commandList[2] = int(commandList[2])
                commandList[3] = int(commandList[3])
                commandList[4] = int(commandList[4])
                commandList[5] = int(commandList[5])
                currentGrid = hollow_rectangle(commandList[1], commandList[2], \
                    commandList[3], commandList[4], commandList[5], currentGrid)
                continue
            elif ("init" in line) and (init_Flag == True):
                print("Input error: init already used. Closing...")
                sys.exit()
            else:
                print("Input error: Invalid command.")
                continue
        except ValueError:
            # A non-integer argument in any command lands here.
            print("Input error: Generic.")
            continue
def init(length, height):
    """Create and return a height x length grid of zeros, one independent
    list per row."""
    return [[0] * length for _ in range(height)]
def print_interpreted(grid_of_stuff):
    """Print the grid bottom row first, one row per line, cells run together."""
    for row in reversed(grid_of_stuff):
        print(''.join(str(cell) for cell in row))

#print_raw is implemented in terminal()
def set_color(color, x, y, grid):
    """Set cell (x, y) to *color* and return the (mutated) grid.

    Per the coordinate system described in the docs, x selects the column
    (inner index) and y the row (outer index).
    """
    grid[y][x] = color
    return grid
def horiz_line(color, x1, x2, y1, y2, grid):
    """Colour row y1 from column x1 up to (but not including) x2.

    Requires y1 == y2 and x1 <= x2; otherwise an error message is printed and
    the grid is returned unchanged.
    """
    if y1 != y2 or x1 > x2:
        print("Input error: Incorrect parameters to horizontal line.")
        return grid
    for column in range(x1, x2):
        grid[y1][column] = color
    return grid
def vert_line(color, x1, x2, y1, y2, grid):
    """Colour column x1 over rows y1-1 up to (but not including) y2.

    Requires x1 == x2 and y1 <= y2; otherwise an error message is printed and
    the grid is returned unchanged.  The y1-1 start preserves the original's
    off-by-one row convention.
    """
    if x1 != x2 or y1 > y2:
        print("Input error: Incorrect parameters to vertical line.")
        return grid
    for row in range(y1 - 1, y2):
        grid[row][x1] = color
    return grid
def filled_rectangle(color, x1, y1, x2, y2, grid):
    """Fill the box with 1-based corners (x1, y1) and (x2, y2).

    Requires x1 <= x2 and y1 <= y2; otherwise prints an error and returns the
    grid unchanged.  Cells touched are rows y1-1..y2-1 and columns x1-1..x2-1,
    matching horiz_line/vert_line's off-by-one convention.
    """
    if x1 > x2 or y1 > y2:
        print("Input error: Incorrect parameters to filled rectangle.")
        return grid
    for ypos in range(y1 - 1, y2):
        for xpos in range(x1 - 1, x2):
            # BUG FIX: the original indexed grid[len(grid) - ypos], which is
            # out of range at ypos == 0 and inconsistent with set_color /
            # horiz_line / vert_line; index the row directly instead.
            grid[ypos][xpos] = color
    return grid
def hollow_rectangle(color, x1, y1, x2, y2, grid):
    """Draw the outline of a rectangle of *color* from corner (x1, y1)
    to corner (x2, y2), built from two horizontal and two vertical
    lines. On bad parameters (x1 > x2 or y1 > y2) an error is printed
    and the grid is returned unchanged.
    """
    if x1 > x2 or y1 > y2:
        print("Input error: Incorrect parameters to hollow rectangle.")
        return grid
    # top and bottom edges
    grid = horiz_line(color, x1, x2, y1, y1, grid)
    grid = horiz_line(color, x1, x2, y2, y2, grid)
    # left and right edges; offsets line up with vert_line's 1-based rows
    # (the x1-1 / y+1 shifts mirror the original call sites exactly)
    grid = vert_line(color, x1 - 1, x1 - 1, y1 + 1, y2 + 1, grid)
    grid = vert_line(color, x2 - 1, x2 - 1, y1 + 1, y2 + 1, grid)
    return grid
def main():
    """Entry point: hand control to the interactive terminal loop
    (``terminal()`` is defined elsewhere in this file)."""
    terminal()
# Run immediately on import/execution (no __main__ guard in this script).
main()
|
#!/usr/bin/env python3
import os
import signal
from subprocess import check_output, PIPE, Popen, STDOUT
from addCrontab import add as addCron
from shlex import quote
from livePrintStdout import livePrintStdout
from certbot import run
from generateEnvFile import generate as generateEnvFile
from printSubprocessStdout import printSubprocessStdout
from certCheck import CertCheck
from fillEmptyDirectory import fillEmpty
def relative(subpath='', useCwd=False):
    """Resolve *subpath* to an absolute, normalized path.

    By default the path is resolved relative to the directory containing
    this script; pass ``useCwd=True`` to resolve against the current
    working directory instead. ``~`` in *subpath* is expanded.

    Fix: removed the redundant function-local ``import os`` — the module
    already imports ``os`` at the top of the file.
    """
    basePath = os.getcwd() if useCwd else os.path.dirname(os.path.abspath(__file__))
    return os.path.normpath(os.path.join(basePath, os.path.expanduser(subpath)))
def onSignal(signum, stack):
    """Signal handler registered below for SIGTERM/SIGINT: log which
    signal arrived, then terminate the foreground NGINX process
    (module-level ``foregroundNginxProcess``, set at startup) so the
    container shuts down gracefully."""
    print('Received signal {}.'.format(signal.Signals(signum).name))
    foregroundNginxProcess.terminate()
# --- Container entrypoint: prepare host mounts, configure Certbot (or a
# --- certificate watcher), then run NGINX in the foreground as PID 1.

# Explain the expected host volume mounts and where custom NGINX config lives.
print('Remember to mount /etc/letsencrypt as a volume on the host. It will receive downloaded certificates from Let\'s Encrypt. {0} is the mount point for NGINX configuration. {1} inside of the directory is the file included by NGINX in its configuration, to be replaced by custom NGINX settings, specific to the image instance ran. The whole {0} directory can be used to store custom configuration as long as {1} is provided inside. Configuring Certbot. /var/log directory will contain logs from certbot renewal script executed using a cron job and it can be mounted on the host also. If any of the mount directories mentioned are mounted on the host and empty at startup, they will be filled with default files and directories. Keep in mind that configuring Docker in more extreme ways may require the default contents of these directories to be adjusted.'.format(
    os.environ['NGINX_CONFIG_DIRECTORY'],
    os.environ['NGINX_CONFIG_FILENAME']
))
# Seed each host-mountable directory with defaults from /blueprints when empty.
for mountPath in ['/etc/letsencrypt', os.environ['NGINX_CONFIG_DIRECTORY'], '/var/log']:
    print('Filling {} with default contents if empty.'.format(mountPath))
    defaultFilesUsed = fillEmpty(
        mountPath,
        os.path.join('/blueprints/', os.path.basename(os.path.normpath(mountPath)))
    )
    if defaultFilesUsed:
        print('{} was empty and therefore filled with default contents.'.format(mountPath))
    else:
        print('{} was not empty. Its contents will be used instead of default contents.'.format(mountPath))
useCertbot = os.environ.get('CERTBOT_ENABLED')
# Cron jobs run with a clean environment, so persist env vars to a file first.
print('Saving env variables in file to be loaded by cron jobs.')
generateEnvFile()
if not useCertbot == 'true':
    # Certbot disabled: only watch for externally supplied certificates and
    # reload NGINX when new ones appear under /etc/letsencrypt.
    print('CERTBOT_ENABLED is not set to "true". Certbot will not auto renew SSL certificates for provided domains while this container is running. Change CERTBOT_ENABLED to "true" to have Certbot periodically check the age of the certificate and update it automatically. However, NGINX will be launched and new certificates placed inside {} will cause the server to be automatically reloaded (may happen after a delay) to ensure they are used.'.format('/etc/letsencrypt'))
    print('Updating timestamp tracking the age of certificates.')
    certCheck = CertCheck()
    certCheck.updateTimestamp()
    print('Adding cron job checking if there are new certificates.')
    addCron(
        '{} {} 2>&1 | logger -t cron-cert-check'
        .format(
            os.environ['CERTIFICATES_CHECK_FREQUENCY'],
            quote(relative('startCronEnsureNewestCert'))
        )
    )
else:
    # Certbot enabled: validate required configuration up front, fail fast.
    email = os.environ.get('CERTBOT_EMAIL')
    if not email:
        print('CERTBOT_EMAIL is required when using Certbot for SSL.')
        exit(1)
    domains = os.environ.get('CERTBOT_DOMAINS')
    if not domains:
        print('CERTBOT_DOMAINS is required when using Certbot for SSL.')
        exit(1)
    domainsArray = domains.split(' ')
    for domain in domainsArray:
        if not domain:
            print('CERTBOT_DOMAINS provided but no domain strings found.')
            exit(1)
    print('Found domains for Certbot: {0}.'.format(domainsArray))
    challengeDirectory = os.environ['CERTBOT_CHALLENGE_DIRECTORY']
    # Default to Let's Encrypt staging certificates unless explicitly disabled,
    # to avoid hitting production rate limits by accident.
    if os.environ.get('CERTBOT_STAGING') == 'false':
        useTestCerts = False
    else:
        useTestCerts = True
    # A temporary NGINX serves the HTTP-01 (ACME) challenge files.
    print('Running NGINX in the background for initial Certbot challenge.')
    printSubprocessStdout(check_output([relative('server/backgroundServerStart'), '--no-certs-yet']))
    run(
        email=email,
        challengeDirectory=challengeDirectory,
        domains=domainsArray,
        test=useTestCerts,
        reloadNginx=False
    )
    print('Stopping NGINX that was used for ACME challenge.')
    livePrintStdout(Popen(['/usr/sbin/nginx', '-s', 'stop'], stdout=PIPE, stderr=STDOUT))
    print('NGINX stopped. Making Certbot check and auto renew certificate periodically and reload NGINX configuration after every certificate renewal.')
    addCron(
        '{} {} 2>&1 | logger -t cron-renew'
        .format(
            os.environ['CERTIFICATES_CHECK_FREQUENCY'],
            quote(relative('startCronRenew'))
        )
    )
    print('Cron job for renewing certificates added.')
print('Starting NGINX in the foreground.')
foregroundNginxProcess = Popen(relative('server/foregroundServerStart'), stdout=PIPE, stderr=STDOUT)
print('NGINX started. Watching for signals to shutdown gracefully.')
# Register handlers so container stop (SIGTERM) / Ctrl-C (SIGINT) terminate
# NGINX cleanly via onSignal above.
signal.signal(signal.SIGTERM, onSignal)
signal.signal(signal.SIGINT, onSignal)
# Stream NGINX output until it exits; this keeps the entrypoint alive.
livePrintStdout(foregroundNginxProcess)
|
import matplotlib.colors
import numpy as np
import xarray as xr
from cartopy import crs as ccrs
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
from confusion_matrix import plot_confusion_matrix
from crop import crop_center, crop_2d
def plot_results(x, y_true, y_pred, name, in_size, date, bw=False, binary=False):
    """Render the predicted and ground-truth front maps side by side on a
    Lambert conformal projection, title the figure with *date*, and save
    the result to *name*."""
    projection = ccrs.LambertConformal(
        central_latitude=50,
        central_longitude=-107,
        false_easting=5632642.22547,
        false_northing=4612545.65137,
        standard_parallels=(50, 50),
        cutoff=-30
    )
    fig = plt.figure(figsize=(16, 8))
    fig.suptitle("Fronts at {}".format(date), fontsize=16)
    panels = (("Prediction", y_pred), ("Ground truth", y_true))
    for position, (title, fronts) in enumerate(panels, start=1):
        ax = plt.subplot(1, 2, position, projection=projection)
        ax.set_title(title)
        plot_fronts(x, fronts, projection, ax, in_size, bw, binary)
    plt.savefig(name)
    plt.close(fig)
def plot_fronts(x, y, proj, ax, in_size, bw=False, binary=False):
    """Draw one front map onto *ax*: background contours from *x* plus the
    front-class mask *y* (0 = no front, 1-4 = classes; legend labels them,
    in Russian, as warm / cold / stationary / occluded fronts).

    bw=True renders classes as hatch patterns for black-and-white output;
    binary=True treats *y* as a single front/no-front mask.
    """
    # Borrow the NARR curvilinear lat/lon arrays, cropped to the model's
    # input window. NOTE(review): hard-coded local dataset path.
    with xr.open_dataset("/mnt/ldm_vol_DESKTOP-DSIGH25-Dg0_Volume1/DiplomData2/NARR/air.2m.nc") as example:
        lat = crop_center(crop_2d(example.lat.values), in_size)
        lon = crop_center(crop_2d(example.lon.values), in_size)
    lon = (lon + 220) % 360 - 180 # Shift due to problems with crossing dateline in cartopy
    shift = ccrs.PlateCarree(central_longitude=-40)
    ax.set_xmargin(0.1)
    ax.set_ymargin(0.1)
    ax.set_extent((2.0e+6, 1.039e+07, 6.0e+5, 8959788), crs=proj)
    # Multi-channel input: overlay thin black contours of channel 1
    # (presumably a pressure field — confirm with the training pipeline).
    if x.ndim == 3:
        plt.contour(lon, lat, x[..., 1], levels=20, transform=shift, colors='black', linewidths=0.5)
    if bw:
        if binary:
            # Single mask: transparent where 0, black where there is a front.
            cmap = matplotlib.colors.ListedColormap([(0, 0, 0, 0), 'black'])
            plt.pcolormesh(lon, lat, y, cmap=cmap, zorder=10, transform=shift)
        else:
            # One hatch pattern per class: masked_not_equal keeps only the
            # cells equal to that class value; alpha=0 shows hatching only.
            plt.pcolor(lon, lat, np.ma.masked_not_equal(y, 1), hatch="||||", alpha=0., transform=shift, zorder=100)
            plt.pcolor(lon, lat, np.ma.masked_not_equal(y, 2), hatch="----", alpha=0., transform=shift, zorder=100)
            plt.pcolor(lon, lat, np.ma.masked_not_equal(y, 3), hatch="oooo", alpha=0., transform=shift, zorder=100)
            plt.pcolor(lon, lat, np.ma.masked_not_equal(y, 4), hatch="++++", alpha=0., transform=shift, zorder=100)
            hot = mpatches.Patch(facecolor='white', label='Тёплый фронт', hatch="||||", alpha=1)
            cold = mpatches.Patch(facecolor='white', label='Холодный фронт', hatch="----", alpha=1)
            stat = mpatches.Patch(facecolor='white', label='Стационарный фронт', hatch="oooo", alpha=1)
            occl = mpatches.Patch(facecolor='white', label='Фронт окклюзии', hatch="++++", alpha=1)
            ax.legend(handles=[hot, cold, stat, occl], loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=2,
                      prop={'size': 12})
    else:
        # Filled background field: channel 0 of a multi-channel input, or
        # the raw 2-D field itself.
        if x.ndim == 3:
            plt.contourf(lon, lat, x[..., 0], levels=20, transform=shift)
        else:
            plt.contourf(lon, lat, x, levels=20, transform=shift)
        if binary:
            cmap = matplotlib.colors.ListedColormap([(0, 0, 0, 0), 'black'])
            plt.pcolormesh(lon, lat, y, cmap=cmap, zorder=10, transform=shift)
        else:
            # Transparent background plus one color per front class (1-4).
            cmap = matplotlib.colors.ListedColormap([(0, 0, 0, 0), 'red', 'blue', 'green', 'purple'])
            plt.pcolormesh(lon, lat, y, cmap=cmap, zorder=10, transform=shift)
            hot = mpatches.Patch(facecolor='red', label='Тёплый фронт', alpha=1)
            cold = mpatches.Patch(facecolor='blue', label='Холодный фронт', alpha=1)
            stat = mpatches.Patch(facecolor='green', label='Стационарный фронт', alpha=1)
            occl = mpatches.Patch(facecolor='purple', label='Фронт окклюзии', alpha=1)
            ax.legend(handles=[hot, cold, stat, occl], loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=2,
                      prop={'size': 12})
    ax.coastlines()
    ax.gridlines(draw_labels=True)
    # hot = mpatches.Patch(facecolor='red', label='Тёплый фронт', alpha=1)
    # cold = mpatches.Patch(facecolor='blue', label='Холодный фронт', alpha=1)
    # stat = mpatches.Patch(facecolor='green', label='Стационарный фронт', alpha=1)
    # occl = mpatches.Patch(facecolor='purple', label='Фронт окклюзии', alpha=1)
    # ax.legend(handles=[hot, cold, stat, occl], loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=2,
    #           prop={'size': 12})
def plot_fronts_far_east(x, y, name, onehot, in_size, date, bw=False):
    """Plot a single front map centered on the Far East (central
    longitude 130) and save it to *name*. *y* may be one-hot encoded
    (onehot=True), in which case it is collapsed to class indices first.
    Rendering mirrors plot_fronts: hatches when bw=True, colors otherwise.
    """
    proj = ccrs.LambertConformal(
        central_latitude=50,
        central_longitude=130,
        false_easting=5632642.22547,
        false_northing=4612545.65137,
        standard_parallels=(50, 50),
        cutoff=-30
    )
    f = plt.figure(figsize=(8, 8))
    f.suptitle("Fronts at {}".format(date), fontsize=16)
    ax = plt.subplot(1, 1, 1, projection=proj)
    # Collapse one-hot class scores to the argmax class index if needed.
    y = np.argmax(y, axis=-1) if onehot else y
    with xr.open_dataset("/mnt/ldm_vol_DESKTOP-DSIGH25-Dg0_Volume1/DiplomData2/NARR/air.2m.nc") as example:
        lat = crop_center(crop_2d(example.lat.values), in_size)
        lon = crop_center(crop_2d(example.lon.values), in_size) # Steal lat/lon from NARR
    # Extra +237 rotation relative to plot_fronts moves the window to the
    # Far East region.
    lon = ((lon + 220) % 360 - 180 + 237) % 360 # Shift due to problems with crossing dateline in cartopy
    shift = ccrs.PlateCarree(central_longitude=-40)
    ax.set_xmargin(0.1)
    ax.set_ymargin(0.1)
    ax.set_extent((2.0e+6, 1.039e+07, 6.0e+5, 8959788), crs=proj)
    # Thin black contours of input channel 1 as background reference.
    plt.contour(lon, lat, x[..., 1], levels=20, transform=shift, colors='black', linewidths=0.5)
    if bw:
        # One hatch pattern per front class (cells equal to k stay visible).
        plt.pcolor(lon, lat, np.ma.masked_not_equal(y, 1), hatch="||||", alpha=0., transform=shift, zorder=100)
        plt.pcolor(lon, lat, np.ma.masked_not_equal(y, 2), hatch="----", alpha=0., transform=shift, zorder=100)
        plt.pcolor(lon, lat, np.ma.masked_not_equal(y, 3), hatch="oooo", alpha=0., transform=shift, zorder=100)
        plt.pcolor(lon, lat, np.ma.masked_not_equal(y, 4), hatch="++++", alpha=0., transform=shift, zorder=100)
        hot = mpatches.Patch(facecolor='white', label='Тёплый фронт', hatch="||||", alpha=1)
        cold = mpatches.Patch(facecolor='white', label='Холодный фронт', hatch="----", alpha=1)
        stat = mpatches.Patch(facecolor='white', label='Стационарный фронт', hatch="oooo", alpha=1)
        occl = mpatches.Patch(facecolor='white', label='Фронт окклюзии', hatch="++++", alpha=1)
        ax.legend(handles=[hot, cold, stat, occl], loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=2,
                  prop={'size': 12})
    else:
        # Filled field from channel 0, then one color per front class.
        plt.contourf(lon, lat, x[..., 0], levels=20, transform=shift)
        cmap = matplotlib.colors.ListedColormap([(0, 0, 0, 0), 'red', 'blue', 'green', 'purple'])
        plt.pcolormesh(lon, lat, y, cmap=cmap, zorder=10, transform=shift)
        hot = mpatches.Patch(facecolor='red', label='Тёплый фронт', alpha=1)
        cold = mpatches.Patch(facecolor='blue', label='Холодный фронт', alpha=1)
        stat = mpatches.Patch(facecolor='green', label='Стационарный фронт', alpha=1)
        occl = mpatches.Patch(facecolor='purple', label='Фронт окклюзии', alpha=1)
        ax.legend(handles=[hot, cold, stat, occl], loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=2,
                  prop={'size': 12})
    ax.coastlines()
    ax.gridlines(draw_labels=True)
    plt.savefig(name)
    plt.close(f)
def plot_conf_matrix(y_true, y_pred, filename, binary=False, normalize=True, title=None, cmap='Greys'):
    """Render a confusion matrix for front classification and save it to
    *filename*. With binary=True only front/no-front classes are shown;
    otherwise all five classes (labels are in Russian)."""
    if binary:
        class_labels = ["Нет фронта", "Фронт"]
    else:
        class_labels = ["Нет фронта", "Тёплый", "Холодный", "Стационарный", "Окклюзии"]
    plot_confusion_matrix(y_true, y_pred, class_labels, normalize=normalize, title=title, cmap=cmap)
    plt.savefig(filename)
    plt.close()
def plot_sample(dataset, model, prefix, in_size, binary=False):
    """Plot every sample of the dataset's first batch: prediction vs.
    ground truth, saved as <prefix>/<index>."""
    batch_x, batch_true = dataset[0]
    batch_dates = dataset.get_dates(0)
    batch_pred = model.predict(batch_x)
    if binary:
        # Keep only the front/no-front channel for the ground truth.
        batch_true = batch_true[..., 0]
    sample_count = batch_x.shape[0]
    for idx in range(sample_count):
        out_path = "{}/{}".format(prefix, idx)
        plot_results(batch_x[idx], batch_true[idx], batch_pred[idx], out_path, in_size, batch_dates[idx])
def plot_filtered(dataset, model, in_size, prefix, filter_func, binary=False):
    """Plot only the batches whose second evaluation metric passes
    *filter_func*; filenames embed the batch index and metric value."""
    for batch_index, batch in enumerate(dataset):
        batch_x, batch_y = batch
        metrics = model.evaluate(batch_x, batch_y, verbose=0)
        batch_date = dataset.get_dates(batch_index)[0]
        if not filter_func(metrics[1]):
            continue
        prediction = model.predict(batch_x)
        if binary:
            batch_y[0] = batch_y[0, ..., 0]
        out_path = "{2}/{0}_{1:.2f}.png".format(batch_index, metrics[1], prefix)
        plot_results(batch_x[0], batch_y[0], prediction[0], out_path, in_size, batch_date)
def plot_metrics_histogram(dataset, model, prefix):
    """Evaluate *model* on every batch of *dataset* and save, per metric,
    a 100-bin histogram of the per-batch values under <prefix>/<metric>.
    """
    # One accumulator list per metric reported by the underlying Keras model.
    d = [[] for _ in model.keras_model.metrics_names]
    for (x, y) in dataset:
        r = model.evaluate(x, y, verbose=0)
        # evaluate() returns one value per metric, in metric order.
        for i, j in zip(d, r):
            i.append(j)
    # NOTE(review): accumulators are sized from model.keras_model.metrics_names
    # but iterated here via model.metrics_names — presumably the same list;
    # if they ever differ, zip silently drops the extra metrics. Verify.
    for n, i in zip(model.metrics_names, d):
        plt.hist(i, bins=100)
        plt.title(n)
        plt.savefig("{}/{}".format(prefix, n))
        plt.close()
|
import requests

# Simple console chat client: repeatedly prompt for a login and a message
# and deliver them to the chat endpoint via HTTP GET.
#
# Fixes:
#  * `f = true` raised NameError — Python's boolean literal is `True`.
#  * Query values are now passed via `params` so requests URL-encodes
#    spaces and non-ASCII text instead of producing a malformed URL by
#    string concatenation.
running = True
while running:
    login = input("Введите логин: ")
    message = input("Введите текст сообщения: ")
    req = requests.get(
        "http://192.168.1.178:4567/message",
        params={"login": login, "text": message}
    )
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 24 19:04:32 2018
@author: Rafael Rocha
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
import keras
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from keras.optimizers import SGD, Adam, Adagrad, RMSprop
from keras.losses import categorical_crossentropy
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Input
from keras.models import Model
from keras import backend as K
# Fix the RNG seed so sample selection / weight initialization is reproducible.
np.random.seed(42)

# Dataset archive with precomputed train/test splits and sample index sets.
data_set_name = 'train_test_splits_2.npz'
#data_set_name = 'dataset_pad_128x256_aug.npz'
data = np.load(data_set_name)
x = data['x']
y = data['y']
x_test = data['x_test']
y_test = data['y_test']
samples = data['samples']

# Shape test images as single-channel 128x256 and one-hot encode 3 classes.
x_test = x_test.reshape(x_test.shape[0], 128, 256, 1)
y_test = keras.utils.to_categorical(y_test, 3)

# Fix: the original compared strings with `is` (object identity), which is
# unreliable for string values — use `==` for value equality. Both branches
# currently choose the same kernel size; this only matters if they diverge.
if data_set_name == 'dataset_pad_28x32.npz':
    conv0_ks = (3, 3)
else:
    conv0_ks = (3, 3)

# Per-run training histories accumulated by the loop below.
result = []
# NOTE(review): despite the name, this iterates over *sample splits*
# (samples[i]) — the fit batch_size is hard-coded to 5 below. Only 2 runs
# are made here, yet the plotting code later indexes result[0..9]; confirm
# the intended number of runs.
batch_sizes = range(2)
for i in batch_sizes:
    print('\n'+'Batch size: '+str(i)+'\n')
    # Free the previous run's graph/session memory before rebuilding.
    K.clear_session()
    # Select this run's training subset and shape it like the test set.
    x_train = x[samples[i]]
    y_train = y[samples[i]]
    x_train = x_train.reshape(x_train.shape[0], 128, 256, 1)
    y_train = keras.utils.to_categorical(y_train, 3)
    input_shape = (np.size(x_train, 1), np.size(x_train, 2), 1)
    inputs = Input(input_shape)
    # My net: small conv net — two conv layers, one pooling, one dense.
    conv0 = Conv2D(32, kernel_size=(11, 11), strides=5, activation='relu',
                   input_shape=input_shape)(inputs)
    conv1 = Conv2D(64, (3,3), activation='relu')(conv0)
    pool0 = MaxPooling2D(pool_size=(2, 2))(conv1)
    # pool0 = Dropout(0.25)(pool0)
    flatt0 = Flatten()(pool0)
    dense0 = Dense(128, activation='relu')(flatt0)
    # dense0 = Dropout(0.25)(dense0)
    outputs = Dense(3, activation='softmax')(dense0) # x
    # # Lenet5 (alternative architecture, kept for reference)
    # conv0 = Conv2D(20, kernel_size=(11, 11), strides=(5, 5), padding='same',
    #                activation='tanh', input_shape=input_shape)(inputs)
    ## conv0 = BatchNormalization()(conv0)
    # pool0 = MaxPooling2D(pool_size=(2, 2))(conv0)
    # conv1 = Conv2D(50, kernel_size=5, strides=5, padding='same',
    #                activation='tanh')(pool0)
    ## conv1 = BatchNormalization()(conv1)
    # pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    # flatt0 = Flatten()(pool1)
    # dense0 = Dense(500, activation='tanh')(flatt0)
    ## dense0 = BatchNormalization()(dense0)
    # outputs = Dense(3, activation='softmax')(dense0)
    #
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss=categorical_crossentropy,
                  optimizer=SGD(lr=0.01, momentum=.3),
                  metrics=['accuracy'])
    # h = model.fit(x_train,
    #               y_train,
    #               batch_size=batch_size,
    #               epochs=epochs,
    #               verbose=1)
    # result.append(h.history)
    # Train and time the run; evaluate on the training set afterwards.
    start_time = time.time()
    h = model.fit(x_train,
                  y_train,
                  batch_size=5,
                  epochs=50,
                  verbose=1)
    training_time = time.time() - start_time
    score = model.evaluate(x_train, y_train, verbose=0)
    # del x_train
    print("\n--- Training time: %s seconds ---" % training_time)
    print('Traning loss:', score[0])
    print('Training accuracy:', score[1])
    result.append(h.history)
    # Time and report held-out test performance.
    start_time = time.time()
    score = model.evaluate(x_test,
                          y_test,
                          verbose=0)
    test_time = time.time() - start_time
    print("\n--- Test time: %s seconds ---" % test_time)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    y_pred = model.predict(x_test)
    #y_pred = model.predict_classes(x_test)
    # Persist this run's model under a per-run name.
    model_name = 'model_' + str(i)
    model.save(model_name)
    # del x_test
    # Per-class report and confusion matrix on the test set.
    target_names = ['Absent', 'Undamaged', 'Damaged']
    print('\n')
    cr = classification_report(np.argmax(y_test, axis=1),
                               np.argmax(y_pred, axis=1),
                               target_names=target_names,
                               digits=4)
    print(cr)
    print('\nConfusion matrix:\n')
    cm = confusion_matrix(np.argmax(y_test, axis=1),
                          np.argmax(y_pred, axis=1))
    print(cm)
    str_acc = "%.2f" % (100*score[1])
# Plot per-run loss and accuracy curves over epochs.
#
# Fix: the original plotted result[0] through result[9] with ten hard-coded
# plt.plot calls, which raises IndexError whenever fewer than ten runs were
# collected (the loop above makes only 2). It also passed the epoch array to
# plt.legend as the label list, mislabeling the curves. Curves are now drawn
# in a loop over whatever is in `result`, with one "run N" label per curve.
x = np.arange(1, np.size(result[0]['acc'])+1)

plt.figure()
for history in result:
    plt.plot(x, history['loss'])
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['run {}'.format(k) for k in range(len(result))],
           loc='best')
#plt.savefig('teste1')

plt.figure()
for history in result:
    plt.plot(x, history['acc'])
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['run {}'.format(k) for k in range(len(result))],
           loc='best')
#plt.savefig('teste2')
#np.savez(name_file, result=result)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Schema migration: add three optional image fields and two price
    fields to the ``coffee`` model of the ``wherecoffee`` app.

    Auto-generated (Django 1.x era — note the bytes ``b''`` upload_to
    paths). The escaped verbose_name strings are Chinese UI labels
    (``\\u56fe\\u7247N`` = "image N"; ``\\u73b0\\u4ef7`` /
    ``\\u539f\\u4ef7`` = current / original price).
    """

    # Must apply after the previous wherecoffee migration.
    dependencies = [
        ('wherecoffee', '0003_auto_20150618_1708'),
    ]

    operations = [
        migrations.AddField(
            model_name='coffee',
            name='img0',
            field=models.ImageField(upload_to=b'wherecoffee/coffee/', verbose_name='\u56fe\u72471', blank=True),
        ),
        migrations.AddField(
            model_name='coffee',
            name='img1',
            field=models.ImageField(upload_to=b'wherecoffee/coffee/', verbose_name='\u56fe\u72472', blank=True),
        ),
        migrations.AddField(
            model_name='coffee',
            name='img2',
            field=models.ImageField(upload_to=b'wherecoffee/coffee/', verbose_name='\u56fe\u72473', blank=True),
        ),
        migrations.AddField(
            model_name='coffee',
            name='price_new',
            field=models.FloatField(default=0.0, verbose_name='\u73b0\u4ef7', blank=True),
        ),
        migrations.AddField(
            model_name='coffee',
            name='price_old',
            field=models.FloatField(default=0.0, verbose_name='\u539f\u4ef7', blank=True),
        ),
    ]
|
# Generated by Django 3.1.5 on 2021-01-26 11:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``task.status`` to an IntegerField with fixed choices
    (1 = Error, 5 = In process, 10 = Completed), defaulting to 5.
    Auto-generated by Django 3.1.5 makemigrations."""

    # Must apply after the migration that added the task error field.
    dependencies = [
        ('task', '0004_task_error'),
    ]

    operations = [
        migrations.AlterField(
            model_name='task',
            name='status',
            field=models.IntegerField(choices=[(1, 'Error'), (5, 'In process'), (10, 'Completed')], default=5, verbose_name='Status'),
        ),
    ]
|
db_file = './db/moleskine.db' |
from vector import Vector3D
import struct
import sys as sys
# Raise the interval between interpreter check switches so the tight render
# loops run with less switching overhead. NOTE(review): sys.setcheckinterval
# was deprecated in Python 3.2 and removed in 3.9 (sys.setswitchinterval is
# the modern equivalent) — this module appears to target Python 2.
sys.setcheckinterval(10000)
# Globals vars — shared mutable state, populated elsewhere at runtime.
canvas = None        # drawing surface — presumably a Tk canvas, confirm
scene = None         # loaded scene/model data
lightsgrid = None    # lights-grid structure/widget
origin = None        # current transform origin
invorigin = None     # inverse of the origin transform
guidelines = []      # handles of guide lines drawn on the canvas
# Initial scene bounds — start inverted so the first point always updates them.
max_x = -1
max_y = -1
max_z = -1
min_x = float('inf')
min_y = float('inf')
min_z = float('inf')
# Global constants
CANVAS_WIDTH = 280
CANVAS_HEIGHT = 280
DRAW_GUIDE_LINES = True
CAMERA_Z = 1000
LIGHTS_GRID_BG_COLOR = 'lightgray'
LIGHTS_GRID_X_MULT = 5.
LIGHTS_GRID_Y_MULT = 5.
LIGHTS_GRID_NODE_SIZE = 10
LIGHT_INTENSITY_MULTIPLIER = 1.5 / 255  # scales a 0-255 channel to intensity
INITIAL_Z_OFFSET = 600
WIREFRAME_COLORS = ('#%02x%02x%02x' % (33, 33, 33), '')  # (outline color, empty fill)
VIEW_ANGLE = Vector3D(0, 0, -0.5)
# Flat RGB byte buffers pre-filled with a dark-gray background. HIRES has
# 16x the pixel count — presumably 4x supersampling per axis, confirm.
BASE_IMAGE = [struct.pack("BBB", 60, 60, 60)] * CANVAS_WIDTH * CANVAS_HEIGHT
HIRES_IMAGE = [struct.pack("BBB", 60, 60, 60)] * CANVAS_WIDTH * CANVAS_HEIGHT * 16
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-10 13:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional ``group_type`` CharField (max 100 chars) to the
    ``election`` model. Auto-generated by Django 1.10.2 makemigrations."""

    # Must apply after the migration that added election_title.
    dependencies = [("elections", "0017_election_election_title")]

    operations = [
        migrations.AddField(
            model_name="election",
            name="group_type",
            field=models.CharField(blank=True, max_length=100),
        )
    ]
|
def house_numbers_sum(inp):
    """Return the sum of the house numbers seen before the first 0.

    The task guarantees at least one 0 in *inp*; like the original,
    this raises ValueError if none is present.
    """
    first_zero = inp.index(0)
    return sum(inp[:first_zero])
'''
Task
A boy is walking a long way from school to his home. To make the walk more fun
he decides to add up all the numbers of the houses that he passes by during his
walk. Unfortunately, not all of the houses have numbers written on them, and on
top of that the boy is regularly taking turns to change streets, so the numbers
don't appear to him in any particular order.
At some point during the walk the boy encounters a house with number 0 written on it,
which surprises him so much that he stops adding numbers to his total right after
seeing that house.
For the given sequence of houses determine the sum that the boy will get. It is
guaranteed that there will always be at least one 0 house on the path.
Example
For inputArray = [5, 1, 2, 3, 0, 1, 5, 0, 2], the output should be 11.
The answer was obtained as 5 + 1 + 2 + 3 = 11.
Input/Output
[input] integer array inputArray
Constraints: 5 ≤ inputArray.length ≤ 50, 0 ≤ inputArray[i] ≤ 10.
[output] an integer
'''
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.