Column schema (⌀ = column contains null values):

| column | dtype | range / values |
|---|---|---|
| hexsha | stringlengths | 40..40 |
| size | int64 | 5..2.06M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3..248 |
| max_stars_repo_name | stringlengths | 5..125 |
| max_stars_repo_head_hexsha | stringlengths | 40..78 |
| max_stars_repo_licenses | listlengths | 1..10 |
| max_stars_count | int64 | 1..191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24..24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24..24 ⌀ |
| max_issues_repo_path | stringlengths | 3..248 |
| max_issues_repo_name | stringlengths | 5..125 |
| max_issues_repo_head_hexsha | stringlengths | 40..78 |
| max_issues_repo_licenses | listlengths | 1..10 |
| max_issues_count | int64 | 1..67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24..24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24..24 ⌀ |
| max_forks_repo_path | stringlengths | 3..248 |
| max_forks_repo_name | stringlengths | 5..125 |
| max_forks_repo_head_hexsha | stringlengths | 40..78 |
| max_forks_repo_licenses | listlengths | 1..10 |
| max_forks_count | int64 | 1..105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24..24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24..24 ⌀ |
| content | stringlengths | 5..2.06M |
| avg_line_length | float64 | 1..1.02M |
| max_line_length | int64 | 3..1.03M |
| alphanum_fraction | float64 | 0..1 |
| count_classes | int64 | 0..1.6M |
| score_classes | float64 | 0..1 |
| count_generators | int64 | 0..651k |
| score_generators | float64 | 0..1 |
| count_decorators | int64 | 0..990k |
| score_decorators | float64 | 0..1 |
| count_async_functions | int64 | 0..235k |
| score_async_functions | float64 | 0..1 |
| count_documentation | int64 | 0..1.04M |
| score_documentation | float64 | 0..1 |

hexsha: 65d1935d60c24b3adbd4cfdad6ba81b04f4d1399 | size: 9,501 | ext: py | lang: Python
max_stars: pyspi/SPILike.py, grburgess/pyspi, 084884c3fd06a09ef3a850cd19e7b751d7929e59, ["BSD-3-Clause"], count=null, events=null
max_issues: pyspi/SPILike.py, grburgess/pyspi, 084884c3fd06a09ef3a850cd19e7b751d7929e59, ["BSD-3-Clause"], count=null, events=null
max_forks: pyspi/SPILike.py, grburgess/pyspi, 084884c3fd06a09ef3a850cd19e7b751d7929e59, ["BSD-3-Clause"], count=3, events=2018-06-05T12:57:29.000Z to 2019-01-09T17:12:31.000Z
content:
import collections
from typing import Optional
import numpy as np
from astromodels import Parameter, Model
from astromodels.functions.priors import Cosine_Prior, Uniform_prior
from threeML import PluginPrototype
from threeML.io.file_utils import sanitize_filename
from threeML.plugins.DispersionSpectrumLike import DispersionSpectrumLike
from threeML.plugins.SpectrumLike import SpectrumLike
from threeML.io.logging import setup_logger
from pyspi.utils.response.spi_drm import SPIDRM
log = setup_logger(__name__)
class SPILike(DispersionSpectrumLike):
"""
Plugin for the data of SPI, based on PySPI
"""
def __init__(
self,
name: str,
observation,
background,
bkg_base_array,
free_position: bool,
verbose: bool = True,
**kwargs
):
"""
Init the plugin for a constant source analysis with PySPI
:param name: Name of plugin
:param observation: observed spectrum
:param background: background spectrum
:param bkg_base_array: Base array for background model
:param free_position: Free the position in the fit?
:param verbose: Verbose?
:returns: Object
"""
self._free_position: bool = free_position
if not isinstance(
observation.response, SPIDRM
):
log.error("The response associated with the observation"
" is not a SPIDRM")
raise AssertionError()
super(SPILike, self).__init__(name,
observation,
background,
verbose,
**kwargs)
self._bkg_base_array = bkg_base_array
self._bkg_array = np.ones(len(self._bkg_base_array))
def set_model(self, likelihood_model: Model) -> None:
"""
Set the model to be used in the joint minimization.
:param likelihood_model: likelihood model instance
:returns:
"""
super(SPILike, self).set_model(likelihood_model)
if self._free_position:
log.info(f"Freeing the position of {self.name} and setting priors")
for key in self._like_model.point_sources.keys():
self._like_model.point_sources[key].position.ra.free = True
self._like_model.point_sources[key].position.dec.free = True
self._like_model.point_sources[key].position.ra.prior = \
Uniform_prior(lower_bound=0.0, upper_bound=360)
self._like_model.point_sources[key].position.dec.prior = \
Cosine_Prior(lower_bound=-90.0, upper_bound=90)
ra = self._like_model.point_sources[key].position.ra.value
dec = self._like_model.point_sources[key].position.dec.value
else:
for key in self._like_model.point_sources.keys():
ra = self._like_model.point_sources[key].position.ra.value
dec = self._like_model.point_sources[key].position.dec.value
self._response.set_location(ra, dec)
def _evaluate_model(self, precalc_fluxes=None):
"""
Evaluate the model
        :param precalc_fluxes: Precalculated flux of spectrum
:returns: model counts
"""
source = super(SPILike, self)._evaluate_model(precalc_fluxes=
precalc_fluxes)
self._update_bkg_array()
bkg = self._bkg_array*self._bkg_base_array
return source+bkg
def get_model(self, precalc_fluxes: Optional[np.ndarray] = None) -> np.ndarray:
"""
Get the model
        :param precalc_fluxes: Precalculated flux of spectrum
:returns: model counts
"""
if self._free_position:
            # assumes that there is only one point source, which is how
            # it should be!
ra, dec = self._like_model.get_point_source_position(0)
self._response.set_location(ra, dec)
return super(SPILike, self).get_model(precalc_fluxes=precalc_fluxes)
def _add_bkg_nuisance_parameter(self, bkg_parameters) -> None:
"""
        Add the background nuisance parameters. They are saved as an array.
        :param bkg_parameters: list of background nuisance parameters
:returns:
"""
self._bkg_parameters = bkg_parameters
for parameter in bkg_parameters:
self.nuisance_parameters[parameter.name] = parameter
self._bkg_array = np.ones(len(bkg_parameters))
def _update_bkg_array(self) -> None:
"""
Update the array with the background parameter
:returns:
"""
for key in self._like_model.parameters.keys():
if "bkg" in key:
idx = int(key.split("_")[-1])
self._bkg_array[idx] = self._like_model.parameters[key].value
def set_free_position(self, flag):
"""
Set the free position flag
:param flag: True or False
:returns:
"""
self._free_position = flag
@classmethod
def from_spectrumlike(
cls,
spectrum_like,
bkg_base_array,
free_position=False
):
"""
        Generate a SPILike plugin from an existing SpectrumLike child
        :param spectrum_like: SpectrumLike child
        :param bkg_base_array: Base array for the background model
        :param free_position: Free the position in the fit? (boolean)
:returns: Initialized Object
"""
return cls(
spectrum_like.name,
spectrum_like._observed_spectrum,
spectrum_like._background_spectrum,
bkg_base_array,
free_position,
spectrum_like._verbose,
)
class SPILikeGRB(DispersionSpectrumLike):
"""
Plugin for the data of SPI, based on PySPI
"""
def __init__(
self,
name,
observation,
background=None,
free_position=False,
verbose=True,
**kwargs
):
"""
Init the plugin for a GRB analysis with PySPI
:param name: Name of plugin
:param observation: observed spectrum
:param background: background spectrum
:param free_position: Free the position in the fit?
:param verbose: Verbose?
"""
self._free_position = free_position
assert isinstance(
observation.response, SPIDRM
), "The response associated with the observation is not a SPIDRM"
super(SPILikeGRB, self).__init__(name,
observation,
background,
verbose,
**kwargs)
def set_model(self, likelihood_model):
"""
Set the model to be used in the joint minimization.
:param likelihood_model: likelihood model instance
:returns:
"""
super(SPILikeGRB, self).set_model(likelihood_model)
if self._free_position:
print("Freeing the position of %s and setting priors" % self.name)
for key in self._like_model.point_sources.keys():
self._like_model.point_sources[key].position.ra.free = True
self._like_model.point_sources[key].position.dec.free = True
self._like_model.point_sources[key].position.ra.prior = \
Uniform_prior(lower_bound=0.0, upper_bound=360)
self._like_model.point_sources[key].position.dec.prior = \
Cosine_Prior(lower_bound=-90.0, upper_bound=90)
ra = self._like_model.point_sources[key].position.ra.value
dec = self._like_model.point_sources[key].position.dec.value
else:
for key in self._like_model.point_sources.keys():
ra = self._like_model.point_sources[key].position.ra.value
dec = self._like_model.point_sources[key].position.dec.value
self._response.set_location(ra, dec)
def get_model(self, precalc_fluxes=None):
"""
Get the model
        :param precalc_fluxes: Precalculated flux of spectrum
:returns: model counts
"""
if self._free_position:
            # assumes that there is only one point source, which is how
            # it should be!
ra, dec = self._like_model.get_point_source_position(0)
self._response.set_location(ra, dec)
return super(SPILikeGRB, self).get_model(precalc_fluxes=precalc_fluxes)
def set_free_position(self, flag):
"""
Set the free position flag
:param flag: True or False
:returns:
"""
self._free_position = flag
@classmethod
def from_spectrumlike(
cls, spectrum_like, free_position=False
):
"""
Generate SPILikeGRB from an existing SpectrumLike child
:param spectrum_like: SpectrumLike child
        :param free_position: Free the position in the fit? (boolean)
:returns: Initialized Object
"""
return cls(
spectrum_like.name,
spectrum_like._observed_spectrum,
spectrum_like._background_spectrum,
free_position,
spectrum_like._verbose,
)
avg_line_length: 30.549839 | max_line_length: 83 | alphanum_fraction: 0.592674
count_classes: 8,978 | score_classes: 0.944953 | count_generators: 0 | score_generators: 0 | count_decorators: 1,238 | score_decorators: 0.130302 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2,828 | score_documentation: 0.297653

hexsha: 65d2ce3a02d1d452763725cfe14c593fd3bb7cbb | size: 1,881 | ext: py | lang: Python
max_stars: dianna/visualization/image.py, cffbots/dianna, 21e272dce2862747a5109341b622798f667d9248, ["Apache-2.0"], count=null, events=null
max_issues: dianna/visualization/image.py, cffbots/dianna, 21e272dce2862747a5109341b622798f667d9248, ["Apache-2.0"], count=null, events=null
max_forks: dianna/visualization/image.py, cffbots/dianna, 21e272dce2862747a5109341b622798f667d9248, ["Apache-2.0"], count=null, events=null
content:
import matplotlib.pyplot as plt
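# Heuristic for imshow's vmax: data in [0, 1] keeps vmax=1, 8-bit style data
# in (1, 255] gets vmax=255, and anything larger returns None so matplotlib
# autoscales the color range.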
def _determine_vmax(max_data_value):
vmax = 1
if max_data_value > 255:
vmax = None
elif max_data_value > 1:
vmax = 255
return vmax
def plot_image(heatmap, original_data=None, heatmap_cmap=None, data_cmap=None, show_plot=True, output_filename=None): # pylint: disable=too-many-arguments
"""
Plots a heatmap image.
Optionally, the heatmap (typically a saliency map of an explainer) can be
plotted on top of the original data. In that case both images are plotted
    transparently with alpha = 0.5.
Args:
heatmap: the saliency map or other heatmap to be plotted.
original_data: the data to plot together with the heatmap, both with
alpha = 0.5 (optional).
heatmap_cmap: color map for the heatmap plot (see mpl.Axes.imshow
documentation for options).
data_cmap: color map for the (optional) data image (see mpl.Axes.imshow
documentation for options). By default, if the image is two
dimensional, the color map is set to 'gray'.
show_plot: Shows plot if true (for testing or writing plots to disk
instead).
output_filename: Name of the file to save the plot to (optional).
Returns:
None
"""
# default cmap depends on shape: grayscale or colour
_, ax = plt.subplots()
alpha = 1
if original_data is not None:
if len(original_data.shape) == 2 and data_cmap is None:
# 2D array, grayscale
data_cmap = 'gray'
ax.imshow(original_data, cmap=data_cmap, vmin=0, vmax=_determine_vmax(original_data.max()))
alpha = .5
ax.imshow(heatmap, cmap=heatmap_cmap, alpha=alpha)
if show_plot:
plt.show()
if output_filename:
plt.savefig(output_filename)
avg_line_length: 34.833333 | max_line_length: 155 | alphanum_fraction: 0.642212
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,100 | score_documentation: 0.584795

hexsha: 65d4480d133aa88d0ea4335e0f5e5f6c3cb17894 | size: 4,680 | ext: py | lang: Python
max_stars: models/lenet.py, davidstutz/random-bit-error-robustness, 59d8533c8db87ba1b220a64032cf929e5d67fbfa, ["Unlicense"], count=null, events=null
max_issues: models/lenet.py, davidstutz/random-bit-error-robustness, 59d8533c8db87ba1b220a64032cf929e5d67fbfa, ["Unlicense"], count=null, events=null
max_forks: models/lenet.py, davidstutz/random-bit-error-robustness, 59d8533c8db87ba1b220a64032cf929e5d67fbfa, ["Unlicense"], count=null, events=null
content:
import torch
import common.torch
from .classifier import Classifier
from .utils import get_normalization2d, get_activation
class LeNet(Classifier):
"""
LeNet classifier.
"""
def __init__(self, N_class, resolution=(1, 32, 32), channels=64, activation='relu', normalization='bn', linear=1024, dropout=False, **kwargs):
"""
Initialize classifier.
:param N_class: number of classes to classify
:type N_class: int
        :param resolution: resolution (assumed to be square)
        :type resolution: (int, int, int)
        :param channels: channels to start with
        :type channels: int
        :param activation: activation function
        :type activation: str
        :param normalization: normalization to use
        :type normalization: str
        :param linear: size of the additional linear layer (0 disables it)
        :type linear: int
        :param dropout: whether to use dropout before the logits
        :type dropout: bool
"""
super(LeNet, self).__init__(N_class, resolution, **kwargs)
# the constructor parameters must be available as attributes for state to work
self.channels = channels
""" (int) Channels. """
self.activation = activation
""" (str) Activation. """
self.linear = linear
""" (int) Additional linear layer. """
self.dropout = dropout
""" (str) Dropout. """
activation_layer = get_activation(self.activation)
assert activation_layer is not None
self.normalization = normalization
""" (bool) Normalization. """
layer = 0
layers = []
resolutions = []
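        # stack conv(5x5) -> normalization -> activation -> max-pool blocks,
        # doubling the channels and halving the resolution, until the spatial
        # size would drop below 3 or become odd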
while True:
input_channels = self.resolution[0] if layer == 0 else layers[layer - 1]
output_channels = self.channels if layer == 0 else layers[layer - 1] * 2
conv = torch.nn.Conv2d(input_channels, output_channels, kernel_size=5, stride=1, padding=2, bias=self.include_bias)
#torch.nn.init.normal_(conv.weight, mean=0, std=0.1)
common.torch.kaiming_normal_(conv.weight, nonlinearity=activation, scale=self.init_scale)
if self.include_bias:
torch.nn.init.constant_(conv.bias, 0)
self.append_layer('conv%d' % layer, conv)
self.append_layer('%s%d' % (self.normalization, layer), get_normalization2d(self.normalization, output_channels))
if self.activation:
relu = activation_layer()
self.append_layer('act%d' % layer, relu)
pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
self.append_layer('pool%d' % layer, pool)
layers.append(output_channels)
resolutions.append([
self.resolution[1] // 2 if layer == 0 else resolutions[layer - 1][0] // 2,
self.resolution[2] // 2 if layer == 0 else resolutions[layer - 1][1] // 2,
])
if resolutions[-1][0] // 2 < 3 or resolutions[-1][0] % 2 == 1 or resolutions[-1][1] // 2 < 3 or resolutions[-1][1] % 2 == 1:
break
layer += 1
representation = int(resolutions[-1][0] * resolutions[-1][1] * layers[-1])
assert representation > 0
view = common.torch.ViewOrReshape(-1, representation)
self.append_layer('view', view)
if self.linear > 0:
fc = torch.nn.Linear(representation, self.linear, bias=self.include_bias)
common.torch.kaiming_normal_(fc.weight, nonlinearity=activation, scale=self.init_scale)
if self.include_bias:
torch.nn.init.constant_(fc.bias, 0)
self.append_layer('fc%d' % layer, fc)
if self.activation:
relu = activation_layer()
self.append_layer('act%d' % layer, relu)
if self.dropout:
drop = torch.nn.Dropout(p=0.5)
self.append_layer('drop', drop)
logits = torch.nn.Linear(self.linear if self.linear > 0 else representation, self._N_output, bias=self.include_bias)
common.torch.kaiming_normal_(logits.weight, nonlinearity=activation, scale=self.init_scale)
if self.include_bias:
torch.nn.init.constant_(logits.bias, 0)
self.append_layer('logits', logits)
def __str__(self):
"""
Print network.
"""
string = super(LeNet, self).__str__()
string += '(channels: %d)\n' % self.channels
string += '(activation: %s)\n' % self.activation
string += '(normalization: %s)\n' % self.normalization
string += '(linear: %d)\n' % self.linear
string += '(dropout: %s)\n' % self.dropout
return string
avg_line_length: 37.142857 | max_line_length: 146 | alphanum_fraction: 0.59594
count_classes: 4,555 | score_classes: 0.973291 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,111 | score_documentation: 0.237393

hexsha: 65d4761a181f8a12d33c2a0e4fbbb20be034782f | size: 309 | ext: py | lang: Python
max_stars: project/server/main/modules/__init__.py, ardikabs/dnsmanager, 4d2f302ea9f54fd4d5416328dc46a1c47b573e5b, ["MIT"], count=1, events=2019-01-15T10:33:04.000Z to 2019-01-15T10:33:04.000Z
max_issues: project/server/main/modules/__init__.py, ardikabs/dnsmanager, 4d2f302ea9f54fd4d5416328dc46a1c47b573e5b, ["MIT"], count=null, events=null
max_forks: project/server/main/modules/__init__.py, ardikabs/dnsmanager, 4d2f302ea9f54fd4d5416328dc46a1c47b573e5b, ["MIT"], count=null, events=null
content:
""" All Available Module on Server Belong to Here """
AVAILABLE_MODULES = (
"api",
)
def init_app(app, **kwargs):
from importlib import import_module
for module in AVAILABLE_MODULES:
import_module(
f".{module}",
package=__name__
).init_app(app, **kwargs)
avg_line_length: 23.769231 | max_line_length: 53 | alphanum_fraction: 0.614887
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 70 | score_documentation: 0.226537

hexsha: 65d585b3c927a0a65da4783e776cd19589017f27 | size: 2,154 | ext: py | lang: Python
max_stars: slixmpp/plugins/xep_0380/eme.py, cnngimenez/slixmpp, bb61f0f39dfba205282dab50c0f3a47b26145c74, ["BSD-3-Clause"], count=null, events=null
max_issues: slixmpp/plugins/xep_0380/eme.py, cnngimenez/slixmpp, bb61f0f39dfba205282dab50c0f3a47b26145c74, ["BSD-3-Clause"], count=null, events=null
max_forks: slixmpp/plugins/xep_0380/eme.py, cnngimenez/slixmpp, bb61f0f39dfba205282dab50c0f3a47b26145c74, ["BSD-3-Clause"], count=null, events=null
content:
"""
Slixmpp: The Slick XMPP Library
Copyright (C) 2016 Emmanuel Gil Peyrot <linkmauve@linkmauve.fr>
This file is part of Slixmpp.
See the file LICENSE for copying permission.
"""
import logging
import slixmpp
from slixmpp.stanza import Message
from slixmpp.xmlstream.handler import Callback
from slixmpp.xmlstream.matcher import StanzaPath
from slixmpp.xmlstream import register_stanza_plugin, ElementBase, ET
from slixmpp.plugins import BasePlugin
from slixmpp.plugins.xep_0380 import stanza, Encryption
log = logging.getLogger(__name__)
class XEP_0380(BasePlugin):
"""
XEP-0380: Explicit Message Encryption
"""
name = 'xep_0380'
description = 'XEP-0380: Explicit Message Encryption'
dependencies = {'xep_0030'}
default_config = {
'template': 'This message is encrypted with {name} ({namespace})',
}
mechanisms = {
'jabber:x:encrypted': 'Legacy OpenPGP',
'urn:xmpp:ox:0': 'OpenPGP for XMPP',
'urn:xmpp:otr:0': 'OTR',
'eu.siacs.conversations.axolotl': 'Legacy OMEMO',
'urn:xmpp:omemo:0': 'OMEMO',
}
def plugin_init(self):
self.xmpp.register_handler(
Callback('Explicit Message Encryption',
StanzaPath('message/eme'),
self._handle_eme))
register_stanza_plugin(Message, Encryption)
def session_bind(self, jid):
self.xmpp.plugin['xep_0030'].add_feature(Encryption.namespace)
def has_eme(self, msg):
return msg.xml.find('{%s}encryption' % Encryption.namespace) is not None
def add_eme(self, msg: Message, namespace: str) -> Message:
msg['eme']['name'] = self.mechanisms[namespace]
msg['eme']['namespace'] = namespace
return msg
def replace_body_with_eme(self, msg):
eme = msg['eme']
namespace = eme['namespace']
name = self.mechanisms[namespace] if namespace in self.mechanisms else eme['name']
body = self.config['template'].format(name=name, namespace=namespace)
msg['body'] = body
def _handle_eme(self, msg):
self.xmpp.event('message_encryption', msg)
avg_line_length: 29.916667 | max_line_length: 90 | alphanum_fraction: 0.659703
count_classes: 1,591 | score_classes: 0.738626 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 684 | score_documentation: 0.317549

hexsha: 65d5f60d4b7acc40612bcf45d7c9efe894269057 | size: 1,050 | ext: py | lang: Python
max_stars: JSS Users Cleanup/setup.py, killahquam/JAMF, 77b003a72375b9b01bdb961cb466b7519c859116, ["MIT"], count=34, events=2015-06-11T16:37:54.000Z to 2021-06-02T20:42:55.000Z
max_issues: JSS Users Cleanup/setup.py, killahquam/JAMF, 77b003a72375b9b01bdb961cb466b7519c859116, ["MIT"], count=1, events=2016-01-03T04:05:30.000Z to 2016-09-26T20:25:51.000Z
max_forks: JSS Users Cleanup/setup.py, killahquam/JAMF, 77b003a72375b9b01bdb961cb466b7519c859116, ["MIT"], count=6, events=2015-12-29T20:39:56.000Z to 2020-06-30T19:33:23.000Z
content:
#!/usr/bin/python
# Quam Sodji 2015
# Setup script to install the needed python modules
# Installs kn/Slack and python-jss modules
# We assume you have Git installed.
import subprocess
import os
import sys
import shutil
clone_jss = subprocess.check_output(['git','clone','git://github.com/sheagcraig/python-jss.git'])
clone_slack = subprocess.check_output(['git','clone','git://github.com/kn/slack.git'])
path = os.path.dirname(os.path.realpath(__file__))
#Installing Slack
print "Installing Slack"
slack_folder = os.chdir(path + '/slack')
install_slack = subprocess.check_output(['python','setup.py','install'])
print "slack module installed"
#Installing Python JSS
print "Installing Python JSS"
jss_folder = os.chdir(path + '/python-jss')
install_jss = subprocess.check_output(['python','setup.py','install'])
print "python-jss module installed"
#Cleaning up
print "Cleaning up"
change_location = os.chdir(path)
remove_slack_clone = shutil.rmtree(path + '/slack')
remove_jss_clone = shutil.rmtree(path + '/python-jss')
print "Done."
sys.exit(0)
avg_line_length: 33.870968 | max_line_length: 97 | alphanum_fraction: 0.75619
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 524 | score_documentation: 0.499048

hexsha: 65da9cfd0758b74606005cccaa574f86bf734619 | size: 969 | ext: py | lang: Python
max_stars: sharpy/linear/utils/sselements.py, ACea15/sharpy, c89ecb74be3cb9e37b23ac8a282c73b9b55dd792, ["BSD-3-Clause"], count=80, events=2018-08-30T13:01:52.000Z to 2022-03-24T15:02:48.000Z
max_issues: sharpy/linear/utils/sselements.py, ACea15/sharpy, c89ecb74be3cb9e37b23ac8a282c73b9b55dd792, ["BSD-3-Clause"], count=88, events=2018-05-17T16:18:58.000Z to 2022-03-11T21:05:48.000Z
max_forks: sharpy/linear/utils/sselements.py, ACea15/sharpy, c89ecb74be3cb9e37b23ac8a282c73b9b55dd792, ["BSD-3-Clause"], count=44, events=2018-01-02T14:27:28.000Z to 2022-03-12T13:49:36.000Z
content:
"""
Linear State Space Element Class
"""
class Element(object):
"""
State space member
"""
def __init__(self):
self.sys_id = str() # A string with the name of the element
self.sys = None # The actual object
self.ss = None # The state space object
self.settings = dict()
def initialise(self, data, sys_id):
self.sys_id = sys_id
settings = data.linear.settings[sys_id] # Load settings, the settings should be stored in data.linear.settings
# data.linear.settings should be created in the class above containing the entire set up
# Get the actual class object (like lingebm) from a dictionary in the same way that it is done for the solvers
# in sharpy
# sys = sys_from_string(sys_id)
# To use the decorator idea we would first need to instantiate the class. Need to see how this is done with NL
# SHARPy
def assemble(self):
pass
avg_line_length: 28.5 | max_line_length: 119 | alphanum_fraction: 0.643963
count_classes: 926 | score_classes: 0.955624 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 585 | score_documentation: 0.603715

hexsha: 65db9046fcabc0fdacbff5217e489cc008a5a30b | size: 4,819 | ext: py | lang: Python
max_stars: pt_mesh_renderer/RasterizeTriangles.py, FuxiCV/pt_mesh_renderer, 15153fbbe73d7c4c59d8f0b2bce7320173b3d396, ["Apache-2.0"], count=61, events=2020-08-06T06:39:15.000Z to 2022-03-25T03:48:02.000Z
max_issues: pt_mesh_renderer/RasterizeTriangles.py, shty32/pt_mesh_renderer, 15153fbbe73d7c4c59d8f0b2bce7320173b3d396, ["Apache-2.0"], count=6, events=2020-09-08T10:30:56.000Z to 2021-07-10T14:24:23.000Z
max_forks: pt_mesh_renderer/RasterizeTriangles.py, shty32/pt_mesh_renderer, 15153fbbe73d7c4c59d8f0b2bce7320173b3d396, ["Apache-2.0"], count=9, events=2020-08-25T08:46:08.000Z to 2021-07-25T04:43:41.000Z
content:
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modifications: this file implements a pytorch interface to c++ codes
# Copyright 2020 Netease Fuxi AI LAB
# SPDX-License-Identifier: Apache-2.0
from torch.autograd import Function
import pt_mesh_renderer.kernels.rasterize_triangles as rasterize_triangles_kernels
try:
import pt_mesh_renderer.kernels.rasterize_triangles_cuda as rasterize_triangles_kernels_cuda
except Exception:
print("Cannot import cuda rasterizer, renderer is running in CPU mode.")
class RasterizeTriangles(Function):
@staticmethod
def forward(ctx, vertices, triangles, image_width, image_height):
"""Rasterizes the single input mesh expressed in clip-space (xyzw) coordinates.
Args:
        vertices: 2-D float32 tensor of homogeneous vertices (xyzw) with
shape [vertex_count, 4].
triangles: 2-D int32 tensor with shape [triangle_count, 3]. Each triplet
should contain vertex indices describing a triangle such that the
triangle's normal points toward the viewer if the forward order of the
triplet defines a clockwise winding of the vertices. Gradients with
respect to this tensor are not available.
image_width: int specifying desired output image width in pixels.
image_height: int specifying desired output image height in pixels.
Returns:
barycentric_coordinates: 3-D tensor with shape [image_height, image_width, 3]
containing the rendered barycentric coordinate triplet per pixel, before
perspective correction. The triplet is the zero vector if the pixel is outside
the mesh boundary. For valid pixels, the ordering of the coordinates
corresponds to the ordering in triangles.
triangle_ids: 2-D tensor with shape [image_height, image_width]. Contains the
triangle id value for each pixel in the output image. For pixels within the
mesh, this is the integer value in the range [0, num_vertices] from triangles.
For vertices outside the mesh this is 0; 0 can either indicate belonging to
triangle 0, or being outside the mesh. This ensures all returned triangle ids
will validly index into the vertex array, enabling the use of torch.index_select
(instead of tf.gather) with indices from this tensor. The barycentric coordinates
can be used to determine pixel validity instead.
z_buffer: 2-D tensor with shape [image_height, image_width]. Contains the Z
coordinate in Normalized Device Coordinates for each pixel occupied by a
triangle.
"""
# project mesh to image
if vertices.is_cuda:
forward_function = rasterize_triangles_kernels_cuda.forward_rasterize_triangles_cuda
else:
forward_function = rasterize_triangles_kernels.forward_rasterize_triangles
barycentric, triangle_ids, z_buffer = forward_function(vertices, triangles, image_width, image_height)
# only barycentric needs grad
ctx.mark_non_differentiable(triangle_ids, z_buffer)
# save variables
ctx.save_for_backward(vertices, barycentric, triangle_ids)
ctx.triangles = triangles
ctx.image_size = [image_width, image_height]
return barycentric, triangle_ids, z_buffer
@staticmethod
def backward(ctx, grad_barycentric, grad_triangle_ids, grad_z_buffer):
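        # gradients reach the mesh vertices only through the barycentric
        # coordinates; triangle_ids and z_buffer were marked non-differentiable
        # in forward, so their incoming gradients are unused here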
# get variables
vertices, barycentric, triangle_ids = ctx.saved_tensors
triangles = ctx.triangles
image_width, image_height = ctx.image_size
# compute grad from image to mesh vertices
if vertices.is_cuda:
backward_function = rasterize_triangles_kernels_cuda.backward_rasterize_triangles_cuda
else:
backward_function = rasterize_triangles_kernels.backward_rasterize_triangles
grad_vertices = backward_function(
vertices, triangles, barycentric, triangle_ids, grad_barycentric, image_width, image_height
)
return grad_vertices[0], None, None, None
rasterize_triangles = RasterizeTriangles.apply
avg_line_length: 50.197917 | max_line_length: 110 | alphanum_fraction: 0.708861
count_classes: 3,728 | score_classes: 0.773604 | count_generators: 0 | score_generators: 0 | count_decorators: 3,682 | score_decorators: 0.764059 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 3,033 | score_documentation: 0.629384

hexsha: 65db99db18c44b4e940ff60964e5dae8b718ca83 | size: 3,988 | ext: py | lang: Python
max_stars: datamining_assignments/datamining_assiment_3/nmf.py, xuerenlv/PaperWork, f096b57a80e8d771f080a02b925a22edbbee722a, ["Apache-2.0"], count=1, events=2015-10-15T12:26:07.000Z to 2015-10-15T12:26:07.000Z
max_issues: datamining_assignments/datamining_assiment_3/nmf.py, xuerenlv/PaperWork, f096b57a80e8d771f080a02b925a22edbbee722a, ["Apache-2.0"], count=null, events=null
max_forks: datamining_assignments/datamining_assiment_3/nmf.py, xuerenlv/PaperWork, f096b57a80e8d771f080a02b925a22edbbee722a, ["Apache-2.0"], count=null, events=null
content:
# -*- coding: utf-8 -*-
'''
Created on Oct 27, 2015
@author: nlp
'''
import numpy as np
import math
# main NMF clustering routine
def nmf(file_list, k):
X = np.array(file_list).transpose()
m_x, n_x = X.shape
    # randomly initialize the factor matrices
U = np.random.rand(m_x, k)
V = np.random.rand(n_x, k)
is_convergence = False
count = 0
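    # Lee-Seung style multiplicative updates: U <- U * (X V) / (U V^T V) and
    # V <- V * (X^T U) / (V U^T U), iterated until both factors stop changing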
while not is_convergence:
count+=1
U_old = U.copy()
V_old = V.copy()
X_V = np.dot(X, V)
U_VT_V = np.dot(U, np.dot(V.transpose(), V))
U = U * X_V / U_VT_V
XT_U = np.dot(X.transpose(), U)
V_UT_U = np.dot(V, np.dot(U.transpose(), U))
V = V * XT_U / V_UT_U
if abs((U - U_old).sum()) < 0.01 and abs((V - V_old).sum()) < 0.01:
is_convergence = True
# normalize U and V
u_pow_2 = (U ** 2).sum(axis=0)
u_sqrt_pow_2 = [math.sqrt(w) for w in u_pow_2]
for i in range(m_x):
for j in range(k):
U[i, j] = U[i, j] / u_sqrt_pow_2[j]
for i in range(n_x):
for j in range(k):
V[i, j] *= u_sqrt_pow_2[j]
# restlt_example_map_cluster
restlt_example_map_cluster = {}
for i in range(n_x):
max_val = 0
for j in range(k):
            if V[i][j] > max_val:
                max_val = V[i][j]
                restlt_example_map_cluster[i] = j
return restlt_example_map_cluster
# Read the file into a list of lists (each inner list is one row of the file)
# and a list giving the class label of the i-th row.
def read_file(file_name):
file_list = []
lable_list = []
for line in open(file_name).readlines():
        arr_line = list(line.split(','))
        lable_list.append(arr_line[-1][:-1])
        del arr_line[-1]
file_list.append([float(one) if not one=='0' else 0.000001 for one in arr_line])
return (file_list, lable_list)
#***************** evaluation metrics **********************************
# purity
def gen_purity(file_list, lable_list, restlt_example_map_cluster, cluster_num):
    # initialize the m(i,j) 2-D matrix
gen_matrix = [[0 for j in range(cluster_num)] for i in range(cluster_num)]
for index in xrange(len(file_list)):
lable = int(lable_list[index]) if int(lable_list[index]) > 0 else 0
gen_matrix[lable][restlt_example_map_cluster[index]] += 1
p_j = [0 for i in range(cluster_num)]
for j in range(cluster_num):
max_m_i_j = 0
for i in range(cluster_num):
if gen_matrix[i][j] > max_m_i_j:
max_m_i_j = gen_matrix[i][j]
p_j[j] = max_m_i_j
sum_val = 0
for x in p_j:
sum_val += x
return float(sum_val) / float(len(file_list))
# Gini
def gen_gini(file_list, lable_list, restlt_example_map_cluster, cluster_num):
    # initialize the m(i,j) 2-D matrix
gen_matrix = np.array([[0 for j in range(cluster_num)] for i in range(cluster_num)])
for index in xrange(len(file_list)):
lable = int(lable_list[index]) if int(lable_list[index]) > 0 else 0
gen_matrix[lable][restlt_example_map_cluster[index]] += 1
M_j = gen_matrix.sum(axis=0)
g_j = [0 for i in range(cluster_num)]
for j in range(cluster_num):
for i in range(cluster_num):
g_j[j] += (float(gen_matrix[i][j]) / float(M_j[j])) ** 2
g_j[j] = 1 - g_j[j]
fenzi_sum = 0.0
for j in range(cluster_num):
fenzi_sum += g_j[j] * M_j[j]
return float(fenzi_sum) / float(len(file_list))
#****************************************************************************
def nmf_main(file_name,cluster_nums):
file_list, lable_list = read_file(file_name)
restlt_example_map_cluster = nmf(file_list, cluster_nums)
purity = gen_purity(file_list, lable_list, restlt_example_map_cluster, cluster_nums)
gini = gen_gini(file_list, lable_list, restlt_example_map_cluster, cluster_nums)
print file_name,'purity:',purity, "gini:",gini
if __name__ == '__main__':
nmf_main("german.txt", 2)
nmf_main("mnist.txt", 10)
pass
avg_line_length: 31.15625 | max_line_length: 89 | alphanum_fraction: 0.570963
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 572 | score_documentation: 0.138902

hexsha: 65dca40840f63106b93a09800755e3aaddd6d379 | size: 3,834 | ext: py | lang: Python
max_stars: heat2d.py, atk91/heat-batman, ce76fa25ba56e65b842575a99a029379be54e687, ["BSD-2-Clause"], count=null, events=null
max_issues: heat2d.py, atk91/heat-batman, ce76fa25ba56e65b842575a99a029379be54e687, ["BSD-2-Clause"], count=null, events=null
max_forks: heat2d.py, atk91/heat-batman, ce76fa25ba56e65b842575a99a029379be54e687, ["BSD-2-Clause"], count=null, events=null
content:
#!/usr/bin/env python
import numpy as np
import scipy.linalg as la
import matplotlib.pyplot as plt
from scipy.sparse.linalg import cgs
from scipy.sparse import csr_matrix
fig = plt.figure()
t_min = 0.0
t_max = 10.0
x_min = -10.0
x_max = 10.0
y_min = -10.0
y_max = 10.0
a = 1.0
c = 5.0
m = 400
n = 100
def ind(i, j):
return i * m + j
def abs(x):
return np.fabs(x)
def sqrt(x):
return np.sqrt(x)
def u_init(x, y):
if x**2/(49*a**2)+y**2/(9*a**2)-1<=0 and (abs(x/a)>=4 and -(3*sqrt(33))/7<=y/a<=0 or abs(x/a)>=3 and y>=0 or \
-3<=y/a<=0 and -4<=x/a<=4 and \
(abs(x/a))/2+sqrt(1-(abs(abs(x/a)-2)-1)**2)-((3*sqrt(33)-7)*x**2)/(112*a**2)-y/a-3<=0 or y>=0 and \
3.0/4.0<=abs(x/a)<=1.0 and -8*abs(x/a)-y/a+9>=0 or 1.0/2.0<=abs(x/a)<=3.0/4.0 and \
3*abs(x/a)-y/a+3.0/4.0>=0 and y>=0 or abs(x/a)<=1.0/2.0 and y>=0 and 9.0/4.0-y/a>=0 or abs(x/a)>=1 \
and y>=0 and -(abs(x/a))/2-3.0/7.0 * sqrt(10) * sqrt(4-(abs(x/a)-1)**2)-y/a+(6*sqrt(10))/7+3.0/2.0>=0):
return 1.0
else:
return 0.0
def x_0(t):
return 0.0
def y_0(t):
return 0.0
x = np.linspace(x_min, x_max, m)
y = np.linspace(y_min, y_max, m)
t = np.linspace(t_min, t_max, n)
dx = (x_max - x_min)/(m - 1)
dy = (y_max - y_min)/(m - 1)
dt = (t_max - t_min)/(n - 1)
matr_size = m**2
L = csr_matrix((matr_size, matr_size))
right = np.zeros(matr_size)
u_prev = np.zeros(m * m)
u = np.zeros(m * m)
for i in range(m):
for j in range(m):
u_prev[(m - 1 - j) * m + i] = u_init(x_min + i * dx, y_min + j * dy)
u[(m - 1 - j) * m + i] = u_init(x_min + i * dx, y_min + j * dy)
for k in range(n):
data = []
row = []
col = []
L = csr_matrix((matr_size, matr_size))
to_plot = np.zeros((m, m))
for i in range(m):
for j in range(m):
to_plot[i][j] = u_prev[i * m + j]
ax = fig.add_subplot(111)
ax.set_title("Heat equation solution, t = %.2f" % (k * dt))
plt.imshow(to_plot, vmax=1.0)
cax = fig.add_axes([0.12, 0.1, 0.78, 0.8])
cax.get_xaxis().set_visible(False)
cax.get_yaxis().set_visible(False)
cax.get_yaxis().set_ticklabels([])
cax.get_xaxis().set_ticklabels([])
cax.patch.set_alpha(0)
cax.set_frame_on(False)
plt.colorbar(orientation='vertical')
plt.savefig("images/%d.png" % k)
plt.clf()
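    # assemble the backward-Euler system for (u^{n+1} - u^n)/dt = c * laplacian(u^{n+1}):
    # interior rows get the 5-point stencil (c/dx^2 on the four neighbours,
    # -4c/dx^2 - 1/dt on the diagonal) with right-hand side -u^n/dt, while
    # boundary rows pin the solution to the Dirichlet values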
for i in range(m):
for j in range(m):
str_num = i * m + j
if i == 0 or i == m - 1:
data.append(1.0)
row.append(str_num)
col.append(ind(i, j))
right[str_num] = x_0(j * dx)
elif j == 0 or j == m - 1:
data.append(1.0)
row.append(str_num)
col.append(ind(i, j))
right[str_num] = y_0(i * dy)
else:
data.append(c / (dx**2))
row.append(str_num)
col.append(ind(i - 1, j))
data.append(c / (dx**2))
row.append(str_num)
col.append(ind(i, j - 1))
data.append(- 4.0*c/(dx**2) - 1.0/dt)
row.append(str_num)
col.append(ind(i, j))
data.append(c / (dx**2))
row.append(str_num)
col.append(ind(i + 1, j))
data.append(c / (dx**2))
row.append(str_num)
col.append(ind(i, j + 1))
right[str_num] = - u_prev[ind(i, j)] / dt
L = csr_matrix((np.array(data), (np.array(row), np.array(col))), shape=(matr_size, matr_size))
u, info = cgs(L, right, x0 = u_prev, tol=1e-10)
# print "residual: %le" % la.norm(np.dot(L, u) - right)
# print "norm u + u_prev = %le" % la.norm(u - u_prev)
u_prev = u
avg_line_length: 28.61194 | max_line_length: 114 | alphanum_fraction: 0.490871
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 188 | score_documentation: 0.049035

hexsha: 65dceda09e2e4a4ab6cb1d2b5780ccbfb1f4f6c7 | size: 2,667 | ext: py | lang: Python
max_stars: DQN DDQN Dueling/network.py, eayvali/DeepRL, 4722af0f75487dd3167faafd4eabe8f01aea4305, ["MIT"], count=2, events=2020-01-29T20:49:29.000Z to 2020-03-27T21:45:12.000Z
max_issues: DQN DDQN Dueling/network.py, eayvali/DeepRL, 4722af0f75487dd3167faafd4eabe8f01aea4305, ["MIT"], count=null, events=null
max_forks: DQN DDQN Dueling/network.py, eayvali/DeepRL, 4722af0f75487dd3167faafd4eabe8f01aea4305, ["MIT"], count=null, events=null
content:
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 19 23:19:43 2020
@author: elif.ayvali
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
class deep_Q_net(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
"""
super(deep_Q_net, self).__init__()
self.seed = torch.manual_seed(seed)
self.dqn_net = nn.Sequential(OrderedDict([
('fc1', nn.Linear(state_size, 256)),
('relu1', nn.ReLU()),
('fc2', nn.Linear(256, 128)),
('relu2', nn.ReLU()),
('fc3', nn.Linear(128, 64)),
('relu3', nn.ReLU()),
('fc4', nn.Linear(64, action_size))
]))
def forward(self, state):
"""Build a network that maps state -> action values."""
return self.dqn_net(state)
class dueling_Q_net(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed):
"""Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
"""
super(dueling_Q_net, self).__init__()
self.feature_modules = nn.Sequential(OrderedDict([
('fc1', nn.Linear(state_size, 256)),
('relu1', nn.ReLU()),
('fc2', nn.Linear(256, 128)),
('relu2', nn.ReLU()),
('fc3', nn.Linear(128, 64)),
]))
self.value_modules = nn.Sequential(OrderedDict([
('fc_v1', nn.Linear(64, 32)),
            ('relu_v1', nn.ReLU()),
('fc_v2', nn.Linear(32, 1)),
]))
self.advantage_modules = nn.Sequential(OrderedDict([
('fc_a1', nn.Linear(64, 32)),
('relu_a1', nn.ReLU()),
('fc_a2', nn.Linear(32, action_size)),
]))
def forward(self, state):
#Get common features
common_layers=self.feature_modules(state)
advantage=self.advantage_modules(common_layers)# batch_size x action_size
value=self.value_modules(common_layers) #batch_size x 1
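        # dueling aggregation: Q(s, a) = V(s) + A(s, a) - mean_a A(s, a);
        # subtracting the mean advantage keeps V and A identifiable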
return value + advantage - advantage.mean(dim=1).unsqueeze(1)
avg_line_length: 31.011628 | max_line_length: 81 | alphanum_fraction: 0.544432
count_classes: 2,466 | score_classes: 0.924634 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 921 | score_documentation: 0.345332

hexsha: 65dd2821be3445190af71c053c0a5fe0757716d8 | size: 3,352 | ext: py | lang: Python
max_stars: bin/meyer.py, leipzig/meripseqpipe, b16139dfa0805827fec54a33c2a3583d99780591, ["MIT"], count=13, events=2020-06-09T05:45:11.000Z to 2022-02-17T09:44:34.000Z
max_issues: bin/meyer.py, leipzig/meripseqpipe, b16139dfa0805827fec54a33c2a3583d99780591, ["MIT"], count=2, events=2021-04-02T21:22:19.000Z to 2021-09-28T15:48:50.000Z
max_forks: bin/meyer.py, leipzig/meripseqpipe, b16139dfa0805827fec54a33c2a3583d99780591, ["MIT"], count=12, events=2020-06-09T05:55:51.000Z to 2022-02-09T03:07:20.000Z
content:
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 23 18:53:30 2019
@author: zky
"""
from sys import argv
from math import log
from scipy import stats
input_bin25_file = argv[1]
ip_bin25_file = argv[2]
input_total_reads_count = int(argv[3])
ip_total_reads_count = int(argv[4])
peak_windows_number = int(argv[5])
output_ip_file = argv[6]
def windows_fisher_test(input_count, ip_count, input_total_reads_count, ip_total_reads_count):
"""fisher test for the PeakCalling of meyer"""
site_input_rest_reads_count = input_total_reads_count - int(input_count)
site_ip_rest_reads_count = ip_total_reads_count - int(ip_count)
ip_oddsratio, ip_pvalue = stats.fisher_exact([[input_count, ip_count], [input_total_reads_count, ip_total_reads_count]], 'less')
input_oddsratio, input_pvalue = stats.fisher_exact([[input_count, ip_count], [site_input_rest_reads_count, site_ip_rest_reads_count]], 'greater')
return input_pvalue,ip_pvalue
def cluster_bin( bonferroni_filter_list ):
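    # merge overlapping or touching significant bins into peaks, keep peaks
    # spanning at least 100 bp, then combine the per-bin p-values with
    # Fisher's method via stats.combine_pvalues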
bonferroni_peak = []
peak_line = []
idx = 0
pre_end_position = 0
for data in bonferroni_filter_list:
distance = data[1] - pre_end_position
if pre_end_position == 0 or distance > 0 :
if peak_line :
peak_region = peak_line[2] - peak_line[1]
if peak_region >= 100 :
bonferroni_peak.append([])
bonferroni_peak[idx] = peak_line
idx += 1
peak_line = []
peak_line = data[:]
pre_end_position = data[2]
else:
peak_line[2] = data[2]
pre_end_position = data[2]
peak_line.append(data[3])
for data in bonferroni_peak:
statistic, pval = stats.combine_pvalues(data[3:len(data)], method='fisher', weights=None)
data[3] = pval
del data[4:len(data)]
return bonferroni_peak
with open (input_bin25_file) as input_bin25,open (ip_bin25_file) as ip_bin25:
"""Generate the list of bonferroni_filter_windows"""
ip_bonferroni_filter_list = []
ip_index = 0
print ("Generate the list of bonferroni_filter_windows")
while True:
input_line = input_bin25.readline().rstrip("\n")
ip_line = ip_bin25.readline().rstrip("\n")
if input_line == '':
break
input_line_list = input_line.split("\t")
ip_line_list = ip_line.split("\t")
input_pvalue,ip_pvalue = windows_fisher_test(input_line_list[-1],ip_line_list[-1],input_total_reads_count,ip_total_reads_count)
if (ip_pvalue < 0.05/peak_windows_number ):
del ip_line_list[-1]
ip_line_list.append(ip_pvalue)
ip_line_list[1] = int(ip_line_list[1])
ip_line_list[2] = int(ip_line_list[2])
ip_bonferroni_filter_list.append([])
ip_bonferroni_filter_list[ip_index] = ip_line_list
ip_index += 1
"""Generate the list of bonferroni_filter_peaks"""
print ("Generate the list of bonferroni_filter_peaks")
ip_bonferroni_peak = cluster_bin(ip_bonferroni_filter_list[:])
"""Write the list of bonferroni_filter_peaks"""
print ("Write the list of bonferroni_filter_peaks")
with open(output_ip_file,'w') as output_file:
for data in ip_bonferroni_peak:
output_file.write('\t'.join(str(i) for i in data))
output_file.write('\n')
avg_line_length: 41.9 | max_line_length: 149 | alphanum_fraction: 0.672136
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 464 | score_documentation: 0.138425

hexsha: 65ddc57bb1b73bd27f58c41a027c88ec873b6740 | size: 2,541 | ext: py | lang: Python
max_stars: setup.py, jimbydamonk/jenkins-job-builder-addons, 172672e25089992ed94dc223c7e30f29c46719b0, ["Apache-2.0"], count=8, events=2015-08-21T15:53:22.000Z to 2019-04-09T20:42:58.000Z
max_issues: setup.py, jimbydamonk/jenkins-job-builder-addons, 172672e25089992ed94dc223c7e30f29c46719b0, ["Apache-2.0"], count=5, events=2016-03-23T17:46:16.000Z to 2018-03-05T13:56:17.000Z
max_forks: setup.py, jimbydamonk/jenkins-job-builder-addons, 172672e25089992ed94dc223c7e30f29c46719b0, ["Apache-2.0"], count=11, events=2015-10-05T21:58:33.000Z to 2019-04-14T04:50:48.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools.command.test import test as TestCommand
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
class Tox(TestCommand):
user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = None
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
        # import here, because outside the eggs aren't loaded
import tox
import shlex
args = self.tox_args
if args:
args = shlex.split(self.tox_args)
tox.cmdline(args=args)
setup(
name='jenkins-job-builder-addons',
version='1.0.5',
description="A suite of jenkins job builder addons",
long_description=readme + '\n\n' + history,
author="Mike Buzzetti",
author_email='mike.buzzetti@gmail.com',
url='https://github.com/jimbydamonk/jenkins-job-builder-addons',
packages=['jenkins_jobs_addons'],
include_package_data=True,
install_requires=requirements,
license="Apache",
zip_safe=False,
keywords='jenkins ',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
test_suite='tests',
tests_require=['tox'] + test_requirements,
cmdclass={'test': Tox},
entry_points={
'jenkins_jobs.projects': [
'folder=jenkins_jobs_addons.folders:Folder',
],
'jenkins_jobs.views': [
'all=jenkins_jobs_addons.views:all_view',
'build_pipeline=jenkins_jobs_addons.views:build_pipeline_view',
'delivery_pipeline=jenkins_jobs_addons.'
'views:delivery_pipeline_view'
],
'jenkins_jobs.modules': [
'views=jenkins_jobs_addons.views:Views'
]
},
)
avg_line_length: 28.550562 | max_line_length: 75 | alphanum_fraction: 0.637151
count_classes: 583 | score_classes: 0.229437 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,077 | score_documentation: 0.423849

hexsha: 65de85c428d2e16780398c226cf7243329f834fa | size: 1,895 | ext: py | lang: Python
max_stars: arrp/utils/sanitize.py, LucaCappelletti94/arrp_dataset, bcea455a504e8ff718458ce12623c63e0314badb, ["MIT"], count=null, events=null
max_issues: arrp/utils/sanitize.py, LucaCappelletti94/arrp_dataset, bcea455a504e8ff718458ce12623c63e0314badb, ["MIT"], count=null, events=null
max_forks: arrp/utils/sanitize.py, LucaCappelletti94/arrp_dataset, bcea455a504e8ff718458ce12623c63e0314badb, ["MIT"], count=null, events=null
content:
import numpy as np
import pandas as pd
from typing import Tuple, Dict
from .load_csv import load_raw_classes, load_raw_epigenomic_data, load_raw_nucleotides_sequences
from .store_csv import store_raw_classes, store_raw_epigenomic_data, store_raw_nucleotides_sequences
from auto_tqdm import tqdm
def drop_unknown_datapoints(epigenomic_data:pd.DataFrame, nucleotides_sequences:np.ndarray, nucleotides_sequences_index:np.ndarray, classes:pd.DataFrame)->Tuple[pd.DataFrame, np.ndarray, np.ndarray, pd.DataFrame]:
"""Remove datapoints labeled as unknown (UK)."""
unknown = classes["UK"] == 1
epigenomic_data = epigenomic_data.drop(index=epigenomic_data.index[unknown])
nucleotides_sequences = nucleotides_sequences[~unknown]
nucleotides_sequences_index = nucleotides_sequences_index[~unknown]
classes = classes.drop(index=classes.index[unknown])
classes = classes.drop(columns=["UK"])
return epigenomic_data, nucleotides_sequences, nucleotides_sequences_index, classes
def sanitize(target:str, settings:Dict):
for cell_line in tqdm(settings["cell_lines"], desc="Sanitizing data"):
classes = load_raw_classes(target, cell_line)
if "UK" not in classes.columns:
continue
epigenomic_data = load_raw_epigenomic_data(target, cell_line)
nucleotides_sequences, nucleotides_sequences_index, nucleotides_sequences_columns = load_raw_nucleotides_sequences(target, cell_line)
epigenomic_data, nucleotides_sequences, nucleotides_sequences_index, classes = drop_unknown_datapoints(epigenomic_data, nucleotides_sequences, nucleotides_sequences_index, classes)
store_raw_epigenomic_data(target, cell_line, epigenomic_data)
store_raw_nucleotides_sequences(target, cell_line, nucleotides_sequences, nucleotides_sequences_index, nucleotides_sequences_columns)
store_raw_classes(target, cell_line, classes)
avg_line_length: 67.678571 | max_line_length: 213 | alphanum_fraction: 0.803694
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 89 | score_documentation: 0.046966

hexsha: 65ded556650f5e35ee3489143d332a0dbd1e324c | size: 7,857 | ext: py | lang: Python
max_stars: plugin.video.plexodus/resources/lib/indexers/fanarttv.py, MR-Unknown-Cm/addons, 8df1ebe58c95620bb02a05dbae7bf37954915cbd, ["Apache-2.0"], count=1, events=2020-03-03T10:01:21.000Z to 2020-03-03T10:01:21.000Z
max_issues: plugin.video.plexodus/resources/lib/indexers/fanarttv.py, MR-Unknown-Cm/addons, 8df1ebe58c95620bb02a05dbae7bf37954915cbd, ["Apache-2.0"], count=null, events=null
max_forks: plugin.video.plexodus/resources/lib/indexers/fanarttv.py, MR-Unknown-Cm/addons, 8df1ebe58c95620bb02a05dbae7bf37954915cbd, ["Apache-2.0"], count=null, events=null
content:
# -*- coding: utf-8 -*-
'''
plexOdus Add-on
'''
import json
from resources.lib.modules import client
from resources.lib.modules import control
user = control.setting('fanart.tv.user')
if user == '' or user is None:
user = 'cf0ebcc2f7b824bd04cf3a318f15c17d'
headers = {'api-key': '3eb5ed2c401a206391ea8d1a0312c347'}
if not user == '':
headers.update({'client-key': user})
base_url = "http://webservice.fanart.tv/v3/%s/%s"
lang = control.apiLanguage()['trakt']
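# Each artwork type below follows the same pattern: keep entries matching the
# configured language (plus language-neutral entries), sort by likes in
# descending order, and take the top URL; '0' marks missing artwork.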
def get_tvshow_art(tvdb):
url = base_url % ('tv', '%s')
try:
art = client.request(url % tvdb, headers=headers, timeout='30', error=True)
art = json.loads(art)
except:
return None
try:
poster2 = art['tvposter']
poster2 = [(x['url'], x['likes']) for x in poster2 if x.get('lang') == lang] + [(x['url'], x['likes']) for x in poster2 if x.get('lang') == '']
poster2 = [(x[0], x[1]) for x in poster2]
poster2 = sorted(poster2, key=lambda x: int(x[1]), reverse=True)
poster2 = [x[0] for x in poster2][0]
poster2 = poster2.encode('utf-8')
except:
poster2 = '0'
try:
fanart2 = art['showbackground']
fanart2 = [(x['url'], x['likes']) for x in fanart2 if x.get('lang') == lang] + [(x['url'], x['likes']) for x in fanart2 if x.get('lang') == '']
fanart2 = [(x[0], x[1]) for x in fanart2]
fanart2 = sorted(fanart2, key=lambda x: int(x[1]), reverse=True)
fanart2 = [x[0] for x in fanart2][0]
fanart2 = fanart2.encode('utf-8')
except:
        fanart2 = '0'
try:
banner2 = art['tvbanner']
banner2 = [(x['url'], x['likes']) for x in banner2 if x.get('lang') == lang] + [(x['url'], x['likes']) for x in banner2 if x.get('lang') == '']
banner2 = [(x[0], x[1]) for x in banner2]
banner2 = sorted(banner2, key=lambda x: int(x[1]), reverse=True)
banner2 = [x[0] for x in banner2][0]
banner2 = banner2.encode('utf-8')
except:
banner2 = '0'
try:
if 'hdtvlogo' in art:
clearlogo = art['hdtvlogo']
else:
clearlogo = art['clearlogo']
clearlogo = [(x['url'], x['likes']) for x in clearlogo if x.get('lang') == lang] + [(x['url'], x['likes']) for x in clearlogo if x.get('lang') == '']
clearlogo = [(x[0], x[1]) for x in clearlogo]
clearlogo = sorted(clearlogo, key=lambda x: int(x[1]), reverse=True)
clearlogo = [x[0] for x in clearlogo][0]
clearlogo = clearlogo.encode('utf-8')
except:
clearlogo = '0'
try:
if 'hdclearart' in art:
clearart = art['hdclearart']
else:
clearart = art['clearart']
clearart = [(x['url'], x['likes']) for x in clearart if x.get('lang') == lang] + [(x['url'], x['likes']) for x in clearart if x.get('lang') == '']
clearart = [(x[0], x[1]) for x in clearart]
clearart = sorted(clearart, key=lambda x: int(x[1]), reverse=True)
clearart = [x[0] for x in clearart][0]
clearart = clearart.encode('utf-8')
except:
clearart = '0'
try:
if 'tvthumb' in art:
landscape = art['tvthumb']
else:
landscape = art['showbackground']
landscape = [(x['url'], x['likes']) for x in landscape if x.get('lang') == lang] + [(x['url'], x['likes']) for x in landscape if x.get('lang') == '']
landscape = [(x[0], x[1]) for x in landscape]
landscape = sorted(landscape, key=lambda x: int(x[1]), reverse=True)
landscape = [x[0] for x in landscape][0]
landscape = landscape.encode('utf-8')
except:
landscape = '0'
extended_art = {'extended': True, 'poster2': poster2, 'banner2': banner2, 'fanart2': fanart2, 'clearlogo': clearlogo, 'clearart': clearart, 'landscape': landscape}
return extended_art
def get_movie_art(imdb):
url = base_url % ('movies', '%s')
try:
art = client.request(url % imdb, headers=headers, timeout='30', error=True)
art = json.loads(art)
except:
return None
try:
poster2 = art['movieposter']
poster2 = [(x['url'], x['likes']) for x in poster2 if x.get('lang') == lang] + [(x['url'], x['likes']) for x in poster2 if x.get('lang') == '']
poster2 = [(x[0], x[1]) for x in poster2]
poster2 = sorted(poster2, key=lambda x: int(x[1]), reverse=True)
poster2 = [x[0] for x in poster2][0]
poster2 = poster2.encode('utf-8')
except:
poster2 = '0'
try:
if 'moviebackground' in art:
fanart2 = art['moviebackground']
else:
fanart2 = art['moviethumb']
fanart2 = [(x['url'], x['likes']) for x in fanart2 if x.get('lang') == lang] + [(x['url'], x['likes']) for x in fanart2 if x.get('lang') == '']
fanart2 = [(x[0], x[1]) for x in fanart2]
fanart2 = sorted(fanart2, key=lambda x: int(x[1]), reverse=True)
fanart2 = [x[0] for x in fanart2][0]
fanart2 = fanart2.encode('utf-8')
except:
fanart2 = '0'
try:
banner2 = art['moviebanner']
banner2 = [(x['url'], x['likes']) for x in banner2 if x.get('lang') == lang] + [(x['url'], x['likes']) for x in banner2 if x.get('lang') == '']
banner2 = [(x[0], x[1]) for x in banner2]
banner2 = sorted(banner2, key=lambda x: int(x[1]), reverse=True)
banner2 = [x[0] for x in banner2][0]
banner2 = banner2.encode('utf-8')
except:
banner2 = '0'
try:
if 'hdmovielogo' in art:
clearlogo = art['hdmovielogo']
else:
clearlogo = art['movielogo']
clearlogo = [(x['url'], x['likes']) for x in clearlogo if x.get('lang') == lang] + [(x['url'], x['likes']) for x in clearlogo if x.get('lang') == '']
clearlogo = [(x[0], x[1]) for x in clearlogo]
clearlogo = sorted(clearlogo, key=lambda x: int(x[1]), reverse=True)
clearlogo = [x[0] for x in clearlogo][0]
clearlogo = clearlogo.encode('utf-8')
except:
clearlogo = '0'
try:
if 'hdmovieclearart' in art:
clearart = art['hdmovieclearart']
else:
clearart = art['movieart']
clearart = [(x['url'], x['likes']) for x in clearart if x.get('lang') == lang] + [(x['url'], x['likes']) for x in clearart if x.get('lang') == '']
clearart = [(x[0], x[1]) for x in clearart]
clearart = sorted(clearart, key=lambda x: int(x[1]), reverse=True)
clearart = [x[0] for x in clearart][0]
clearart = clearart.encode('utf-8')
except:
clearart = '0'
try:
discart = art['moviedisc']
discart = [(x['url'], x['likes']) for x in discart if x.get('lang') == lang] + [(x['url'], x['likes']) for x in discart if x.get('lang') == '']
discart = [(x[0], x[1]) for x in discart]
discart = sorted(discart, key=lambda x: int(x[1]), reverse=True)
discart = [x[0] for x in discart][0]
discart = discart.encode('utf-8')
except:
discart = '0'
try:
if 'moviethumb' in art:
landscape = art['moviethumb']
else:
landscape = art['moviebackground']
landscape = [(x['url'], x['likes']) for x in landscape if x.get('lang') == lang] + [(x['url'], x['likes']) for x in landscape if x.get('lang') == '']
landscape = [(x[0], x[1]) for x in landscape]
landscape = sorted(landscape, key=lambda x: int(x[1]), reverse=True)
landscape = [x[0] for x in landscape][0]
landscape = landscape.encode('utf-8')
except:
landscape = '0'
extended_art = {'extended': True, 'poster2': poster2, 'fanart2': fanart2, 'banner2': banner2, 'clearlogo': clearlogo, 'clearart': clearart, 'discart': discart, 'landscape': landscape}
return extended_art
avg_line_length: 38.704433 | max_line_length: 187 | alphanum_fraction: 0.54881
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,343 | score_documentation: 0.17093

hexsha: 65df761ba100f14026cfc9d900fd8f340f52bd34 | size: 3,599 | ext: py | lang: Python
max_stars: nuqql/conversation/helper.py, hwipl/nuqql, 410ea5bd42e455d656b1b34612c3b0d5a0b433ef, ["MIT"], count=3, events=2019-04-15T18:33:36.000Z to 2019-04-21T19:18:10.000Z
max_issues: nuqql/conversation/helper.py, hwipl/nuqql, 410ea5bd42e455d656b1b34612c3b0d5a0b433ef, ["MIT"], count=15, events=2019-04-15T18:35:56.000Z to 2019-09-14T08:24:32.000Z
max_forks: nuqql/conversation/helper.py, hwipl/nuqql, 410ea5bd42e455d656b1b34612c3b0d5a0b433ef, ["MIT"], count=1, events=2019-06-16T12:00:30.000Z to 2019-06-16T12:00:30.000Z
content:
"""
nuqql conversation helpers
"""
import datetime
import logging
from typing import TYPE_CHECKING
import nuqql.win
from .conversation import CONVERSATIONS
from .logmessage import LogMessage
if TYPE_CHECKING: # imports for typing
# pylint: disable=cyclic-import
from nuqql.backend import Backend # noqa
logger = logging.getLogger(__name__)
def remove_backend_conversations(backend: "Backend") -> None:
"""
    Remove all conversations belonging to the backend
"""
logger.debug("removing all conversations of backend %s", backend.name)
for conv in CONVERSATIONS[:]:
if conv.backend == backend:
conv.wins.list_win.remove(conv)
conv.wins.list_win.redraw()
logger.debug("removed conversation %s of backend %s",
conv.name, backend.name)
def log_main_window(msg: str) -> None:
"""
Log message to main windows
"""
logger.debug("logging message to main window: %s", msg)
now = datetime.datetime.now()
log_msg = LogMessage(now, "nuqql", msg)
nuqql.win.MAIN_WINS["log"].add(log_msg)
def log_nuqql_conv(msg: str) -> None:
"""
Log message to the nuqql conversation
"""
logger.debug("logging message to nuqql conversation: %s", msg)
for conv in CONVERSATIONS:
if conv.name == "nuqql":
conv.log("nuqql", msg)
return
def resize_main_window() -> None:
"""
Resize main window
"""
logger.debug("resizing main window")
# get main win
screen = nuqql.win.MAIN_WINS["screen"]
# get new maxima
max_y, max_x = screen.getmaxyx()
# redraw main windows
screen.clear()
screen.refresh()
# redraw conversation windows
found_active = False
for conv in CONVERSATIONS:
# resize and move conversation windows
if conv.wins.list_win:
size_y, size_x = conv.wins.list_win.config.get_size()
conv.wins.list_win.resize_win(size_y, size_x)
if conv.wins.log_win:
# TODO: move zoom/resizing to win.py?
if conv.wins.log_win.state.zoomed:
size_y, size_x = max_y, max_x
pos_y, pos_x = 0, 0
conv.wins.log_win.state.pad_y = 0 # reset pad position
else:
size_y, size_x = conv.wins.log_win.config.get_size()
pos_y, pos_x = conv.wins.log_win.config.get_pos()
conv.wins.log_win.resize_win(size_y, size_x)
conv.wins.log_win.move_win(pos_y, pos_x)
if conv.wins.input_win:
size_y, size_x = conv.wins.input_win.config.get_size()
conv.wins.input_win.resize_win(size_y, size_x)
pos_y, pos_x = conv.wins.input_win.config.get_pos()
conv.wins.input_win.move_win(pos_y, pos_x)
# redraw active conversation windows
if conv.is_active():
found_active = True
conv.wins.list_win.redraw()
conv.wins.input_win.redraw()
conv.wins.log_win.redraw()
# if there are no active conversations, redraw nuqql main windows
if not found_active:
# list win
list_win = nuqql.win.MAIN_WINS["list"]
size_y, size_x = list_win.config.get_size()
list_win.resize_win(size_y, size_x)
list_win.redraw()
# log main win
log_win = nuqql.win.MAIN_WINS["log"]
size_y, size_x = log_win.config.get_size()
pos_y, pos_x = log_win.config.get_pos()
log_win.resize_win(size_y, size_x)
log_win.move_win(pos_y, pos_x)
log_win.redraw()
avg_line_length: 29.260163 | max_line_length: 74 | alphanum_fraction: 0.624618
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 821 | score_documentation: 0.228119

hexsha: 65df788e5e4510c44fcdee2981d1538a1d6e2abd | size: 801 | ext: py | lang: Python
max_stars: tests/gcs_test.py, rishi1111/vaex, b3516201d04e9277b8918dadab9df33a7c83c01a, ["MIT"], count=1, events=2020-08-31T17:53:01.000Z to 2020-08-31T17:53:01.000Z
max_issues: tests/gcs_test.py, rishi1111/vaex, b3516201d04e9277b8918dadab9df33a7c83c01a, ["MIT"], count=null, events=null
max_forks: tests/gcs_test.py, rishi1111/vaex, b3516201d04e9277b8918dadab9df33a7c83c01a, ["MIT"], count=null, events=null
content:
import vaex
import pytest
@pytest.mark.skipif(vaex.utils.devmode, reason='runs too slow when developing')
def test_gcs():
df = vaex.open('gs://vaex-data/testing/xys.hdf5?cache=false&token=anon')
assert df.x.tolist() == [1, 2]
assert df.y.tolist() == [3, 4]
assert df.s.tolist() == ['5', '6']
df = vaex.open('gs://vaex-data/testing/xys.hdf5?cache=true&token=anon')
assert df.x.tolist() == [1, 2]
assert df.y.tolist() == [3, 4]
assert df.s.tolist() == ['5', '6']
@pytest.mark.skipif(vaex.utils.devmode, reason='runs too slow when developing')
def test_gcs_masked():
df = vaex.open('gs://vaex-data/testing/xys-masked.hdf5?cache=false&token=anon')
assert df.x.tolist() == [1, None]
assert df.y.tolist() == [None, 4]
assert df.s.tolist() == ['5', None]
| 33.375
| 83
| 0.627965
| 0
| 0
| 0
| 0
| 769
| 0.96005
| 0
| 0
| 251
| 0.313358
|
65dfc680b069d19bcf150f9f7a0bdfd6384fb313
| 388
|
py
|
Python
|
arcutils/const.py
|
zhuitrec/django-arcutils
|
4079ef641f43baab4cda4681b1f76e320f12eb38
|
[
"MIT"
] | null | null | null |
arcutils/const.py
|
zhuitrec/django-arcutils
|
4079ef641f43baab4cda4681b1f76e320f12eb38
|
[
"MIT"
] | null | null | null |
arcutils/const.py
|
zhuitrec/django-arcutils
|
4079ef641f43baab4cda4681b1f76e320f12eb38
|
[
"MIT"
] | null | null | null |
"""Constants."""
# A ``None``-ish constant for use where ``None`` may be a valid value.
NOT_SET = type('NOT_SET', (), {
'__bool__': (lambda self: False),
'__str__': (lambda self: 'NOT_SET'),
'__repr__': (lambda self: 'NOT_SET'),
'__copy__': (lambda self: self),
})()
# An alias for NOT_SET that may be more semantically-correct in some
# contexts.
NO_DEFAULT = NOT_SET
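# A minimal usage sketch (illustrative only; ``lookup`` and its arguments
# are hypothetical, not part of this module). The sentinel distinguishes
# "no default supplied" from an explicit default of ``None``:
#
#     def lookup(mapping, key, default=NO_DEFAULT):
#         if key in mapping:
#             return mapping[key]
#         if default is NOT_SET:
#             raise KeyError(key)
#         return default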
| 25.866667
| 70
| 0.639175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 231
| 0.595361
|
65e0bbe0d695a274843f413dfb11aa1bde11659d
| 340
|
py
|
Python
|
students/K33422/Elizaveta_Makhotina/labs/lab1/3/server_html.py
|
agentofknowledge/ITMO_ICT_WebDevelopment_2020-2021
|
7d5eab0d68af378083f21473cbbd5e5def6aa60a
|
[
"MIT"
] | 4
|
2020-09-03T15:41:42.000Z
|
2021-12-24T15:28:20.000Z
|
students/K33422/Elizaveta_Makhotina/labs/lab1/3/server_html.py
|
agentofknowledge/ITMO_ICT_WebDevelopment_2020-2021
|
7d5eab0d68af378083f21473cbbd5e5def6aa60a
|
[
"MIT"
] | 48
|
2020-09-13T20:22:42.000Z
|
2021-04-30T11:13:30.000Z
|
students/K33422/Elizaveta_Makhotina/labs/lab1/3/server_html.py
|
agentofknowledge/ITMO_ICT_WebDevelopment_2020-2021
|
7d5eab0d68af378083f21473cbbd5e5def6aa60a
|
[
"MIT"
] | 69
|
2020-09-06T10:32:37.000Z
|
2021-11-28T18:13:17.000Z
|
import socket
print("Waiting for connections...")
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', 9090))
sock.listen()
n_sock, addr = sock.accept()
print("Client connected! Sending HTTP-message")
n_sock.send(
b"HTTP/1.0 200 OK\nContent-Type: text/html\n\n" + open("index.html", "rb").read()
)
n_sock.close()
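# A minimal client sketch for manual testing (illustrative only; assumes the
# server above is running on the same machine):
#
#     import socket
#     client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     client.connect(('localhost', 9090))
#     print(client.recv(4096).decode())
#     client.close()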
| 18.888889
| 85
| 0.694118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 133
| 0.391176
|
65e1870d1694528b9c38bfaea11b991273afa141
| 2,998
|
py
|
Python
|
tests/test_private_storage.py
|
glasslion/django-qiniu-storage
|
b046ec0b67ebcf8cd9eb09c60f7db4a7e4fab7ad
|
[
"MIT"
] | 209
|
2015-01-04T09:24:42.000Z
|
2022-03-20T12:29:05.000Z
|
tests/test_private_storage.py
|
manlan2/django-qiniu-storage
|
b046ec0b67ebcf8cd9eb09c60f7db4a7e4fab7ad
|
[
"MIT"
] | 39
|
2015-04-10T05:38:07.000Z
|
2021-09-09T02:26:54.000Z
|
tests/test_private_storage.py
|
manlan2/django-qiniu-storage
|
b046ec0b67ebcf8cd9eb09c60f7db4a7e4fab7ad
|
[
"MIT"
] | 69
|
2015-03-03T14:31:20.000Z
|
2021-10-11T08:31:25.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, print_function
from datetime import datetime
import os
from os.path import dirname, join
import sys
import time
import unittest
import uuid
import logging
LOGGING_FORMAT = '\n%(levelname)s %(asctime)s %(message)s'
logging.basicConfig(level=logging.INFO, format=LOGGING_FORMAT)
logger = logging.getLogger(__name__)
import six
import django
from requests.exceptions import ConnectionError
from qiniu import BucketManager
from .utils import retry
# Add repo/demo_site to sys.path
DEMO_SITE_DIR = join(dirname(dirname(__file__)), 'demo_site')
sys.path.append(DEMO_SITE_DIR)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "demo_site.settings")
try:
django.setup()
except AttributeError:
# Setup isn't necessary in Django < 1.7
pass
from django.conf import settings
from qiniustorage.backends import QiniuPrivateStorage, QiniuFile, get_qiniu_config
from qiniustorage.utils import QiniuError
USING_TRAVIS = os.environ.get('USING_TRAVIS', None) is not None  # True when the env var is set
UNIQUE_PATH = str(uuid.uuid4())
class QiniuStorageTest(unittest.TestCase):
def setUp(self):
self.storage = QiniuPrivateStorage(
bucket_name=get_qiniu_config('QINIU_PRIVATE_BUCKET_NAME'),
bucket_domain=get_qiniu_config('QINIU_PRIVATE_BUCKET_DOMAIN'),
)
def test_read_file(self):
ASSET_FILE_NAMES = [u'Read.txt', u'读.txt']
for assert_file_name in ASSET_FILE_NAMES:
REMOTE_PATH = join(UNIQUE_PATH, assert_file_name)
test_file = six.BytesIO()
test_file.write(u"你好世界 Hello World".encode('utf-8'))
test_file.seek(0)
self.storage.save(REMOTE_PATH, test_file)
fil = self.storage.open(REMOTE_PATH, 'r')
assert fil._is_read == False
content = fil.read()
assert content.startswith(u"你好")
assert fil._is_read == True
# Test open mode
fil = self.storage.open(REMOTE_PATH, 'rb')
bin_content = fil.read()
assert bin_content.startswith(u"你好".encode('utf-8'))
@classmethod
def teardown_class(cls):
"""Delete all files in the test bucket.
"""
storage = QiniuPrivateStorage(
bucket_name=get_qiniu_config('QINIU_PRIVATE_BUCKET_NAME'),
bucket_domain=get_qiniu_config('QINIU_PRIVATE_BUCKET_DOMAIN'),
)
auth = storage.auth
bucket = BucketManager(auth)
while True:
ret, eof, info = bucket.list(storage.bucket_name, limit=100)
if ret is None:
print(info)
break
for item in ret['items']:
name = item['key']
if six.PY2:
name = name.encode('utf-8')
ret, info = bucket.delete(storage.bucket_name, name)
if ret is None:
print(info)
if eof:
break
| 29.106796
| 82
| 0.645097
| 1,933
| 0.640915
| 0
| 0
| 859
| 0.284814
| 0
| 0
| 489
| 0.162135
|
65e1926c2e3ccf0ad609aeee24520c09fc7d1f0b
| 1,227
|
py
|
Python
|
inference_sagemaker_simple.py
|
benayas1/MNIST-deployment
|
36eab6589816ca6598a42d637755ad1432cb8b1c
|
[
"MIT"
] | null | null | null |
inference_sagemaker_simple.py
|
benayas1/MNIST-deployment
|
36eab6589816ca6598a42d637755ad1432cb8b1c
|
[
"MIT"
] | null | null | null |
inference_sagemaker_simple.py
|
benayas1/MNIST-deployment
|
36eab6589816ca6598a42d637755ad1432cb8b1c
|
[
"MIT"
] | null | null | null |
# This file implements functions model_fn, input_fn, predict_fn and output_fn.
# Function model_fn is mandatory. The other functions can be omitted so the standard sagemaker function will be used.
# An alternative to the last 3 functions is to use function transform_fn(model, data, input_content_type, output_content_type)
#
# More info on https://github.com/aws/sagemaker-inference-toolkit/tree/master/src/sagemaker_inference
#
import torch
from mnist_demo.models.model import Net
import os
from torchvision import transforms
from sagemaker_inference import (
content_types,
decoder,
encoder,
errors,
utils,
)
def model_fn(model_dir):
"""
Function used for Sagemaker to load a model. The function must have this signature. Sagemaker will look for this function.
Used only when Elastic Inference is not used.
"""
print('Loading model')
model = Net()
    with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:  # 'model.pth' is the name given in the train script
model.load_state_dict(torch.load(f))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)  # run inference on the GPU when available, fall back to CPU
print('Model loaded')
return model
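# Hedged sketches of the optional handlers mentioned in the header comment
# (illustrative only and deliberately commented out, since defining them
# would override SageMaker's defaults; the bodies are assumptions):
#
#     def input_fn(request_body, request_content_type):
#         """Deserialize the request body into a model input tensor."""
#         ...
#
#     def predict_fn(input_data, model):
#         """Run the loaded model on the deserialized input."""
#         with torch.no_grad():
#             return model(input_data)
#
#     def output_fn(prediction, response_content_type):
#         """Serialize the prediction into the response payload."""
#         ...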
| 37.181818
| 126
| 0.740016
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 748
| 0.609617
|
65e1ff2eb00e84049f3aabe94179a02fc82570ba
| 802
|
py
|
Python
|
hw/scripts/__main__.py
|
jonasblixt/mongoose
|
4f392353f42d9c9245cdb5d9511348ec40bd936f
|
[
"BSD-3-Clause"
] | 4
|
2019-07-31T17:59:14.000Z
|
2019-10-06T11:46:28.000Z
|
hw/scripts/__main__.py
|
jonasblixt/mongoose
|
4f392353f42d9c9245cdb5d9511348ec40bd936f
|
[
"BSD-3-Clause"
] | null | null | null |
hw/scripts/__main__.py
|
jonasblixt/mongoose
|
4f392353f42d9c9245cdb5d9511348ec40bd936f
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import kicad
import model
from stackups import JLCPCB6Layers
#from dram import lp4
# IMX8MM
# Diff pairs should be matched within 1ps
# CK_t/CK_c max 200 ps
# CA[5:0]
# CS[1:0] min: CK_t - 25ps, max: CK_t + 25ps
# CKE[1:0]
# DQS0_t/DQS0_c min: CK_t - 85ps, max CK_t + 85ps
# DQ[7:0] min: DQS0_t - 10ps, max DQS0_t + 10ps
# DM0
# DQS1_t/DQS1_c min: CK_t - 85ps, max CK_t + 85ps
# DQ[15:8] min: DQS1_t - 10ps, max DQS1_t + 10ps
# DM1
if __name__ == "__main__":
pcb = kicad.KicadPCB("../mongoose.kicad_pcb", JLCPCB6Layers())
# DiffPair(pcb, "_n","_p", max_delay_ps=200.0, max_skew_ps=1.0)
for net_index in pcb.get_nets().keys():
net = pcb.get_nets()[net_index]
print(net.get_name() + " dly: %.2f ps"%(net.get_delay_ps()))
| 21.675676
| 68
| 0.627182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 510
| 0.63591
|
65e2a0c64857964a543fdd7ce72cd8eee8d2cbac
| 165
|
py
|
Python
|
farms2face/subscriptions/views.py
|
dev1farms2face/f2f
|
54e58187a68574bf2bd0dfb7e58a2b416336106a
|
[
"MIT"
] | null | null | null |
farms2face/subscriptions/views.py
|
dev1farms2face/f2f
|
54e58187a68574bf2bd0dfb7e58a2b416336106a
|
[
"MIT"
] | null | null | null |
farms2face/subscriptions/views.py
|
dev1farms2face/f2f
|
54e58187a68574bf2bd0dfb7e58a2b416336106a
|
[
"MIT"
] | 2
|
2018-06-19T12:12:08.000Z
|
2018-06-25T18:45:36.000Z
|
from django.shortcuts import render
# Create your views here.
def subscribe(request):
return render(request, "subscribe.html",
{'data': {}})
| 20.625
| 44
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 47
| 0.284848
|
65e2db02f151a8da25b3c6a7203333c4f0b917f2
| 4,795
|
py
|
Python
|
scripts/runOptimizer.py
|
sschulz365/PhC_Optimization
|
9a4add4eb638d797647cabbdf0f96b29b78114f2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2
|
2017-05-13T05:33:06.000Z
|
2021-02-26T14:39:44.000Z
|
scripts/runOptimizer.py
|
sschulz365/PhC_Optimization
|
9a4add4eb638d797647cabbdf0f96b29b78114f2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
scripts/runOptimizer.py
|
sschulz365/PhC_Optimization
|
9a4add4eb638d797647cabbdf0f96b29b78114f2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
#Sean Billings, 2015
import random
import numpy
import subprocess
import constraints
from experiment import Experiment
from objectiveFunctions import WeightedSumObjectiveFunction, IdealDifferentialObjectiveFunction
from waveGuideMPBOptimizer import differentialEvolution, createPopulation, gradientDescentAlgorithm
import utilities
import math
paramMap = {}
paramMap["s1"] = 0 # First row vertical shift
paramMap["s2"] = 0 # Second row vertical shift
paramMap["s3"] = 0 # Third row vertical shift
paramMap["p1"] = 0 # First row horizontal shift
paramMap["p2"] = 0 # Second row horizontal shift
paramMap["p3"] = 0 # Third row horizontal shift
paramMap["r0"] = 0.3 # Default air-hole radius
paramMap["r1"] = 0.3 # Default first row radius
paramMap["r2"] = 0.3 # Default second row radius
paramMap["r3"] = 0.3 # Default third row radius
# absolute path to the mpb executable
mpb = "/Users/sean/documents/mpb-1.5/mpb/mpb"
# absolute path to the input ctl
inputFile = "/Users/sean/documents/W1_2D_v03.ctl.txt"
# absolute path to the output ctl
outputFile = "/Users/sean/documents/optimizerTestFile.txt"
# we define a general experiment object
# that we reuse whenever we need to make a command-line mpb call
# see experiment.py for functionality
experiment = Experiment(mpb, inputFile, outputFile)
# ex.setParams(paramVector)
experiment.setCalculationType('4') # accepts an int from 0 to 5
experiment.setBand(23)
# see constraints.py
constraintFunctions = [constraints.latticeConstraintsLD]
max_generation = 15 # number of iterations of the DE alg
population_size = 20 # number of solutions to consider in DE
random_update = 0.2 # chance of updating vector fields in DE alg
elite_size = 10 # number of solutions to store in DE, and use for GD
band = 23 # band of interest for MPB computations
# specify the weights for the IdealDifferentialObjectiveFunction
w1 = 0 #0.01 # bandwidth weight
w2 = 30 #100 # group index weight
w3 = 0 # average loss weight
w4 = 0 # BGP weight
w5 = 30 #0.002 # loss at ngo (group index) weight
w6 = 0
# these weights are used in the Objective Function to score mpb results
weights = [ w1, w2, w3, w4, w5, w6]
ideal_group_index = 30 #self.ideal_solution[0]
ideal_bandwidth = 0.007 #self.ideal_solution[1]
ideal_loss_at_group_index = 30 #self.ideal_solution[2]
ideal_bgp = 0.3 #self.ideal_solution[3]
ideal_delay = 300 #self.ideal_solution[4]
ideal = [ideal_group_index, ideal_bandwidth, ideal_loss_at_group_index, ideal_bgp, ideal_delay]
#Initialize objective function
#objFunc = IdealDifferentialObjectiveFunction(weights, experiment, ideal)
objFunc = WeightedSumObjectiveFunction(weights, experiment)
# Differential Evolution section
print "Starting Differential Evolution Optimizer"
# DEsolutions is an array of solutions generated by the DE alg
DEsolutions = differentialEvolution(constraintFunctions, objFunc,
max_generation, population_size, random_update,
paramMap, elite_size, experiment)
print "\nDifferential Evolution solutions generated"
population = DEsolutions
# test line
#population = createPopulation(constraintFunctions, population_size, paramMap)
descent_scaler = 0.2
completion_scaler = 0.1
alpha_scaler = 0.9
# Gradient Descent Section
print "\nStarting Gradient Descent Optimizer"
# GDsolutions is an array of solutions generated by the GD algorithms
GDsolutions = gradientDescentAlgorithm(objFunc,
constraintFunctions,
population, descent_scaler,
completion_scaler, alpha_scaler)
population = GDsolutions
print "\nResults"
for solution in population:
print "\nSolution: " + str(solution)
results = objFunc.evaluate(solution)
solution_score = results[0]
bandwidth = results[1]
group_index = results[2]
avgLoss = results[3] # average loss
bandwidth_group_index_product = results[4] #BGP
loss_at_ng0 = results[5] # loss at group index
print "\nScore: " + str(solution_score)
print "\nNormalized Bandwidth: " + str(bandwidth)
print "\nGroup Index: " + str(group_index)
print "\nAverage Loss: " + str(avgLoss)
print "\nLoss at Group Index: " + str(loss_at_ng0)
print "\nBGP: " + str(bandwidth_group_index_product)
#print "\nComputing Fabrication Stability..."
#laplacian = utilities.computeLaplacian(weights, weightedSumObjectiveFunction, solution, experiment)
#fabrication_stability = 0
#for key in laplacian.keys():
# fabrication_stability = fabrication_stability + laplacian[key]**2
#fabrication_stability = math.sqrt(fabrication_stability)
#print "\nFabrication Stability " + str(fabrication_stability)
print "\nOptimization Complete"
| 33.531469
| 104
| 0.737018
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,447
| 0.510323
|
65e30ee0c4097461b20374e7d55ddbfdf3a3908e
| 2,781
|
py
|
Python
|
packages/w3af/w3af/core/data/url/handlers/cookie_handler.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | 3
|
2019-04-09T22:59:33.000Z
|
2019-06-14T09:23:24.000Z
|
tools/w3af/w3af/core/data/url/handlers/cookie_handler.py
|
sravani-m/Web-Application-Security-Framework
|
d9f71538f5cba6fe1d8eabcb26c557565472f6a6
|
[
"MIT"
] | null | null | null |
tools/w3af/w3af/core/data/url/handlers/cookie_handler.py
|
sravani-m/Web-Application-Security-Framework
|
d9f71538f5cba6fe1d8eabcb26c557565472f6a6
|
[
"MIT"
] | null | null | null |
"""
cookie_handler.py
Copyright 2006 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from cookielib import MozillaCookieJar
from urllib2 import HTTPCookieProcessor
class CookieHandler(HTTPCookieProcessor):
def __init__(self, default_cookiejar=None):
HTTPCookieProcessor.__init__(self, None)
# Store the different cookie jars here, these represent the different
# browser sessions that the plugins might request
self.jars = {}
if default_cookiejar is None:
default_cookiejar = MozillaCookieJar()
self.default_cookiejar = default_cookiejar
def get_cookie_jar(self, request):
"""
:param request: The HTTP request, with a browser session attribute, or
None if the default cookiejar should be used.
:return: The cookiejar instance
"""
if request.session is None:
return self.default_cookiejar
session = self.jars.get(request.session, None)
if session is not None:
return session
new_session = MozillaCookieJar()
self.jars[request.session] = new_session
return new_session
def clear_cookies(self):
"""
Clear the cookies from all cookie jars.
:return: None
"""
for cookiejar in self.jars.itervalues():
cookiejar.clear()
cookiejar.clear_session_cookies()
self.default_cookiejar.clear()
self.default_cookiejar.clear_session_cookies()
def http_request(self, request):
if not request.cookies:
# Don't do any cookie handling
return request
try:
cookiejar = self.get_cookie_jar(request)
cookiejar.add_cookie_header(request)
except AttributeError:
# https://github.com/andresriancho/w3af/issues/13842
pass
return request
def http_response(self, request, response):
cookiejar = self.get_cookie_jar(request)
cookiejar.extract_cookies(response, request)
return response
https_request = http_request
https_response = http_response
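# A minimal wiring sketch (illustrative only, not part of w3af): the handler
# plugs into an opener chain like any HTTPCookieProcessor. Note that w3af's
# own request objects carry the extra ``session`` and ``cookies`` attributes
# read above, so a plain urllib2.Request would not work unmodified.
#
#     import urllib2
#     opener = urllib2.build_opener(CookieHandler())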
| 30.56044
| 78
| 0.678533
| 1,987
| 0.714491
| 0
| 0
| 0
| 0
| 0
| 0
| 1,201
| 0.431859
|
65e31c331679c439236e3ccff96fa39b9166d6f4
| 435
|
py
|
Python
|
setup.py
|
jigyasudhingra/music-recommendation-system
|
09c66c4f207002b200d6394cf72e853741e44b6e
|
[
"MIT"
] | 2
|
2021-12-04T08:47:41.000Z
|
2021-12-06T16:54:36.000Z
|
setup.py
|
jigyasudhingra/music-recommendation-system
|
09c66c4f207002b200d6394cf72e853741e44b6e
|
[
"MIT"
] | null | null | null |
setup.py
|
jigyasudhingra/music-recommendation-system
|
09c66c4f207002b200d6394cf72e853741e44b6e
|
[
"MIT"
] | 1
|
2020-12-12T15:55:20.000Z
|
2020-12-12T15:55:20.000Z
|
import os
import urllib.request
from zipfile import ZipFile
HOME_DIRECTORY = os.path.join('datasets','raw')
ROOT_URL = 'https://os.unil.cloud.switch.ch/fma/fma_metadata.zip'
if not os.path.isdir(HOME_DIRECTORY):
os.makedirs(HOME_DIRECTORY)
zip_path = os.path.join(HOME_DIRECTORY, 'data.zip')
urllib.request.urlretrieve(ROOT_URL, zip_path)
with ZipFile(zip_path, 'r') as zip_file:
    zip_file.extractall(HOME_DIRECTORY)
print("Done!")
| 29
| 65
| 0.758621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 89
| 0.204598
|
65e488ce45eec9f35c2f059eb9cb3c3452d611ab
| 2,601
|
py
|
Python
|
src/thesis/parsers/utils.py
|
emanuelevivoli/2021-Master-Thesis-UNIFI
|
e702795f71ecf0e09fca64e72780f4f6367558c9
|
[
"MIT"
] | 1
|
2022-02-14T00:06:21.000Z
|
2022-02-14T00:06:21.000Z
|
src/thesis/parsers/utils.py
|
emanuelevivoli/2021-Master-Thesis-UNIFI
|
e702795f71ecf0e09fca64e72780f4f6367558c9
|
[
"MIT"
] | null | null | null |
src/thesis/parsers/utils.py
|
emanuelevivoli/2021-Master-Thesis-UNIFI
|
e702795f71ecf0e09fca64e72780f4f6367558c9
|
[
"MIT"
] | 1
|
2022-02-14T00:06:14.000Z
|
2022-02-14T00:06:14.000Z
|
from thesis.parsers.classes import Args
def split_args(args: Args):
dataset_args = args.datatrain
training_args = args.training
model_args = args.model
embedding_args = args.embedds
visual_args = args.visual
run_args = args.runs
log_args = args.logs
return dataset_args, training_args, model_args, embedding_args, visual_args, run_args, log_args
def tag_generation(args_: Args):
"""
Generation of the Run tags (from arguments).
"""
# empty tags' list
tags = []
dataset_args, training_args, model_args, embedding_args, visual_args, run_args, log_args = split_args(
args_)
model_args = args_.model
visual_args = args_.visual
# PAPER FIELDs
tags += visual_args.fields
# EMBEDDING network
tags += [model_args.model_name_or_path]
# PRE
tags += [f'pre.choice: {visual_args.pre.choice}']
if visual_args.pre.choice == 'UMAP':
tags += [f'UMAP.pre.n_neighbors: {visual_args.pre.umap.n_neighbors}',
f'UMAP.pre.n_components: {visual_args.pre.umap.n_components}',
f'UMAP.pre.metric: {visual_args.pre.umap.metric}']
elif visual_args.pre.choice == 'PCA':
tags += [f'PCA.pre.n_components: {visual_args.pre.pca.n_components}']
elif visual_args.pre.choice == 'TSNE':
tags += [f'TSNE.pre.n_components: {visual_args.pre.tsne.n_components}']
# CLUSTER
tags += [f'clust.choice: {visual_args.clust.choice}']
if visual_args.clust.choice == 'KMEANS':
tags += [f'KMEANS.n_clusters: {visual_args.clust.kmeans.n_clusters}']
elif visual_args.clust.choice == 'HDBSCAN':
tags += [f'HDBSCAN.min_cluster_size: {visual_args.clust.hdbscan.min_cluster_size}',
f'HDBSCAN.metric: {visual_args.clust.hdbscan.metric}',
f'HDBSCAN.cluster_selection_method: {visual_args.clust.hdbscan.cluster_selection_method}']
# POST
tags += [f'post.choice: {visual_args.post.choice}']
if visual_args.post.choice == 'UMAP':
tags += [f'UMAP.post.n_neighbors: {visual_args.post.umap.n_neighbors}',
f'UMAP.post.n_components: {visual_args.post.umap.n_components}',
f'UMAP.post.min_dist: {visual_args.post.umap.min_dist}',
f'UMAP.post.metric: {visual_args.post.umap.metric}']
elif visual_args.post.choice == 'PCA':
tags += [f'PCA.post.n_components: {visual_args.post.pca.n_components}']
elif visual_args.post.choice == 'TSNE':
tags += [f'TSNE.post.n_components: {visual_args.post.tsne.n_components}']
return tags
| 35.148649
| 107
| 0.662438
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,222
| 0.469819
|
65e524699bbb4c1ad2b8a3703eba4df80c4f8ec4
| 6,625
|
py
|
Python
|
tensorflow_examples/lite/model_maker/core/task/custom_model.py
|
Abhi1code/FaceMaskDetection
|
689abda8243665c218193384aa655c11d555c4e9
|
[
"Apache-2.0"
] | 1
|
2022-01-08T16:02:18.000Z
|
2022-01-08T16:02:18.000Z
|
tensorflow_examples/lite/model_maker/core/task/custom_model.py
|
Abhi1code/MaskDetection
|
689abda8243665c218193384aa655c11d555c4e9
|
[
"Apache-2.0"
] | 4
|
2021-06-08T21:30:20.000Z
|
2022-03-12T00:28:38.000Z
|
tensorflow_examples/lite/model_maker/core/task/custom_model.py
|
Abhi1code/MaskDetection
|
689abda8243665c218193384aa655c11d555c4e9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base custom model that is already retained by data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import os
import tempfile
import tensorflow.compat.v2 as tf
from tensorflow_examples.lite.model_maker.core import compat
DEFAULT_QUANTIZATION_STEPS = 2000
def get_representative_dataset_gen(dataset, num_steps):
def representative_dataset_gen():
"""Generates representative dataset for quantized."""
for image, _ in dataset.take(num_steps):
yield [image]
return representative_dataset_gen
class CustomModel(abc.ABC):
""""The abstract base class that represents a Tensorflow classification model."""
def __init__(self, model_spec, shuffle):
"""Initialize a instance with data, deploy mode and other related parameters.
Args:
model_spec: Specification for the model.
shuffle: Whether the data should be shuffled.
"""
self.model_spec = model_spec
self.shuffle = shuffle
self.model = None
def preprocess(self, sample_data, label):
"""Preprocess the data."""
# TODO(yuqili): remove this method once preprocess for image classifier is
# also moved to DataLoader part.
return sample_data, label
@abc.abstractmethod
def train(self, train_data, validation_data=None, **kwargs):
return
@abc.abstractmethod
def export(self, **kwargs):
return
def summary(self):
self.model.summary()
@abc.abstractmethod
def evaluate(self, data, **kwargs):
return
def _gen_dataset(self,
data,
batch_size=32,
is_training=True,
input_pipeline_context=None):
"""Generates training / validation dataset."""
# The dataset is always sharded by number of hosts.
# num_input_pipelines is the number of hosts rather than number of cores.
ds = data.dataset
if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1:
ds = ds.shard(input_pipeline_context.num_input_pipelines,
input_pipeline_context.input_pipeline_id)
ds = ds.map(
self.preprocess, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if is_training:
if self.shuffle:
ds = ds.shuffle(buffer_size=min(data.size, 100))
ds = ds.repeat()
ds = ds.batch(batch_size)
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
return ds
def _export_saved_model(self,
filepath,
overwrite=True,
include_optimizer=True,
save_format=None,
signatures=None,
options=None):
"""Saves the model to Tensorflow SavedModel or a single HDF5 file.
Args:
filepath: String, path to SavedModel or H5 file to save the model.
overwrite: Whether to silently overwrite any existing file at the target
location, or provide the user with a manual prompt.
include_optimizer: If True, save optimizer's state together.
save_format: Either 'tf' or 'h5', indicating whether to save the model to
Tensorflow SavedModel or HDF5. Defaults to 'tf' in TF 2.X, and 'h5' in
TF 1.X.
signatures: Signatures to save with the SavedModel. Applicable to the 'tf'
format only. Please see the `signatures` argument in
`tf.saved_model.save` for details.
options: Optional `tf.saved_model.SaveOptions` object that specifies
options for saving to SavedModel.
"""
if filepath is None:
raise ValueError(
"SavedModel filepath couldn't be None when exporting to SavedModel.")
self.model.save(filepath, overwrite, include_optimizer, save_format,
signatures, options)
def _export_tflite(self,
tflite_filepath,
quantized=False,
quantization_steps=None,
representative_data=None):
"""Converts the retrained model to tflite format and saves it.
Args:
tflite_filepath: File path to save tflite model.
quantized: boolean, if True, save quantized model.
quantization_steps: Number of post-training quantization calibration steps
to run. Used only if `quantized` is True.
representative_data: Representative data used for post-training
quantization. Used only if `quantized` is True.
"""
if tflite_filepath is None:
raise ValueError(
"TFLite filepath couldn't be None when exporting to tflite.")
tf.compat.v1.logging.info('Exporting to tflite model in %s.',
tflite_filepath)
temp_dir = None
if compat.get_tf_behavior() == 1:
temp_dir = tempfile.TemporaryDirectory()
save_path = os.path.join(temp_dir.name, 'saved_model')
self.model.save(save_path, include_optimizer=False, save_format='tf')
converter = tf.compat.v1.lite.TFLiteConverter.from_saved_model(save_path)
else:
converter = tf.lite.TFLiteConverter.from_keras_model(self.model)
if quantized:
if quantization_steps is None:
quantization_steps = DEFAULT_QUANTIZATION_STEPS
if representative_data is None:
raise ValueError(
'representative_data couldn\'t be None if model is quantized.')
ds = self._gen_dataset(
representative_data, batch_size=1, is_training=False)
converter.representative_dataset = tf.lite.RepresentativeDataset(
get_representative_dataset_gen(ds, quantization_steps))
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS_INT8
]
tflite_model = converter.convert()
if temp_dir:
temp_dir.cleanup()
with tf.io.gfile.GFile(tflite_filepath, 'wb') as f:
f.write(tflite_model)
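# A minimal subclass sketch (illustrative only; ``MyClassifier`` is
# hypothetical) showing the abstract surface a concrete task model fills in:
#
#     class MyClassifier(CustomModel):
#
#       def train(self, train_data, validation_data=None, **kwargs):
#         ds = self._gen_dataset(train_data, batch_size=32, is_training=True)
#         ...
#
#       def evaluate(self, data, **kwargs):
#         ...
#
#       def export(self, tflite_filepath=None, **kwargs):
#         self._export_tflite(tflite_filepath)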
| 36.401099
| 83
| 0.683321
| 5,422
| 0.818415
| 252
| 0.038038
| 221
| 0.033358
| 0
| 0
| 2,819
| 0.425509
|
65e60e80d09e1199bb195a86c8a1614239235c24
| 3,479
|
py
|
Python
|
src/architectures/nmp/stacked_nmp/stacked_fixed_nmp.py
|
isaachenrion/jets
|
59aeba81788d0741af448192d9dfb764fb97cf8d
|
[
"BSD-3-Clause"
] | 9
|
2017-10-09T17:01:52.000Z
|
2018-06-12T18:06:05.000Z
|
src/architectures/nmp/stacked_nmp/stacked_fixed_nmp.py
|
isaachenrion/jets
|
59aeba81788d0741af448192d9dfb764fb97cf8d
|
[
"BSD-3-Clause"
] | 31
|
2017-11-01T14:39:02.000Z
|
2018-04-18T15:34:24.000Z
|
src/architectures/nmp/stacked_nmp/stacked_fixed_nmp.py
|
isaachenrion/jets
|
59aeba81788d0741af448192d9dfb764fb97cf8d
|
[
"BSD-3-Clause"
] | 10
|
2017-10-17T19:23:14.000Z
|
2020-07-05T04:44:45.000Z
|
import os
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from src.architectures.readout import READOUTS
from src.architectures.embedding import EMBEDDINGS
from .attention_pooling import POOLING_LAYERS
from ..message_passing import MP_LAYERS
from ..adjacency import construct_adjacency
from src.monitors import BatchMatrixMonitor
from src.monitors import Histogram
class AbstractStackedFixedNMP(nn.Module):
def __init__(
self,
scales=None,
features=None,
hidden=None,
iters=None,
readout=None,
pooling_layer=None,
pool_first=False,
mp_layer=None,
emb_init=None,
**kwargs
):
super().__init__()
emb_kwargs = {x: kwargs[x] for x in ['act', 'wn']}
self.embedding = EMBEDDINGS['n'](dim_in=features, dim_out=hidden, n_layers=int(emb_init), **emb_kwargs)
#self.embedding = EMBEDDINGS['n'](dim_in=features, dim_out=hidden, act=kwargs.get('act', None))
mp_kwargs = {x: kwargs[x] for x in ['act', 'wn', 'update', 'message']}
MPLayer = MP_LAYERS[mp_layer]
self.nmps = nn.ModuleList(
[nn.ModuleList(
[MPLayer(hidden=hidden,**mp_kwargs) for _ in range(iters)
]
)
for _ in scales
]
)
Pool = POOLING_LAYERS[pooling_layer]
self.attn_pools = nn.ModuleList([Pool(scales[i], hidden, **kwargs) for i in range(len(scales))])
Readout = READOUTS[readout]
self.readout = Readout(hidden, hidden)
self.pool_first = pool_first
def forward(self, *args, **kwargs):
raise NotImplementedError
class StackedFixedNMP(AbstractStackedFixedNMP):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.adjs = self.set_adjacency_matrices(**kwargs)
def set_adjacency_matrices(self, scales=None, features=None, hidden=None,matrix=None, **kwargs):
m1 = construct_adjacency(
matrix=matrix,
dim_in=features,
index=str(1),
**kwargs
)
matrices = [construct_adjacency(
matrix=matrix,
dim_in=hidden,
index=str(i+2),
**kwargs
)
for i in range(len(scales) - 1)]
return nn.ModuleList([m1] + matrices)
def forward(self, jets, mask=None, **kwargs):
h = self.embedding(jets)
attns = None
#import ipdb; ipdb.set_trace()
for i, (nmp, pool, adj) in enumerate(zip(self.nmps, self.attn_pools, self.adjs)):
if i > 0:
#mask = None
dij = torch.bmm(attns, dij)
dij = torch.bmm(dij, attns.transpose(1,2))
#dij = adj(h, mask=None, **kwargs)
else:
dij = adj(jets, mask=mask, **kwargs)
if self.pool_first:
h, attns = pool(h, **kwargs)
#dij = adj(h, mask=mask)
for mp in nmp:
h = mp(h=h, mask=mask, dij=dij)
if not self.pool_first:
h, attns = pool(h, **kwargs)
out = self.readout(h)
return out
| 31.627273
| 111
| 0.540673
| 3,037
| 0.872952
| 0
| 0
| 0
| 0
| 0
| 0
| 233
| 0.066973
|
65e7b3e3e21dcd54310f063dfef34f4349a1bdff
| 533
|
py
|
Python
|
slixmpp/plugins/xep_0122/data_validation.py
|
anirudhrata/slixmpp
|
1fcee0e80a212eeb274d2f560e69099d8a61bf7f
|
[
"BSD-3-Clause"
] | 86
|
2016-07-04T13:26:02.000Z
|
2022-02-19T10:26:21.000Z
|
slixmpp/plugins/xep_0122/data_validation.py
|
anirudhrata/slixmpp
|
1fcee0e80a212eeb274d2f560e69099d8a61bf7f
|
[
"BSD-3-Clause"
] | 10
|
2016-09-30T18:55:41.000Z
|
2020-05-01T14:22:47.000Z
|
slixmpp/plugins/xep_0122/data_validation.py
|
anirudhrata/slixmpp
|
1fcee0e80a212eeb274d2f560e69099d8a61bf7f
|
[
"BSD-3-Clause"
] | 45
|
2016-09-30T18:48:41.000Z
|
2022-03-18T21:39:33.000Z
|
from slixmpp.xmlstream import register_stanza_plugin
from slixmpp.plugins import BasePlugin
from slixmpp.plugins.xep_0004 import stanza
from slixmpp.plugins.xep_0004.stanza import FormField
from slixmpp.plugins.xep_0122.stanza import FormValidation
class XEP_0122(BasePlugin):
"""
    XEP-0122: Data Forms Validation
"""
name = 'xep_0122'
description = 'XEP-0122: Data Forms Validation'
dependencies = {'xep_0004'}
stanza = stanza
def plugin_init(self):
register_stanza_plugin(FormField, FormValidation)
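# A minimal registration sketch (illustrative only): like other slixmpp
# plugins, this one is made available through the plugin registry and then
# enabled on a client by name.
#
#     from slixmpp.plugins import register_plugin
#     register_plugin(XEP_0122)
#     # client.register_plugin('xep_0122')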
| 26.65
| 58
| 0.754221
| 281
| 0.527205
| 0
| 0
| 0
| 0
| 0
| 0
| 89
| 0.166979
|
65e86aaff0a0cc5f5a6394f9f3fd03cd47bf0ab3
| 497
|
py
|
Python
|
q037.py
|
sjf/project_euler
|
8514710e2018136ba8a087ae58cba35370700f6f
|
[
"MIT"
] | null | null | null |
q037.py
|
sjf/project_euler
|
8514710e2018136ba8a087ae58cba35370700f6f
|
[
"MIT"
] | null | null | null |
q037.py
|
sjf/project_euler
|
8514710e2018136ba8a087ae58cba35370700f6f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import lib
N=1000000
sieve = lib.get_prime_sieve(N)
primes = lib.primes(N, sieve)
primes = primes[4:]
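# Project Euler 37: find the eleven primes that stay prime while digits are
# repeatedly removed from the right (first loop below) and from the left
# (second loop). 2, 3, 5 and 7 are excluded by definition, hence the
# primes[4:] slice above.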
def is_truncatable(n):
num = n
c = 0
while num:
if not sieve[num]:
return False
num = int(num / 10)
c += 1
while c:
num = n % 10**c
if not sieve[num]:
return False
c -= 1
return True
result = []
for i in primes:
if is_truncatable(i):
result.append(i)
if len(result) == 11:
break
print(sum(result))
| 15.53125
| 30
| 0.581489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.044266
|
65e8c2b06a56311edf49d920f21df0bd1cab027c
| 708
|
py
|
Python
|
StationeersSaveFileDebugTools.py
|
lostinplace/StationeersSaveFileDebugTools
|
372a2fc86a9fc3af25044a56131271b577d4d97b
|
[
"MIT"
] | null | null | null |
StationeersSaveFileDebugTools.py
|
lostinplace/StationeersSaveFileDebugTools
|
372a2fc86a9fc3af25044a56131271b577d4d97b
|
[
"MIT"
] | 1
|
2021-01-10T21:12:41.000Z
|
2021-01-10T21:14:49.000Z
|
StationeersSaveFileDebugTools.py
|
lostinplace/StationeersSaveFileDebugTools
|
372a2fc86a9fc3af25044a56131271b577d4d97b
|
[
"MIT"
] | null | null | null |
import click
@click.group()
def cli():
pass
@cli.command("restore_atmo")
@click.argument('current_file')
@click.argument('backup_file')
@click.argument('new_file_path')
def restore_atmo(current_file, backup_file, new_file_path):
from Utils.AtmoFileProcessing.RestoreAtmo import create_restored_world_file
create_restored_world_file(current_file, backup_file, new_file_path)
@cli.command("generate_start_condition")
@click.argument('world')
def generate_start_condition(world):
from Utils.StartConditionProcessing.StartConditionGenerator import convert_world_file_to_startconditions
out = convert_world_file_to_startconditions(world)
print(out)
if __name__ == '__main__':
cli()
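# Example invocations (illustrative; the file paths are hypothetical):
#
#     python StationeersSaveFileDebugTools.py restore_atmo world.xml world.bak restored.xml
#     python StationeersSaveFileDebugTools.py generate_start_condition world.xml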
| 26.222222
| 108
| 0.79661
| 0
| 0
| 0
| 0
| 648
| 0.915254
| 0
| 0
| 95
| 0.134181
|
65e9978ee2200931e2a3bf2760b84a179ae2b472
| 3,610
|
py
|
Python
|
RL/get_depthmaps.py
|
RECON-Labs-Inc/svox2
|
2946c1573fc4c8c8f378bf8154c29ba8d62af927
|
[
"BSD-2-Clause"
] | null | null | null |
RL/get_depthmaps.py
|
RECON-Labs-Inc/svox2
|
2946c1573fc4c8c8f378bf8154c29ba8d62af927
|
[
"BSD-2-Clause"
] | null | null | null |
RL/get_depthmaps.py
|
RECON-Labs-Inc/svox2
|
2946c1573fc4c8c8f378bf8154c29ba8d62af927
|
[
"BSD-2-Clause"
] | null | null | null |
import sys
from pathlib import Path
from datetime import datetime
import argparse
import json
import torch
from torchvision.utils import save_image
import torchvision
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
import open3d as o3d
from svox2 import *
from pyvox.models import Vox, Color
from pyvox.writer import VoxWriter
from importlib import reload as reload
import svox2  # bind the package name itself so reload() below can resolve it
reload(svox2)
from svox2 import *
#TODO> modify this:
sys.path.append("/workspace/svox2/opt")
from util.dataset import datasets
from util import config_util
# Our nice tools
sys.path.append("/workspace/aseeo-research")
import RLResearch.utils.depth_utils as du
data_dir = "/workspace/datasets/dog"
exp_name = "std"
checkpoint_path = Path(data_dir)/"ckpt"/exp_name/"ckpt.npz"
# device = "cuda:0" if torch.cuda.is_available() else "cpu"
device = "cpu"
# Load arguments from json
json_config_path = Path(data_dir)/"ckpt"/exp_name/"args.json"
parser = argparse.ArgumentParser()
with open(str(json_config_path.resolve()), 'rt') as f:
t_args = argparse.Namespace()
t_args.__dict__.update(json.load(f))
args = parser.parse_args(namespace=t_args)
# parser = argparse.ArgumentParser()
# args = parser.parse_args([])
dataset = datasets["nsvf"](
data_dir,
split="test",
device=device,
factor=1,
n_images=None,
**config_util.build_data_options(args))
grid = SparseGrid.load(str(checkpoint_path.resolve()))
# grid.requires_grad_(True)
config_util.setup_render_opts(grid.opt, args)
print('Render options', grid.opt)
## Single camera position
img_id = 0
c2w = dataset.c2w[img_id].to(device = device)
print("Rendering pose:", img_id)
print(c2w)
print("ndc")
print(dataset.ndc_coeffs)
cam = svox2.Camera(c2w,
dataset.intrins.get('fx', img_id),
dataset.intrins.get('fy', img_id),
dataset.intrins.get('cx', img_id),
dataset.intrins.get('cy', img_id),
width=dataset.get_image_size(img_id)[1],
height=dataset.get_image_size(img_id)[0],
ndc_coeffs=dataset.ndc_coeffs)
print("Cam is cuda", cam.is_cuda)
print("Using thres", args.log_depth_map_use_thresh)
# NOTE: no_grad enables the fast image-level rendering kernel for cuvol backend only
# other backends will manually generate rays per frame (slow)
with torch.no_grad():
depth_img = grid.volume_render_depth_image(cam,
args.log_depth_map_use_thresh if
args.log_depth_map_use_thresh else None
, batch_size=500)
## Export colored pointcloud to check in meshlab
depth_o3d = o3d.geometry.Image(depth_img.numpy())
intrinsics = o3d.camera.PinholeCameraIntrinsic(
cam.width,
cam.height,
dataset.intrins.get('fx', img_id),
dataset.intrins.get('fy', img_id),
dataset.intrins.get('cx', img_id),
    dataset.intrins.get('cy', img_id)
)
pointcloud = o3d.geometry.PointCloud.create_from_depth_image(depth_o3d, intrinsics, stride = 8)
o3d.io.write_point_cloud("/workspace/data/pointcloud.ply", pointcloud)
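# Quick inspection sketch (illustrative only): the exported cloud can also be
# viewed directly with Open3D's built-in viewer instead of Meshlab.
#
#     o3d.visualization.draw_geometries([pointcloud])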
a = 5
| 30.59322
| 95
| 0.645983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 692
| 0.19169
|
65e9a81560bd3bd5d8fd30d98016ea9c330e4eba
| 6,350
|
py
|
Python
|
monty/exts/info/global_source.py
|
onerandomusername/monty-python
|
fcd8b2827eb9bbb2a05d28f80ac9e215589f03f7
|
[
"MIT"
] | 20
|
2021-12-31T10:17:20.000Z
|
2022-03-31T04:16:17.000Z
|
monty/exts/info/global_source.py
|
onerandomusername/monty-bot
|
b1c769e44b56bc45f37fc809064571d59c80db27
|
[
"MIT"
] | 1
|
2022-03-13T22:34:33.000Z
|
2022-03-13T22:34:52.000Z
|
monty/exts/info/global_source.py
|
onerandomusername/monty-bot
|
b1c769e44b56bc45f37fc809064571d59c80db27
|
[
"MIT"
] | 3
|
2022-01-02T15:21:46.000Z
|
2022-03-05T09:37:54.000Z
|
from __future__ import annotations
import os
from typing import TYPE_CHECKING, Final, List
from urllib.parse import urldefrag
import disnake
from disnake.ext import commands, tasks
from monty.log import get_logger
from monty.utils.helpers import encode_github_link
from monty.utils.messages import DeleteButton
if TYPE_CHECKING:
from monty.bot import Monty
from monty.exts.eval import Snekbox
logger = get_logger(__name__)
CODE_FILE = os.path.dirname(__file__) + "/_global_source_snekcode.py"
class GlobalSource(commands.Cog):
"""Global source for python objects."""
def __init__(self, bot: Monty):
self.bot = bot
with open(CODE_FILE, "r") as f:
self.code: Final[str] = f.read()
def cog_unload(self) -> None:
"""Stop the running task on unload if it is running."""
self.refresh_code.stop()
@property
def snekbox(self) -> Snekbox:
"""Return the snekbox cog where the code is ran."""
if snekbox := self.bot.get_cog("Snekbox"):
return snekbox
raise RuntimeError("Snekbox is not loaded")
@commands.command(name="globalsource", aliases=("gs",), hidden=True)
async def globalsource(self, ctx: commands.Context, object: str) -> None:
"""Get the source of a python object."""
object = object.strip("`")
async with ctx.typing():
result = await self.snekbox.post_eval(self.code.replace("REPLACE_THIS_STRING_WITH_THE_OBJECT_NAME", object))
# exit codes:
# 0: success
# 1: indeterminate error
# 2: module not resolvable
# 3: attribute does not exist
# 4: invalid characters, not a valid object path
# 5: dynamically created object
# 6: is a builtin object, prints module name
# 7: invalid metadata
# 8: unsupported package (does not use github)
# 9: module found but cannot find class definition
text = result["stdout"].strip()
if self.refresh_code.is_running():
logger.debug(text)
returncode = result["returncode"]
link = ""
if returncode == 0:
link = text.rsplit("#" * 80)[-1].strip()
text = f"Source of `{object}`:\n<{link}>"
elif returncode == 1:
            # generic exception occurred
logger.exception(result["stdout"])
raise Exception("Snekbox returned an error.")
elif returncode == 2:
text = "The module you provided was not resolvable to an installed module."
elif returncode == 3:
text = "The attribute you are looking for does not exist. Check for misspellings and try again."
elif returncode == 4:
text = "The object path you provided is invalid."
elif returncode == 5:
text = "That object exists, but is dynamically created."
elif returncode == 6:
text = (
f"`{object}` is a builtin object/implemented in C. "
"It is not currently possible to get source of those objects."
)
elif returncode == 7:
text = "The metadata for the provided module is invalid."
elif returncode == 8:
text = "The provided module is not supported."
elif returncode == 9:
text = "The definition could not be found."
else:
text = "Something went wrong."
components: List[disnake.ui.action_row.Components] = []
if isinstance(ctx, commands.Context):
components.append(DeleteButton(ctx.author, initial_message=ctx.message))
else:
components.append(DeleteButton(ctx.author))
if link:
components.append(disnake.ui.Button(style=disnake.ButtonStyle.link, url=link, label="Go to Github"))
custom_id = encode_github_link(link)
if frag := (urldefrag(link)[1]):
frag = frag.replace("#", "").replace("L", "")
if "-" in frag:
num1, num2 = frag.split("-")
show_source = int(num2) - int(num1) <= 21
else:
show_source = True
if show_source:
components.append(
disnake.ui.Button(style=disnake.ButtonStyle.blurple, label="Expand", custom_id=custom_id)
)
await ctx.reply(
text,
allowed_mentions=disnake.AllowedMentions(everyone=False, users=False, roles=False, replied_user=True),
components=components,
)
@tasks.loop(seconds=1)
async def refresh_code(self, ctx: commands.Context, query: str) -> None:
"""Refresh the internal code every second."""
modified = os.stat(CODE_FILE).st_mtime
if modified <= self.last_modified:
return
self.last_modified = modified
with open(CODE_FILE, "r") as f:
self.code = f.read()
logger.debug("Updated global_source code")
try:
await self.globalsource(ctx, query)
except Exception as e:
self.bot.dispatch("command_error", ctx, e)
@refresh_code.before_loop
async def before_refresh_code(self) -> None:
"""Set the current last_modified stat to zero starting the task."""
self.last_modified = 0
@commands.command("globalsourcedebug", hidden=True)
@commands.is_owner()
async def globalsourcedebug(self, ctx: commands.Context, query: str = None) -> None:
"""Refresh the existing code and reinvoke it continually until the command is run again."""
if self.refresh_code.is_running():
if query:
self.refresh_code.restart(ctx, query)
await ctx.send("Restarted the global source debug task.")
else:
self.refresh_code.stop()
await ctx.send("Cancelled the internal global source debug task.")
return
if not query:
class FakeParam:
name = "query"
raise commands.MissingRequiredArgument(FakeParam)
await ctx.send("Starting the global source debug task.")
self.refresh_code.start(ctx, query)
def setup(bot: Monty) -> None:
"""Add the global source cog to the bot."""
bot.add_cog(GlobalSource(bot))
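# Invocation sketch (illustrative; the command prefix depends on the bot's
# configuration): `<prefix>gs pathlib.Path.resolve` resolves the object in
# snekbox and replies with a link to its source on GitHub.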
| 37.352941
| 120
| 0.601732
| 5,724
| 0.901417
| 0
| 0
| 5,340
| 0.840945
| 4,896
| 0.771024
| 1,775
| 0.279528
|
65edc49e48e5587c1006c65ecaf10e38136be5e1
| 18,608
|
py
|
Python
|
grr/core/grr_response_core/lib/rdfvalue_test.py
|
khanhgithead/grr
|
8ad8a4d2c5a93c92729206b7771af19d92d4f915
|
[
"Apache-2.0"
] | 4,238
|
2015-01-01T15:34:50.000Z
|
2022-03-31T08:18:05.000Z
|
grr/core/grr_response_core/lib/rdfvalue_test.py
|
khanhgithead/grr
|
8ad8a4d2c5a93c92729206b7771af19d92d4f915
|
[
"Apache-2.0"
] | 787
|
2015-01-02T21:34:24.000Z
|
2022-03-02T13:26:38.000Z
|
grr/core/grr_response_core/lib/rdfvalue_test.py
|
khanhgithead/grr
|
8ad8a4d2c5a93c92729206b7771af19d92d4f915
|
[
"Apache-2.0"
] | 856
|
2015-01-02T02:50:11.000Z
|
2022-03-31T11:11:53.000Z
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests for utility classes."""
import datetime
import sys
import unittest
from absl import app
from absl.testing import absltest
from grr_response_core.lib import rdfvalue
from grr.test_lib import test_lib
long_string = (
"迎欢迎\n"
"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi luctus "
"ex sed dictum volutpat. Integer maximus, mauris at tincidunt iaculis, "
"felis magna scelerisque ex, in scelerisque est odio non nunc. "
"Suspendisse et lobortis augue. Donec faucibus tempor massa, sed dapibus"
" erat iaculis ut. Vestibulum eu elementum nulla. Nullam scelerisque "
"hendrerit lorem. Integer vitae semper metus. Suspendisse accumsan "
"dictum felis. Etiam viverra, felis sed ullamcorper vehicula, libero "
"nisl tempus dui, a porta lacus erat et erat. Morbi mattis elementum "
"efficitur. Pellentesque aliquam placerat mauris non accumsan.")
class RDFValueTest(absltest.TestCase):
"""RDFValue tests."""
def testStr(self):
"""Test RDFValue.__str__."""
self.assertEqual(str(rdfvalue.RDFInteger(1)), "1")
self.assertEqual(str(rdfvalue.RDFString(long_string)), long_string)
# TODO(hanuszczak): Current implementation of `repr` for RDF values is broken
# and not in line with Python guidelines. For example, `repr` should be
# unambiguous whereas current implementation will trim long representations
# with `...`. Moreover, the representation for most types is questionable at
# best.
#
# The implementation should be fixed and proper tests should be written.
class RDFBytesTest(absltest.TestCase):
def testFromHumanReadable(self):
string = u"zażółć gęślą jaźń"
result = rdfvalue.RDFBytes.FromHumanReadable(string)
expected = rdfvalue.RDFBytes.FromSerializedBytes(string.encode("utf-8"))
self.assertEqual(result, expected)
class RDFStringTest(absltest.TestCase):
def testFromHumanReadable(self):
string = u"pchnąć w tę łódź jeża lub ośm skrzyń fig"
result = rdfvalue.RDFString.FromHumanReadable(string)
self.assertEqual(str(result), string)
def testEqualWithBytes(self):
self.assertEqual(rdfvalue.RDFString(u"foo"), b"foo")
self.assertNotEqual(rdfvalue.RDFString(u"foo"), b"\x80\x81\x82")
def testLessThanWithBytes(self):
self.assertLess(rdfvalue.RDFString(u"abc"), b"def")
self.assertGreater(rdfvalue.RDFString(u"xyz"), b"ghi")
self.assertLess(rdfvalue.RDFString(u"012"), b"\x80\x81\x81")
# TODO: Python on Windows ships with UCS-2 by default, which does
# not properly support unicode.
@unittest.skipIf(
sys.maxunicode <= 65535,
"Your Python installation does not properly support Unicode (likely: "
"Python with no UCS4 support on Windows.")
def testLenOfEmoji(self):
self.assertLen(rdfvalue.RDFString("🚀🚀"), 2)
class RDFIntegerTest(absltest.TestCase):
def testFromHumanReadable(self):
result = rdfvalue.RDFInteger.FromHumanReadable(u"42")
self.assertEqual(result, rdfvalue.RDFInteger(42))
def testFromHumanReadablePositive(self):
result = rdfvalue.RDFInteger.FromHumanReadable(u"+108")
self.assertEqual(result, rdfvalue.RDFInteger(108))
def testFromHumanReadableNegative(self):
result = rdfvalue.RDFInteger.FromHumanReadable(u"-1337")
self.assertEqual(result, rdfvalue.RDFInteger(-1337))
def testFromHumanReadableZero(self):
result = rdfvalue.RDFInteger.FromHumanReadable(u"0")
self.assertEqual(result, rdfvalue.RDFInteger(0))
def testFromHumanReadableRaisesOnNonInteger(self):
with self.assertRaises(ValueError):
rdfvalue.RDFInteger.FromHumanReadable(u"12.3")
def testFromHumanReadableRaisesOnNonDecimal(self):
with self.assertRaises(ValueError):
rdfvalue.RDFInteger.FromHumanReadable(u"12A")
class RDFDateTimeTest(absltest.TestCase):
def testLerpMiddle(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2010-01-01")
end_time = start_time + rdfvalue.Duration.From(10, rdfvalue.DAYS)
lerped_time = rdfvalue.RDFDatetime.Lerp(
0.5, start_time=start_time, end_time=end_time)
self.assertEqual(lerped_time,
start_time + rdfvalue.Duration.From(5, rdfvalue.DAYS))
def testLerpZero(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2000-01-01")
end_time = rdfvalue.RDFDatetime.FromHumanReadable("2020-01-01")
lerped_time = rdfvalue.RDFDatetime.Lerp(
0.0, start_time=start_time, end_time=end_time)
self.assertEqual(lerped_time, start_time)
def testLerpOne(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2000-01-01")
end_time = rdfvalue.RDFDatetime.FromHumanReadable("2020-01-01")
lerped_time = rdfvalue.RDFDatetime.Lerp(
1.0, start_time=start_time, end_time=end_time)
self.assertEqual(lerped_time, end_time)
def testLerpQuarter(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2000-01-01")
end_time = start_time + rdfvalue.Duration.From(4, rdfvalue.DAYS)
lerped_time = rdfvalue.RDFDatetime.Lerp(
0.25, start_time=start_time, end_time=end_time)
self.assertEqual(lerped_time,
start_time + rdfvalue.Duration.From(1, rdfvalue.DAYS))
def testLerpRaisesTypeErrorIfTimesAreNotRDFDatetime(self):
now = rdfvalue.RDFDatetime.Now()
with self.assertRaisesRegex(TypeError, "non-datetime"):
rdfvalue.RDFDatetime.Lerp(0.0, start_time=10, end_time=now)
with self.assertRaisesRegex(TypeError, "non-datetime"):
rdfvalue.RDFDatetime.Lerp(
0.0,
start_time=now,
end_time=rdfvalue.Duration.From(1, rdfvalue.DAYS))
def testLerpRaisesValueErrorIfProgressIsNotNormalized(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2010-01-01")
end_time = rdfvalue.RDFDatetime.FromHumanReadable("2011-01-01")
with self.assertRaises(ValueError):
rdfvalue.RDFDatetime.Lerp(1.5, start_time=start_time, end_time=end_time)
with self.assertRaises(ValueError):
rdfvalue.RDFDatetime.Lerp(-0.5, start_time=start_time, end_time=end_time)
def testFloorToMinutes(self):
dt = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34:56")
expected = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34")
self.assertEqual(
dt.Floor(rdfvalue.Duration.From(60, rdfvalue.SECONDS)), expected)
def testFloorToHours(self):
dt = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34")
expected = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:00")
self.assertEqual(
dt.Floor(rdfvalue.Duration.From(1, rdfvalue.HOURS)), expected)
def testFloorToDays(self):
dt = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34")
expected = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11")
self.assertEqual(
dt.Floor(rdfvalue.Duration.From(1, rdfvalue.DAYS)), expected)
def testFloorExact(self):
dt = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34:56")
self.assertEqual(dt.Floor(rdfvalue.Duration.From(1, rdfvalue.SECONDS)), dt)
class RDFDatetimeSecondsTest(absltest.TestCase):
def testFromDatetime_withMicroSeconds(self):
dt_with_micros = datetime.datetime(2000, 1, 1, microsecond=5000)
dt = datetime.datetime(2000, 1, 1)
self.assertEqual(
rdfvalue.RDFDatetimeSeconds.FromDatetime(dt_with_micros),
rdfvalue.RDFDatetimeSeconds.FromDatetime(dt))
def testBug122716179(self):
d = rdfvalue.RDFDatetimeSeconds.FromSecondsSinceEpoch(1)
self.assertEqual(d.AsMicrosecondsSinceEpoch(), 1000000)
diff = rdfvalue.RDFDatetimeSeconds(10) - rdfvalue.Duration("3s")
self.assertEqual(diff.AsMicrosecondsSinceEpoch(), 7000000)
class DurationSecondsTest(absltest.TestCase):
def testPublicAttributes(self):
duration = rdfvalue.DurationSeconds.FromHumanReadable("1h")
self.assertEqual(duration.ToInt(rdfvalue.SECONDS), 3600)
self.assertEqual(duration.ToInt(rdfvalue.MILLISECONDS), 3600 * 1000)
self.assertEqual(duration.microseconds, 3600 * 1000 * 1000)
def testFromDays(self):
self.assertEqual(
rdfvalue.DurationSeconds.From(2, rdfvalue.DAYS),
rdfvalue.DurationSeconds.FromHumanReadable("2d"))
self.assertEqual(
rdfvalue.DurationSeconds.From(31, rdfvalue.DAYS),
rdfvalue.DurationSeconds.FromHumanReadable("31d"))
def testFromHours(self):
self.assertEqual(
rdfvalue.DurationSeconds.From(48, rdfvalue.HOURS),
rdfvalue.DurationSeconds.FromHumanReadable("48h"))
self.assertEqual(
rdfvalue.DurationSeconds.From(24, rdfvalue.HOURS),
rdfvalue.DurationSeconds.FromHumanReadable("24h"))
def testFromSeconds(self):
self.assertEqual(
rdfvalue.DurationSeconds.From(1337,
rdfvalue.SECONDS).ToInt(rdfvalue.SECONDS),
1337)
def testFromMicroseconds(self):
duration = rdfvalue.DurationSeconds.From(3000000, rdfvalue.MICROSECONDS)
self.assertEqual(duration.microseconds, 3000000)
self.assertEqual(duration.ToInt(rdfvalue.SECONDS), 3)
def testFloatConstructorRaises(self):
with self.assertRaises(TypeError):
rdfvalue.DurationSeconds(3.14)
def testSerializeToBytes(self):
self.assertEqual(
b"0",
rdfvalue.DurationSeconds.From(0, rdfvalue.WEEKS).SerializeToBytes())
self.assertEqual(
b"1",
rdfvalue.DurationSeconds.From(1, rdfvalue.SECONDS).SerializeToBytes())
self.assertEqual(
b"2",
rdfvalue.DurationSeconds.From(2, rdfvalue.SECONDS).SerializeToBytes())
self.assertEqual(
b"999",
rdfvalue.DurationSeconds.From(999, rdfvalue.SECONDS).SerializeToBytes())
self.assertEqual(
b"1000",
rdfvalue.DurationSeconds.From(1000,
rdfvalue.SECONDS).SerializeToBytes())
def testFromWireFormat(self):
for i in [0, 7, 1337]:
val = rdfvalue.DurationSeconds.FromWireFormat(i)
self.assertEqual(i, val.ToInt(rdfvalue.SECONDS))
val2 = rdfvalue.DurationSeconds.FromWireFormat(
val.SerializeToWireFormat())
self.assertEqual(val, val2)
MAX_UINT64 = 18446744073709551615
class DurationTest(absltest.TestCase):
def testInitializationFromMicroseconds(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64]:
val = rdfvalue.Duration.From(i, rdfvalue.MICROSECONDS)
self.assertEqual(i, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} us".format(i)))
self.assertEqual(val, rdfvalue.Duration(i))
def testInitializationFromMilliseconds(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 1000]:
val = rdfvalue.Duration.From(i, rdfvalue.MILLISECONDS)
self.assertEqual(i * 1000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} ms".format(i)))
def testInitializationFromSeconds(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 1000000]:
val = rdfvalue.Duration.From(i, rdfvalue.SECONDS)
self.assertEqual(i * 1000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} s".format(i)))
def testInitializationFromMinutes(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 60000000]:
val = rdfvalue.Duration.From(i, rdfvalue.MINUTES)
self.assertEqual(i * 60000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} m".format(i)))
def testInitializationFromHours(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 3600000000]:
val = rdfvalue.Duration.From(i, rdfvalue.HOURS)
self.assertEqual(i * 3600000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} h".format(i)))
def testInitializationFromDays(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 86400000000]:
val = rdfvalue.Duration.From(i, rdfvalue.DAYS)
self.assertEqual(i * 86400000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} d".format(i)))
def testInitializationFromWeeks(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 604800000000]:
val = rdfvalue.Duration.From(i, rdfvalue.WEEKS)
self.assertEqual(i * 604800000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} w".format(i)))
def testConversionToInt(self):
for i in [0, 1, 7, 60, 1337, 12345, 123456, 1234567, MAX_UINT64]:
val = rdfvalue.Duration.From(i, rdfvalue.MICROSECONDS)
self.assertEqual(val.ToInt(rdfvalue.MICROSECONDS), i)
self.assertEqual(val.ToInt(rdfvalue.MILLISECONDS), i // 1000)
self.assertEqual(val.ToInt(rdfvalue.SECONDS), i // (1000 * 1000))
self.assertEqual(val.ToInt(rdfvalue.MINUTES), i // (60 * 1000 * 1000))
self.assertEqual(val.ToInt(rdfvalue.HOURS), i // (60 * 60 * 1000 * 1000))
self.assertEqual(
val.ToInt(rdfvalue.DAYS), i // (24 * 60 * 60 * 1000 * 1000))
self.assertEqual(
val.ToInt(rdfvalue.WEEKS), i // (7 * 24 * 60 * 60 * 1000 * 1000))
def testConversionToFractional(self):
for i in [0, 1, 7, 60, 1337, 12345, 123456, 1234567, MAX_UINT64]:
val = rdfvalue.Duration.From(i, rdfvalue.MICROSECONDS)
self.assertAlmostEqual(val.ToFractional(rdfvalue.MICROSECONDS), i)
self.assertAlmostEqual(val.ToFractional(rdfvalue.MILLISECONDS), i / 1000)
self.assertAlmostEqual(
val.ToFractional(rdfvalue.SECONDS), i / (1000 * 1000))
self.assertAlmostEqual(
val.ToFractional(rdfvalue.MINUTES), i / (60 * 1000 * 1000))
self.assertAlmostEqual(
val.ToFractional(rdfvalue.HOURS), i / (60 * 60 * 1000 * 1000))
self.assertAlmostEqual(
val.ToFractional(rdfvalue.DAYS), i / (24 * 60 * 60 * 1000 * 1000))
self.assertAlmostEqual(
val.ToFractional(rdfvalue.WEEKS),
i / (7 * 24 * 60 * 60 * 1000 * 1000))
def testStringDeserialization(self):
for i in [0, 1, 7, 60, 1337, 12345, 123456, 1234567, MAX_UINT64]:
val = rdfvalue.Duration.From(i, rdfvalue.MICROSECONDS)
self.assertEqual(
rdfvalue.Duration.FromSerializedBytes(val.SerializeToBytes()), val)
def testHumanReadableStringSerialization(self):
self.assertEqual("0 us", str(rdfvalue.Duration.From(0, rdfvalue.WEEKS)))
self.assertEqual("1 us",
str(rdfvalue.Duration.From(1, rdfvalue.MICROSECONDS)))
self.assertEqual("2 us",
str(rdfvalue.Duration.From(2, rdfvalue.MICROSECONDS)))
self.assertEqual("999 us",
str(rdfvalue.Duration.From(999, rdfvalue.MICROSECONDS)))
self.assertEqual("1 ms",
str(rdfvalue.Duration.From(1000, rdfvalue.MICROSECONDS)))
self.assertEqual("1 ms",
str(rdfvalue.Duration.From(1, rdfvalue.MILLISECONDS)))
self.assertEqual(
"{} us".format(MAX_UINT64),
str(rdfvalue.Duration.From(MAX_UINT64, rdfvalue.MICROSECONDS)))
self.assertEqual("3 s", str(rdfvalue.Duration.From(3, rdfvalue.SECONDS)))
self.assertEqual("3 m", str(rdfvalue.Duration.From(3, rdfvalue.MINUTES)))
self.assertEqual("3 h", str(rdfvalue.Duration.From(3, rdfvalue.HOURS)))
self.assertEqual("3 d", str(rdfvalue.Duration.From(3, rdfvalue.DAYS)))
self.assertEqual("3 w", str(rdfvalue.Duration.From(21, rdfvalue.DAYS)))
def testSerializeToBytes(self):
self.assertEqual(
b"0",
rdfvalue.Duration.From(0, rdfvalue.WEEKS).SerializeToBytes())
self.assertEqual(
b"1",
rdfvalue.Duration.From(1, rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
b"2",
rdfvalue.Duration.From(2, rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
b"999",
rdfvalue.Duration.From(999, rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
b"1000",
rdfvalue.Duration.From(1000, rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
str(MAX_UINT64).encode("utf-8"),
rdfvalue.Duration.From(MAX_UINT64,
rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
b"3000000",
rdfvalue.Duration.From(3, rdfvalue.SECONDS).SerializeToBytes())
def testAdditionOfDurationsIsEqualToIntegerAddition(self):
for a in [0, 1, 7, 60, 1337, MAX_UINT64 // 2]:
for b in [0, 1, 7, 60, 1337, MAX_UINT64 // 2]:
self.assertEqual(
rdfvalue.Duration(a) + rdfvalue.Duration(b),
rdfvalue.Duration(a + b))
def testSubtractionOfDurationsIsEqualToIntegerSubtraction(self):
for a in [0, 1, 7, 60, 1337, MAX_UINT64]:
for b in [0, 1, 7, 60, 1337, MAX_UINT64]:
self.assertEqual(
rdfvalue.Duration(a) - rdfvalue.Duration(min(a, b)),
rdfvalue.Duration(a - min(a, b)))
def testFromWireFormat(self):
for i in [0, 7, 1337, MAX_UINT64]:
val = rdfvalue.Duration.FromWireFormat(i)
self.assertEqual(i, val.microseconds)
def testSubtractionFromDateTimeIsEqualToIntegerSubtraction(self):
for a in [0, 1, 7, 60, 1337]:
for b in [0, 1, 7, 60, 1337]:
lhs = rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(a)
rhs = rdfvalue.Duration(min(a, b))
result = lhs - rhs
self.assertEqual(result.AsMicrosecondsSinceEpoch(), a - min(a, b))
def testAdditionToDateTimeIsEqualToIntegerAddition(self):
for a in [0, 1, 7, 60, 1337]:
for b in [0, 1, 7, 60, 1337]:
lhs = rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(a)
rhs = rdfvalue.Duration(b)
result = lhs + rhs
self.assertEqual(result.AsMicrosecondsSinceEpoch(), a + b)
def testComparisonIsEqualToIntegerComparison(self):
for a in [0, 1, 7, 60, 1337, MAX_UINT64 - 1, MAX_UINT64]:
for b in [0, 1, 7, 60, 1337, MAX_UINT64 - 1, MAX_UINT64]:
dur_a = rdfvalue.Duration(a)
dur_b = rdfvalue.Duration(b)
if a > b:
self.assertGreater(dur_a, dur_b)
if a >= b:
self.assertGreaterEqual(dur_a, dur_b)
if a == b:
self.assertEqual(dur_a, dur_b)
if a <= b:
self.assertLessEqual(dur_a, dur_b)
if a < b:
self.assertLess(dur_a, dur_b)
if a != b:
self.assertNotEqual(dur_a, dur_b)
class DocTest(test_lib.DocTest):
module = rdfvalue
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| 39.591489
| 80
| 0.690294
| 17,527
| 0.940391
| 0
| 0
| 256
| 0.013735
| 0
| 0
| 2,032
| 0.109025
|
028ccbb703922e522d9de79fd431e21d9aeac192
| 909
|
py
|
Python
|
src/main/python/server/test.py
|
areichmann-tgm/client_travis
|
c00163e6d7630ff4efaf28605b134e356e02a9d1
|
[
"MIT"
] | null | null | null |
src/main/python/server/test.py
|
areichmann-tgm/client_travis
|
c00163e6d7630ff4efaf28605b134e356e02a9d1
|
[
"MIT"
] | null | null | null |
src/main/python/server/test.py
|
areichmann-tgm/client_travis
|
c00163e6d7630ff4efaf28605b134e356e02a9d1
|
[
"MIT"
] | null | null | null |
import pytest
from server import rest
@pytest.fixture
def client():
rest.app.testing = True
#rest.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///server.MyStudents'
client = rest.app.test_client()
yield client
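    # Hedged note: yielding (rather than returning) the client lets pytest
    # run any teardown added after the yield once each test finishes.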
def test_get(client):
res = client.get('/schueler')
assert res.status_code == 200
def test_get_schueler_a(client):  # a second "def test_get" would silently shadow the first
res = client.get('/schuelerA')
assert res.status_code == 200
def test_delete(client):
res = client.delete('/schuelerA',data={'schueler_id':'1000'})
assert res.status_code == 200
def test_update(client):
"""res = client.put('/schuelerA',data={'schueler_id':'1000','usernameX':'Adrian','emailX':'adrian@new.at','picture':'-'})"""
    assert True  # placeholder: the PUT call above is commented out, so this test currently does nothing
def test_insert(client):
res = client.put('/schuelerA',data={'schueler_id': '10', 'usernameX': 'Nicht_Adrian', 'emailX': 'adrian@new.at', 'picture': '-'})
assert res.status_code == 200
| 25.971429
| 133
| 0.667767
| 0
| 0
| 175
| 0.192519
| 191
| 0.210121
| 0
| 0
| 342
| 0.376238
|
028dbb898943de5745b9b0587b4aecb405f08834
| 3,143
|
py
|
Python
|
helper-scripts/instrnocombine.py
|
felixaestheticus/realcode-validation
|
c599cc41797fc074bd2b71d205d6b2b904e1d64b
|
[
"BSD-3-Clause"
] | null | null | null |
helper-scripts/instrnocombine.py
|
felixaestheticus/realcode-validation
|
c599cc41797fc074bd2b71d205d6b2b904e1d64b
|
[
"BSD-3-Clause"
] | null | null | null |
helper-scripts/instrnocombine.py
|
felixaestheticus/realcode-validation
|
c599cc41797fc074bd2b71d205d6b2b904e1d64b
|
[
"BSD-3-Clause"
] | null | null | null |
#combine.py, combines available dictionaries into one, and generates csv file for latex
#f = open('dict_random')
mem_ops = 'MOVS','MOV','LDR','LDRH','LDRB','LDRSH','LDRSB','LDM','STR','STRH','STRB','STM'
ari_ops = 'ADDS','ADD','ADC','ADCS','ADR','SUBS','SUB','SBCS','RSBS','MULS','MUL','RSB','SBC'
com_ops = 'CMP','CMN'
log_ops = 'ANDS','EORS','ORRS','BICS','MVNS','TST','EOR','MVN','ORR'
sys_ops = 'PUSH','POP','SVC','CPSID','CPSIE','MRS','MSR','BKPT','SEV','WFE','WFI','YIELD','NOP','ISB','DMB','DSB'
bra_ops = 'B','BL','BLX','BX','BCC','BCS','BEQ','BIC','BLS','BNE','BPL','BGE','BGT','BHI','BLE','BLT','BMI','BVC','BVS'
man_ops = 'SXTH','SXTB','UXTH','UXTB','REV','REV16','REVSH','LSLS','LSRS','RORS','ASR','ASRS','LSL','LSR','ROR'
import os,sys
path = '.'
#files = []
#for i in os.listdir(path):
# if os.path.isfile(os.path.join(path,i)) and i.startswith('typelist') and not i.endswith('~'):
# files.append(i)
files = sys.argv[1:]
print(files)
dic_all = {}
print(dic_all)
for f in files:
f = open(f)
lines = f.readlines()
dic = {}
line = lines[0]
if(line!= ''):
dic = eval(line)
for key in dic:
if(key not in dic_all):
dic_all[key] = str(dic[key])
else:
dic_all[key] = str(dic_all[key]) + "," + str(dic[key])
for key in dic_all:
dic_all[key] = ''
for f in files:
f = open(f)
lines = f.readlines()
dic = {}
line = lines[0]
if(line!= ''):
dic = eval(line)
for key in dic:
#if(dic_all[key] != ''):
dic_all[key] = str(dic_all[key]) + str(dic[key])
for key in dic_all:
if(key not in dic):
dic_all[key] = str(dic_all[key]) +"0"
dic_all[key] = str(dic_all[key]) +","
print(dic_all)
ou = open('dict_nocomb','w')
ou.write(str(dic_all))
csv1 = open("tablenocomb1.csv","w")
csv2 = open("tablenocomb2.csv","w")
csv1.write("Instr. Name, Occur.(Random),Occur.(Real),Type\n")
csv2.write("Instr. Name, Occur.(Random),Occur.(Real),Type\n")
keylist = [key for key in dic_all]
keylist.sort()
nonempty = 0.0
nonemptyr = 0.0
for key in dic_all:
h= str(key)
if(h in mem_ops):
#print("1\n")
dic_all[key] = dic_all[key]+'M'
elif(h in ari_ops):
#print("2\n")
dic_all[key] = dic_all[key]+'A'
elif(h in com_ops):
#print("3\n")
dic_all[key] = dic_all[key]+'C'
elif(h in log_ops):
#print("4\n")
dic_all[key] = dic_all[key]+'L'
elif(h in sys_ops):
#print("5\n")
dic_all[key] = dic_all[key]+'S'
elif(h in bra_ops):
#print("6\n")
dic_all[key] = dic_all[key]+'B'
elif(h in man_ops):
#print("7\n")
dic_all[key] = dic_all[key]+'R'
else:
#print("no cat, sorry\n")
dic_all[key] = dic_all[key]+'O'
#for key in dic_all:
for i in range(len(keylist)):
key = keylist[i]
if(dic_all[key].split(",")[1]!='0'):
nonempty = nonempty+1
#print(str(i)+",")
if(dic_all[key].split(",")[0]!='0'):
nonemptyr = nonemptyr+1
if(i < len(keylist)/2):
csv1.write(str(key) + ',' + str(dic_all[key])+'\n')
else:
csv2.write(str(key) + ',' + str(dic_all[key])+'\n')
print( "Coverage rate -real:" + str(nonempty/len(keylist)))
print( "Coverage rate - random:" + str(nonemptyr/len(keylist)))
csv1.close()
csv2.close()
#print( "Success rate:" + str((nonempty/len(keylist)))
| 25.144
| 119
| 0.602291
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,240
| 0.394528
|
028e69371236efe13bb824f07b81ce319b9462f0
| 4,102
|
py
|
Python
|
ip_client.py
|
HuiiBuh/checkers-master
|
112eb1df1d8b0d691edd82978945ea5527b75fab
|
[
"MIT"
] | 1
|
2021-09-04T05:34:51.000Z
|
2021-09-04T05:34:51.000Z
|
ip_client.py
|
HuiiBuh/checkers-master
|
112eb1df1d8b0d691edd82978945ea5527b75fab
|
[
"MIT"
] | null | null | null |
ip_client.py
|
HuiiBuh/checkers-master
|
112eb1df1d8b0d691edd82978945ea5527b75fab
|
[
"MIT"
] | null | null | null |
#from src.piclient import PiClient
from src.streamclient import StreamClient
import math
options = {
"boarddetector": {
"prepare": {
"resize": [512, 512]
},
"corners": {
"maxcorners": 500,
"qualitylevel": 0.01,
"mindistance": 20
},
"lines": {
"harris": {
"rho": 1,
"theta": 0.01,
"threshold": 350
},
"filter": {
"rho": 20,
"theta": 0.15
}
},
"similarpoints": {
"range": 9
},
"correctlines": {
"amount": 9
}
},
"figuredetector": {
"circles": {
"rho": 1,
"mindist": 40,
"param1": 150,
"param2": 15,
"minradius": 0,
"maxradius": 30
},
"colors": {
"white": {
"normal": {
"lower": [160, 0, 170],
"upper": [180, 120, 255]
},
"king": {
"lower": [115, 0, 0],
"upper": [160, 255, 255]
},
},
"black": {
"normal": {
"lower": [160, 0, 0],
"upper": [180, 255, 150]
},
"king": {
"lower": [160, 120, 160],
"upper": [180, 255, 255]
}
}
}
}
}
# Setup the PiClient
client = StreamClient(
url='https://boarddetection.nebula.blue',
token='uhe3xXfev3H3ehjkUswY9QWpiqzhYvH8y5YmpSvMRDoy8yFvH5LXnbY5phJ5tu88',
stream_url="http://127.0.0.1:8000/stream.mjpg"
)
def ping_ip():
return client.ping()
def print_pieces():
pieces, _ = client.detect(options=options)
for piece in pieces:
print(piece)
def ip_get_board():
pieces, _ = client.detect(options=options)
return pieces
def check_configuration():
pieces, _ = client.detect(options=options)
if not pieces:
return -1
pos = -1
# TODO only one
for piece in pieces:
if 13 <= piece["position"] <= 20:
pos = piece["position"]
if pos == -1:
return -1
pos = pos - 12
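    # Hedged reading (assumption, not documented in the source): squares
    # 13-20 appear to be the two middle rows of the board; pos - 12 maps
    # them to 1-8, and the branch below interleaves the two rows into a
    # single 1-8 configuration index.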
if pos > 4:
pos = (pos - 4) * 2
else:
pos = pos * 2 - 1
return int(pos)
def _found_piece(problems, piece):
for problem in problems:
if problem["position"] == piece["position"]:
return problems.remove(problem)
def check_init_chess_board():
pieces, _ = client.detect(options=options)
if not pieces:
raise RuntimeError("API ERROR could not fetch current chess board")
problems = [{"position": i, "player": 1, "king": False, "error": "missing"} for i in range(1, 13)] +\
[{"position": i, "player": 2, "king": False, "error": "missing"} for i in range(21, 33)]
for piece in pieces:
# Player 1
if piece["position"] <= 12:
_found_piece(problems, piece)
if piece["player"] != 1 and piece["king"]:
piece["error"] = "wrong player and piece is king"
elif piece["player"] != 1:
piece["error"] = "wrong player"
elif piece["king"]:
piece["error"] = "piece is king"
if "error" in piece.keys():
problems.append(piece)
# Player 2
elif piece["position"] >= 21:
_found_piece(problems, piece)
if piece["player"] != 2 and piece["king"]:
piece["error"] = "wrong player and piece is king"
elif piece["player"] != 2:
piece["error"] = "wrong player"
elif piece["king"]:
piece["error"] = "piece is king"
if "error" in piece.keys():
problems.append(piece)
# Piece out of init pos
else:
piece["error"] = "incorrect position"
problems.append(piece)
return problems
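# Hedged usage sketch (assumes the detection service at the URL above is
# reachable; not part of the original module):
#   if ping_ip():
#       print_pieces()
#       print(check_configuration())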
| 25.165644
| 105
| 0.455144
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,062
| 0.258898
|
028e7466100505ca2d031073edf99db35fd3966b
| 2,773
|
py
|
Python
|
registration_eval/different_days/dd_compute_dense_transformation_error.py
|
mirestrepo/voxels-at-lems
|
df47d031653d2ad877a97b3c1ea574b924b7d4c2
|
[
"BSD-2-Clause"
] | 2
|
2015-09-18T00:17:16.000Z
|
2019-02-06T04:41:29.000Z
|
registration_eval/different_days/dd_compute_dense_transformation_error.py
|
mirestrepo/voxels-at-lems
|
df47d031653d2ad877a97b3c1ea574b924b7d4c2
|
[
"BSD-2-Clause"
] | null | null | null |
registration_eval/different_days/dd_compute_dense_transformation_error.py
|
mirestrepo/voxels-at-lems
|
df47d031653d2ad877a97b3c1ea574b924b7d4c2
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
"""
compute_transformation_error.py
Created by Maria Isabel Restrepo on 2012-09-24.
Copyright (c) 2012 . All rights reserved.
This script computes the distances between an estimated similarity transformation and its ground truth. The transformation is used to transform a "source" coordinate system into a "target" coordinate system.
To compute the error between the translations, the L2 norm of the difference between the translation vectors in the
"source coordinate system" is computed. Since distances are preserved under R and T, only scale is applied.
The rotation error is computed as the half angle between the normalized quaternions, i.e. acos(|<q1,q2>|) in [0, pi/2].
This script was intended to be used with Vishal's results
"""
import os
import sys
import logging
import argparse
from vpcl_adaptor import *
import numpy as np
from numpy import linalg as LA
import transformations as tf
import math
import matplotlib.pyplot as plt
sys.path.append(os.pardir)
import reg3d
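# Hedged sketch (not part of the original script): the rotation error
# described in the module docstring, acos(|<q1, q2>|) in [0, pi/2], as a
# standalone helper; quaternions are assumed to be plain 4-vectors.
def quaternion_half_angle_error(q1, q2):
    """Half angle (radians) between two rotations given as quaternions."""
    q1 = np.asarray(q1, dtype=float) / LA.norm(q1)
    q2 = np.asarray(q2, dtype=float) / LA.norm(q2)
    # |<q1, q2>| folds the quaternion double cover: q and -q are the same rotation.
    return math.acos(min(1.0, abs(np.dot(q1, q2))))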
if __name__ == '__main__':
# fname = "/Users/isa/Dropbox/data/registration_for_vj/capitol_2011/original/2011-2006_Hs_matrix_vj_dense.txt"
# gt_fname = "/Users/isa/Dropbox/data/registration_for_vj/capitol_2011/original/2011-2006_Hs.txt"
# geo_fname ="/Users/isa/Dropbox/data/registration_for_vj/capitol_2006/original/Hs_geo.txt"
# error = reg3d.transformation_error_general(fname = fname,
# gt_fname = gt_fname,
# geo_fname = geo_fname )
# # Error (S,R,T) 1.39523511977e-06 0.802221070301 2.98789826592
# fname = "/Users/isa/Dropbox/data/registration_for_vj/downtown_2006/original/2006-2011_Hs_matrix_vj_dense.txt"
# gt_fname = "/Users/isa/Dropbox/data/registration_for_vj/downtown_2006/original/2006-2011_Hs.txt"
# geo_fname ="/Users/isa/Dropbox/data/registration_for_vj/capitol_2011/original/Hs_geo.txt"
# error = reg3d.transformation_error_general(fname = fname,
# gt_fname = gt_fname,
# geo_fname = geo_fname )
# # Error (S,R,T) 5.31970689721e-08 0.808909241082 4.83449482984
# fname = "/Users/isa/Dropbox/data/registration_for_vj/BH_VSI/original/f4-2006_Hs_matrix_vj_dense.txt"
# gt_fname = "/Users/isa/Dropbox/data/registration_for_vj/BH_VSI/original/f4-2006_Hs.txt"
# geo_fname ="/Users/isa/Dropbox/data/registration_for_vj/BH_2006/original/Hs_geo.txt"
# error = reg3d.transformation_error_general(fname = fname,
# gt_fname = gt_fname,
# geo_fname = geo_fname )
    # # Error (S,R,T) 2.57980939389e-07 0.763324882652 4.79257669203
    pass  # every example call above is commented out; keep the if-block syntactically valid
| 48.649123
| 200
| 0.695636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,397
| 0.864407
|
028f14718283c8b1eabad98e17db6f0ca1dee6eb
| 16,301
|
py
|
Python
|
migrations/versions/be21086640ad_country_added.py
|
anjinkristou/assistor
|
02d9b826b9d8844d475c11c33db48cf278282183
|
[
"MIT"
] | 1
|
2022-01-29T14:00:32.000Z
|
2022-01-29T14:00:32.000Z
|
migrations/versions/be21086640ad_country_added.py
|
anjinkristou/assistor
|
02d9b826b9d8844d475c11c33db48cf278282183
|
[
"MIT"
] | null | null | null |
migrations/versions/be21086640ad_country_added.py
|
anjinkristou/assistor
|
02d9b826b9d8844d475c11c33db48cf278282183
|
[
"MIT"
] | null | null | null |
"""Country added
Revision ID: be21086640ad
Revises: 153f720f966f
Create Date: 2021-11-09 15:34:04.306218
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'be21086640ad'
down_revision = '153f720f966f'
branch_labels = None
depends_on = None
naming_convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(column_0_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
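# Hedged note (not from the original migration): the convention above lets
# Alembic generate deterministic constraint names, which batch_alter_table
# needs on SQLite since SQLite cannot alter constraints in place.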
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('countries',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('iso', sa.String(length=2), nullable=True),
sa.Column('name', sa.String(length=80), nullable=True),
sa.Column('nicename', sa.String(length=80), nullable=True),
sa.Column('iso3', sa.String(length=3), nullable=True),
sa.Column('numcode', sa.Integer(), nullable=True),
sa.Column('phonecode', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
with op.batch_alter_table('companies', schema=None, naming_convention=naming_convention) as batch_op:
batch_op.add_column(sa.Column('country_id', sa.Integer(), nullable=True))
batch_op.create_foreign_key(batch_op.f('fk_company_country_id_country'), 'countries', ['country_id'], ['id'])
# ### end Alembic commands ###
op.execute("""
INSERT INTO `countries` (`id`, `iso`, `name`, `nicename`, `iso3`, `numcode`, `phonecode`) VALUES
(1, 'AF', 'AFGHANISTAN', 'Afghanistan', 'AFG', 4, 93),
(2, 'AL', 'ALBANIA', 'Albania', 'ALB', 8, 355),
(3, 'DZ', 'ALGERIA', 'Algeria', 'DZA', 12, 213),
(4, 'AS', 'AMERICAN SAMOA', 'American Samoa', 'ASM', 16, 1684),
(5, 'AD', 'ANDORRA', 'Andorra', 'AND', 20, 376),
(6, 'AO', 'ANGOLA', 'Angola', 'AGO', 24, 244),
(7, 'AI', 'ANGUILLA', 'Anguilla', 'AIA', 660, 1264),
(8, 'AQ', 'ANTARCTICA', 'Antarctica', NULL, NULL, 0),
(9, 'AG', 'ANTIGUA AND BARBUDA', 'Antigua and Barbuda', 'ATG', 28, 1268),
(10, 'AR', 'ARGENTINA', 'Argentina', 'ARG', 32, 54),
(11, 'AM', 'ARMENIA', 'Armenia', 'ARM', 51, 374),
(12, 'AW', 'ARUBA', 'Aruba', 'ABW', 533, 297),
(13, 'AU', 'AUSTRALIA', 'Australia', 'AUS', 36, 61),
(14, 'AT', 'AUSTRIA', 'Austria', 'AUT', 40, 43),
(15, 'AZ', 'AZERBAIJAN', 'Azerbaijan', 'AZE', 31, 994),
(16, 'BS', 'BAHAMAS', 'Bahamas', 'BHS', 44, 1242),
(17, 'BH', 'BAHRAIN', 'Bahrain', 'BHR', 48, 973),
(18, 'BD', 'BANGLADESH', 'Bangladesh', 'BGD', 50, 880),
(19, 'BB', 'BARBADOS', 'Barbados', 'BRB', 52, 1246),
(20, 'BY', 'BELARUS', 'Belarus', 'BLR', 112, 375),
(21, 'BE', 'BELGIUM', 'Belgium', 'BEL', 56, 32),
(22, 'BZ', 'BELIZE', 'Belize', 'BLZ', 84, 501),
(23, 'BJ', 'BENIN', 'Benin', 'BEN', 204, 229),
(24, 'BM', 'BERMUDA', 'Bermuda', 'BMU', 60, 1441),
(25, 'BT', 'BHUTAN', 'Bhutan', 'BTN', 64, 975),
(26, 'BO', 'BOLIVIA', 'Bolivia', 'BOL', 68, 591),
(27, 'BA', 'BOSNIA AND HERZEGOVINA', 'Bosnia and Herzegovina', 'BIH', 70, 387),
(28, 'BW', 'BOTSWANA', 'Botswana', 'BWA', 72, 267),
(29, 'BV', 'BOUVET ISLAND', 'Bouvet Island', NULL, NULL, 0),
(30, 'BR', 'BRAZIL', 'Brazil', 'BRA', 76, 55),
(31, 'IO', 'BRITISH INDIAN OCEAN TERRITORY', 'British Indian Ocean Territory', NULL, NULL, 246),
(32, 'BN', 'BRUNEI DARUSSALAM', 'Brunei Darussalam', 'BRN', 96, 673),
(33, 'BG', 'BULGARIA', 'Bulgaria', 'BGR', 100, 359),
(34, 'BF', 'BURKINA FASO', 'Burkina Faso', 'BFA', 854, 226),
(35, 'BI', 'BURUNDI', 'Burundi', 'BDI', 108, 257),
(36, 'KH', 'CAMBODIA', 'Cambodia', 'KHM', 116, 855),
(37, 'CM', 'CAMEROON', 'Cameroon', 'CMR', 120, 237),
(38, 'CA', 'CANADA', 'Canada', 'CAN', 124, 1),
(39, 'CV', 'CAPE VERDE', 'Cape Verde', 'CPV', 132, 238),
(40, 'KY', 'CAYMAN ISLANDS', 'Cayman Islands', 'CYM', 136, 1345),
(41, 'CF', 'CENTRAL AFRICAN REPUBLIC', 'Central African Republic', 'CAF', 140, 236),
(42, 'TD', 'CHAD', 'Chad', 'TCD', 148, 235),
(43, 'CL', 'CHILE', 'Chile', 'CHL', 152, 56),
(44, 'CN', 'CHINA', 'China', 'CHN', 156, 86),
(45, 'CX', 'CHRISTMAS ISLAND', 'Christmas Island', NULL, NULL, 61),
(46, 'CC', 'COCOS (KEELING) ISLANDS', 'Cocos (Keeling) Islands', NULL, NULL, 672),
(47, 'CO', 'COLOMBIA', 'Colombia', 'COL', 170, 57),
(48, 'KM', 'COMOROS', 'Comoros', 'COM', 174, 269),
(49, 'CG', 'CONGO', 'Congo', 'COG', 178, 242),
(50, 'CD', 'CONGO, THE DEMOCRATIC REPUBLIC OF THE', 'Congo, the Democratic Republic of the', 'COD', 180, 242),
(51, 'CK', 'COOK ISLANDS', 'Cook Islands', 'COK', 184, 682),
(52, 'CR', 'COSTA RICA', 'Costa Rica', 'CRI', 188, 506),
(53, 'CI', 'COTE D''IVOIRE', 'Cote D''Ivoire', 'CIV', 384, 225),
(54, 'HR', 'CROATIA', 'Croatia', 'HRV', 191, 385),
(55, 'CU', 'CUBA', 'Cuba', 'CUB', 192, 53),
(56, 'CY', 'CYPRUS', 'Cyprus', 'CYP', 196, 357),
(57, 'CZ', 'CZECH REPUBLIC', 'Czech Republic', 'CZE', 203, 420),
(58, 'DK', 'DENMARK', 'Denmark', 'DNK', 208, 45),
(59, 'DJ', 'DJIBOUTI', 'Djibouti', 'DJI', 262, 253),
(60, 'DM', 'DOMINICA', 'Dominica', 'DMA', 212, 1767),
(61, 'DO', 'DOMINICAN REPUBLIC', 'Dominican Republic', 'DOM', 214, 1809),
(62, 'EC', 'ECUADOR', 'Ecuador', 'ECU', 218, 593),
(63, 'EG', 'EGYPT', 'Egypt', 'EGY', 818, 20),
(64, 'SV', 'EL SALVADOR', 'El Salvador', 'SLV', 222, 503),
(65, 'GQ', 'EQUATORIAL GUINEA', 'Equatorial Guinea', 'GNQ', 226, 240),
(66, 'ER', 'ERITREA', 'Eritrea', 'ERI', 232, 291),
(67, 'EE', 'ESTONIA', 'Estonia', 'EST', 233, 372),
(68, 'ET', 'ETHIOPIA', 'Ethiopia', 'ETH', 231, 251),
(69, 'FK', 'FALKLAND ISLANDS (MALVINAS)', 'Falkland Islands (Malvinas)', 'FLK', 238, 500),
(70, 'FO', 'FAROE ISLANDS', 'Faroe Islands', 'FRO', 234, 298),
(71, 'FJ', 'FIJI', 'Fiji', 'FJI', 242, 679),
(72, 'FI', 'FINLAND', 'Finland', 'FIN', 246, 358),
(73, 'FR', 'FRANCE', 'France', 'FRA', 250, 33),
(74, 'GF', 'FRENCH GUIANA', 'French Guiana', 'GUF', 254, 594),
(75, 'PF', 'FRENCH POLYNESIA', 'French Polynesia', 'PYF', 258, 689),
(76, 'TF', 'FRENCH SOUTHERN TERRITORIES', 'French Southern Territories', NULL, NULL, 0),
(77, 'GA', 'GABON', 'Gabon', 'GAB', 266, 241),
(78, 'GM', 'GAMBIA', 'Gambia', 'GMB', 270, 220),
(79, 'GE', 'GEORGIA', 'Georgia', 'GEO', 268, 995),
(80, 'DE', 'GERMANY', 'Germany', 'DEU', 276, 49),
(81, 'GH', 'GHANA', 'Ghana', 'GHA', 288, 233),
(82, 'GI', 'GIBRALTAR', 'Gibraltar', 'GIB', 292, 350),
(83, 'GR', 'GREECE', 'Greece', 'GRC', 300, 30),
(84, 'GL', 'GREENLAND', 'Greenland', 'GRL', 304, 299),
(85, 'GD', 'GRENADA', 'Grenada', 'GRD', 308, 1473),
(86, 'GP', 'GUADELOUPE', 'Guadeloupe', 'GLP', 312, 590),
(87, 'GU', 'GUAM', 'Guam', 'GUM', 316, 1671),
(88, 'GT', 'GUATEMALA', 'Guatemala', 'GTM', 320, 502),
(89, 'GN', 'GUINEA', 'Guinea', 'GIN', 324, 224),
(90, 'GW', 'GUINEA-BISSAU', 'Guinea-Bissau', 'GNB', 624, 245),
(91, 'GY', 'GUYANA', 'Guyana', 'GUY', 328, 592),
(92, 'HT', 'HAITI', 'Haiti', 'HTI', 332, 509),
(93, 'HM', 'HEARD ISLAND AND MCDONALD ISLANDS', 'Heard Island and Mcdonald Islands', NULL, NULL, 0),
(94, 'VA', 'HOLY SEE (VATICAN CITY STATE)', 'Holy See (Vatican City State)', 'VAT', 336, 39),
(95, 'HN', 'HONDURAS', 'Honduras', 'HND', 340, 504),
(96, 'HK', 'HONG KONG', 'Hong Kong', 'HKG', 344, 852),
(97, 'HU', 'HUNGARY', 'Hungary', 'HUN', 348, 36),
(98, 'IS', 'ICELAND', 'Iceland', 'ISL', 352, 354),
(99, 'IN', 'INDIA', 'India', 'IND', 356, 91),
(100, 'ID', 'INDONESIA', 'Indonesia', 'IDN', 360, 62),
(101, 'IR', 'IRAN, ISLAMIC REPUBLIC OF', 'Iran, Islamic Republic of', 'IRN', 364, 98),
(102, 'IQ', 'IRAQ', 'Iraq', 'IRQ', 368, 964),
(103, 'IE', 'IRELAND', 'Ireland', 'IRL', 372, 353),
(104, 'IL', 'ISRAEL', 'Israel', 'ISR', 376, 972),
(105, 'IT', 'ITALY', 'Italy', 'ITA', 380, 39),
(106, 'JM', 'JAMAICA', 'Jamaica', 'JAM', 388, 1876),
(107, 'JP', 'JAPAN', 'Japan', 'JPN', 392, 81),
(108, 'JO', 'JORDAN', 'Jordan', 'JOR', 400, 962),
(109, 'KZ', 'KAZAKHSTAN', 'Kazakhstan', 'KAZ', 398, 7),
(110, 'KE', 'KENYA', 'Kenya', 'KEN', 404, 254),
(111, 'KI', 'KIRIBATI', 'Kiribati', 'KIR', 296, 686),
(112, 'KP', 'KOREA, DEMOCRATIC PEOPLE''S REPUBLIC OF', 'Korea, Democratic People''s Republic of', 'PRK', 408, 850),
(113, 'KR', 'KOREA, REPUBLIC OF', 'Korea, Republic of', 'KOR', 410, 82),
(114, 'KW', 'KUWAIT', 'Kuwait', 'KWT', 414, 965),
(115, 'KG', 'KYRGYZSTAN', 'Kyrgyzstan', 'KGZ', 417, 996),
(116, 'LA', 'LAO PEOPLE''S DEMOCRATIC REPUBLIC', 'Lao People''s Democratic Republic', 'LAO', 418, 856),
(117, 'LV', 'LATVIA', 'Latvia', 'LVA', 428, 371),
(118, 'LB', 'LEBANON', 'Lebanon', 'LBN', 422, 961),
(119, 'LS', 'LESOTHO', 'Lesotho', 'LSO', 426, 266),
(120, 'LR', 'LIBERIA', 'Liberia', 'LBR', 430, 231),
(121, 'LY', 'LIBYAN ARAB JAMAHIRIYA', 'Libyan Arab Jamahiriya', 'LBY', 434, 218),
(122, 'LI', 'LIECHTENSTEIN', 'Liechtenstein', 'LIE', 438, 423),
(123, 'LT', 'LITHUANIA', 'Lithuania', 'LTU', 440, 370),
(124, 'LU', 'LUXEMBOURG', 'Luxembourg', 'LUX', 442, 352),
(125, 'MO', 'MACAO', 'Macao', 'MAC', 446, 853),
(126, 'MK', 'MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF', 'Macedonia, the Former Yugoslav Republic of', 'MKD', 807, 389),
(127, 'MG', 'MADAGASCAR', 'Madagascar', 'MDG', 450, 261),
(128, 'MW', 'MALAWI', 'Malawi', 'MWI', 454, 265),
(129, 'MY', 'MALAYSIA', 'Malaysia', 'MYS', 458, 60),
(130, 'MV', 'MALDIVES', 'Maldives', 'MDV', 462, 960),
(131, 'ML', 'MALI', 'Mali', 'MLI', 466, 223),
(132, 'MT', 'MALTA', 'Malta', 'MLT', 470, 356),
(133, 'MH', 'MARSHALL ISLANDS', 'Marshall Islands', 'MHL', 584, 692),
(134, 'MQ', 'MARTINIQUE', 'Martinique', 'MTQ', 474, 596),
(135, 'MR', 'MAURITANIA', 'Mauritania', 'MRT', 478, 222),
(136, 'MU', 'MAURITIUS', 'Mauritius', 'MUS', 480, 230),
(137, 'YT', 'MAYOTTE', 'Mayotte', NULL, NULL, 269),
(138, 'MX', 'MEXICO', 'Mexico', 'MEX', 484, 52),
(139, 'FM', 'MICRONESIA, FEDERATED STATES OF', 'Micronesia, Federated States of', 'FSM', 583, 691),
(140, 'MD', 'MOLDOVA, REPUBLIC OF', 'Moldova, Republic of', 'MDA', 498, 373),
(141, 'MC', 'MONACO', 'Monaco', 'MCO', 492, 377),
(142, 'MN', 'MONGOLIA', 'Mongolia', 'MNG', 496, 976),
(143, 'MS', 'MONTSERRAT', 'Montserrat', 'MSR', 500, 1664),
(144, 'MA', 'MOROCCO', 'Morocco', 'MAR', 504, 212),
(145, 'MZ', 'MOZAMBIQUE', 'Mozambique', 'MOZ', 508, 258),
(146, 'MM', 'MYANMAR', 'Myanmar', 'MMR', 104, 95),
(147, 'NA', 'NAMIBIA', 'Namibia', 'NAM', 516, 264),
(148, 'NR', 'NAURU', 'Nauru', 'NRU', 520, 674),
(149, 'NP', 'NEPAL', 'Nepal', 'NPL', 524, 977),
(150, 'NL', 'NETHERLANDS', 'Netherlands', 'NLD', 528, 31),
(151, 'AN', 'NETHERLANDS ANTILLES', 'Netherlands Antilles', 'ANT', 530, 599),
(152, 'NC', 'NEW CALEDONIA', 'New Caledonia', 'NCL', 540, 687),
(153, 'NZ', 'NEW ZEALAND', 'New Zealand', 'NZL', 554, 64),
(154, 'NI', 'NICARAGUA', 'Nicaragua', 'NIC', 558, 505),
(155, 'NE', 'NIGER', 'Niger', 'NER', 562, 227),
(156, 'NG', 'NIGERIA', 'Nigeria', 'NGA', 566, 234),
(157, 'NU', 'NIUE', 'Niue', 'NIU', 570, 683),
(158, 'NF', 'NORFOLK ISLAND', 'Norfolk Island', 'NFK', 574, 672),
(159, 'MP', 'NORTHERN MARIANA ISLANDS', 'Northern Mariana Islands', 'MNP', 580, 1670),
(160, 'NO', 'NORWAY', 'Norway', 'NOR', 578, 47),
(161, 'OM', 'OMAN', 'Oman', 'OMN', 512, 968),
(162, 'PK', 'PAKISTAN', 'Pakistan', 'PAK', 586, 92),
(163, 'PW', 'PALAU', 'Palau', 'PLW', 585, 680),
(164, 'PS', 'PALESTINIAN TERRITORY, OCCUPIED', 'Palestinian Territory, Occupied', NULL, NULL, 970),
(165, 'PA', 'PANAMA', 'Panama', 'PAN', 591, 507),
(166, 'PG', 'PAPUA NEW GUINEA', 'Papua New Guinea', 'PNG', 598, 675),
(167, 'PY', 'PARAGUAY', 'Paraguay', 'PRY', 600, 595),
(168, 'PE', 'PERU', 'Peru', 'PER', 604, 51),
(169, 'PH', 'PHILIPPINES', 'Philippines', 'PHL', 608, 63),
(170, 'PN', 'PITCAIRN', 'Pitcairn', 'PCN', 612, 0),
(171, 'PL', 'POLAND', 'Poland', 'POL', 616, 48),
(172, 'PT', 'PORTUGAL', 'Portugal', 'PRT', 620, 351),
(173, 'PR', 'PUERTO RICO', 'Puerto Rico', 'PRI', 630, 1787),
(174, 'QA', 'QATAR', 'Qatar', 'QAT', 634, 974),
(175, 'RE', 'REUNION', 'Reunion', 'REU', 638, 262),
(176, 'RO', 'ROMANIA', 'Romania', 'ROU', 642, 40),
(177, 'RU', 'RUSSIAN FEDERATION', 'Russian Federation', 'RUS', 643, 70),
(178, 'RW', 'RWANDA', 'Rwanda', 'RWA', 646, 250),
(179, 'SH', 'SAINT HELENA', 'Saint Helena', 'SHN', 654, 290),
(180, 'KN', 'SAINT KITTS AND NEVIS', 'Saint Kitts and Nevis', 'KNA', 659, 1869),
(181, 'LC', 'SAINT LUCIA', 'Saint Lucia', 'LCA', 662, 1758),
(182, 'PM', 'SAINT PIERRE AND MIQUELON', 'Saint Pierre and Miquelon', 'SPM', 666, 508),
(183, 'VC', 'SAINT VINCENT AND THE GRENADINES', 'Saint Vincent and the Grenadines', 'VCT', 670, 1784),
(184, 'WS', 'SAMOA', 'Samoa', 'WSM', 882, 684),
(185, 'SM', 'SAN MARINO', 'San Marino', 'SMR', 674, 378),
(186, 'ST', 'SAO TOME AND PRINCIPE', 'Sao Tome and Principe', 'STP', 678, 239),
(187, 'SA', 'SAUDI ARABIA', 'Saudi Arabia', 'SAU', 682, 966),
(188, 'SN', 'SENEGAL', 'Senegal', 'SEN', 686, 221),
(189, 'CS', 'SERBIA AND MONTENEGRO', 'Serbia and Montenegro', NULL, NULL, 381),
(190, 'SC', 'SEYCHELLES', 'Seychelles', 'SYC', 690, 248),
(191, 'SL', 'SIERRA LEONE', 'Sierra Leone', 'SLE', 694, 232),
(192, 'SG', 'SINGAPORE', 'Singapore', 'SGP', 702, 65),
(193, 'SK', 'SLOVAKIA', 'Slovakia', 'SVK', 703, 421),
(194, 'SI', 'SLOVENIA', 'Slovenia', 'SVN', 705, 386),
(195, 'SB', 'SOLOMON ISLANDS', 'Solomon Islands', 'SLB', 90, 677),
(196, 'SO', 'SOMALIA', 'Somalia', 'SOM', 706, 252),
(197, 'ZA', 'SOUTH AFRICA', 'South Africa', 'ZAF', 710, 27),
(198, 'GS', 'SOUTH GEORGIA AND THE SOUTH SANDWICH ISLANDS', 'South Georgia and the South Sandwich Islands', NULL, NULL, 0),
(199, 'ES', 'SPAIN', 'Spain', 'ESP', 724, 34),
(200, 'LK', 'SRI LANKA', 'Sri Lanka', 'LKA', 144, 94),
(201, 'SD', 'SUDAN', 'Sudan', 'SDN', 736, 249),
(202, 'SR', 'SURINAME', 'Suriname', 'SUR', 740, 597),
(203, 'SJ', 'SVALBARD AND JAN MAYEN', 'Svalbard and Jan Mayen', 'SJM', 744, 47),
(204, 'SZ', 'SWAZILAND', 'Swaziland', 'SWZ', 748, 268),
(205, 'SE', 'SWEDEN', 'Sweden', 'SWE', 752, 46),
(206, 'CH', 'SWITZERLAND', 'Switzerland', 'CHE', 756, 41),
(207, 'SY', 'SYRIAN ARAB REPUBLIC', 'Syrian Arab Republic', 'SYR', 760, 963),
(208, 'TW', 'TAIWAN, PROVINCE OF CHINA', 'Taiwan, Province of China', 'TWN', 158, 886),
(209, 'TJ', 'TAJIKISTAN', 'Tajikistan', 'TJK', 762, 992),
(210, 'TZ', 'TANZANIA, UNITED REPUBLIC OF', 'Tanzania, United Republic of', 'TZA', 834, 255),
(211, 'TH', 'THAILAND', 'Thailand', 'THA', 764, 66),
(212, 'TL', 'TIMOR-LESTE', 'Timor-Leste', NULL, NULL, 670),
(213, 'TG', 'TOGO', 'Togo', 'TGO', 768, 228),
(214, 'TK', 'TOKELAU', 'Tokelau', 'TKL', 772, 690),
(215, 'TO', 'TONGA', 'Tonga', 'TON', 776, 676),
(216, 'TT', 'TRINIDAD AND TOBAGO', 'Trinidad and Tobago', 'TTO', 780, 1868),
(217, 'TN', 'TUNISIA', 'Tunisia', 'TUN', 788, 216),
(218, 'TR', 'TURKEY', 'Turkey', 'TUR', 792, 90),
(219, 'TM', 'TURKMENISTAN', 'Turkmenistan', 'TKM', 795, 7370),
(220, 'TC', 'TURKS AND CAICOS ISLANDS', 'Turks and Caicos Islands', 'TCA', 796, 1649),
(221, 'TV', 'TUVALU', 'Tuvalu', 'TUV', 798, 688),
(222, 'UG', 'UGANDA', 'Uganda', 'UGA', 800, 256),
(223, 'UA', 'UKRAINE', 'Ukraine', 'UKR', 804, 380),
(224, 'AE', 'UNITED ARAB EMIRATES', 'United Arab Emirates', 'ARE', 784, 971),
(225, 'GB', 'UNITED KINGDOM', 'United Kingdom', 'GBR', 826, 44),
(226, 'US', 'UNITED STATES', 'United States', 'USA', 840, 1),
(227, 'UM', 'UNITED STATES MINOR OUTLYING ISLANDS', 'United States Minor Outlying Islands', NULL, NULL, 1),
(228, 'UY', 'URUGUAY', 'Uruguay', 'URY', 858, 598),
(229, 'UZ', 'UZBEKISTAN', 'Uzbekistan', 'UZB', 860, 998),
(230, 'VU', 'VANUATU', 'Vanuatu', 'VUT', 548, 678),
(231, 'VE', 'VENEZUELA', 'Venezuela', 'VEN', 862, 58),
(232, 'VN', 'VIET NAM', 'Viet Nam', 'VNM', 704, 84),
(233, 'VG', 'VIRGIN ISLANDS, BRITISH', 'Virgin Islands, British', 'VGB', 92, 1284),
(234, 'VI', 'VIRGIN ISLANDS, U.S.', 'Virgin Islands, U.s.', 'VIR', 850, 1340),
(235, 'WF', 'WALLIS AND FUTUNA', 'Wallis and Futuna', 'WLF', 876, 681),
(236, 'EH', 'WESTERN SAHARA', 'Western Sahara', 'ESH', 732, 212),
(237, 'YE', 'YEMEN', 'Yemen', 'YEM', 887, 967),
(238, 'ZM', 'ZAMBIA', 'Zambia', 'ZMB', 894, 260),
(239, 'ZW', 'ZIMBABWE', 'Zimbabwe', 'ZWE', 716, 263),
(240, 'RS', 'SERBIA', 'Serbia', 'SRB', NULL, 381),
(241, 'ME', 'MONTENEGRO', 'Montenegro', 'MNE', NULL, 382);
""")
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('companies', schema=None, naming_convention=naming_convention) as batch_op:
batch_op.drop_constraint(batch_op.f('fk_company_country_id_country'), type_='foreignkey')
batch_op.drop_column('country_id')
op.drop_table('countries')
# ### end Alembic commands ###
| 55.070946
| 123
| 0.587081
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 15,208
| 0.932949
|
0290a912952dfd6fc7b4ea5458b073ea88cdb834
| 26,718
|
py
|
Python
|
pyxrd/scripts/generate_default_phases.py
|
PyXRD/pyxrd
|
26bacdf64f3153fa74b8caa62e219b76d91a55c1
|
[
"BSD-2-Clause"
] | 27
|
2018-06-15T15:28:18.000Z
|
2022-03-10T12:23:50.000Z
|
pyxrd/scripts/generate_default_phases.py
|
PyXRD/pyxrd
|
26bacdf64f3153fa74b8caa62e219b76d91a55c1
|
[
"BSD-2-Clause"
] | 22
|
2018-06-14T08:29:16.000Z
|
2021-07-05T13:33:44.000Z
|
pyxrd/scripts/generate_default_phases.py
|
PyXRD/pyxrd
|
26bacdf64f3153fa74b8caa62e219b76d91a55c1
|
[
"BSD-2-Clause"
] | 8
|
2019-04-13T13:03:51.000Z
|
2021-06-19T09:29:11.000Z
|
#!/usr/bin/python
# coding=UTF-8
# ex:ts=4:sw=4:et=on
# Copyright (c) 2013, Mathijs Dumon
# All rights reserved.
# Complete license can be found in the LICENSE file.
import os
from pyxrd.data import settings
from pyxrd.project.models import Project
from pyxrd.phases.models import Component, Phase
def generate_expandables(
filename_format, phase_name, maxR,
phase_kwargs_AD, phase_kwargs_EG, phase_kwargs_350,
code_AD, code_EG, code_350,
comp_kwargs_AD, comp_kwargs_EG, comp_kwargs_350):
"""
Generates a list of phase descriptions for a combination of an
AD, EG and 350° Ca-saturated phase linked together
"""
return [
('%s' + (filename_format % R), [
(dict(R=R, name=phase_name + (' R%d Ca-AD' % R), **phase_kwargs_AD), code_AD, comp_kwargs_AD),
(dict(R=R, name=phase_name + (' R%d Ca-EG' % R), based_on=phase_name + (' R%d Ca-AD' % R), **phase_kwargs_EG), code_EG, comp_kwargs_EG),
(dict(R=R, name=phase_name + (' R%d Ca-350' % R), based_on=phase_name + (' R%d Ca-AD' % R), **phase_kwargs_350), code_350, comp_kwargs_350)
]) for R in range(maxR)
]
def run(args=None, ui_callback=None):
"""
How this script works:
- 'code_length' is the length of the aliases keys (see below)
    - 'aliases' is a dictionary containing 4-character keys, each describing a
specific layer-type (or with other words: a Component object)
E.g. dS2w stands for Di-octahedral Smectite with 2 layers of water.
The values are file path formats, in which a single '%s' string placeholder
will be filled with the absolute path to the default components folder.
- 'default_phases' is an initially empty list that will be filled with two-
tuples. The first element in this tuple is the filename of the generated
      phases, the second element describes what this phase contains. This
second element is again a tuple, containing three parts:
        - A dictionary of keyword arguments passed on to the Phase
constructor. If a 'based_on' keyword is defined, an attempt is
made to translate it to an earlier generated phase. This way, it
is possible to pass the name of an earlier generated phase, and
the script will pass in the actual Phase object instead.
        - A component code (string) built from the keys of the 'aliases'
dictionary. This string's length should be a multiple of 'code_length'.
There is no separator, rather, the 'code_length' is used to split the
code into its parts.
- Component keyword arguments dictionaries: this is a dictionary in
which the keys match with the components code parts. The values are
property-value dictionaries used to set Component properties after
importing them. Similarly to the Phases' 'based_on' keyword, the
value for the 'linked_with' key is translated to the actual
Component named as such.
### Setup:
"""
code_length = 4
aliases = {
        # keys are padded with spaces to code_length (4) characters
        'C   ': '%sChlorite.cmp',
        'K   ': '%sKaolinite.cmp',
        'I   ': '%sIllite.cmp',
        'Se  ': '%sSerpentine.cmp',
        'T   ': '%sTalc.cmp',
        'Ma  ': '%sMargarite.cmp',
        'Pa  ': '%sParagonite.cmp',
        'L   ': '%sLeucophyllite.cmp',
'dS2w': '%sDi-Smectite/Di-Smectite - Ca 2WAT.cmp',
'dS1w': '%sDi-Smectite/Di-Smectite - Ca 1WAT.cmp',
'dS0w': '%sDi-Smectite/Di-Smectite - Ca Dehydr.cmp',
'dS2g': '%sDi-Smectite/Di-Smectite - Ca 2GLY.cmp',
'dS1g': '%sDi-Smectite/Di-Smectite - Ca 1GLY.cmp',
'dSht': '%sDi-Smectite/Di-Smectite - Ca Heated.cmp',
'tS2w': '%sTri-Smectite/Tri-Smectite - Ca 2WAT.cmp',
'tS1w': '%sTri-Smectite/Tri-Smectite - Ca 1WAT.cmp',
'tS0w': '%sTri-Smectite/Tri-Smectite - Ca Dehydr.cmp',
'tS2g': '%sTri-Smectite/Tri-Smectite - Ca 2GLY.cmp',
'tS1g': '%sTri-Smectite/Tri-Smectite - Ca 1GLY.cmp',
'tSht': '%sTri-Smectite/Tri-Smectite - Ca Heated.cmp',
'dV2w': '%sDi-Vermiculite/Di-Vermiculite - Ca 2WAT.cmp',
'dV1w': '%sDi-Vermiculite/Di-Vermiculite - Ca 1WAT.cmp',
'dV0w': '%sDi-Vermiculite/Di-Vermiculite - Ca Dehydr.cmp',
'dV2g': '%sDi-Vermiculite/Di-Vermiculite - Ca 2GLY.cmp',
'dV1g': '%sDi-Vermiculite/Di-Vermiculite - Ca 1GLY.cmp',
'dVht': '%sDi-Vermiculite/Di-Vermiculite - Ca Heated.cmp',
}
default_phases = []
"""
### Commonly used inherit flag dicts:
"""
inherit_S = dict(
inherit_ucp_a=True,
inherit_ucp_b=True,
inherit_delta_c=True,
inherit_layer_atoms=True,
)
inherit_all = dict(
inherit_d001=True,
inherit_default_c=True,
inherit_interlayer_atoms=True,
inherit_atom_relations=True,
**inherit_S
)
inherit_phase = dict(
inherit_display_color=True,
inherit_sigma_star=True,
inherit_CSDS_distribution=True,
inherit_probabilities=True
)
"""
### Single-layer phases:
"""
default_phases += [
('%sKaolinite.phs', [(dict(R=0, name='Kaolinite'), 'K ', {}), ]),
('%sIllite.phs', [(dict(R=0, name='Illite'), 'I ', {})]),
('%sSerpentine.phs', [(dict(R=0, name='Serpentine'), 'Se ', {})]),
('%sTalc.phs', [(dict(R=0, name='Talc'), 'T ', {})]),
('%sChlorite.phs', [(dict(R=0, name='Chlorite'), 'C ', {})]),
('%sMargarite.phs', [(dict(R=0, name='Margarite'), 'Ma ', {})]),
('%sLeucophyllite.phs', [(dict(R=0, name='Leucophyllite'), 'L ', {})]),
('%sParagonite.phs', [(dict(R=0, name='Paragonite'), 'Pa ', {})]),
]
"""
### Dioctahedral smectites:
"""
S_code_AD = 'dS2w'
S_code_EG = 'dS2g'
S_code_350 = 'dSht'
S_inh_comp_args = {
'dS2g': dict(linked_with='dS2w', **inherit_S),
'dSht': dict(linked_with='dS2w', **inherit_S),
}
SS_code_AD = S_code_AD + 'dS1w'
SS_code_EG = S_code_EG + 'dS1g'
SS_code_350 = S_code_350 + 'dS1g'
SS_inh_comp_args = dict(S_inh_comp_args)
SS_inh_comp_args.update({
'dS1g': dict(linked_with='dS1w', **inherit_S),
})
SSS_code_AD = SS_code_AD + 'dS0w'
SSS_code_EG = SS_code_EG + 'dS0w'
SSS_code_350 = SS_code_350 + 'dS0w'
SSS_inh_comp_args = dict(SS_inh_comp_args)
SSS_inh_comp_args.update({
'dS0w': dict(linked_with='dS0w', **inherit_S),
})
default_phases += [
('%sSmectites/Di-Smectite Ca.phs', [
(dict(R=0, name='S R0 Ca-AD'), S_code_AD, {}),
(dict(R=0, name='S R0 Ca-EG', based_on='S R0 Ca-AD', **inherit_phase), S_code_EG, S_inh_comp_args),
(dict(R=0, name='S R0 Ca-350', based_on='S R0 Ca-AD', **inherit_phase), S_code_350, S_inh_comp_args)
]),
]
default_phases += generate_expandables(
'Smectites/SS/Di-SS R%d Ca.phs', 'SS', 4,
{}, inherit_phase, inherit_phase,
SS_code_AD, SS_code_EG, SS_code_350,
{}, SS_inh_comp_args, SS_inh_comp_args,
)
default_phases += generate_expandables(
'Smectites/SSS/Di-SSS R%d Ca.phs', 'SSS', 3,
{}, inherit_phase, inherit_phase,
SSS_code_AD, SSS_code_EG, SSS_code_350,
{}, SSS_inh_comp_args, SSS_inh_comp_args,
)
"""
### Trioctahedral smectites:
"""
tS_code_AD = 'tS2w'
tS_code_EG = 'tS2g'
tS_code_350 = 'tSht'
tS_inh_comp_args = {
'tS2g': dict(linked_with='tS2w', **inherit_S),
'tSht': dict(linked_with='tS2w', **inherit_S),
}
tSS_code_AD = tS_code_AD + 'tS1w'
tSS_code_EG = tS_code_EG + 'tS1g'
tSS_code_350 = tS_code_350 + 'tS1g'
tSS_inh_comp_args = dict(S_inh_comp_args)
tSS_inh_comp_args.update({
'tS1g': dict(linked_with='tS1w', **inherit_S),
})
tSSS_code_AD = tSS_code_AD + 'tS0w'
tSSS_code_EG = tSS_code_EG + 'tS0w'
tSSS_code_350 = tSS_code_350 + 'tS0w'
tSSS_inh_comp_args = dict(SS_inh_comp_args)
tSSS_inh_comp_args.update({
'tS0w': dict(linked_with='tS0w', **inherit_S),
})
default_phases += [
('%sSmectites/Tri-Smectite Ca.phs', [
(dict(R=0, name='S R0 Ca-AD'), tS_code_AD, {}),
(dict(R=0, name='S R0 Ca-EG', based_on='S R0 Ca-AD', **inherit_phase), tS_code_EG, tS_inh_comp_args),
(dict(R=0, name='S R0 Ca-350', based_on='S R0 Ca-AD', **inherit_phase), tS_code_350, tS_inh_comp_args)
]),
]
default_phases += generate_expandables(
'Smectites/SS/Tri-SS R%d Ca.phs', 'SS', 4,
{}, inherit_phase, inherit_phase,
tSS_code_AD, tSS_code_EG, tSS_code_350,
{}, tSS_inh_comp_args, tSS_inh_comp_args,
)
default_phases += generate_expandables(
'Smectites/SSS/Tri-SSS R%d Ca.phs', 'SSS', 3,
{}, inherit_phase, inherit_phase,
tSSS_code_AD, tSSS_code_EG, tSSS_code_350,
{}, tSSS_inh_comp_args, tSSS_inh_comp_args,
)
"""
### Dioctahedral vermiculites:
"""
V_code_AD = 'dV2w'
V_code_EG = 'dV2g'
V_code_350 = 'dVht'
V_inh_comp_args = {
'dV2g': dict(linked_with='dV2w', **inherit_S),
'dVht': dict(linked_with='dV2w', **inherit_S),
}
VV_code_AD = V_code_AD + 'dV1w'
VV_code_EG = V_code_EG + 'dV1g'
VV_code_350 = V_code_350 + 'dV1g'
VV_inh_comp_args = dict(V_inh_comp_args)
VV_inh_comp_args.update({
'dV1g': dict(linked_with='dV1w', **inherit_S),
})
VVV_code_AD = VV_code_AD + 'dV0w'
VVV_code_EG = VV_code_EG + 'dV0w'
VVV_code_350 = VV_code_350 + 'dV0w'
VVV_inh_comp_args = dict(VV_inh_comp_args)
VVV_inh_comp_args.update({
'dV0w': dict(linked_with='dV0w', **inherit_S),
})
default_phases += [
('%sVermiculites/Di-Vermiculite Ca.phs', [
(dict(R=0, name='V R0 Ca-AD'), V_code_AD, {}),
(dict(R=0, name='V R0 Ca-EG', based_on='V R0 Ca-AD', **inherit_phase), V_code_EG, V_inh_comp_args),
(dict(R=0, name='V R0 Ca-350', based_on='V R0 Ca-AD', **inherit_phase), V_code_350, V_inh_comp_args)
]),
]
default_phases += generate_expandables(
'Vermiculites/VV/Di-VV R%d Ca.phs', 'VV', 4,
{}, inherit_phase, inherit_phase,
VV_code_AD, VV_code_EG, VV_code_350,
{}, VV_inh_comp_args, VV_inh_comp_args,
)
default_phases += generate_expandables(
'Vermiculites/VVV/Di-VVV R%d Ca.phs', 'VVV', 3,
{}, inherit_phase, inherit_phase,
VVV_code_AD, VVV_code_EG, VVV_code_350,
{}, VVV_inh_comp_args, VVV_inh_comp_args,
)
"""
### Kaolinite - Smectites:
"""
K_code = 'K   '
K_inh_comp_args = {
    'K   ': dict(linked_with='K   ', **inherit_all),
}
KS_code_AD = K_code + S_code_AD
KS_code_EG = K_code + S_code_EG
KS_code_350 = K_code + S_code_350
KS_inh_comp_args = dict(S_inh_comp_args)
KS_inh_comp_args.update(K_inh_comp_args)
KSS_code_AD = K_code + SS_code_AD
KSS_code_EG = K_code + SS_code_EG
KSS_code_350 = K_code + SS_code_350
KSS_inh_comp_args = dict(SS_inh_comp_args)
KSS_inh_comp_args.update(K_inh_comp_args)
KSSS_code_AD = K_code + SSS_code_AD
KSSS_code_EG = K_code + SSS_code_EG
KSSS_code_350 = K_code + SSS_code_350
KSSS_inh_comp_args = dict(SSS_inh_comp_args)
KSSS_inh_comp_args.update(K_inh_comp_args)
default_phases += generate_expandables(
'Kaolinite-Smectites/KS/KS R%d Ca.phs', 'KS', 4,
{}, inherit_phase, inherit_phase,
KS_code_AD, KS_code_EG, KS_code_350,
{}, KS_inh_comp_args, KS_inh_comp_args,
)
default_phases += generate_expandables(
'Kaolinite-Smectites/KSS/KSS R%d Ca.phs', 'KSS', 3,
{}, inherit_phase, inherit_phase,
KSS_code_AD, KSS_code_EG, KSS_code_350,
{}, KSS_inh_comp_args, KSS_inh_comp_args,
)
default_phases += generate_expandables(
'Kaolinite-Smectites/KSSS/KSSS R%d Ca.phs', 'KSSS', 2,
{}, inherit_phase, inherit_phase,
KSSS_code_AD, KSSS_code_EG, KSSS_code_350,
{}, KSSS_inh_comp_args, KSSS_inh_comp_args,
)
"""
### Illite - Smectites:
"""
I_code = 'I   '
I_inh_comp_args = {
    'I   ': dict(linked_with='I   ', **inherit_all),
}
IS_code_AD = I_code + S_code_AD
IS_code_EG = I_code + S_code_EG
IS_code_350 = I_code + S_code_350
IS_inh_comp_args = dict(S_inh_comp_args)
IS_inh_comp_args.update(I_inh_comp_args)
ISS_code_AD = I_code + SS_code_AD
ISS_code_EG = I_code + SS_code_EG
ISS_code_350 = I_code + SS_code_350
ISS_inh_comp_args = dict(SS_inh_comp_args)
ISS_inh_comp_args.update(I_inh_comp_args)
ISSS_code_AD = I_code + SSS_code_AD
ISSS_code_EG = I_code + SSS_code_EG
ISSS_code_350 = I_code + SSS_code_350
ISSS_inh_comp_args = dict(SSS_inh_comp_args)
ISSS_inh_comp_args.update(I_inh_comp_args)
default_phases += generate_expandables(
'Illite-Smectites/IS/IS R%d Ca.phs', 'IS', 4,
{}, inherit_phase, inherit_phase,
IS_code_AD, IS_code_EG, IS_code_350,
{}, IS_inh_comp_args, IS_inh_comp_args,
)
default_phases += generate_expandables(
'Illite-Smectites/ISS/ISS R%d Ca.phs', 'ISS', 3,
{}, inherit_phase, inherit_phase,
ISS_code_AD, ISS_code_EG, ISS_code_350,
{}, ISS_inh_comp_args, ISS_inh_comp_args,
)
default_phases += generate_expandables(
'Illite-Smectites/ISSS/ISSS R%d Ca.phs', 'ISSS', 2,
{}, inherit_phase, inherit_phase,
ISSS_code_AD, ISSS_code_EG, ISSS_code_350,
{}, ISSS_inh_comp_args, ISSS_inh_comp_args,
)
"""
### Chlorite - Smectites:
"""
C_code = 'C   '
C_inh_comp_args = {
    'C   ': dict(linked_with='C   ', **inherit_all),
}
CS_code_AD = C_code + tS_code_AD
CS_code_EG = C_code + tS_code_EG
CS_code_350 = C_code + tS_code_350
CS_inh_comp_args = dict(tS_inh_comp_args)
CS_inh_comp_args.update(C_inh_comp_args)
CSS_code_AD = C_code + tSS_code_AD
CSS_code_EG = C_code + tSS_code_EG
CSS_code_350 = C_code + tSS_code_350
CSS_inh_comp_args = dict(tSS_inh_comp_args)
CSS_inh_comp_args.update(C_inh_comp_args)
CSSS_code_AD = C_code + tSSS_code_AD
CSSS_code_EG = C_code + tSSS_code_EG
CSSS_code_350 = C_code + tSSS_code_350
CSSS_inh_comp_args = dict(tSSS_inh_comp_args)
CSSS_inh_comp_args.update(C_inh_comp_args)
default_phases += generate_expandables(
'Chlorite-Smectites/CS/CS R%d Ca.phs', 'CS', 4,
{}, inherit_phase, inherit_phase,
CS_code_AD, CS_code_EG, CS_code_350,
{}, CS_inh_comp_args, CS_inh_comp_args,
)
default_phases += generate_expandables(
'Chlorite-Smectites/CSS/CSS R%d Ca.phs', 'CSS', 3,
{}, inherit_phase, inherit_phase,
CSS_code_AD, CSS_code_EG, CSS_code_350,
{}, CSS_inh_comp_args, CSS_inh_comp_args,
)
default_phases += generate_expandables(
'Chlorite-Smectites/CSSS/CSSS R%d Ca.phs', 'CSSS', 2,
{}, inherit_phase, inherit_phase,
CSSS_code_AD, CSSS_code_EG, CSSS_code_350,
{}, CSSS_inh_comp_args, CSSS_inh_comp_args,
)
"""
### Talc - Smectites:
"""
T_code = 'T   '
T_inh_comp_args = {
    'T   ': dict(linked_with='T   ', **inherit_all),
}
TS_code_AD = T_code + S_code_AD
TS_code_EG = T_code + S_code_EG
TS_code_350 = T_code + S_code_350
TS_inh_comp_args = dict(S_inh_comp_args)
TS_inh_comp_args.update(T_inh_comp_args)
TSS_code_AD = T_code + SS_code_AD
TSS_code_EG = T_code + SS_code_EG
TSS_code_350 = T_code + SS_code_350
TSS_inh_comp_args = dict(SS_inh_comp_args)
TSS_inh_comp_args.update(T_inh_comp_args)
TSSS_code_AD = T_code + SSS_code_AD
TSSS_code_EG = T_code + SSS_code_EG
TSSS_code_350 = T_code + SSS_code_350
TSSS_inh_comp_args = dict(SSS_inh_comp_args)
TSSS_inh_comp_args.update(T_inh_comp_args)
default_phases += generate_expandables(
'Talc-Smectites/TS/TS R%d Ca.phs', 'TS', 4,
{}, inherit_phase, inherit_phase,
TS_code_AD, TS_code_EG, TS_code_350,
{}, TS_inh_comp_args, TS_inh_comp_args,
)
default_phases += generate_expandables(
'Talc-Smectites/TSS/TSS R%d Ca.phs', 'TSS', 3,
{}, inherit_phase, inherit_phase,
TSS_code_AD, TSS_code_EG, TSS_code_350,
{}, TSS_inh_comp_args, TSS_inh_comp_args,
)
default_phases += generate_expandables(
'Talc-Smectites/TSSS/TSSS R%d Ca.phs', 'TSSS', 2,
{}, inherit_phase, inherit_phase,
TSSS_code_AD, TSSS_code_EG, TSSS_code_350,
{}, TSSS_inh_comp_args, TSSS_inh_comp_args,
)
"""
### Illite - Chlorite - Smectites:
"""
IC_code = I_code + C_code
IC_inh_comp_args = dict(I_inh_comp_args)
IC_inh_comp_args.update(C_inh_comp_args)
ICS_code_AD = IC_code + S_code_AD
ICS_code_EG = IC_code + S_code_EG
ICS_inh_comp_args = dict(S_inh_comp_args)
ICS_inh_comp_args.update(IC_inh_comp_args)
ICSS_code_AD = IC_code + SS_code_AD
ICSS_code_EG = IC_code + SS_code_EG
ICSS_inh_comp_args = dict(SS_inh_comp_args)
ICSS_inh_comp_args.update(IC_inh_comp_args)
ICSSS_code_AD = IC_code + SSS_code_AD
ICSSS_code_EG = IC_code + SSS_code_EG
ICSSS_inh_comp_args = dict(SSS_inh_comp_args)
ICSSS_inh_comp_args.update(IC_inh_comp_args)
default_phases += [
('%sIllite-Chlorite-Smectites/ICS/ICS R0 Ca.phs', [
(dict(R=0, name='ICS R0 Ca-AD'), ICS_code_AD, {}),
(dict(R=0, name='ICS R0 Ca-EG', based_on='ICS R0 Ca-AD', **inherit_phase), ICS_code_EG, ICS_inh_comp_args)
]),
('%sIllite-Chlorite-Smectites/ICS/ICS R1 Ca.phs', [
(dict(R=1, name='ICS R1 Ca-AD'), ICS_code_AD, {}),
(dict(R=1, name='ICS R1 Ca-EG', based_on='ICS R1 Ca-AD', **inherit_phase), ICS_code_EG, ICS_inh_comp_args)
]),
('%sIllite-Chlorite-Smectites/ICS/ICS R2 Ca.phs', [
(dict(R=2, name='ICS R2 Ca-AD'), ICS_code_AD, {}),
(dict(R=2, name='ICS R2 Ca-EG', based_on='ICS R2 Ca-AD', **inherit_phase), ICS_code_EG, ICS_inh_comp_args)
]),
('%sIllite-Chlorite-Smectites/ICSS/ICSS R0 Ca.phs', [
(dict(R=0, name='ICSS R0 Ca-AD'), ICSS_code_AD, {}),
(dict(R=0, name='ICSS R0 Ca-EG', based_on='ICSS R0 Ca-AD', **inherit_phase), ICSS_code_EG, ICSS_inh_comp_args)
]),
('%sIllite-Chlorite-Smectites/ICSS/ICSS R1 Ca.phs', [
(dict(R=1, name='ICSS R1 Ca-AD'), ICSS_code_AD, {}),
(dict(R=1, name='ICSS R1 Ca-EG', based_on='ICSS R1 Ca-AD', **inherit_phase), ICSS_code_EG, ICSS_inh_comp_args)
]),
('%sIllite-Chlorite-Smectites/ICSSS/ICSSS R0 Ca.phs', [
(dict(R=0, name='ICSSS R0 Ca-AD'), ICSSS_code_AD, {}),
(dict(R=0, name='ICSSS R0 Ca-EG', based_on='ICSSS R0 Ca-AD', **inherit_phase), ICSSS_code_EG, ICSSS_inh_comp_args)
]),
]
"""
### Kaolinite - Chlorite - Smectites:
"""
KC_code = K_code + C_code
KC_inh_comp_args = dict(K_inh_comp_args)
KC_inh_comp_args.update(C_inh_comp_args)
KCS_code_AD = KC_code + S_code_AD
KCS_code_EG = KC_code + S_code_EG
KCS_inh_comp_args = dict(S_inh_comp_args)
KCS_inh_comp_args.update(KC_inh_comp_args)
KCSS_code_AD = KC_code + SS_code_AD
KCSS_code_EG = KC_code + SS_code_EG
KCSS_inh_comp_args = dict(SS_inh_comp_args)
KCSS_inh_comp_args.update(KC_inh_comp_args)
KCSSS_code_AD = KC_code + SSS_code_AD
KCSSS_code_EG = KC_code + SSS_code_EG
KCSSS_inh_comp_args = dict(SSS_inh_comp_args)
KCSSS_inh_comp_args.update(KC_inh_comp_args)
default_phases += [
('%sKaolinite-Chlorite-Smectites/KCS/KCS R0 Ca.phs', [
(dict(R=0, name='KCS R0 Ca-AD'), KCS_code_AD, {}),
(dict(R=0, name='KCS R0 Ca-EG', based_on='KCS R0 Ca-AD', **inherit_phase), KCS_code_EG, KCS_inh_comp_args)
]),
('%sKaolinite-Chlorite-Smectites/KCS/KCS R1 Ca.phs', [
(dict(R=1, name='KCS R1 Ca-AD'), KCS_code_AD, {}),
(dict(R=1, name='KCS R1 Ca-EG', based_on='KCS R1 Ca-AD', **inherit_phase), KCS_code_EG, KCS_inh_comp_args)
]),
('%sKaolinite-Chlorite-Smectites/KCS/KCS R2 Ca.phs', [
(dict(R=2, name='KCS R2 Ca-AD'), KCS_code_AD, {}),
(dict(R=2, name='KCS R2 Ca-EG', based_on='KCS R2 Ca-AD', **inherit_phase), KCS_code_EG, KCS_inh_comp_args)
]),
('%sKaolinite-Chlorite-Smectites/KCSS/KCSS R0 Ca.phs', [
(dict(R=0, name='KCSS R0 Ca-AD'), KCSS_code_AD, {}),
(dict(R=0, name='KCSS R0 Ca-EG', based_on='KCSS R0 Ca-AD', **inherit_phase), KCSS_code_EG, KCSS_inh_comp_args)
]),
('%sKaolinite-Chlorite-Smectites/KCSS/KCSS R1 Ca.phs', [
(dict(R=1, name='KCSS R1 Ca-AD'), KCSS_code_AD, {}),
(dict(R=1, name='KCSS R1 Ca-EG', based_on='KCSS R1 Ca-AD', **inherit_phase), KCSS_code_EG, KCSS_inh_comp_args)
]),
('%sKaolinite-Chlorite-Smectites/KCSSS/KCSSS R0 Ca.phs', [
(dict(R=0, name='KCSSS R0 Ca-AD'), KCSSS_code_AD, {}),
(dict(R=0, name='KCSSS R0 Ca-EG', based_on='KCSSS R0 Ca-AD', **inherit_phase), KCSSS_code_EG, KCSSS_inh_comp_args)
]),
]
"""
### Actual object generation routine:
"""
import queue
import threading
def ioworker(in_queue, stop):
"""
Saves Phase objects from the in_queue.
If the Queue is empty this function will only stop
if the 'stop' event is set.
"""
while True:
try:
phases_path, phases = in_queue.get(timeout=0.5)
create_dir_recursive(phases_path)
Phase.save_phases(phases, phases_path)
in_queue.task_done()
except queue.Empty:
if not stop.is_set():
continue
else:
return
save_queue = queue.Queue()
io_stop = threading.Event()
iothread = threading.Thread(target=ioworker, args=(save_queue, io_stop))
iothread.start()
def phaseworker(in_queue, save_queue, stop):
"""
Parses Phase descriptions into actual objects and passes them
to the save_queue.
'stop' should be a threading.Event() that should be toggled
once all elements have been Queued.
This way, the worker will only stop once the Queue is really empty,
and not when it's processing faster than the Queue can be filled.
"""
while True:
try:
phases_path, phase_descr = in_queue.get(timeout=0.5)
project = Project()
phase_lookup = {}
component_lookup = {}
for phase_kwargs, code, comp_props in phase_descr:
# create phase:
                    G = len(code) // code_length  # integer division gives the number of components
based_on = None
if "based_on" in phase_kwargs:
based_on = phase_lookup.get(phase_kwargs.pop("based_on"), None)
phase = Phase(G=G, parent=project, **phase_kwargs)
phase.based_on = based_on
phase_lookup[phase.name] = phase
# derive upper and lower limits for the codes using code lengths:
limits = list(zip(
list(range(0, len(code), code_length)),
list(range(code_length, len(code) + 1, code_length))
))
# create components:
phase.components[:] = []
for ll, ul in limits:
part = code[ll: ul]
for component in Component.load_components(aliases[part] % (settings.DATA_REG.get_directory_path("DEFAULT_COMPONENTS") + "/"), parent=phase):
component.resolve_json_references()
phase.components.append(component)
props = comp_props.get(part, {})
for prop, value in props.items():
if prop == 'linked_with':
value = component_lookup[value]
setattr(component, prop, value)
component_lookup[part] = component
# put phases on the save queue:
phases_path = phases_path % (settings.DATA_REG.get_directory_path("DEFAULT_PHASES") + "/")
save_queue.put((phases_path, list(phase_lookup.values())))
# Flag this as finished
in_queue.task_done()
except queue.Empty:
if not stop.is_set():
continue
else:
return
phase_queue = queue.Queue()
phase_stop = threading.Event()
phasethread = threading.Thread(target=phaseworker, args=(phase_queue, save_queue, phase_stop))
phasethread.start()
# Queue phases:
for phases_path, phase_descr in default_phases:
phase_queue.put((phases_path, phase_descr))
# Signal phaseworker it can stop if the phase_queue is emptied:
phase_stop.set()
while phasethread.is_alive():
# Try to join the thread, but don't block, inform the UI
# of our progress if a callback is provided:
phasethread.join(timeout=0.1)
if callable(ui_callback):
progress = float(len(default_phases) - phase_queue.qsize()) / float(len(default_phases))
ui_callback(progress)
if callable(ui_callback):
ui_callback(1.0)
# Signal the IO worker the phaseworker has stopped, so it can stop
# if the save_queue is empty
io_stop.set()
while iothread.is_alive():
# Try to join the thread, but don't block
iothread.join(timeout=0.1)
pass # end of run
def create_dir_recursive(path):
"""
Creates the path 'path' recursively.
"""
to_create = []
while not os.path.exists(path):
to_create.insert(0, path)
path = os.path.dirname(path)
for path in to_create[:-1]:
os.mkdir(path)
| 38.777939
| 165
| 0.606183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7,969
| 0.298252
|
0291301e96c8737d3d86596ed9a5bcb7c2fdd30e
| 1,322
|
py
|
Python
|
data/kaggle_python/interview_prac/FizzBuzz.py
|
MohanKrishna-RC/Python-Necessities
|
c63fbac717a9bf7edd48ec20337c16de55f5b535
|
[
"FTL"
] | null | null | null |
data/kaggle_python/interview_prac/FizzBuzz.py
|
MohanKrishna-RC/Python-Necessities
|
c63fbac717a9bf7edd48ec20337c16de55f5b535
|
[
"FTL"
] | 8
|
2019-11-27T12:05:09.000Z
|
2019-11-27T12:05:18.000Z
|
data/kaggle_python/interview_prac/FizzBuzz.py
|
MohanKrishna-RC/Python-Necessities
|
c63fbac717a9bf7edd48ec20337c16de55f5b535
|
[
"FTL"
] | null | null | null |
import sys
"""
Example :
Input:
2 (Test cases)
3 (Size of array)
0 1 1 (input)
3
0 1 2
"""
# To store no of test cases here (2).
# To store input here (0 1 1) and (0 1 2).
t = int(sys.stdin.readline())
# print(t)
l = []
while t:
#To store the size of array here (3).
n = int(sys.stdin.readline())
    #Here I have used sys.stdin.readline() to take the input 0 1 1, then split to get a = ['0','1','1'].
a = (sys.stdin.readline().split()) #Now converting a= ['0','1','1'] to l = [0,1,1]
print(a)
for i in range(0, n):
b = int(a[i])
l.append(b)
    #Do your job with the list l here; for now, just print it!
print(l)
l = [] # empty list for next input ie (0 1 2).
t = t-1
# our problem
"""
Input :
2 ( Test Cases)
3 15 (input string)
"""
# To store the number of test cases here (2).
# t=int(sys.stdin.readline())
# # To store the input here: 3 15.
# # print(t)
# #Here I have used sys.stdin.readline() to take the input "3 15", then split it to get a = ['3', '15'].
# a = (sys.stdin.readline().split())
# # print(a)
# for k in range(t):
# for i in range(1,int(a[k])+1):
# if i % 3 == 0 and i % 5 == 0:
# print("FizzBuzz")
# elif i%3==0:
# print("Fizz")
# elif i%5 == 0:
# print("Buzz")
# else:
# print(i)
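# A minimal runnable sketch of the FizzBuzz solution outlined in the
# commented block above (a hard-coded limit replaces the stdin handling,
# so this can run standalone):
def fizzbuzz(limit):
    for i in range(1, limit + 1):
        if i % 3 == 0 and i % 5 == 0:
            print("FizzBuzz")
        elif i % 3 == 0:
            print("Fizz")
        elif i % 5 == 0:
            print("Buzz")
        else:
            print(i)
# Example: fizzbuzz(15) prints 1, 2, Fizz, 4, Buzz, ..., 14, FizzBuzz.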
| 22.793103
| 98
| 0.519667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,026
| 0.776097
|
029137d82ad6128135f8644310a7387974e99f16
| 3,660
|
py
|
Python
|
wikipediabase/persistentkv.py
|
fakedrake/WikipediaBase
|
ab5aa92786bddcd7942ad3e3f1f4e433575ba3fb
|
[
"Apache-2.0"
] | 1
|
2017-11-26T17:57:59.000Z
|
2017-11-26T17:57:59.000Z
|
wikipediabase/persistentkv.py
|
fakedrake/WikipediaBase
|
ab5aa92786bddcd7942ad3e3f1f4e433575ba3fb
|
[
"Apache-2.0"
] | 34
|
2015-03-23T10:28:59.000Z
|
2021-12-13T20:16:48.000Z
|
wikipediabase/persistentkv.py
|
fakedrake/WikipediaBase
|
ab5aa92786bddcd7942ad3e3f1f4e433575ba3fb
|
[
"Apache-2.0"
] | 2
|
2015-05-17T00:56:45.000Z
|
2015-06-27T22:10:59.000Z
|
"""
Some persistent maps (gdbm) require special encoding of keys
and/or values. This is an abstraction for these kinds of quirks.
"""
from itertools import imap
import collections
import gdbm as dbm
import json
from sqlitedict import SqliteDict
import os
class EncodedDict(collections.MutableMapping):
"""
    Subclass this and provide any of the following (see
    implementation for signatures):
- db
- _init()
- _encode_key
- _decode_key.
"""
def __init__(self, wrapped=None):
self.db = wrapped if wrapped is not None else dict()
def _encode_key(self, key):
"""
Override to encode keys coming in.
"""
return key
def _decode_key(self, key):
"""
        Override to decode keys going out.
"""
return key
def __del__(self):
del self.db
def __setitem__(self, key, val):
self.db[self._encode_key(key)] = val
def __getitem__(self, key):
return self.db[self._encode_key(key)]
    def __contains__(self, key):
        # Test membership against the *encoded* keys actually stored in
        # the backing db; self.keys() yields decoded keys.
        return self._encode_key(key) in self.db.keys()
def __delitem__(self, key):
del self.db[self._encode_key(key)]
def __iter__(self):
return imap(self._decode_key, self.db.keys())
def __len__(self):
return len(self.db)
def keys(self):
return list(self)
def values(self):
return self.db.values()
def items(self):
return [(self._decode_key(key), v) for key,v in self.db.iteritems()]
def to_json(self, filename):
json.dump([(k,v) for k,v in self.db.iteritems()],
open(filename, 'w'))
def from_json(self, filename):
for k,v in json.load(open(filename)):
self.db[k] = v
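# A minimal sketch of the intended subclassing (the class below is
# illustrative only and not used elsewhere in this module):
class _UpperKeyDict(EncodedDict):
    """Hypothetical subclass storing every key upper-cased."""
    def _encode_key(self, key):
        return key.upper()
    def _decode_key(self, key):
        return key.lower()
# d = _UpperKeyDict(); d['foo'] = 1  gives  d.db == {'FOO': 1}, d.keys() == ['foo']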
class DbmPersistentDict(EncodedDict):
"""
Persistent dict using dbm. Will open or create filename.
"""
def __init__(self, filename):
flag = 'w' if os.path.exists(filename) else 'n'
super(DbmPersistentDict, self).__init__(dbm.open(filename, flag))
def _encode_key(self, key):
# Asciify
if isinstance(key, unicode):
return key.encode('unicode_escape')
return str(key)
def _decode_key(self, key):
# Unicodify
return key.decode('unicode_escape')
class SqlitePersistentDict(EncodedDict):
def __init__(self, filename):
if not filename.endswith('.sqlite'):
filename += '.sqlite'
db = SqliteDict(filename)
super(SqlitePersistentDict, self).__init__(db)
def __del__(self):
self.db.close()
super(SqlitePersistentDict, self).__del__()
"""
Some info on performance:
>>> import timeit
>>> sqlkv = SqlitePersistentDict('/tmp/bench1.sqlite')
>>> timeit.timeit(lambda : benchmark_write(sqlkv), number=100)
10.847157955169678
>>> timeit.timeit(lambda : benchmark_read(sqlkv), number=100)
18.88098978996277
>>> dbmkv = DbmPersistentDict('/tmp/bench.dbm')
>>> timeit.timeit(lambda : benchmark_write(dbmkv), number=100)
0.18030309677124023
>>> timeit.timeit(lambda : benchmark_read(dbmkv), number=100)
0.14914202690124512
SqliteDict is a pretty thin wrapper around sqlite; I would probably
not have made it much thinner. Just use Dbm.
Keep this around in case anyone considers changing to sqlite.
XXX: see how gdbm does when data is larger than memory. Also check out
bsddb
"""
# PersistentDict = SqlitePersistentDict
PersistentDict = DbmPersistentDict
def benchmark_write(dic, times=100000):
for i in xrange(times):
dic['o' + str(i)] = str(i) * 1000
def benchmark_read(dic, times=100000):
for i in xrange(times):
dic['o' + str(i)]
| 24.72973
| 76
| 0.650546
| 2,374
| 0.648634
| 0
| 0
| 0
| 0
| 0
| 0
| 1,346
| 0.36776
|
0291c411c4fb519a999596fb62a5c1bf748ff844
| 1,672
|
py
|
Python
|
SlidingWindows/Leetcode132.py
|
Rylie-W/LeetRecord
|
623c4efe88b3af54b8a65f6ec23db850b8c6f46f
|
[
"Apache-2.0"
] | null | null | null |
SlidingWindows/Leetcode132.py
|
Rylie-W/LeetRecord
|
623c4efe88b3af54b8a65f6ec23db850b8c6f46f
|
[
"Apache-2.0"
] | null | null | null |
SlidingWindows/Leetcode132.py
|
Rylie-W/LeetRecord
|
623c4efe88b3af54b8a65f6ec23db850b8c6f46f
|
[
"Apache-2.0"
] | null | null | null |
class Solution:
def minCut(self, s: str) -> int:
dp=self.isPal(s)
return self.bfs(s,dp)
def isPal(self,s):
dp=[[False for i in s] for i in s]
for i in range(len(s)):
dp[i][i]=True
if i+1<len(s):
dp[i][i+1]=True if s[i]==s[i+1] else False
for i in range(len(s)):
for j in range(1,min(i+1,len(s)-i)):
# if j==1:
# dp[i][i+j]=True if s[i]==s[i+j] else False
# dp[i-j][i] = True if s[i] == s[i - j] else False
# else:
if i-j>-1 and i+j<len(s):
dp[i-j][i+j]= True if s[i-j]==s[i+j] and dp[i-j+1][i+j-1] else False
if i-j>-1 and i+j+1<len(s):
dp[i-j][i+j+1]=True if s[i-j]==s[i+1+j] and dp[i-j+1][i+j] else False
return dp
def bfs(self,s,dp):
        q = list()  # note: collections.deque would make the pop(0) below O(1)
depth=1
for i in range(len(dp[0])):
if dp[0][i]:
if i==len(s)-1:
return 0
q.append(i+1)
while q:
size=len(q)
for i in range(size):
c=q[0]
q.pop(0)
for index in range(len(dp[c])):
if dp[c][index]:
if index==len(s)-1:
return depth
q.append(index+1)
depth+=1
return depth
if __name__ == '__main__':
sol=Solution()
# s='aammbbc'
# s='bb'
s="fifgbeajcacehiicccfecbfhhgfiiecdcjjffbghdidbhbdbfbfjccgbbdcjheccfbhafehieabbdfeigbiaggchaeghaijfbjhi"
print(sol.minCut(s))
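# A minimal alternative sketch: the classic O(n^2) DP for minimum
# palindrome-partitioning cuts (illustrative, independent of the BFS
# approach above):
def min_cut_dp(s):
    n = len(s)
    pal = [[False] * n for _ in range(n)]
    cuts = list(range(-1, n))  # cuts[i] = min cuts for prefix s[:i]; cuts[0] = -1
    for j in range(n):
        for i in range(j + 1):
            if s[i] == s[j] and (j - i < 2 or pal[i + 1][j - 1]):
                pal[i][j] = True
                cuts[j + 1] = min(cuts[j + 1], cuts[i] + 1)
    return cuts[n]
# e.g. min_cut_dp("aab") == 1, matching Solution().minCut("aab")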
| 30.962963
| 108
| 0.423445
| 1,459
| 0.872608
| 0
| 0
| 0
| 0
| 0
| 0
| 252
| 0.150718
|
0292b6686b64612233e83af31cfc31f88384ed05
| 3,708
|
py
|
Python
|
widgets/component.py
|
peskaf/ramAIn
|
8eb1418007c925ac618e3bddd7de2c0520f5977a
|
[
"MIT"
] | null | null | null |
widgets/component.py
|
peskaf/ramAIn
|
8eb1418007c925ac618e3bddd7de2c0520f5977a
|
[
"MIT"
] | null | null | null |
widgets/component.py
|
peskaf/ramAIn
|
8eb1418007c925ac618e3bddd7de2c0520f5977a
|
[
"MIT"
] | null | null | null |
from PySide6.QtGui import QColor
from PySide6.QtWidgets import QFrame, QHBoxLayout, QWidget
from PySide6.QtCore import Qt, QSettings, QEvent
from utils import colors
import pyqtgraph as pg
import numpy as np
class ScrollablePlotWidget(pg.PlotWidget):
"""
Subclass of `pg.PlotWidget` that overrides `wheelEvent` and `mouse(Press/Release)Event`
so that user scrolls the parent widget when scrolling on the plot.
Widget performs no action on mouse press/release.
"""
def __init__(self, parent: QWidget = None) -> None:
"""
The constructor for scrollable plot widget.
Parameters:
parent (QWidget): Parent widget of this widget. Default: None.
"""
super().__init__()
self.parent = parent
def wheelEvent(self, event: QEvent):
"""
A function that overrides `pg.PlotWidget`'s `wheelEvent` so that parent widget is scrolled.
Parameters:
event (QEvent): Scrolling event.
"""
self.parent.wheelEvent(event)
def mousePressEvent(self, QMouseEvent: QEvent):
"""
A function that overrides `pg.PlotWidget`'s `mousePressEvent` so that it does nothing.
Parameters:
event (QEvent): Mouse press event.
"""
pass
def mouseReleaseEvent(self, QMouseEvent: QEvent):
"""
A function that overrides `pg.PlotWidget`'s `mouseReleaseEvent` so that it does nothing.
Parameters:
event (QEvent): Mouse release event.
"""
pass
class Component(QFrame):
"""
A widget representing one Raman component. It displays a spectral map and a single spectral plot.
"""
    def __init__(self, x: np.ndarray, y: np.ndarray, map: np.ndarray, parent: QWidget = None) -> None:
        """
        The constructor for the component widget.
        Parameters:
            x (np.ndarray): x-axis values of the spectral plot.
            y (np.ndarray): y-axis values of the spectral plot.
            map (np.ndarray): 2D data of the spectral map.
            parent (QWidget): Parent widget of this widget. Default: None.
        """
        super().__init__(parent)
self.settings = QSettings()
# limit size of one component
self.setMinimumHeight(175)
self.setMaximumHeight(400)
self.x_data = x
self.y_data = y
self.map_data = map
        # NOTE: scrolling over the spectral map does nothing at all, as wheelEvent
        # works differently for `pg.ImageView`
self.component_map = pg.ImageView(parent)
        # hide control buttons
self.component_map.ui.histogram.hide()
self.component_map.ui.roiBtn.hide()
self.component_map.ui.menuBtn.hide()
# set colors
bg_color = (240,240,240)
color_map = colors.COLORMAPS[str(self.settings.value("spectral_map/cmap"))]
cmap = pg.ColorMap(pos=np.linspace(0.0, 1.0, len(color_map)), color=color_map)
# component map properties
self.component_map.setColorMap(cmap)
self.component_map.setImage(self.map_data, autoRange=False)
self.component_map.getView().setMouseEnabled(False, False)
self.component_map.getView().setDefaultPadding(0)
self.component_map.getView().setAspectLocked(True, ratio=None)
        self.component_map.getView().setBackgroundColor(QColor(*bg_color))
self.component_map.setMinimumWidth(175)
self.component_map.setMaximumWidth(250)
# spectral plot is the scrollable one
self.component_plot = ScrollablePlotWidget(parent)
self.component_plot.setBackground(bg_color)
plot_pen = pg.mkPen(color="#266867", width=1.5)
self.line = self.component_plot.plot(self.x_data, self.y_data, pen=plot_pen)
# make final layout
layout = QHBoxLayout()
layout.setAlignment(Qt.AlignHCenter)
layout.addWidget(self.component_map)
layout.addWidget(self.component_plot)
self.setLayout(layout)
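# A minimal standalone usage sketch (illustrative only; assumes the
# "spectral_map/cmap" QSettings value resolves to a valid key in
# utils.colors.COLORMAPS, and is not used elsewhere in this module):
def _component_demo():
    import sys
    from PySide6.QtWidgets import QApplication
    app = QApplication(sys.argv)
    x = np.linspace(100, 3500, 500)        # synthetic x axis
    y = np.exp(-((x - 1000) / 50) ** 2)    # synthetic spectrum
    spectral_map = np.random.rand(50, 50)  # synthetic component map
    widget = Component(x, y, spectral_map)
    widget.show()
    sys.exit(app.exec())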
| 32.243478
| 102
| 0.650755
| 3,484
| 0.93959
| 0
| 0
| 0
| 0
| 0
| 0
| 1,338
| 0.360841
|
029490250183cbdb90fa4664ca45b602bbeae6f3
| 9,124
|
py
|
Python
|
Step 4 - Implement the tflite for raspberry pi/godlike_tflite_cam_script.py
|
monacotime/4.IoT-project-sem-5
|
ef14dfba33d308cb5307bbb07d2950fd9a34cfda
|
[
"MIT"
] | null | null | null |
Step 4 - Implement the tflite for raspberry pi/godlike_tflite_cam_script.py
|
monacotime/4.IoT-project-sem-5
|
ef14dfba33d308cb5307bbb07d2950fd9a34cfda
|
[
"MIT"
] | null | null | null |
Step 4 - Implement the tflite for raspberry pi/godlike_tflite_cam_script.py
|
monacotime/4.IoT-project-sem-5
|
ef14dfba33d308cb5307bbb07d2950fd9a34cfda
|
[
"MIT"
] | null | null | null |
### IT WORKS BOIISSS WE DID IT!!!!###
#-------------------------------------------------------------
#Imports
#-------------------------------------------------------------
import tensorflow as tf
import numpy as np
from PIL import Image
import cv2
import colorsys
import random
import time
from googleapiclient.http import MediaFileUpload
from Google import Create_Service
cap = cv2.VideoCapture(0)
#-------------------------------------------------------------
# Global variables
#-------------------------------------------------------------
input_size = 416
iou = 0.4 #iou threshold
score = 0.25 #score threshold
class_names = "./classes.names"
model_path = "./yolov3-tiny-416-int8.tflite"
CLIENT_SECRET_FILE = "credentials.json"
API_NAME = "drive"
API_VERSION = "v3"
SCOPES = ["https://www.googleapis.com/auth/drive"]
service = Create_Service(CLIENT_SECRET_FILE, API_NAME, API_VERSION, SCOPES)
folder_id = "1hFg3bENi-106qf8XvN1q_5uRDORUsxiD"
file_name = "save.jpg"
mime_type = "image/jpeg"
file_metadata = {"name": file_name, "parents": [folder_id]}
media = MediaFileUpload("./{0}".format(file_name), mimetype= mime_type)
#-------------------------------------------------------------
# Utility function definitions
#-------------------------------------------------------------
def read_class_names(class_file_name):
names = {}
with open(class_file_name, 'r') as data:
for ID, name in enumerate(data):
names[ID] = name.strip('\n')
return names
def upload_to_drive():
service.files().create(
body = file_metadata,
media_body = media,
fields = "id"
).execute()
def filter_boxes(box_xywh, scores, score_threshold=0.4, input_shape = tf.constant([416,416])):
scores_max = tf.math.reduce_max(scores, axis=-1)
mask = scores_max >= score_threshold
class_boxes = tf.boolean_mask(box_xywh, mask)
pred_conf = tf.boolean_mask(scores, mask)
class_boxes = tf.reshape(class_boxes, [tf.shape(scores)[0], -1, tf.shape(class_boxes)[-1]])
pred_conf = tf.reshape(pred_conf, [tf.shape(scores)[0], -1, tf.shape(pred_conf)[-1]])
box_xy, box_wh = tf.split(class_boxes, (2, 2), axis=-1)
input_shape = tf.cast(input_shape, dtype=tf.float32)
box_yx = box_xy[..., ::-1]
box_hw = box_wh[..., ::-1]
box_mins = (box_yx - (box_hw / 2.)) / input_shape
box_maxes = (box_yx + (box_hw / 2.)) / input_shape
boxes = tf.concat([
box_mins[..., 0:1], # y_min
box_mins[..., 1:2], # x_min
box_maxes[..., 0:1], # y_max
box_maxes[..., 1:2] # x_max
], axis=-1)
return (boxes, pred_conf)
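# A quick illustrative sanity check for filter_boxes (made-up values, not
# used elsewhere): one centered 208x208 box in a 416x416 image maps to the
# normalized corners (0.25, 0.25, 0.75, 0.75) in (y_min, x_min, y_max, x_max)
# order.
def _filter_boxes_demo():
    box = tf.constant([[[208.0, 208.0, 208.0, 208.0]]])  # one (x, y, w, h) box
    scores = tf.constant([[[0.9]]])                      # one class, confidence 0.9
    boxes, conf = filter_boxes(box, scores, score_threshold=0.5)
    print(boxes.numpy())  # [[[0.25 0.25 0.75 0.75]]]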
def get_frame_image():
_, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image_data = cv2.resize(frame, (input_size, input_size))
image_data = image_data / 255.
image_data = image_data[np.newaxis, ...].astype(np.float32)
return frame, image_data
def forward_pass(image_data):
interpreter.set_tensor(input_details[0]['index'], image_data)
interpreter.invoke()
pred = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
return pred
def draw_bbox(image, bboxes, starting_time, frame_id, classes=read_class_names(class_names), show_label=True):
num_classes = len(classes)
image_h, image_w, _ = image.shape
hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
random.seed(0)
random.shuffle(colors)
random.seed(None)
out_boxes, out_scores, out_classes, num_boxes = bboxes
elapsed_time = time.time() - starting_time
fps = frame_id / elapsed_time
cv2.putText(image, "FPS: " + str(round(fps, 2)), (10, 50), cv2.FONT_HERSHEY_PLAIN, 3, (0, 0, 0), 3)
for i in range(num_boxes[0]):
if int(out_classes[0][i]) < 0 or int(out_classes[0][i]) > num_classes: continue
coor = out_boxes[0][i]
coor[0] = int(coor[0] * image_h)
coor[2] = int(coor[2] * image_h)
coor[1] = int(coor[1] * image_w)
coor[3] = int(coor[3] * image_w)
fontScale = 0.5
score = out_scores[0][i]
class_ind = int(out_classes[0][i])
bbox_color = colors[class_ind]
bbox_thick = int(0.6 * (image_h + image_w) / 600)
c1, c2 = (coor[1], coor[0]), (coor[3], coor[2])
cv2.rectangle(image, c1, c2, bbox_color, bbox_thick)
if show_label:
bbox_mess = '%s: %.2f' % (classes[class_ind], score)
t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick // 2)[0]
c3 = (c1[0] + t_size[0], c1[1] - t_size[1] - 3)
cv2.rectangle(image, c1, (np.float32(c3[0]), np.float32(c3[1])), bbox_color, -1) #filled
cv2.putText(image, bbox_mess, (c1[0], np.float32(c1[1] - 2)), cv2.FONT_HERSHEY_SIMPLEX,
fontScale, (0, 0, 0), bbox_thick // 2, lineType=cv2.LINE_AA)
return image
def command_ext(items):
if 0 in items and 1 in items:
print("-"*10,"Entering Debug Mode","-"*10)
debug_mode()
elif 0 in items:
print("Operation_mode: Normal || command detected: O K")
print("-"*10,"Executing capture and upload","-"*10)
# cv2.imwrite("save.jpg", cv2.cvtColor(np.array(cap.read()[1]), cv2.COLOR_BGR2RGB))
cv2.imwrite("save.jpg", cap.read()[1])
upload_to_drive()
print("-"*10,"UPLOAD COMPLETE")
        print("-"*10,"RETURNING TO NORMAL OPERATION IN 5s","-"*10)
        time.sleep(5)
elif 1 in items:
print("Operation_mode: Normal || command detected: C A L L")
else: print("Operation_mode: Normal || command detected: no command")
def normal_operation_mode():
while True:
#-------------------------------------------------------------
# Reading and storing the frame as an image
#-------------------------------------------------------------
frame, image_data = get_frame_image()
#-------------------------------------------------------------
# Loading the image into the tensors and then predicting it
#-------------------------------------------------------------
pred = forward_pass(image_data)
#-------------------------------------------------------------
# Cleaning the boxes
#-------------------------------------------------------------
boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25, input_shape=tf.constant([input_size, input_size]))
classes, valid_detection = tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
max_output_size_per_class=50,
max_total_size=50,
iou_threshold=iou,
score_threshold=score)[2:]
#-------------------------------------------------------------
        # Extracting the command from the detected classes
#-------------------------------------------------------------
detected_items = [classes.numpy()[0][i].astype(int) for i in range(valid_detection.numpy()[0])]
command_ext(detected_items)
def debug_mode():
starting_time = time.time()
frame_id = 0
while True:
frame_id += 1
frame, image_data = get_frame_image()
pred = forward_pass(image_data)
boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25, input_shape=tf.constant([input_size, input_size]))
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
max_output_size_per_class=50,
max_total_size=50,
iou_threshold=iou,
score_threshold=score)
pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
image = draw_bbox(frame, pred_bbox, starting_time, frame_id)
result = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imshow("Image", result)
if cv2.waitKey(1) & 0xFF == ord('q'): cv2.destroyAllWindows(); break
#-------------------------------------------------------------
# Allocating tflite
#-------------------------------------------------------------
interpreter = tf.lite.Interpreter(model_path=model_path)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_shape = input_details[0]['shape'] #REMOVE THIS ONE
#-------------------------------------------------------------
# Start
#-------------------------------------------------------------
# normal_operation_mode()
debug_mode()
#-------------------------------------------------------------
# Thats it! THE END ez pz
#-------------------------------------------------------------
| 42.635514
| 130
| 0.551732
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,275
| 0.249342
|
0295359e838ee62284e6df9935d33336b1756495
| 2,790
|
py
|
Python
|
maskrcnn_benchmark/modeling/backbone/res2net_builder.py
|
koseimori/Res2Net-maskrcnn
|
e205ff67855b52375f340ca70a08995069424e5c
|
[
"MIT"
] | 31
|
2020-02-02T15:12:13.000Z
|
2022-03-18T08:09:17.000Z
|
maskrcnn_benchmark/modeling/backbone/res2net_builder.py
|
koseimori/Res2Net-maskrcnn
|
e205ff67855b52375f340ca70a08995069424e5c
|
[
"MIT"
] | 4
|
2020-03-08T08:26:12.000Z
|
2021-03-08T11:30:52.000Z
|
maskrcnn_benchmark/modeling/backbone/res2net_builder.py
|
koseimori/Res2Net-maskrcnn
|
e205ff67855b52375f340ca70a08995069424e5c
|
[
"MIT"
] | 17
|
2020-02-20T12:04:04.000Z
|
2021-06-06T07:26:23.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from collections import OrderedDict
from torch import nn
from maskrcnn_benchmark.modeling import registry
from maskrcnn_benchmark.modeling.make_layers import conv_with_kaiming_uniform
from . import fpn as fpn_module
from . import res2net
@registry.BACKBONES.register("R2-50-C4")
@registry.BACKBONES.register("R2-50-C5")
@registry.BACKBONES.register("R2-101-C4")
@registry.BACKBONES.register("R2-101-C5")
def build_res2net_backbone(cfg):
body = res2net.Res2Net(cfg)
model = nn.Sequential(OrderedDict([("body", body)]))
model.out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS
return model
@registry.BACKBONES.register("R2-50-FPN")
@registry.BACKBONES.register("R2-101-FPN")
@registry.BACKBONES.register("R2-152-FPN")
def build_res2net_fpn_backbone(cfg):
body = res2net.Res2Net(cfg)
in_channels_stage2 = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS
fpn = fpn_module.FPN(
in_channels_list=[
in_channels_stage2,
in_channels_stage2 * 2,
in_channels_stage2 * 4,
in_channels_stage2 * 8,
],
out_channels=out_channels,
conv_block=conv_with_kaiming_uniform(
cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU
),
top_blocks=fpn_module.LastLevelMaxPool(),
)
model = nn.Sequential(OrderedDict([("body", body), ("fpn", fpn)]))
model.out_channels = out_channels
return model
@registry.BACKBONES.register("R2-50-FPN-RETINANET")
@registry.BACKBONES.register("R2-101-FPN-RETINANET")
def build_res2net_fpn_p3p7_backbone(cfg):
body = res2net.Res2Net(cfg)
in_channels_stage2 = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS
in_channels_p6p7 = in_channels_stage2 * 8 if cfg.MODEL.RETINANET.USE_C5 \
else out_channels
fpn = fpn_module.FPN(
in_channels_list=[
0,
in_channels_stage2 * 2,
in_channels_stage2 * 4,
in_channels_stage2 * 8,
],
out_channels=out_channels,
conv_block=conv_with_kaiming_uniform(
cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU
),
top_blocks=fpn_module.LastLevelP6P7(in_channels_p6p7, out_channels),
)
model = nn.Sequential(OrderedDict([("body", body), ("fpn", fpn)]))
model.out_channels = out_channels
return model
# def build_backbone(cfg):
# assert cfg.MODEL.BACKBONE.CONV_BODY in registry.BACKBONES, \
# "cfg.MODEL.BACKBONE.CONV_BODY: {} are not registered in registry".format(
# cfg.MODEL.BACKBONE.CONV_BODY
# )
# return registry.BACKBONES[cfg.MODEL.BACKBONE.CONV_BODY](cfg)
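# A minimal standalone sketch of the registry/decorator pattern used above,
# with a plain dict standing in for maskrcnn_benchmark's registry
# (illustrative only; nothing below is used by this module):
_DEMO_BACKBONES = {}
def _demo_register(name):
    def decorator(fn):
        _DEMO_BACKBONES[name] = fn
        return fn
    return decorator
@_demo_register("demo-backbone")
def _demo_build(cfg):
    return ("built with", cfg)
# Dispatch then mirrors the commented-out build_backbone above:
# model = _DEMO_BACKBONES[cfg.MODEL.BACKBONE.CONV_BODY](cfg)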
| 34.875
| 83
| 0.699283
| 0
| 0
| 0
| 0
| 2,167
| 0.776703
| 0
| 0
| 513
| 0.183871
|
029644afd069012e2e180cddce470b4c75d102b6
| 608
|
py
|
Python
|
code/tmp_rtrip/test/memory_watchdog.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | 24
|
2018-01-23T05:28:40.000Z
|
2021-04-13T20:52:59.000Z
|
code/tmp_rtrip/test/memory_watchdog.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | 17
|
2017-12-21T18:32:31.000Z
|
2018-12-18T17:09:50.000Z
|
code/tmp_rtrip/test/memory_watchdog.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | null | null | null |
"""Memory watchdog: periodically read the memory usage of the main test process
and print it out, until terminated."""
import os
import sys
import time
try:
page_size = os.sysconf('SC_PAGESIZE')
except (ValueError, AttributeError):
try:
page_size = os.sysconf('SC_PAGE_SIZE')
except (ValueError, AttributeError):
page_size = 4096
while True:
    # sys.stdin is expected to be the target process's /proc/<pid>/statm,
    # so seek(0) + read() re-reads fresh values on every iteration.
    sys.stdin.seek(0)
    statm = sys.stdin.read()
    # Field 5 (0-based) of statm is "data" (data + stack), measured in pages.
    data = int(statm.split()[5])
sys.stdout.write(' ... process data size: {data:.1f}G\n'.format(data=
data * page_size / 1024 ** 3))
sys.stdout.flush()
time.sleep(1)
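# A minimal launch sketch (an assumption about intended usage, not part of
# this script): redirect the watchdog's stdin to the target process's
# /proc/<pid>/statm so that seek(0) + read() keeps re-reading fresh values.
# import os, subprocess, sys
# statm = open('/proc/%d/statm' % os.getpid())
# subprocess.Popen([sys.executable, 'memory_watchdog.py'], stdin=statm)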
| 28.952381
| 79
| 0.662829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 184
| 0.302632
|
02967401719aa2d8549023548710f426054a51b3
| 371
|
py
|
Python
|
tests/conftest.py
|
charles-cooper/crvfunder
|
63b4041ff06ff6ea943a7d69ae233719c4411bbd
|
[
"MIT"
] | 6
|
2022-03-17T21:10:41.000Z
|
2022-03-27T04:38:53.000Z
|
tests/conftest.py
|
charles-cooper/crvfunder
|
63b4041ff06ff6ea943a7d69ae233719c4411bbd
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
charles-cooper/crvfunder
|
63b4041ff06ff6ea943a7d69ae233719c4411bbd
|
[
"MIT"
] | 2
|
2022-03-26T03:37:40.000Z
|
2022-03-28T22:01:20.000Z
|
import pytest
pytest_plugins = ["fixtures.accounts", "fixtures.deployments"]
def pytest_sessionfinish(session, exitstatus):
if exitstatus == pytest.ExitCode.NO_TESTS_COLLECTED:
# we treat "no tests collected" as passing
session.exitstatus = pytest.ExitCode.OK
@pytest.fixture(autouse=True)
def isolation(module_isolation, fn_isolation):
pass
| 24.733333
| 62
| 0.749326
| 0
| 0
| 0
| 0
| 85
| 0.229111
| 0
| 0
| 83
| 0.22372
|
0297324475a0f71073a283c42e8668872ade345c
| 38,375
|
py
|
Python
|
sdk/python/pulumi_databricks/permissions.py
|
pulumi/pulumi-databricks
|
43580d4adbd04b72558f368ff0eef3d03432ebc1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_databricks/permissions.py
|
pulumi/pulumi-databricks
|
43580d4adbd04b72558f368ff0eef3d03432ebc1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_databricks/permissions.py
|
pulumi/pulumi-databricks
|
43580d4adbd04b72558f368ff0eef3d03432ebc1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PermissionsArgs', 'Permissions']
@pulumi.input_type
class PermissionsArgs:
def __init__(__self__, *,
access_controls: pulumi.Input[Sequence[pulumi.Input['PermissionsAccessControlArgs']]],
authorization: Optional[pulumi.Input[str]] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
cluster_policy_id: Optional[pulumi.Input[str]] = None,
directory_id: Optional[pulumi.Input[str]] = None,
directory_path: Optional[pulumi.Input[str]] = None,
experiment_id: Optional[pulumi.Input[str]] = None,
instance_pool_id: Optional[pulumi.Input[str]] = None,
job_id: Optional[pulumi.Input[str]] = None,
notebook_id: Optional[pulumi.Input[str]] = None,
notebook_path: Optional[pulumi.Input[str]] = None,
object_type: Optional[pulumi.Input[str]] = None,
registered_model_id: Optional[pulumi.Input[str]] = None,
repo_id: Optional[pulumi.Input[str]] = None,
repo_path: Optional[pulumi.Input[str]] = None,
sql_alert_id: Optional[pulumi.Input[str]] = None,
sql_dashboard_id: Optional[pulumi.Input[str]] = None,
sql_endpoint_id: Optional[pulumi.Input[str]] = None,
sql_query_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Permissions resource.
:param pulumi.Input[str] authorization: either [`tokens`](https://docs.databricks.com/administration-guide/access-control/tokens.html) or [`passwords`](https://docs.databricks.com/administration-guide/users-groups/single-sign-on/index.html#configure-password-permission).
:param pulumi.Input[str] cluster_id: cluster id
:param pulumi.Input[str] cluster_policy_id: cluster policy id
:param pulumi.Input[str] directory_id: directory id
:param pulumi.Input[str] directory_path: path of directory
:param pulumi.Input[str] instance_pool_id: instance pool id
:param pulumi.Input[str] job_id: job id
:param pulumi.Input[str] notebook_id: ID of notebook within workspace
:param pulumi.Input[str] notebook_path: path of notebook
:param pulumi.Input[str] object_type: type of permissions.
:param pulumi.Input[str] repo_id: repo id
:param pulumi.Input[str] repo_path: path of databricks repo directory(`/Repos/<username>/...`)
"""
pulumi.set(__self__, "access_controls", access_controls)
if authorization is not None:
pulumi.set(__self__, "authorization", authorization)
if cluster_id is not None:
pulumi.set(__self__, "cluster_id", cluster_id)
if cluster_policy_id is not None:
pulumi.set(__self__, "cluster_policy_id", cluster_policy_id)
if directory_id is not None:
pulumi.set(__self__, "directory_id", directory_id)
if directory_path is not None:
pulumi.set(__self__, "directory_path", directory_path)
if experiment_id is not None:
pulumi.set(__self__, "experiment_id", experiment_id)
if instance_pool_id is not None:
pulumi.set(__self__, "instance_pool_id", instance_pool_id)
if job_id is not None:
pulumi.set(__self__, "job_id", job_id)
if notebook_id is not None:
pulumi.set(__self__, "notebook_id", notebook_id)
if notebook_path is not None:
pulumi.set(__self__, "notebook_path", notebook_path)
if object_type is not None:
pulumi.set(__self__, "object_type", object_type)
if registered_model_id is not None:
pulumi.set(__self__, "registered_model_id", registered_model_id)
if repo_id is not None:
pulumi.set(__self__, "repo_id", repo_id)
if repo_path is not None:
pulumi.set(__self__, "repo_path", repo_path)
if sql_alert_id is not None:
pulumi.set(__self__, "sql_alert_id", sql_alert_id)
if sql_dashboard_id is not None:
pulumi.set(__self__, "sql_dashboard_id", sql_dashboard_id)
if sql_endpoint_id is not None:
pulumi.set(__self__, "sql_endpoint_id", sql_endpoint_id)
if sql_query_id is not None:
pulumi.set(__self__, "sql_query_id", sql_query_id)
@property
@pulumi.getter(name="accessControls")
def access_controls(self) -> pulumi.Input[Sequence[pulumi.Input['PermissionsAccessControlArgs']]]:
return pulumi.get(self, "access_controls")
@access_controls.setter
def access_controls(self, value: pulumi.Input[Sequence[pulumi.Input['PermissionsAccessControlArgs']]]):
pulumi.set(self, "access_controls", value)
@property
@pulumi.getter
def authorization(self) -> Optional[pulumi.Input[str]]:
"""
either [`tokens`](https://docs.databricks.com/administration-guide/access-control/tokens.html) or [`passwords`](https://docs.databricks.com/administration-guide/users-groups/single-sign-on/index.html#configure-password-permission).
"""
return pulumi.get(self, "authorization")
@authorization.setter
def authorization(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "authorization", value)
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> Optional[pulumi.Input[str]]:
"""
cluster id
"""
return pulumi.get(self, "cluster_id")
@cluster_id.setter
def cluster_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_id", value)
@property
@pulumi.getter(name="clusterPolicyId")
def cluster_policy_id(self) -> Optional[pulumi.Input[str]]:
"""
cluster policy id
"""
return pulumi.get(self, "cluster_policy_id")
@cluster_policy_id.setter
def cluster_policy_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_policy_id", value)
@property
@pulumi.getter(name="directoryId")
def directory_id(self) -> Optional[pulumi.Input[str]]:
"""
directory id
"""
return pulumi.get(self, "directory_id")
@directory_id.setter
def directory_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "directory_id", value)
@property
@pulumi.getter(name="directoryPath")
def directory_path(self) -> Optional[pulumi.Input[str]]:
"""
path of directory
"""
return pulumi.get(self, "directory_path")
@directory_path.setter
def directory_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "directory_path", value)
@property
@pulumi.getter(name="experimentId")
def experiment_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "experiment_id")
@experiment_id.setter
def experiment_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "experiment_id", value)
@property
@pulumi.getter(name="instancePoolId")
def instance_pool_id(self) -> Optional[pulumi.Input[str]]:
"""
instance pool id
"""
return pulumi.get(self, "instance_pool_id")
@instance_pool_id.setter
def instance_pool_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_pool_id", value)
@property
@pulumi.getter(name="jobId")
def job_id(self) -> Optional[pulumi.Input[str]]:
"""
job id
"""
return pulumi.get(self, "job_id")
@job_id.setter
def job_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "job_id", value)
@property
@pulumi.getter(name="notebookId")
def notebook_id(self) -> Optional[pulumi.Input[str]]:
"""
ID of notebook within workspace
"""
return pulumi.get(self, "notebook_id")
@notebook_id.setter
def notebook_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notebook_id", value)
@property
@pulumi.getter(name="notebookPath")
def notebook_path(self) -> Optional[pulumi.Input[str]]:
"""
path of notebook
"""
return pulumi.get(self, "notebook_path")
@notebook_path.setter
def notebook_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notebook_path", value)
@property
@pulumi.getter(name="objectType")
def object_type(self) -> Optional[pulumi.Input[str]]:
"""
type of permissions.
"""
return pulumi.get(self, "object_type")
@object_type.setter
def object_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "object_type", value)
@property
@pulumi.getter(name="registeredModelId")
def registered_model_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "registered_model_id")
@registered_model_id.setter
def registered_model_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "registered_model_id", value)
@property
@pulumi.getter(name="repoId")
def repo_id(self) -> Optional[pulumi.Input[str]]:
"""
repo id
"""
return pulumi.get(self, "repo_id")
@repo_id.setter
def repo_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "repo_id", value)
@property
@pulumi.getter(name="repoPath")
def repo_path(self) -> Optional[pulumi.Input[str]]:
"""
path of databricks repo directory(`/Repos/<username>/...`)
"""
return pulumi.get(self, "repo_path")
@repo_path.setter
def repo_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "repo_path", value)
@property
@pulumi.getter(name="sqlAlertId")
def sql_alert_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sql_alert_id")
@sql_alert_id.setter
def sql_alert_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sql_alert_id", value)
@property
@pulumi.getter(name="sqlDashboardId")
def sql_dashboard_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sql_dashboard_id")
@sql_dashboard_id.setter
def sql_dashboard_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sql_dashboard_id", value)
@property
@pulumi.getter(name="sqlEndpointId")
def sql_endpoint_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sql_endpoint_id")
@sql_endpoint_id.setter
def sql_endpoint_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sql_endpoint_id", value)
@property
@pulumi.getter(name="sqlQueryId")
def sql_query_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sql_query_id")
@sql_query_id.setter
def sql_query_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sql_query_id", value)
@pulumi.input_type
class _PermissionsState:
def __init__(__self__, *,
access_controls: Optional[pulumi.Input[Sequence[pulumi.Input['PermissionsAccessControlArgs']]]] = None,
authorization: Optional[pulumi.Input[str]] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
cluster_policy_id: Optional[pulumi.Input[str]] = None,
directory_id: Optional[pulumi.Input[str]] = None,
directory_path: Optional[pulumi.Input[str]] = None,
experiment_id: Optional[pulumi.Input[str]] = None,
instance_pool_id: Optional[pulumi.Input[str]] = None,
job_id: Optional[pulumi.Input[str]] = None,
notebook_id: Optional[pulumi.Input[str]] = None,
notebook_path: Optional[pulumi.Input[str]] = None,
object_type: Optional[pulumi.Input[str]] = None,
registered_model_id: Optional[pulumi.Input[str]] = None,
repo_id: Optional[pulumi.Input[str]] = None,
repo_path: Optional[pulumi.Input[str]] = None,
sql_alert_id: Optional[pulumi.Input[str]] = None,
sql_dashboard_id: Optional[pulumi.Input[str]] = None,
sql_endpoint_id: Optional[pulumi.Input[str]] = None,
sql_query_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Permissions resources.
:param pulumi.Input[str] authorization: either [`tokens`](https://docs.databricks.com/administration-guide/access-control/tokens.html) or [`passwords`](https://docs.databricks.com/administration-guide/users-groups/single-sign-on/index.html#configure-password-permission).
:param pulumi.Input[str] cluster_id: cluster id
:param pulumi.Input[str] cluster_policy_id: cluster policy id
:param pulumi.Input[str] directory_id: directory id
:param pulumi.Input[str] directory_path: path of directory
:param pulumi.Input[str] instance_pool_id: instance pool id
:param pulumi.Input[str] job_id: job id
:param pulumi.Input[str] notebook_id: ID of notebook within workspace
:param pulumi.Input[str] notebook_path: path of notebook
:param pulumi.Input[str] object_type: type of permissions.
:param pulumi.Input[str] repo_id: repo id
:param pulumi.Input[str] repo_path: path of databricks repo directory(`/Repos/<username>/...`)
"""
if access_controls is not None:
pulumi.set(__self__, "access_controls", access_controls)
if authorization is not None:
pulumi.set(__self__, "authorization", authorization)
if cluster_id is not None:
pulumi.set(__self__, "cluster_id", cluster_id)
if cluster_policy_id is not None:
pulumi.set(__self__, "cluster_policy_id", cluster_policy_id)
if directory_id is not None:
pulumi.set(__self__, "directory_id", directory_id)
if directory_path is not None:
pulumi.set(__self__, "directory_path", directory_path)
if experiment_id is not None:
pulumi.set(__self__, "experiment_id", experiment_id)
if instance_pool_id is not None:
pulumi.set(__self__, "instance_pool_id", instance_pool_id)
if job_id is not None:
pulumi.set(__self__, "job_id", job_id)
if notebook_id is not None:
pulumi.set(__self__, "notebook_id", notebook_id)
if notebook_path is not None:
pulumi.set(__self__, "notebook_path", notebook_path)
if object_type is not None:
pulumi.set(__self__, "object_type", object_type)
if registered_model_id is not None:
pulumi.set(__self__, "registered_model_id", registered_model_id)
if repo_id is not None:
pulumi.set(__self__, "repo_id", repo_id)
if repo_path is not None:
pulumi.set(__self__, "repo_path", repo_path)
if sql_alert_id is not None:
pulumi.set(__self__, "sql_alert_id", sql_alert_id)
if sql_dashboard_id is not None:
pulumi.set(__self__, "sql_dashboard_id", sql_dashboard_id)
if sql_endpoint_id is not None:
pulumi.set(__self__, "sql_endpoint_id", sql_endpoint_id)
if sql_query_id is not None:
pulumi.set(__self__, "sql_query_id", sql_query_id)
@property
@pulumi.getter(name="accessControls")
def access_controls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PermissionsAccessControlArgs']]]]:
return pulumi.get(self, "access_controls")
@access_controls.setter
def access_controls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PermissionsAccessControlArgs']]]]):
pulumi.set(self, "access_controls", value)
@property
@pulumi.getter
def authorization(self) -> Optional[pulumi.Input[str]]:
"""
either [`tokens`](https://docs.databricks.com/administration-guide/access-control/tokens.html) or [`passwords`](https://docs.databricks.com/administration-guide/users-groups/single-sign-on/index.html#configure-password-permission).
"""
return pulumi.get(self, "authorization")
@authorization.setter
def authorization(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "authorization", value)
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> Optional[pulumi.Input[str]]:
"""
cluster id
"""
return pulumi.get(self, "cluster_id")
@cluster_id.setter
def cluster_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_id", value)
@property
@pulumi.getter(name="clusterPolicyId")
def cluster_policy_id(self) -> Optional[pulumi.Input[str]]:
"""
cluster policy id
"""
return pulumi.get(self, "cluster_policy_id")
@cluster_policy_id.setter
def cluster_policy_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_policy_id", value)
@property
@pulumi.getter(name="directoryId")
def directory_id(self) -> Optional[pulumi.Input[str]]:
"""
directory id
"""
return pulumi.get(self, "directory_id")
@directory_id.setter
def directory_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "directory_id", value)
@property
@pulumi.getter(name="directoryPath")
def directory_path(self) -> Optional[pulumi.Input[str]]:
"""
path of directory
"""
return pulumi.get(self, "directory_path")
@directory_path.setter
def directory_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "directory_path", value)
@property
@pulumi.getter(name="experimentId")
def experiment_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "experiment_id")
@experiment_id.setter
def experiment_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "experiment_id", value)
@property
@pulumi.getter(name="instancePoolId")
def instance_pool_id(self) -> Optional[pulumi.Input[str]]:
"""
instance pool id
"""
return pulumi.get(self, "instance_pool_id")
@instance_pool_id.setter
def instance_pool_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_pool_id", value)
@property
@pulumi.getter(name="jobId")
def job_id(self) -> Optional[pulumi.Input[str]]:
"""
job id
"""
return pulumi.get(self, "job_id")
@job_id.setter
def job_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "job_id", value)
@property
@pulumi.getter(name="notebookId")
def notebook_id(self) -> Optional[pulumi.Input[str]]:
"""
ID of notebook within workspace
"""
return pulumi.get(self, "notebook_id")
@notebook_id.setter
def notebook_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notebook_id", value)
@property
@pulumi.getter(name="notebookPath")
def notebook_path(self) -> Optional[pulumi.Input[str]]:
"""
path of notebook
"""
return pulumi.get(self, "notebook_path")
@notebook_path.setter
def notebook_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notebook_path", value)
@property
@pulumi.getter(name="objectType")
def object_type(self) -> Optional[pulumi.Input[str]]:
"""
type of permissions.
"""
return pulumi.get(self, "object_type")
@object_type.setter
def object_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "object_type", value)
@property
@pulumi.getter(name="registeredModelId")
def registered_model_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "registered_model_id")
@registered_model_id.setter
def registered_model_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "registered_model_id", value)
@property
@pulumi.getter(name="repoId")
def repo_id(self) -> Optional[pulumi.Input[str]]:
"""
repo id
"""
return pulumi.get(self, "repo_id")
@repo_id.setter
def repo_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "repo_id", value)
@property
@pulumi.getter(name="repoPath")
def repo_path(self) -> Optional[pulumi.Input[str]]:
"""
path of databricks repo directory(`/Repos/<username>/...`)
"""
return pulumi.get(self, "repo_path")
@repo_path.setter
def repo_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "repo_path", value)
@property
@pulumi.getter(name="sqlAlertId")
def sql_alert_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sql_alert_id")
@sql_alert_id.setter
def sql_alert_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sql_alert_id", value)
@property
@pulumi.getter(name="sqlDashboardId")
def sql_dashboard_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sql_dashboard_id")
@sql_dashboard_id.setter
def sql_dashboard_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sql_dashboard_id", value)
@property
@pulumi.getter(name="sqlEndpointId")
def sql_endpoint_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sql_endpoint_id")
@sql_endpoint_id.setter
def sql_endpoint_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sql_endpoint_id", value)
@property
@pulumi.getter(name="sqlQueryId")
def sql_query_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sql_query_id")
@sql_query_id.setter
def sql_query_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sql_query_id", value)
class Permissions(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_controls: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PermissionsAccessControlArgs']]]]] = None,
authorization: Optional[pulumi.Input[str]] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
cluster_policy_id: Optional[pulumi.Input[str]] = None,
directory_id: Optional[pulumi.Input[str]] = None,
directory_path: Optional[pulumi.Input[str]] = None,
experiment_id: Optional[pulumi.Input[str]] = None,
instance_pool_id: Optional[pulumi.Input[str]] = None,
job_id: Optional[pulumi.Input[str]] = None,
notebook_id: Optional[pulumi.Input[str]] = None,
notebook_path: Optional[pulumi.Input[str]] = None,
object_type: Optional[pulumi.Input[str]] = None,
registered_model_id: Optional[pulumi.Input[str]] = None,
repo_id: Optional[pulumi.Input[str]] = None,
repo_path: Optional[pulumi.Input[str]] = None,
sql_alert_id: Optional[pulumi.Input[str]] = None,
sql_dashboard_id: Optional[pulumi.Input[str]] = None,
sql_endpoint_id: Optional[pulumi.Input[str]] = None,
sql_query_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Import
        The resource permissions can be imported using the object id:
```sh
$ pulumi import databricks:index/permissions:Permissions this /<object type>/<object id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authorization: either [`tokens`](https://docs.databricks.com/administration-guide/access-control/tokens.html) or [`passwords`](https://docs.databricks.com/administration-guide/users-groups/single-sign-on/index.html#configure-password-permission).
:param pulumi.Input[str] cluster_id: cluster id
:param pulumi.Input[str] cluster_policy_id: cluster policy id
:param pulumi.Input[str] directory_id: directory id
:param pulumi.Input[str] directory_path: path of directory
:param pulumi.Input[str] instance_pool_id: instance pool id
:param pulumi.Input[str] job_id: job id
:param pulumi.Input[str] notebook_id: ID of notebook within workspace
:param pulumi.Input[str] notebook_path: path of notebook
:param pulumi.Input[str] object_type: type of permissions.
:param pulumi.Input[str] repo_id: repo id
:param pulumi.Input[str] repo_path: path of databricks repo directory(`/Repos/<username>/...`)
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PermissionsArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
        The resource permissions can be imported using the object id:
```sh
$ pulumi import databricks:index/permissions:Permissions this /<object type>/<object id>
```
:param str resource_name: The name of the resource.
:param PermissionsArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PermissionsArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_controls: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PermissionsAccessControlArgs']]]]] = None,
authorization: Optional[pulumi.Input[str]] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
cluster_policy_id: Optional[pulumi.Input[str]] = None,
directory_id: Optional[pulumi.Input[str]] = None,
directory_path: Optional[pulumi.Input[str]] = None,
experiment_id: Optional[pulumi.Input[str]] = None,
instance_pool_id: Optional[pulumi.Input[str]] = None,
job_id: Optional[pulumi.Input[str]] = None,
notebook_id: Optional[pulumi.Input[str]] = None,
notebook_path: Optional[pulumi.Input[str]] = None,
object_type: Optional[pulumi.Input[str]] = None,
registered_model_id: Optional[pulumi.Input[str]] = None,
repo_id: Optional[pulumi.Input[str]] = None,
repo_path: Optional[pulumi.Input[str]] = None,
sql_alert_id: Optional[pulumi.Input[str]] = None,
sql_dashboard_id: Optional[pulumi.Input[str]] = None,
sql_endpoint_id: Optional[pulumi.Input[str]] = None,
sql_query_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PermissionsArgs.__new__(PermissionsArgs)
if access_controls is None and not opts.urn:
raise TypeError("Missing required property 'access_controls'")
__props__.__dict__["access_controls"] = access_controls
__props__.__dict__["authorization"] = authorization
__props__.__dict__["cluster_id"] = cluster_id
__props__.__dict__["cluster_policy_id"] = cluster_policy_id
__props__.__dict__["directory_id"] = directory_id
__props__.__dict__["directory_path"] = directory_path
__props__.__dict__["experiment_id"] = experiment_id
__props__.__dict__["instance_pool_id"] = instance_pool_id
__props__.__dict__["job_id"] = job_id
__props__.__dict__["notebook_id"] = notebook_id
__props__.__dict__["notebook_path"] = notebook_path
__props__.__dict__["object_type"] = object_type
__props__.__dict__["registered_model_id"] = registered_model_id
__props__.__dict__["repo_id"] = repo_id
__props__.__dict__["repo_path"] = repo_path
__props__.__dict__["sql_alert_id"] = sql_alert_id
__props__.__dict__["sql_dashboard_id"] = sql_dashboard_id
__props__.__dict__["sql_endpoint_id"] = sql_endpoint_id
__props__.__dict__["sql_query_id"] = sql_query_id
super(Permissions, __self__).__init__(
'databricks:index/permissions:Permissions',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
access_controls: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PermissionsAccessControlArgs']]]]] = None,
authorization: Optional[pulumi.Input[str]] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
cluster_policy_id: Optional[pulumi.Input[str]] = None,
directory_id: Optional[pulumi.Input[str]] = None,
directory_path: Optional[pulumi.Input[str]] = None,
experiment_id: Optional[pulumi.Input[str]] = None,
instance_pool_id: Optional[pulumi.Input[str]] = None,
job_id: Optional[pulumi.Input[str]] = None,
notebook_id: Optional[pulumi.Input[str]] = None,
notebook_path: Optional[pulumi.Input[str]] = None,
object_type: Optional[pulumi.Input[str]] = None,
registered_model_id: Optional[pulumi.Input[str]] = None,
repo_id: Optional[pulumi.Input[str]] = None,
repo_path: Optional[pulumi.Input[str]] = None,
sql_alert_id: Optional[pulumi.Input[str]] = None,
sql_dashboard_id: Optional[pulumi.Input[str]] = None,
sql_endpoint_id: Optional[pulumi.Input[str]] = None,
sql_query_id: Optional[pulumi.Input[str]] = None) -> 'Permissions':
"""
Get an existing Permissions resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authorization: either [`tokens`](https://docs.databricks.com/administration-guide/access-control/tokens.html) or [`passwords`](https://docs.databricks.com/administration-guide/users-groups/single-sign-on/index.html#configure-password-permission).
:param pulumi.Input[str] cluster_id: cluster id
:param pulumi.Input[str] cluster_policy_id: cluster policy id
:param pulumi.Input[str] directory_id: directory id
:param pulumi.Input[str] directory_path: path of directory
:param pulumi.Input[str] instance_pool_id: instance pool id
:param pulumi.Input[str] job_id: job id
:param pulumi.Input[str] notebook_id: ID of notebook within workspace
:param pulumi.Input[str] notebook_path: path of notebook
:param pulumi.Input[str] object_type: type of permissions.
:param pulumi.Input[str] repo_id: repo id
:param pulumi.Input[str] repo_path: path of databricks repo directory(`/Repos/<username>/...`)
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _PermissionsState.__new__(_PermissionsState)
__props__.__dict__["access_controls"] = access_controls
__props__.__dict__["authorization"] = authorization
__props__.__dict__["cluster_id"] = cluster_id
__props__.__dict__["cluster_policy_id"] = cluster_policy_id
__props__.__dict__["directory_id"] = directory_id
__props__.__dict__["directory_path"] = directory_path
__props__.__dict__["experiment_id"] = experiment_id
__props__.__dict__["instance_pool_id"] = instance_pool_id
__props__.__dict__["job_id"] = job_id
__props__.__dict__["notebook_id"] = notebook_id
__props__.__dict__["notebook_path"] = notebook_path
__props__.__dict__["object_type"] = object_type
__props__.__dict__["registered_model_id"] = registered_model_id
__props__.__dict__["repo_id"] = repo_id
__props__.__dict__["repo_path"] = repo_path
__props__.__dict__["sql_alert_id"] = sql_alert_id
__props__.__dict__["sql_dashboard_id"] = sql_dashboard_id
__props__.__dict__["sql_endpoint_id"] = sql_endpoint_id
__props__.__dict__["sql_query_id"] = sql_query_id
return Permissions(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accessControls")
def access_controls(self) -> pulumi.Output[Sequence['outputs.PermissionsAccessControl']]:
return pulumi.get(self, "access_controls")
@property
@pulumi.getter
def authorization(self) -> pulumi.Output[Optional[str]]:
"""
either [`tokens`](https://docs.databricks.com/administration-guide/access-control/tokens.html) or [`passwords`](https://docs.databricks.com/administration-guide/users-groups/single-sign-on/index.html#configure-password-permission).
"""
return pulumi.get(self, "authorization")
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> pulumi.Output[Optional[str]]:
"""
cluster id
"""
return pulumi.get(self, "cluster_id")
@property
@pulumi.getter(name="clusterPolicyId")
def cluster_policy_id(self) -> pulumi.Output[Optional[str]]:
"""
cluster policy id
"""
return pulumi.get(self, "cluster_policy_id")
@property
@pulumi.getter(name="directoryId")
def directory_id(self) -> pulumi.Output[Optional[str]]:
"""
directory id
"""
return pulumi.get(self, "directory_id")
@property
@pulumi.getter(name="directoryPath")
def directory_path(self) -> pulumi.Output[Optional[str]]:
"""
path of directory
"""
return pulumi.get(self, "directory_path")
@property
@pulumi.getter(name="experimentId")
def experiment_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "experiment_id")
@property
@pulumi.getter(name="instancePoolId")
def instance_pool_id(self) -> pulumi.Output[Optional[str]]:
"""
instance pool id
"""
return pulumi.get(self, "instance_pool_id")
@property
@pulumi.getter(name="jobId")
def job_id(self) -> pulumi.Output[Optional[str]]:
"""
job id
"""
return pulumi.get(self, "job_id")
@property
@pulumi.getter(name="notebookId")
def notebook_id(self) -> pulumi.Output[Optional[str]]:
"""
ID of notebook within workspace
"""
return pulumi.get(self, "notebook_id")
@property
@pulumi.getter(name="notebookPath")
def notebook_path(self) -> pulumi.Output[Optional[str]]:
"""
path of notebook
"""
return pulumi.get(self, "notebook_path")
@property
@pulumi.getter(name="objectType")
def object_type(self) -> pulumi.Output[str]:
"""
type of permissions.
"""
return pulumi.get(self, "object_type")
@property
@pulumi.getter(name="registeredModelId")
def registered_model_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "registered_model_id")
@property
@pulumi.getter(name="repoId")
def repo_id(self) -> pulumi.Output[Optional[str]]:
"""
repo id
"""
return pulumi.get(self, "repo_id")
@property
@pulumi.getter(name="repoPath")
def repo_path(self) -> pulumi.Output[Optional[str]]:
"""
path of databricks repo directory(`/Repos/<username>/...`)
"""
return pulumi.get(self, "repo_path")
@property
@pulumi.getter(name="sqlAlertId")
def sql_alert_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "sql_alert_id")
@property
@pulumi.getter(name="sqlDashboardId")
def sql_dashboard_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "sql_dashboard_id")
@property
@pulumi.getter(name="sqlEndpointId")
def sql_endpoint_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "sql_endpoint_id")
@property
@pulumi.getter(name="sqlQueryId")
def sql_query_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "sql_query_id")
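# A minimal usage sketch (illustrative only: the resource name, job id,
# group name and permission level are placeholders, and actually running
# it requires a Pulumi program with a configured Databricks provider):
def _permissions_usage_demo():
    return Permissions("job-perms",
        job_id="123",
        access_controls=[PermissionsAccessControlArgs(
            group_name="data-engineers",
            permission_level="CAN_MANAGE_RUN",
        )])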
| 41.802832
| 279
| 0.645368
| 37,906
| 0.987779
| 0
| 0
| 33,780
| 0.880261
| 0
| 0
| 11,641
| 0.303349
|
02974a7f2e55a4545889ad1727cb810be5d621b5
| 1,254
|
py
|
Python
|
file/txt2bin.py
|
QPointNotebook/PythonSample
|
53c2a54da2bf9a61449ed1c7d2864c5c0eedc5e0
|
[
"MIT"
] | null | null | null |
file/txt2bin.py
|
QPointNotebook/PythonSample
|
53c2a54da2bf9a61449ed1c7d2864c5c0eedc5e0
|
[
"MIT"
] | null | null | null |
file/txt2bin.py
|
QPointNotebook/PythonSample
|
53c2a54da2bf9a61449ed1c7d2864c5c0eedc5e0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from file.file import file
class txt2bin( file ):
    def read( self, file ):
        datas = []
        with open( file, 'r', encoding='utf-8' ) as f:
            for line in f:
                text = line.rstrip( '\n' )  # drop the trailing newline
                if not text:
                    d = b''
                else:
                    val = int( text, 16 )                       # hex text -> int
                    leng = ( len( text ) + 1 ) // 2             # bytes needed; round up for odd digit counts
                    d = val.to_bytes( leng, byteorder='big' )   # int -> binary
                datas.append( d )
        return datas
    def write( self, file, datas ):
        with open( file, 'w', encoding='utf-8' ) as f:
            for data in datas:
                if len( data ) == 0:
                    f.write( '\n' )
                else:
                    # bytes.hex() always emits two digits per byte, so leading
                    # zero bytes survive (hex(int) would drop them)
                    f.write( data.hex() + '\n' )
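# Round-trip sketch, assuming the inherited `file` base class needs no
# constructor arguments:
#   converter = txt2bin()
#   blobs = converter.read( 'dump.txt' )    # one bytes object per hex line
#   converter.write( 'copy.txt', blobs )    # reproduces the original file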
| 31.35
| 77
| 0.369219
| 1,178
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 194
| 0.154705
|
02982b64a64f41b7dc43e4d28a9770dcfad2d139
| 2,105
|
py
|
Python
|
PGPs_tensorflow/Examples/Airline.py
|
maziarraissi/ParametricGP
|
d5974c9e41a2cd761c0cfaff138c5b1722c006db
|
[
"MIT"
] | 43
|
2017-04-12T10:43:21.000Z
|
2022-02-28T05:16:02.000Z
|
PGPs_tensorflow/Examples/Airline.py
|
arita37/ParametricGP
|
9c04f3166c22e787a92290fe4353ba4f918ed598
|
[
"MIT"
] | 1
|
2018-05-25T00:26:10.000Z
|
2018-05-29T05:26:15.000Z
|
PGPs_tensorflow/Examples/Airline.py
|
arita37/ParametricGP
|
9c04f3166c22e787a92290fe4353ba4f918ed598
|
[
"MIT"
] | 22
|
2017-04-12T02:22:08.000Z
|
2021-04-10T23:19:52.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Maziar Raissi
"""
import sys
sys.path.insert(0, '../PGP/')
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from parametric_GP import PGP
if __name__ == "__main__":
# Import the data
data = pd.read_pickle('airline.pickle')
# Convert time of day from hhmm to minutes since midnight
data.ArrTime = 60*np.floor(data.ArrTime/100)+np.mod(data.ArrTime, 100)
data.DepTime = 60*np.floor(data.DepTime/100)+np.mod(data.DepTime, 100)
# Pick out the data
Y = data['ArrDelay'].values
names = ['Month', 'DayofMonth', 'DayOfWeek', 'plane_age', 'AirTime', 'Distance', 'ArrTime', 'DepTime']
X = data[names].values
N = len(data)
np.random.seed(N)
    # Shuffle the data and hold out the last third as a test set
perm = np.random.permutation(N)
X = X[perm]
Y = Y[perm]
XT = X[int(2*N/3):N]
YT = Y[int(2*N/3):N]
X = X[:int(2*N/3)]
Y = Y[:int(2*N/3)]
# Normalize Y scale and offset
Ymean = Y.mean()
Ystd = Y.std()
Y = (Y - Ymean) / Ystd
Y = Y.reshape(-1, 1)
YT = (YT - Ymean) / Ystd
YT = YT.reshape(-1, 1)
# Normalize X on [0, 1]
Xmin, Xmax = X.min(0), X.max(0)
X = (X - Xmin) / (Xmax - Xmin)
XT = (XT - Xmin) / (Xmax - Xmin)
# Model creation
M = 500
pgp = PGP(X, Y, M, max_iter = 10000, N_batch = 1000,
monitor_likelihood = 10, lrate = 1e-3)
# Training
pgp.train()
# Prediction
mean_star, var_star = pgp.predict(XT)
# MSE
print('MSE: %f' % ((mean_star-YT)**2).mean())
print('MSE_mean: %f' % ((Y.mean()-YT)**2).mean())
# ARD
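    # assuming pgp.hyp[1:-1] holds log squared lengthscales, this converts them
    # to inverse lengthscales: a larger ARD weight marks a more relevant input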
ARD = 1/np.sqrt(np.exp(pgp.hyp[1:-1]))
ARD_x = np.arange(len(ARD))
fig, ax = plt.subplots(figsize=(10,5))
plt.rcParams.update({'font.size': 16})
ax.barh(ARD_x,ARD)
ax.set_yticks(ARD_x)
ax.set_yticklabels(names)
ax.set_xlabel('ARD weights')
plt.savefig('../Fig/Flights.eps', format='eps', dpi=1000)
#####
# MSE: 0.832810
# MSE_mean: 0.999799
| 25.059524
| 106
| 0.566271
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 555
| 0.263658
|
029864a3d0017b3744cbc0ce2c0fdf1a9dd81484
| 2,081
|
py
|
Python
|
django_project/proj/settings/local.py
|
robabram/Quickstart-Secure-Django-Template
|
22f304e864f8f6ce972f44bce6fe9b885341201a
|
[
"MIT"
] | 9
|
2018-10-03T00:30:57.000Z
|
2021-12-29T07:48:08.000Z
|
django_project/proj/settings/local.py
|
robabram/Quickstart-Secure-Django-Template
|
22f304e864f8f6ce972f44bce6fe9b885341201a
|
[
"MIT"
] | 9
|
2020-02-10T17:08:01.000Z
|
2021-11-19T17:21:18.000Z
|
django_project/proj/settings/local.py
|
robabram/Quickstart-Secure-Django-Template
|
22f304e864f8f6ce972f44bce6fe9b885341201a
|
[
"MIT"
] | null | null | null |
#
# Author: Robert Abram <rabram991@gmail.com>
#
# This file is subject to the terms and conditions defined in the
# file 'LICENSE', which is part of this source code package.
#
import os
from proj.settings.base import *
#
# Logging
#
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
        'file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'website.log',
            'formatter': 'verbose',
            'maxBytes': 1024 * 1000 * 100,  # 100MB
            'backupCount': 5,  # rollover never occurs if backupCount stays 0
        },
        'lockout': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'lockout.log',
            'formatter': 'verbose',
            'maxBytes': 1024 * 1000 * 100,  # 100MB
            'backupCount': 5,  # rollover never occurs if backupCount stays 0
        },
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.db.backends': {
'handlers': ['console'], # Could change to: ['null'],
'level': 'ERROR', # Change this to DEBUG to see SQL Queries in log output
},
'django': {
'handlers': ['console'],
'propagate': True,
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
'axes.watch_login': {
'handlers': ['lockout'],
'propagate': False,
'level': 'INFO',
},
'celery': {
'handlers': ['console'],
'propagate': False,
'level': os.getenv('DJANGO_LOG_LEVEL', 'WARNING'),
},
}
}
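# Usage sketch, assuming this settings module is active:
#   import logging
#   logging.getLogger('django').info('visible on the console at INFO and above')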
| 27.381579
| 86
| 0.475252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,159
| 0.556944
|
0298d15f0c2dd54fb30dc11d08603cd497ca28b4
| 3,426
|
py
|
Python
|
pyfos/utils/system_security/seccertmgmt_show.py
|
madhavinaiduprathap/pyfosbrocade
|
ec100e77c441761c3e688f1d8e5d18ad38cc83f4
|
[
"Apache-2.0"
] | 44
|
2017-11-17T12:03:11.000Z
|
2022-02-03T20:57:56.000Z
|
pyfos/utils/system_security/seccertmgmt_show.py
|
madhavinaiduprathap/pyfosbrocade
|
ec100e77c441761c3e688f1d8e5d18ad38cc83f4
|
[
"Apache-2.0"
] | 13
|
2018-10-09T15:34:15.000Z
|
2022-02-24T20:03:17.000Z
|
pyfos/utils/system_security/seccertmgmt_show.py
|
madhavinaiduprathap/pyfosbrocade
|
ec100e77c441761c3e688f1d8e5d18ad38cc83f4
|
[
"Apache-2.0"
] | 23
|
2017-12-14T18:08:33.000Z
|
2022-02-03T15:33:40.000Z
|
#!/usr/bin/env python3
# Copyright 2018 Brocade Communications Systems LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`seccertmgmt_show` - PyFOS util for displaying certificates in the switch.
***********************************************************************************
The :mod:`seccertmgmt_show` util provides the option to display a certificate.
This module can be used to display a certificate. If the certificate entity \
and type are not provided, information for all certificates is displayed.
* Input:
| Infrastructure Options:
| -i,--ipaddr=IPADDR The IP address of the FOS switch.
| -L,--login=LOGIN The login name.
| -P,--password=PASSWORD The password.
| -s,--secured=MODE The HTTPS mode "self" or "CA" [OPTIONAL].
| -v,--verbose Verbose mode [OPTIONAL].
* Util Script Options:
| --certificate-entity=ENTITY-NAME Sets the certificate entity name.
| --certificate-type=CERT-TYPE Sets the certificate type.
| --is-hexdump-show Displays the raw hex data.
* Output:
* The certificate information.
.. function:: seccertmgmt_show.show_security_certificate(session, cert_entity, cert_type)
    * Displays the certificate and its information in the switch.
        Example Usage of the Method:
            ret = seccertmgmt_show.show_security_certificate(session, \
cert_entity, cert_type)
            print (ret)
        Details::
            result = seccertmgmt_show.show_security_certificate(
                session, 'cert', 'https')
* Input:
:param session: The session returned by the login.
:param cert_entity: The associated certificate entity.
:param cert_type: The associated certificate type.
* Output:
:rtype: A dictionary of return status matching the REST response.
*Use Cases*
1. Retrieve the certificate-related information.
"""
import sys
from pyfos import pyfos_auth
from pyfos import pyfos_util
from pyfos.pyfos_brocade_security import security_certificate
from pyfos.utils import brcd_util
def _show_cert(session, restobject):
return restobject.get(session)
def show_security_certificate(session, cert_entity, cert_type):
seccertmgmt_obj = security_certificate()
seccertmgmt_obj.set_certificate_entity(cert_entity)
    seccertmgmt_obj.set_certificate_type(cert_type)
result = _show_cert(session, seccertmgmt_obj)
return result
def main(argv):
# Print arguments
# print(sys.argv[1:])
filters = ['certificate_entity', 'certificate_type']
inputs = brcd_util.parse(argv, security_certificate, filters)
session = brcd_util.getsession(inputs)
result = _show_cert(inputs['session'], inputs['utilobject'])
pyfos_util.response_print(result)
pyfos_auth.logout(session)
if __name__ == "__main__":
main(sys.argv[1:])
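# Example invocation (the switch address and credentials are hypothetical):
#   python seccertmgmt_show.py -i 10.0.0.1 -L admin -P password \
#       --certificate-entity cert --certificate-type https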
| 30.864865
| 83
| 0.694104
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,563
| 0.748103
|
029a249ed3ef3d36cbd9d5a8a3e1445f10e3e29d
| 2,345
|
py
|
Python
|
tests/base.py
|
Suhail6inkling/bot
|
95ae05c773e753699e6899255783c1d7df936024
|
[
"MIT"
] | 1
|
2022-01-01T17:33:48.000Z
|
2022-01-01T17:33:48.000Z
|
tests/base.py
|
Suhail6inkling/bot
|
95ae05c773e753699e6899255783c1d7df936024
|
[
"MIT"
] | null | null | null |
tests/base.py
|
Suhail6inkling/bot
|
95ae05c773e753699e6899255783c1d7df936024
|
[
"MIT"
] | 1
|
2020-11-01T19:57:00.000Z
|
2020-11-01T19:57:00.000Z
|
import logging
import unittest
from contextlib import contextmanager
class _CaptureLogHandler(logging.Handler):
"""
A logging handler capturing all (raw and formatted) logging output.
"""
def __init__(self):
super().__init__()
self.records = []
def emit(self, record):
self.records.append(record)
class LoggingTestCase(unittest.TestCase):
"""TestCase subclass that adds more logging assertion tools."""
@contextmanager
def assertNotLogs(self, logger=None, level=None, msg=None):
"""
Asserts that no logs of `level` and higher were emitted by `logger`.
        You can specify a specific `logger`, the minimum `logging` level you want to watch, and a
custom `msg` to be added to the `AssertionError` if thrown. If the assertion fails, the
        recorded log records will be included in the `AssertionError` message. The context
manager does not yield a live `look` into the logging records, since we use this context
manager when we're testing under the assumption that no log records will be emitted.
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
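        # accept either a level name such as "INFO" or a numeric value;
        # logging._nameToLevel maps names to numbers (a private mapping)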
if level:
level = logging._nameToLevel.get(level, level)
else:
level = logging.INFO
handler = _CaptureLogHandler()
old_handlers = logger.handlers[:]
old_level = logger.level
old_propagate = logger.propagate
logger.handlers = [handler]
logger.setLevel(level)
logger.propagate = False
        try:
            yield
finally:
logger.handlers = old_handlers
logger.propagate = old_propagate
logger.setLevel(old_level)
if handler.records:
level_name = logging.getLevelName(level)
n_logs = len(handler.records)
base_message = f"{n_logs} logs of {level_name} or higher were triggered on {logger.name}:\n"
records = [str(record) for record in handler.records]
record_message = "\n".join(records)
standard_message = self._truncateMessage(base_message, record_message)
msg = self._formatMessage(msg, standard_message)
self.fail(msg)
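# Usage sketch (`do_quiet_work` is a hypothetical function under test):
#   class MyTests(LoggingTestCase):
#       def test_quiet_path(self):
#           with self.assertNotLogs(logger='my.module', level='WARNING'):
#               do_quiet_work()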
| 34.485294
| 104
| 0.639659
| 2,270
| 0.968017
| 1,862
| 0.79403
| 1,882
| 0.802559
| 0
| 0
| 796
| 0.339446
|
029b069d68471e7fbe34c10e131ca57fcd80d3f5
| 892
|
py
|
Python
|
blog/app/admin/views.py
|
web-user/flask-blog
|
130f5dbcdb18b8f325c7aa8dd3d71cbc7190485a
|
[
"MIT"
] | null | null | null |
blog/app/admin/views.py
|
web-user/flask-blog
|
130f5dbcdb18b8f325c7aa8dd3d71cbc7190485a
|
[
"MIT"
] | null | null | null |
blog/app/admin/views.py
|
web-user/flask-blog
|
130f5dbcdb18b8f325c7aa8dd3d71cbc7190485a
|
[
"MIT"
] | null | null | null |
from flask import render_template, redirect, url_for, request, flash
from flask_login import login_required
from . import admin
from .. import db
from ..models import Post
from ..form import PostForm
@admin.route('/admin', methods=['GET', 'POST'])
@login_required
def admin():
    form = PostForm()
    if request.method == 'POST':
        if form.validate():
            post = Post(body=form.body.data, title=form.title.data)
            db.session.add(post)
            db.session.commit()
            return redirect(url_for('main.home'))
        flash('Invalid post data.')
    return render_template('admin.html', title='Admin', form=form)
| 34.307692
| 119
| 0.690583
| 0
| 0
| 0
| 0
| 516
| 0.578475
| 0
| 0
| 105
| 0.117713
|
029c0dd5be38ab97f221e4b0ca039e07bafa37e8
| 2,561
|
py
|
Python
|
examples/get-set-params/robot.py
|
Tyler-Duckworth/robotpy-rev
|
d03829a4f8e47526e753f0edeafc1df888880775
|
[
"Apache-2.0"
] | 1
|
2019-01-28T18:16:55.000Z
|
2019-01-28T18:16:55.000Z
|
examples/get-set-params/robot.py
|
Tyler-Duckworth/robotpy-rev
|
d03829a4f8e47526e753f0edeafc1df888880775
|
[
"Apache-2.0"
] | 18
|
2019-01-09T08:35:48.000Z
|
2022-01-15T02:17:23.000Z
|
examples/get-set-params/robot.py
|
Tyler-Duckworth/robotpy-rev
|
d03829a4f8e47526e753f0edeafc1df888880775
|
[
"Apache-2.0"
] | 9
|
2019-01-11T03:14:19.000Z
|
2022-01-13T00:51:48.000Z
|
# ----------------------------------------------------------------------------
# Copyright (c) 2017-2018 FIRST. All Rights Reserved.
# Open Source Software - may be modified and shared by FRC teams. The code
# must be accompanied by the FIRST BSD license file in the root directory of
# the project.
# ----------------------------------------------------------------------------
import rev
import wpilib
class Robot(wpilib.TimedRobot):
def robotInit(self):
# Create motor
self.motor = rev.CANSparkMax(1, rev.MotorType.kBrushless)
self.joystick = wpilib.Joystick(0)
# The restoreFactoryDefaults method can be used to reset the
# configuration parameters in the SPARK MAX to their factory default
# state. If no argument is passed, these parameters will not persist
# between power cycles
self.motor.restoreFactoryDefaults()
# Parameters can be set by calling the appropriate set() method on the
# CANSparkMax object whose properties you want to change
#
# Set methods will return one of three CANError values which will let
# you know if the parameter was successfully set:
# CANError.kOk
# CANError.kError
# CANError.kTimeout
if self.motor.setIdleMode(rev.IdleMode.kCoast) is not rev.CANError.kOk:
wpilib.SmartDashboard.putString("Idle Mode", "Error")
# Similarly, parameters will have a get() method which allows you to
# retrieve their values from the controller
if self.motor.getIdleMode() is rev.IdleMode.kCoast:
wpilib.SmartDashboard.putString("Idle Mode", "Coast")
else:
wpilib.SmartDashboard.putString("Idle Mode", "Brake")
# Set ramp rate to 0
if self.motor.setOpenLoopRampRate(0) is not rev.CANError.kOk:
wpilib.SmartDashboard.putString("Ramp Rate", "Error")
# Read back ramp value
wpilib.SmartDashboard.putString(
"Ramp Rate", str(self.motor.getOpenLoopRampRate())
)
def teleopPeriodic(self):
# Pair motor and the joystick's Y Axis
self.motor.set(self.joystick.getY())
# Put Voltage, Temperature, and Motor Output onto SmartDashboard
wpilib.SmartDashboard.putNumber("Voltage", self.motor.getBusVoltage())
wpilib.SmartDashboard.putNumber("Temperature", self.motor.getMotorTemperature())
wpilib.SmartDashboard.putNumber("Output", self.motor.getAppliedOutput())
if __name__ == "__main__":
wpilib.run(Robot)
| 40.015625
| 88
| 0.632956
| 2,102
| 0.820773
| 0
| 0
| 0
| 0
| 0
| 0
| 1,284
| 0.501367
|
029cefa854d393945ca4f9769661f617a4a0cbfe
| 39,324
|
py
|
Python
|
pyclustering/nnet/som.py
|
JosephChataignon/pyclustering
|
bf4f51a472622292627ec8c294eb205585e50f52
|
[
"BSD-3-Clause"
] | 1,013
|
2015-01-26T19:50:14.000Z
|
2022-03-31T07:38:48.000Z
|
pyclustering/nnet/som.py
|
peterlau0626/pyclustering
|
bf4f51a472622292627ec8c294eb205585e50f52
|
[
"BSD-3-Clause"
] | 542
|
2015-01-20T16:44:32.000Z
|
2022-01-29T14:57:20.000Z
|
pyclustering/nnet/som.py
|
peterlau0626/pyclustering
|
bf4f51a472622292627ec8c294eb205585e50f52
|
[
"BSD-3-Clause"
] | 262
|
2015-03-19T07:28:12.000Z
|
2022-03-30T07:28:24.000Z
|
"""!
@brief Neural Network: Self-Organized Feature Map
@details Implementation based on paper @cite article::nnet::som::1, @cite article::nnet::som::2.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
import math
import random
import matplotlib.pyplot as plt
import pyclustering.core.som_wrapper as wrapper
from pyclustering.core.wrapper import ccore_library
from pyclustering.utils import euclidean_distance_square
from pyclustering.utils.dimension import dimension_info
from enum import IntEnum
class type_conn(IntEnum):
"""!
@brief Enumeration of connection types for SOM.
@see som
"""
## Grid type of connections when each oscillator has connections with left, upper, right, lower neighbors.
grid_four = 0
## Grid type of connections when each oscillator has connections with left, upper-left, upper, upper-right, right, right-lower, lower, lower-left neighbors.
grid_eight = 1
## Grid type of connections when each oscillator has connections with left, upper-left, upper-right, right, right-lower, lower-left neighbors.
honeycomb = 2
    ## Grid type of connections when existence of each connection is defined by the SOM rule on each step of simulation.
func_neighbor = 3
class type_init(IntEnum):
"""!
@brief Enumeration of initialization types for SOM.
@see som
"""
## Weights are randomly distributed using Gaussian distribution (0, 1).
random = 0
## Weights are randomly distributed using Gaussian distribution (input data centroid, 1).
random_centroid = 1
    ## Weights are randomly distributed using Gaussian distribution (input data centroid, surface of input data).
random_surface = 2
## Weights are distributed as a uniform grid that covers whole surface of the input data.
uniform_grid = 3
class som_parameters:
"""!
@brief Represents SOM parameters.
"""
def __init__(self):
"""!
@brief Creates SOM parameters.
"""
        ## Defines an initialization way for neuron weights (random, random in center of the input data, random distributed in data, distributed in line with uniform grid).
self.init_type = type_init.uniform_grid
## Initial radius. If the initial radius is not specified (equals to `None`) then it will be calculated by SOM.
self.init_radius = None
## Rate of learning.
self.init_learn_rate = 0.1
        ## Condition that defines when the learning process should be stopped. It is used when the autostop mode is on.
self.adaptation_threshold = 0.001
## Seed for random state (by default is `None`, current system time is used).
self.random_state = None
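# A short sketch of customizing the parameters above before building a network:
#   params = som_parameters()
#   params.init_type = type_init.random_surface
#   params.init_learn_rate = 0.05
#   params.random_state = 42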
class som:
"""!
@brief Represents self-organized feature map (SOM).
    @details The self-organizing feature map (SOM) method is a powerful tool for the visualization
             of high-dimensional data. It converts complex, nonlinear statistical relationships between
high-dimensional data into simple geometric relationships on a low-dimensional display.
    @details The `ccore` option controls whether the C++ implementation of the pyclustering library is
             used. It is on by default and improves the performance of the self-organized feature
             map.
Example:
@code
import random
from pyclustering.utils import read_sample
from pyclustering.nnet.som import som, type_conn, type_init, som_parameters
from pyclustering.samples.definitions import FCPS_SAMPLES
# read sample 'Lsun' from file
sample = read_sample(FCPS_SAMPLES.SAMPLE_LSUN)
# create SOM parameters
parameters = som_parameters()
        # create a self-organized feature map with size 10x10
        rows = 10  # ten rows
        cols = 10  # ten columns
        structure = type_conn.grid_four  # each neuron has at most four neighbors
network = som(rows, cols, structure, parameters)
        # train the network on the 'Lsun' sample for 100 epochs.
network.train(sample, 100)
# simulate trained network using randomly modified point from input dataset.
index_point = random.randint(0, len(sample) - 1)
point = sample[index_point] # obtain randomly point from data
point[0] += random.random() * 0.2 # change randomly X-coordinate
point[1] += random.random() * 0.2 # change randomly Y-coordinate
index_winner = network.simulate(point)
        # find which objects from the input data are closest to the randomly modified point.
index_similar_objects = network.capture_objects[index_winner]
# neuron contains information of encoded objects
print("Point '%s' is similar to objects with indexes '%s'." % (str(point), str(index_similar_objects)))
print("Coordinates of similar objects:")
for index in index_similar_objects: print("\tPoint:", sample[index])
# result visualization:
# show distance matrix (U-matrix).
network.show_distance_matrix()
# show density matrix (P-matrix).
network.show_density_matrix()
# show winner matrix.
network.show_winner_matrix()
# show self-organized map.
network.show_network()
@endcode
There is a visualization of 'Target' sample that was done by the self-organized feature map:
@image html target_som_processing.png
"""
@property
def size(self):
"""!
@brief Return size of self-organized map that is defined by total number of neurons.
@return (uint) Size of self-organized map (number of neurons).
"""
if self.__ccore_som_pointer is not None:
self._size = wrapper.som_get_size(self.__ccore_som_pointer)
return self._size
@property
def weights(self):
"""!
@brief Return weight of each neuron.
@return (list) Weights of each neuron.
"""
if self.__ccore_som_pointer is not None:
self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
return self._weights
@property
def awards(self):
"""!
@brief Return amount of captured objects by each neuron after training.
@return (list) Amount of captured objects by each neuron.
@see train()
"""
if self.__ccore_som_pointer is not None:
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
return self._award
@property
def capture_objects(self):
"""!
@brief Returns indexes of captured objects by each neuron.
@details For example, a network with size 2x2 has been trained on a sample with five objects. Suppose neuron #1
                 won the object with index `1`, neuron #2 won objects `0`, `3`, `4`, neuron #3 did not win anything and
finally neuron #4 won an object with index `2`. Thus, for this example we will have the following
output `[[1], [0, 3, 4], [], [2]]`.
@return (list) Indexes of captured objects by each neuron.
"""
if self.__ccore_som_pointer is not None:
self._capture_objects = wrapper.som_get_capture_objects(self.__ccore_som_pointer)
return self._capture_objects
def __init__(self, rows, cols, conn_type=type_conn.grid_eight, parameters=None, ccore=True):
"""!
@brief Constructor of self-organized map.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
@param[in] conn_type (type_conn): Type of connection between oscillators in the network (grid four, grid eight, honeycomb, function neighbour).
@param[in] parameters (som_parameters): Other specific parameters.
@param[in] ccore (bool): If True simulation is performed by CCORE library (C++ implementation of pyclustering).
"""
        # some of these attributes are required regardless of the core implementation, e.g., for network visualization.
self._cols = cols
self._rows = rows
self._size = cols * rows
self._conn_type = conn_type
self._data = None
self._neighbors = None
self._local_radius = 0.0
self._learn_rate = 0.0
self.__ccore_som_pointer = None
self._params = parameters or som_parameters()
if self._params.init_radius is None:
self._params.init_radius = self.__initialize_initial_radius(rows, cols)
if (ccore is True) and ccore_library.workable():
self.__ccore_som_pointer = wrapper.som_create(rows, cols, conn_type, self._params)
else:
# location
self._location = self.__initialize_locations(rows, cols)
# default weights
            self._weights = [[0.0] for _ in range(self._size)]  # avoid aliasing one shared inner list
# awards
self._award = [0] * self._size
# captured objects
self._capture_objects = [[] for i in range(self._size)]
# distances - calculate and store them only during training
self._sqrt_distances = None
# connections
if conn_type != type_conn.func_neighbor:
self._create_connections(conn_type)
def __del__(self):
"""!
@brief Destructor of the self-organized feature map.
"""
if self.__ccore_som_pointer is not None:
wrapper.som_destroy(self.__ccore_som_pointer)
def __len__(self):
"""!
        @brief Returns the size of the network, defined by the number of neurons in it.
@return (uint) Size of self-organized map (amount of neurons).
"""
return self._size
def __getstate__(self):
"""
@brief Returns state of SOM network that can be used to store network.
"""
if self.__ccore_som_pointer is not None:
self.__download_dump_from_ccore()
return self.__get_dump_from_python(True)
return self.__get_dump_from_python(False)
def __setstate__(self, som_state):
"""
@brief Set state of SOM network that can be used to load network.
"""
if som_state['ccore'] is True and ccore_library.workable():
self.__upload_dump_to_ccore(som_state['state'])
else:
self.__upload_dump_to_python(som_state['state'])
def __initialize_initial_radius(self, rows, cols):
"""!
@brief Initialize initial radius using map sizes.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
        @return (double) Initial radius value.
"""
if (cols + rows) / 4.0 > 1.0:
return 2.0
elif (cols > 1) and (rows > 1):
return 1.5
else:
return 1.0
def __initialize_locations(self, rows, cols):
"""!
        @brief Initialize locations (coordinates in SOM grid) of each neuron in the map.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
@return (list) List of coordinates of each neuron in map.
"""
location = list()
for i in range(rows):
for j in range(cols):
location.append([float(i), float(j)])
return location
def __initialize_distances(self, size, location):
"""!
@brief Initialize distance matrix in SOM grid.
@param[in] size (uint): Amount of neurons in the network.
@param[in] location (list): List of coordinates of each neuron in the network.
@return (list) Distance matrix between neurons in the network.
"""
sqrt_distances = [[[] for i in range(size)] for j in range(size)]
for i in range(size):
for j in range(i, size, 1):
dist = euclidean_distance_square(location[i], location[j])
sqrt_distances[i][j] = dist
sqrt_distances[j][i] = dist
return sqrt_distances
def _create_initial_weights(self, init_type):
"""!
@brief Creates initial weights for neurons in line with the specified initialization.
        @param[in] init_type (type_init): Type of initialization of initial neuron weights (random, random in center of the input data, random distributed in data, distributed in line with uniform grid).
"""
dim_info = dimension_info(self._data)
step_x = dim_info.get_center()[0]
if self._rows > 1:
step_x = dim_info.get_width()[0] / (self._rows - 1)
step_y = 0.0
if dim_info.get_dimensions() > 1:
step_y = dim_info.get_center()[1]
if self._cols > 1:
step_y = dim_info.get_width()[1] / (self._cols - 1)
# generate weights (topological coordinates)
random.seed(self._params.random_state)
# Uniform grid.
if init_type == type_init.uniform_grid:
# Predefined weights in line with input data.
self._weights = [[[] for i in range(dim_info.get_dimensions())] for j in range(self._size)]
for i in range(self._size):
location = self._location[i]
for dim in range(dim_info.get_dimensions()):
if dim == 0:
if self._rows > 1:
self._weights[i][dim] = dim_info.get_minimum_coordinate()[dim] + step_x * location[dim]
else:
self._weights[i][dim] = dim_info.get_center()[dim]
elif dim == 1:
if self._cols > 1:
self._weights[i][dim] = dim_info.get_minimum_coordinate()[dim] + step_y * location[dim]
else:
self._weights[i][dim] = dim_info.get_center()[dim]
else:
self._weights[i][dim] = dim_info.get_center()[dim]
elif init_type == type_init.random_surface:
# Random weights at the full surface.
self._weights = [
[random.uniform(dim_info.get_minimum_coordinate()[i], dim_info.get_maximum_coordinate()[i]) for i in
range(dim_info.get_dimensions())] for _ in range(self._size)]
elif init_type == type_init.random_centroid:
# Random weights at the center of input data.
self._weights = [[(random.random() + dim_info.get_center()[i]) for i in range(dim_info.get_dimensions())]
for _ in range(self._size)]
else:
# Random weights of input data.
self._weights = [[random.random() for i in range(dim_info.get_dimensions())] for _ in range(self._size)]
def _create_connections(self, conn_type):
"""!
@brief Create connections in line with input rule (grid four, grid eight, honeycomb, function neighbour).
@param[in] conn_type (type_conn): Type of connection between oscillators in the network.
"""
self._neighbors = [[] for index in range(self._size)]
for index in range(0, self._size, 1):
upper_index = index - self._cols
upper_left_index = index - self._cols - 1
upper_right_index = index - self._cols + 1
lower_index = index + self._cols
lower_left_index = index + self._cols - 1
lower_right_index = index + self._cols + 1
left_index = index - 1
right_index = index + 1
node_row_index = math.floor(index / self._cols)
upper_row_index = node_row_index - 1
lower_row_index = node_row_index + 1
if (conn_type == type_conn.grid_eight) or (conn_type == type_conn.grid_four):
if upper_index >= 0:
self._neighbors[index].append(upper_index)
if lower_index < self._size:
self._neighbors[index].append(lower_index)
if (conn_type == type_conn.grid_eight) or (conn_type == type_conn.grid_four) or (
conn_type == type_conn.honeycomb):
if (left_index >= 0) and (math.floor(left_index / self._cols) == node_row_index):
self._neighbors[index].append(left_index)
if (right_index < self._size) and (math.floor(right_index / self._cols) == node_row_index):
self._neighbors[index].append(right_index)
if conn_type == type_conn.grid_eight:
if (upper_left_index >= 0) and (math.floor(upper_left_index / self._cols) == upper_row_index):
self._neighbors[index].append(upper_left_index)
if (upper_right_index >= 0) and (math.floor(upper_right_index / self._cols) == upper_row_index):
self._neighbors[index].append(upper_right_index)
if (lower_left_index < self._size) and (math.floor(lower_left_index / self._cols) == lower_row_index):
self._neighbors[index].append(lower_left_index)
if (lower_right_index < self._size) and (math.floor(lower_right_index / self._cols) == lower_row_index):
self._neighbors[index].append(lower_right_index)
if conn_type == type_conn.honeycomb:
if (node_row_index % 2) == 0:
upper_left_index = index - self._cols
upper_right_index = index - self._cols + 1
lower_left_index = index + self._cols
lower_right_index = index + self._cols + 1
else:
upper_left_index = index - self._cols - 1
upper_right_index = index - self._cols
lower_left_index = index + self._cols - 1
lower_right_index = index + self._cols
if (upper_left_index >= 0) and (math.floor(upper_left_index / self._cols) == upper_row_index):
self._neighbors[index].append(upper_left_index)
if (upper_right_index >= 0) and (math.floor(upper_right_index / self._cols) == upper_row_index):
self._neighbors[index].append(upper_right_index)
if (lower_left_index < self._size) and (math.floor(lower_left_index / self._cols) == lower_row_index):
self._neighbors[index].append(lower_left_index)
if (lower_right_index < self._size) and (math.floor(lower_right_index / self._cols) == lower_row_index):
self._neighbors[index].append(lower_right_index)
def _competition(self, x):
"""!
        @brief Finds the winning neuron, i.e. the neuron whose weights are closest to the input pattern.
@param[in] x (list): Input pattern from the input data set, for example it can be coordinates of point.
@return (uint) Returns index of neuron that is winner.
"""
index = 0
minimum = euclidean_distance_square(self._weights[0], x)
for i in range(1, self._size, 1):
candidate = euclidean_distance_square(self._weights[i], x)
if candidate < minimum:
index = i
minimum = candidate
return index
def _adaptation(self, index, x):
"""!
        @brief Adjusts neuron weights based on the winning neuron.
@param[in] index (uint): Index of neuron-winner.
@param[in] x (list): Input pattern from the input data set.
"""
dimension = len(self._weights[0])
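        # note: _sqrt_distances stores squared grid distances and _local_radius
        # is squared in train(), so the Gaussian influence below is computed
        # consistently on squared quantities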
if self._conn_type == type_conn.func_neighbor:
for neuron_index in range(self._size):
distance = self._sqrt_distances[index][neuron_index]
if distance < self._local_radius:
influence = math.exp(-(distance / (2.0 * self._local_radius)))
for i in range(dimension):
self._weights[neuron_index][i] = self._weights[neuron_index][
i] + self._learn_rate * influence * (
x[i] - self._weights[neuron_index][i])
else:
for i in range(dimension):
self._weights[index][i] = self._weights[index][i] + self._learn_rate * (x[i] - self._weights[index][i])
for neighbor_index in self._neighbors[index]:
distance = self._sqrt_distances[index][neighbor_index]
if distance < self._local_radius:
influence = math.exp(-(distance / (2.0 * self._local_radius)))
for i in range(dimension):
self._weights[neighbor_index][i] = self._weights[neighbor_index][
i] + self._learn_rate * influence * (
x[i] - self._weights[neighbor_index][i])
def train(self, data, epochs, autostop=False):
"""!
@brief Trains self-organized feature map (SOM).
@param[in] data (list): Input data - list of points where each point is represented by list of features, for example coordinates.
@param[in] epochs (uint): Number of epochs for training.
        @param[in] autostop (bool): Automatic termination of the learning process when adaptation no longer occurs.
@return (uint) Number of learning iterations.
"""
self._data = data
if self.__ccore_som_pointer is not None:
return wrapper.som_train(self.__ccore_som_pointer, data, epochs, autostop)
self._sqrt_distances = self.__initialize_distances(self._size, self._location)
for i in range(self._size):
self._award[i] = 0
self._capture_objects[i].clear()
# weights
self._create_initial_weights(self._params.init_type)
previous_weights = None
for epoch in range(1, epochs + 1):
# Depression term of coupling
self._local_radius = (self._params.init_radius * math.exp(-(epoch / epochs))) ** 2
self._learn_rate = self._params.init_learn_rate * math.exp(-(epoch / epochs))
# Clear statistics
if autostop:
for i in range(self._size):
self._award[i] = 0
self._capture_objects[i].clear()
for i in range(len(self._data)):
# Step 1: Competition:
index = self._competition(self._data[i])
# Step 2: Adaptation:
self._adaptation(index, self._data[i])
# Update statistics
if (autostop is True) or (epoch == epochs):
self._award[index] += 1
self._capture_objects[index].append(i)
# Check requirement of stopping
if autostop:
if previous_weights is not None:
maximal_adaptation = self._get_maximal_adaptation(previous_weights)
if maximal_adaptation < self._params.adaptation_threshold:
return epoch
previous_weights = [item[:] for item in self._weights]
return epochs
def simulate(self, input_pattern):
"""!
        @brief Processes an input pattern (no learning) and returns the index of the winning neuron.
               Objects captured by the winning neuron can be obtained via the capture_objects property.
@param[in] input_pattern (list): Input pattern.
@return (uint) Returns index of neuron-winner.
@see capture_objects
"""
if self.__ccore_som_pointer is not None:
return wrapper.som_simulate(self.__ccore_som_pointer, input_pattern)
return self._competition(input_pattern)
def _get_maximal_adaptation(self, previous_weights):
"""!
@brief Calculates maximum changes of weight in line with comparison between previous weights and current weights.
@param[in] previous_weights (list): Weights from the previous step of learning process.
@return (double) Value that represents maximum changes of weight after adaptation process.
"""
dimension = len(self._data[0])
maximal_adaptation = 0.0
for neuron_index in range(self._size):
for dim in range(dimension):
current_adaptation = previous_weights[neuron_index][dim] - self._weights[neuron_index][dim]
if current_adaptation < 0:
current_adaptation = -current_adaptation
if maximal_adaptation < current_adaptation:
maximal_adaptation = current_adaptation
return maximal_adaptation
def get_winner_number(self):
"""!
@brief Calculates number of winner at the last step of learning process.
@return (uint) Number of winner.
"""
if self.__ccore_som_pointer is not None:
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
winner_number = 0
for i in range(self._size):
if self._award[i] > 0:
winner_number += 1
return winner_number
def show_distance_matrix(self):
"""!
@brief Shows gray visualization of U-matrix (distance matrix).
@see get_distance_matrix()
"""
distance_matrix = self.get_distance_matrix()
plt.imshow(distance_matrix, cmap=plt.get_cmap('hot'), interpolation='kaiser')
plt.title("U-Matrix")
plt.colorbar()
plt.show()
def get_distance_matrix(self):
"""!
@brief Calculates distance matrix (U-matrix).
        @details The U-Matrix visualizes the distances in input space between each weight vector and its neighbors on the map.
@return (list) Distance matrix (U-matrix).
@see show_distance_matrix()
@see get_density_matrix()
"""
if self.__ccore_som_pointer is not None:
self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
if self._conn_type != type_conn.func_neighbor:
self._neighbors = wrapper.som_get_neighbors(self.__ccore_som_pointer)
distance_matrix = [[0.0] * self._cols for i in range(self._rows)]
for i in range(self._rows):
for j in range(self._cols):
neuron_index = i * self._cols + j
if self._conn_type == type_conn.func_neighbor:
self._create_connections(type_conn.grid_eight)
for neighbor_index in self._neighbors[neuron_index]:
distance_matrix[i][j] += euclidean_distance_square(self._weights[neuron_index],
self._weights[neighbor_index])
distance_matrix[i][j] /= len(self._neighbors[neuron_index])
return distance_matrix
def show_density_matrix(self, surface_divider=20.0):
"""!
@brief Show density matrix (P-matrix) using kernel density estimation.
        @param[in] surface_divider (double): Divider in each dimension that affects the radius for density measurement.
@see show_distance_matrix()
"""
density_matrix = self.get_density_matrix(surface_divider)
plt.imshow(density_matrix, cmap=plt.get_cmap('hot'), interpolation='kaiser')
plt.title("P-Matrix")
plt.colorbar()
plt.show()
def get_density_matrix(self, surface_divider=20.0):
"""!
@brief Calculates density matrix (P-Matrix).
        @param[in] surface_divider (double): Divider in each dimension that affects the radius for density measurement.
@return (list) Density matrix (P-Matrix).
@see get_distance_matrix()
"""
if self.__ccore_som_pointer is not None:
self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
density_matrix = [[0] * self._cols for i in range(self._rows)]
dimension = len(self._weights[0])
dim_max = [float('-Inf')] * dimension
dim_min = [float('Inf')] * dimension
for weight in self._weights:
for index_dim in range(dimension):
if weight[index_dim] > dim_max[index_dim]:
dim_max[index_dim] = weight[index_dim]
if weight[index_dim] < dim_min[index_dim]:
dim_min[index_dim] = weight[index_dim]
radius = [0.0] * len(self._weights[0])
for index_dim in range(dimension):
radius[index_dim] = (dim_max[index_dim] - dim_min[index_dim]) / surface_divider
## TODO: do not use data
for point in self._data:
for index_neuron in range(len(self)):
point_covered = True
for index_dim in range(dimension):
if abs(point[index_dim] - self._weights[index_neuron][index_dim]) > radius[index_dim]:
point_covered = False
break
row = int(math.floor(index_neuron / self._cols))
col = index_neuron - row * self._cols
if point_covered is True:
density_matrix[row][col] += 1
return density_matrix
def show_winner_matrix(self):
"""!
        @brief Shows the winner matrix, where each element corresponds to a neuron and its value is the
               number of objects from the input data won at the last training iteration.
@see show_distance_matrix()
"""
if self.__ccore_som_pointer is not None:
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
(fig, ax) = plt.subplots()
winner_matrix = [[0] * self._cols for _ in range(self._rows)]
for i in range(self._rows):
for j in range(self._cols):
neuron_index = i * self._cols + j
winner_matrix[i][j] = self._award[neuron_index]
                ax.text(j, i, str(winner_matrix[i][j]), va='center', ha='center')  # imshow: x is the column index, y the row
ax.imshow(winner_matrix, cmap=plt.get_cmap('cool'), interpolation='none')
ax.grid(True)
plt.title("Winner Matrix")
plt.show()
plt.close(fig)
def show_network(self, awards=False, belongs=False, coupling=True, dataset=True, marker_type='o'):
"""!
@brief Shows neurons in the dimension of data.
        @param[in] awards (bool): If True - displays how many objects each neuron won.
@param[in] belongs (bool): If True - marks each won object by according index of neuron-winner (only when
dataset is displayed too).
@param[in] coupling (bool): If True - displays connections between neurons (except case when function neighbor
is used).
@param[in] dataset (bool): If True - displays inputs data set.
@param[in] marker_type (string): Defines marker that is used to denote neurons on the plot.
"""
if self.__ccore_som_pointer is not None:
self._size = wrapper.som_get_size(self.__ccore_som_pointer)
self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
self._neighbors = wrapper.som_get_neighbors(self.__ccore_som_pointer)
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
dimension = len(self._weights[0])
fig = plt.figure()
# Check for dimensions
if (dimension == 1) or (dimension == 2):
axes = fig.add_subplot(111)
elif dimension == 3:
axes = fig.gca(projection='3d')
else:
            raise NotImplementedError('Cannot show the network for a data-space that is not 1D, 2D or 3D.')
if (self._data is not None) and (dataset is True):
for x in self._data:
if dimension == 1:
axes.plot(x[0], 0.0, 'b|', ms=30)
elif dimension == 2:
axes.plot(x[0], x[1], 'b.')
elif dimension == 3:
axes.scatter(x[0], x[1], x[2], c='b', marker='.')
# Show neurons
for index in range(self._size):
color = 'g'
if self._award[index] == 0:
color = 'y'
if dimension == 1:
axes.plot(self._weights[index][0], 0.0, color + marker_type)
if awards:
location = '{0}'.format(self._award[index])
axes.text(self._weights[index][0], 0.0, location, color='black', fontsize=10)
if belongs and self._data is not None:
location = '{0}'.format(index)
axes.text(self._weights[index][0], 0.0, location, color='black', fontsize=12)
for k in range(len(self._capture_objects[index])):
point = self._data[self._capture_objects[index][k]]
axes.text(point[0], 0.0, location, color='blue', fontsize=10)
if dimension == 2:
axes.plot(self._weights[index][0], self._weights[index][1], color + marker_type)
if awards:
location = '{0}'.format(self._award[index])
axes.text(self._weights[index][0], self._weights[index][1], location, color='black', fontsize=10)
if belongs and self._data is not None:
location = '{0}'.format(index)
axes.text(self._weights[index][0], self._weights[index][1], location, color='black', fontsize=12)
for k in range(len(self._capture_objects[index])):
point = self._data[self._capture_objects[index][k]]
axes.text(point[0], point[1], location, color='blue', fontsize=10)
if (self._conn_type != type_conn.func_neighbor) and (coupling is True):
for neighbor in self._neighbors[index]:
if neighbor > index:
axes.plot([self._weights[index][0], self._weights[neighbor][0]],
[self._weights[index][1], self._weights[neighbor][1]],
'g', linewidth=0.5)
elif dimension == 3:
axes.scatter(self._weights[index][0], self._weights[index][1], self._weights[index][2], c=color,
marker=marker_type)
                if (self._conn_type != type_conn.func_neighbor) and (coupling is True):
for neighbor in self._neighbors[index]:
if neighbor > index:
axes.plot([self._weights[index][0], self._weights[neighbor][0]],
[self._weights[index][1], self._weights[neighbor][1]],
[self._weights[index][2], self._weights[neighbor][2]],
'g-', linewidth=0.5)
plt.title("Network Structure")
plt.grid()
plt.show()
plt.close(fig)
def __get_dump_from_python(self, ccore_usage):
return {'ccore': ccore_usage,
'state': {'cols': self._cols,
'rows': self._rows,
'size': self._size,
'conn_type': self._conn_type,
'neighbors': self._neighbors,
'local_radius': self._local_radius,
'learn_rate': self._learn_rate,
'params': self._params,
'location': self._location,
'weights': self._weights,
'award': self._award,
'capture_objects': self._capture_objects}}
def __download_dump_from_ccore(self):
self._location = self.__initialize_locations(self._rows, self._cols)
self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
self._capture_objects = wrapper.som_get_capture_objects(self.__ccore_som_pointer)
def __upload_common_part(self, state_dump):
self._cols = state_dump['cols']
self._rows = state_dump['rows']
self._size = state_dump['size']
self._conn_type = state_dump['conn_type']
self._neighbors = state_dump['neighbors']
self._local_radius = state_dump['local_radius']
self._learn_rate = state_dump['learn_rate']
self._params = state_dump['params']
self._neighbors = None
def __upload_dump_to_python(self, state_dump):
self.__ccore_som_pointer = None
self.__upload_common_part(state_dump)
self._location = state_dump['location']
self._weights = state_dump['weights']
self._award = state_dump['award']
self._capture_objects = state_dump['capture_objects']
self._location = self.__initialize_locations(self._rows, self._cols)
self._create_connections(self._conn_type)
def __upload_dump_to_ccore(self, state_dump):
self.__upload_common_part(state_dump)
self.__ccore_som_pointer = wrapper.som_create(self._rows, self._cols, self._conn_type, self._params)
wrapper.som_load(self.__ccore_som_pointer, state_dump['weights'], state_dump['award'],
state_dump['capture_objects'])
| 39.402806
| 203
| 0.576416
| 38,726
| 0.984793
| 0
| 0
| 1,936
| 0.049232
| 0
| 0
| 13,775
| 0.350295
|
029d25002fe312ac4b1cd506fc070aee02af1ff6
| 2,265
|
py
|
Python
|
ros/src/tl_detector/light_classification/tl_classifier.py
|
trajkd/Programming-a-Real-Self-Driving-Car
|
536377815a8dd907c59979f4a07d25b6d157dbaa
|
[
"MIT"
] | 1
|
2021-08-17T11:19:34.000Z
|
2021-08-17T11:19:34.000Z
|
ros/src/tl_detector/light_classification/tl_classifier.py
|
trajkd/Programming-a-Real-Self-Driving-Car
|
536377815a8dd907c59979f4a07d25b6d157dbaa
|
[
"MIT"
] | 7
|
2020-09-26T01:07:12.000Z
|
2022-03-12T00:31:00.000Z
|
ros/src/tl_detector/light_classification/tl_classifier.py
|
trajkd/Programming-a-Real-Self-Driving-Car
|
536377815a8dd907c59979f4a07d25b6d157dbaa
|
[
"MIT"
] | 1
|
2021-08-06T17:24:26.000Z
|
2021-08-06T17:24:26.000Z
|
import rospy
from styx_msgs.msg import TrafficLight
import numpy as np
from keras.models import Model
from keras import applications
from keras.models import load_model
from keras.preprocessing import image as img_preprocessing
import cv2
# load the trained model
from keras.utils.generic_utils import CustomObjectScope
model_filepath = 'saved_models/model.MobileNet-3-classes.h5'
n_classes = 3
class TLClassifier(object):
def __init__(self):
        # defer loading the classifier; load_model() must be called
        # before get_classification() can be used
self.model_loaded = False
def load_model(self):
rospy.loginfo("TLClassifier: Loading model...")
with CustomObjectScope({'relu6': applications.mobilenet.relu6,'DepthwiseConv2D': applications.mobilenet.DepthwiseConv2D}):
self.model = load_model(model_filepath)
self.model._make_predict_function() # Otherwise there is a "Tensor %s is not an element of this grap..." when predicting
self.model_loaded = True
rospy.loginfo("TLClassifier: Model loaded - READY")
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
# Implement light color prediction
if not self.model_loaded:
rospy.logwarn("Model not loaded yet, clssification not possible!")
return TrafficLight.UNKNOWN
# The model was trained with RGB images.
# So the image needs to be provided as RGB:
# self.bridge.imgmsg_to_cv2(self.camera_image, "rgb8")
# Otherwise a conversion would be necessary
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# The model expects RGB images in (224, 224) as input
image = cv2.resize(image,(224,224))
# to tensors and normalize it
x = img_preprocessing.img_to_array(image)
x = np.expand_dims(x, axis=0).astype('float32')/255
# get index of predicted signal sign for the image
signal_prediction = np.argmax(self.model.predict(x))
return signal_prediction
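# Usage sketch (inside a ROS node; `rgb_image` is a hypothetical RGB frame):
#   classifier = TLClassifier()
#   classifier.load_model()                    # must run before classifying
#   state = classifier.get_classification(rgb_image)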
| 36.532258
| 132
| 0.680795
| 1,866
| 0.823841
| 0
| 0
| 0
| 0
| 0
| 0
| 1,012
| 0.446799
|
029d60e7a021da261de9331901e5b18fe50fb799
| 2,080
|
py
|
Python
|
sktime/clustering/evaluation/_plot_clustering.py
|
marcio55afr/sktime
|
25ba2f470f037366ca6b0e529137d3d0a6191e2e
|
[
"BSD-3-Clause"
] | 5,349
|
2019-03-21T14:56:50.000Z
|
2022-03-31T11:25:30.000Z
|
sktime/clustering/evaluation/_plot_clustering.py
|
marcio55afr/sktime
|
25ba2f470f037366ca6b0e529137d3d0a6191e2e
|
[
"BSD-3-Clause"
] | 1,803
|
2019-03-26T13:33:53.000Z
|
2022-03-31T23:58:10.000Z
|
sktime/clustering/evaluation/_plot_clustering.py
|
marcio55afr/sktime
|
25ba2f470f037366ca6b0e529137d3d0a6191e2e
|
[
"BSD-3-Clause"
] | 911
|
2019-03-25T01:21:30.000Z
|
2022-03-31T04:45:51.000Z
|
# -*- coding: utf-8 -*-
"""Cluster plotting tools"""
__author__ = ["Christopher Holder", "Tony Bagnall"]
__all__ = ["plot_cluster_algorithm"]
import pandas as pd
from sktime.clustering.base._typing import NumpyOrDF
from sktime.clustering.base.base import BaseClusterer
from sktime.clustering.partitioning._lloyds_partitioning import (
TimeSeriesLloydsPartitioning,
)
from sktime.datatypes._panel._convert import from_nested_to_2d_array
from sktime.utils.validation._dependencies import _check_soft_dependencies
def _plot(cluster_values, center, axes):
for cluster_series in cluster_values:
axes.plot(cluster_series, color="b")
axes.plot(center, color="r")
def plot_cluster_algorithm(model: BaseClusterer, predict_series: NumpyOrDF, k: int):
"""
    Plot the output of a clustering algorithm.
Parameters
----------
model: BaseClusterer
Clustering model to plot
predict_series: Numpy or Dataframe
The series to predict the values for
k: int
Number of centers
"""
_check_soft_dependencies("matplotlib")
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
if isinstance(predict_series, pd.DataFrame):
predict_series = from_nested_to_2d_array(predict_series, return_numpy=True)
plt.figure(figsize=(5, 10))
plt.rcParams["figure.dpi"] = 100
indexes = model.predict(predict_series)
centers = model.get_centers()
series_values = TimeSeriesLloydsPartitioning.get_cluster_values(
indexes, predict_series, k
)
fig, axes = plt.subplots(nrows=k, ncols=1)
for i in range(k):
_plot(series_values[i], centers[i], axes[i])
blue_patch = mpatches.Patch(color="blue", label="Series that belong to the cluster")
red_patch = mpatches.Patch(color="red", label="Cluster centers")
plt.legend(
handles=[red_patch, blue_patch],
loc="upper center",
bbox_to_anchor=(0.5, -0.40),
fancybox=True,
shadow=True,
ncol=5,
)
plt.tight_layout()
plt.show()
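# Usage sketch: `model` is any fitted BaseClusterer and `X` a panel of series
# (nested DataFrame or 2D numpy array); `k` must match the model's center count.
#   plot_cluster_algorithm(model, X, k=3)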
| 30.144928
| 88
| 0.703846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 502
| 0.241346
|
029ed725f1f2d111375bab605c9d49677c361f7c
| 2,858
|
py
|
Python
|
src/tests/crud/test_user.py
|
Behnam-sn/neat-backend
|
ba6e6356ee092eba27179f72fd2a15e25c68d1b8
|
[
"MIT"
] | 1
|
2022-03-07T22:16:48.000Z
|
2022-03-07T22:16:48.000Z
|
src/tests/crud/test_user.py
|
Behnam-sn/neat-backend
|
ba6e6356ee092eba27179f72fd2a15e25c68d1b8
|
[
"MIT"
] | null | null | null |
src/tests/crud/test_user.py
|
Behnam-sn/neat-backend
|
ba6e6356ee092eba27179f72fd2a15e25c68d1b8
|
[
"MIT"
] | 1
|
2022-03-07T22:16:49.000Z
|
2022-03-07T22:16:49.000Z
|
from sqlalchemy.orm import Session
from src import crud
from src.core.security import verify_password
from src.schemas.user import UserCreate, UserUpdate
from src.tests.utils.user import create_random_user_by_api
from src.tests.utils.utils import random_lower_string
def test_create_user(db: Session):
username = random_lower_string()
password = random_lower_string()
user_in = UserCreate(username=username, password=password)
user_obj = crud.create_user(db, user=user_in)
assert user_obj.username == username
assert hasattr(user_obj, "hashed_password")
def test_authenticate_user(db: Session):
username = random_lower_string()
password = random_lower_string()
create_random_user_by_api(username=username, password=password)
authenticated_user = crud.authenticate_user(
db,
username=username,
password=password
)
assert authenticated_user
assert authenticated_user.username == username
def test_not_authenticate_user(db: Session):
user = crud.authenticate_user(
db,
username=random_lower_string(),
password=random_lower_string()
)
assert user is None
def test_get_all_users(db: Session):
    users = crud.get_users(db)
assert users
def test_get_user(db: Session):
username = random_lower_string()
password = random_lower_string()
create_random_user_by_api(username=username, password=password)
user = crud.get_user_by_username(db, username=username)
assert user
assert user.username == username
def test_update_user(db: Session):
username = random_lower_string()
password = random_lower_string()
create_random_user_by_api(username=username, password=password)
new_username = random_lower_string()
full_name = random_lower_string()
user_in_update = UserUpdate(
username=new_username,
full_name=full_name,
)
crud.update_user(db, username=username, user_update=user_in_update)
user = crud.get_user_by_username(db, username=new_username)
assert user
assert username != new_username
assert user.full_name
def test_update_password(db: Session):
username = random_lower_string()
password = random_lower_string()
create_random_user_by_api(username=username, password=password)
new_password = random_lower_string()
crud.update_password(db, username=username, new_password=new_password)
user = crud.get_user_by_username(db, username=username)
assert user
assert verify_password(new_password, user.hashed_password)
def test_delete_user(db: Session):
username = random_lower_string()
password = random_lower_string()
create_random_user_by_api(username=username, password=password)
user = crud.remove_user(db, username=username)
assert user
assert user.username == username
| 26.220183
| 74
| 0.747726
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 17
| 0.005948
|
02a1f0d97978ae4cf6f1fe28ed7cbca384a07bc2
| 1,062
|
py
|
Python
|
src/transductor/tests/test_forms.py
|
fga-gpp-mds/2016.2-Time07
|
44d78ce4f36b7cb535b9c775027b8a93972ba5e3
|
[
"MIT"
] | null | null | null |
src/transductor/tests/test_forms.py
|
fga-gpp-mds/2016.2-Time07
|
44d78ce4f36b7cb535b9c775027b8a93972ba5e3
|
[
"MIT"
] | null | null | null |
src/transductor/tests/test_forms.py
|
fga-gpp-mds/2016.2-Time07
|
44d78ce4f36b7cb535b9c775027b8a93972ba5e3
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from transductor.forms import EnergyForm
from transductor.models import TransductorModel
class EnergyTransductorForm(TestCase):
def setUp(self):
t_model = TransductorModel()
t_model.name = "TR 4020"
t_model.transport_protocol = "UDP"
        t_model.serial_protocol = "Modbus RTU"
t_model.measurements_type = "EnergyMeasurements"
t_model.register_addresses = [[68, 0], [70, 1]]
t_model.save()
self.t_model = t_model
def test_valid_form(self):
data = {
'serie_number': 1,
'ip_address': "111.111.111.111",
'description': "Test",
'model': self.t_model.id
}
form = EnergyForm(data=data)
self.assertTrue(form.is_valid())
def test_invalid_form(self):
data = {
'serie_number': u'',
'ip_address': "1",
'description': u'',
'model': u''
}
form = EnergyForm(data=data)
self.assertFalse(form.is_valid())
| 25.902439
| 56
| 0.579096
| 937
| 0.882298
| 0
| 0
| 0
| 0
| 0
| 0
| 173
| 0.1629
|
02a2b904bf7cbd57601e581a0ffde8c156b2a583
| 3,729
|
py
|
Python
|
src/models/base.py
|
zhengzangw/pytorch-classification
|
3a6d95e3810015fa71c950492585c11dfe0b8b64
|
[
"MIT"
] | null | null | null |
src/models/base.py
|
zhengzangw/pytorch-classification
|
3a6d95e3810015fa71c950492585c11dfe0b8b64
|
[
"MIT"
] | null | null | null |
src/models/base.py
|
zhengzangw/pytorch-classification
|
3a6d95e3810015fa71c950492585c11dfe0b8b64
|
[
"MIT"
] | null | null | null |
from typing import Any, List, Optional
import hydra
import torch
from omegaconf import DictConfig
from pytorch_lightning import LightningModule
from ..optimizer.scheduler import create_scheduler
from ..utils import utils
log = utils.get_logger(__name__)
class LitBase(LightningModule):
def __init__(self, cfg: Optional[DictConfig] = None, **kwargs):
super().__init__()
self.save_hyperparameters()
config = cfg
self.config = config
# model
log.info(f"Instantiating module <{config.module._target_}>")
self.model = hydra.utils.instantiate(
config.module, num_classes=config.datamodule.num_classes
)
# load from checkpoint
if config.get("load_from_checkpoint"):
ckpt = torch.load(config.load_from_checkpoint)
missing_keys, unexpected_keys = self.load_state_dict(ckpt["state_dict"], strict=False)
log.info(f"[ckpt] Missing keys: {missing_keys}, Unexpected keys: {unexpected_keys}.")
log.info(f"[ckpt] Load checkpoint from {config.load_from_checkpoint}.")
# loss function
log.info(f"Instantiating module <{config.loss._target_}>")
self.criterion = hydra.utils.instantiate(config.loss)
def forward(self, x: torch.Tensor):
return self.model(x)
# ------------
# train
# ------------
def training_epoch_end(self, outputs: List[Any]):
pass
# ------------
# validation
# ------------
def validation_epoch_end(self, outputs: List[Any]):
pass
# ------------
# test
# ------------
def test_epoch_end(self, outputs: List[Any]):
pass
# ------------
# optim
# ------------
def configure_scheduler(self, optimizer):
config = self.config
num_steps_per_epoch = int(
self.trainer.datamodule.train_len / config.datamodule.effective_batch_size + 0.5
)
max_epoch = config.trainer.max_epochs
max_iterations = max_epoch * num_steps_per_epoch
if config.scheduler.policy == "epoch":
sch_times = max_epoch
else:
sch_times = max_iterations
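        # Shorten the main schedule by the warmup length. Under the
        # "iteration" policy a float `warmup.times` appears to denote a length
        # in epochs and is converted to iterations below (inferred from the
        # code, not from documentation).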
if config.scheduler.get("warmup"):
if config.scheduler.policy == "epoch":
sch_times -= config.scheduler.warmup.times
elif config.scheduler.policy == "iteration":
if isinstance(config.scheduler.warmup.times, float):
sch_times -= config.scheduler.warmup.times * num_steps_per_epoch
else:
sch_times -= config.scheduler.warmup.times
else:
raise ValueError(
"scheduler_policy should be epoch or iteration,"
f"but '{config.scheduler.policy}' given."
)
schedulers = []
if config.scheduler.get("name"):
log.info(f"Creating module <{config.scheduler.name}>")
sch = create_scheduler(optimizer=optimizer, sch_times=sch_times, **config.scheduler)
schedulers.append(sch)
return schedulers
def configure_optimizers(self):
config = self.config
# === Optimizer ===
log.info(f"Instantiating module <{config.optimizer._target_}>")
if config.optimizer._target_.split(".")[-1] in ["LARS"]:
optimizer = hydra.utils.instantiate(config.optimizer, self.model)
else:
optimizer = hydra.utils.instantiate(config.optimizer, self.model.parameters())
# === Scheduler ===
schedulers = self.configure_scheduler(optimizer)
return [optimizer], schedulers
| 31.601695
| 98
| 0.604452
| 3,413
| 0.915259
| 0
| 0
| 0
| 0
| 0
| 0
| 728
| 0.195227
|
02a48025dd5fe8b32b133893735d857d8b3b537a
| 11,122
|
py
|
Python
|
tea/balance.py
|
pcubillos/TEA
|
e3e4844de4cacef89b9f4a8b1673545726bfc42e
|
[
"BSD-4-Clause-UC"
] | 25
|
2016-06-20T23:21:46.000Z
|
2022-02-06T18:57:33.000Z
|
tea/balance.py
|
pcubillos/TEA
|
e3e4844de4cacef89b9f4a8b1673545726bfc42e
|
[
"BSD-4-Clause-UC"
] | 3
|
2015-06-04T16:56:26.000Z
|
2018-04-03T03:33:31.000Z
|
tea/balance.py
|
dzesmin/TEA
|
0ec66410f274d9deea7764d53d6363f9aaad3355
|
[
"BSD-4-Clause-UC"
] | 19
|
2015-05-27T17:46:41.000Z
|
2021-08-05T10:54:59.000Z
|
#! /usr/bin/env python
############################# BEGIN FRONTMATTER ################################
# #
# TEA - calculates Thermochemical Equilibrium Abundances of chemical species #
# #
# TEA is part of the PhD dissertation work of Dr. Jasmina #
# Blecic, who developed it with coding assistance from #
# undergraduate M. Oliver Bowman and under the advice of #
# Prof. Joseph Harrington at the University of Central Florida, #
# Orlando, Florida, USA. #
# #
# Copyright (C) 2014-2016 University of Central Florida #
# #
# This program is reproducible-research software: you can #
# redistribute it and/or modify it under the terms of the #
# Reproducible Research Software License as published by #
# Prof. Joseph Harrington at the University of Central Florida, #
# either version 0.3 of the License, or (at your option) any later #
# version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# Reproducible Research Software License for more details. #
# #
# You should have received a copy of the Reproducible Research #
# Software License along with this program. If not, see #
# <http://planets.ucf.edu/resources/reproducible/>. The license's #
# preamble explains the situation, concepts, and reasons surrounding #
# reproducible research, and answers some common questions. #
# #
# This project was started with the support of the NASA Earth and #
# Space Science Fellowship Program, grant NNX12AL83H, held by #
# Jasmina Blecic, Principal Investigator Joseph Harrington, and the #
# NASA Science Mission Directorate Planetary Atmospheres Program, #
# grant NNX12AI69G. #
# #
# See the file ACKNOWLEDGING in the top-level TEA directory for #
# instructions on how to acknowledge TEA in publications. #
# #
# Visit our Github site: #
# https://github.com/dzesmin/TEA/ #
# #
# Reach us directly at: #
# Jasmina Blecic <jasmina@nyu.edu> #
# #
############################## END FRONTMATTER #################################
import readconf as rc
import os
import numpy as np
from sys import argv
from sympy.core import Symbol
from sympy.solvers import solve
import format as form
def balance(a, b, verb=0, loc_out=None):
"""
This code produces an initial guess for the first TEA iteration by
fulfilling the mass balance condition, sum_i(ai_j * y_i) = bj (equation (17)
in the TEA theory paper), where i is species index, j is element index, a's
are stoichiometric coefficients, and b's are elemental fractions by number,
i.e., ratio of number densities of element 'j' to the total number densities
of all elements in the system (see the end of the Section 2 in the TEA theory
paper). The code writes the result into machine- and human-readable files,
if requested.
To satisfy the mass balance equation, some yi variables remain as free
parameters. The number of free parameters is set to the number of total
elements in the system, thus ensuring that the mass balance equation can
be solved for any number of input elements and output species the user
chooses. The code locates a chunk of species (y_i) containing a sum of
ai_j values that forbids ignoring any element in the system (sum of the
ai_j values in a column must not be zero). This chunk is used as a set
    of free variables in the system. The initial scale for the other y_i
    variables is set to a known, arbitrary number: the starting values for
    the known species are initially 0.1 moles, and the mass balance equation
    is calculated. If this value does not produce all positive mole numbers,
the code automatically sets known parameters to 10 times smaller and
tries again. Actual mole numbers for the initial guesses of y_i are
arbitrary, as TEA only requires a balanced starting point to initialize
minimization. The goal of this code is to find a positive set of non-zero
mole numbers to satisfy this requirement. Finally, the code calculates y_bar,
initializes the iteration number, delta, and delta_bar to zero and writes
results into machine- and human-readable output files.
This code is called by runatm.py and runsingle.py
Parameters
----------
a: 2D float ndarray
Stoichiometric coefficients of the species.
b: 1D float ndarray
Elemental mixing fractions.
verb: Integer
Verbosity level (0=mute, 1=quiet, 2=verbose).
loc_out: String
If not None, save results to this folder.
Returns
-------
y: 1D float ndarray
Initial non-zero guesses for the species mixing ratios that
satisfy the mass-balance equation.
y_bar: Float
Sum of the mixing ratios.
"""
# Read in values from header file
nspec, natom = np.shape(a)
# Print b values for debugging purposes
if verb > 1:
print("b values: " + str(b))
# Find chunk of ai_j array that will allow the corresponding yi values
# to be free variables such that all elements are considered
for n in np.arange(nspec - natom + 1):
# Get lower and upper indices for chunk of ai_j array to check
lower = n
upper = n + natom
# Retrieve chunk of ai_j that would contain free variables
a_chunk = a[lower:upper]
# Sum columns to get total of ai_j in chunk for each species 'j'
check = list(map(sum, zip(*a_chunk)))
# Look for zeros in check. If a zero is found, this chunk of data can't
# be used for free variables, as this signifies an element is ignored
has_zero = 0 in check
# If zero not found, create list of free variables' indices
        if not has_zero:
free_id = []
for m in np.arange(natom):
if verb > 1:
print('Using y_{:d} as a free variable.'.format(n + m + 1))
free_id.append(n + m)
break
# Set initial guess of non-free y_i
scale = 0.1
# Loop until all y_i are non-zero positive:
nofit = True
while nofit:
# Set up list of 'known' initial mole numbers before and after free chunk
pre_free = np.zeros(free_id[0]) + scale
post_free = np.zeros(nspec - free_id[-1] - 1) + scale
# Set up list of free variables
free = []
for m in np.arange(natom):
            name = 'y_unknown_' + str(m)  # np.str was removed from NumPy; use the builtin
free.append(Symbol(name))
# Combine free and 'known' to make array of y_initial mole numbers
y_init = np.append(pre_free, free)
y_init = np.append(y_init, post_free)
# Make 'j' equations satisfying mass balance equation (17) in TEA
# theory doc:
# sum_i(ai_j * y_i) = b_j
eq = []
for m in np.arange(natom):
rhs = 0
for n in np.arange(nspec):
rhs += a[n, m] * y_init[n]
rhs -= b[m]
eq.append(rhs)
# Solve system of linear equations to get free y_i variables
result = solve(list(eq), list(free), rational=False)
# Correct for no-solution-found results.
# If no solution found, decrease scale size.
if result == []:
scale /= 10
if verb > 1:
print("Correcting initial guesses for realistic mass. \
Trying " + str(scale) + "...")
# Correct for negative-mass results. If found, decrease scale size
else:
# Assume no negatives and check
hasneg = False
for m in np.arange(natom):
if result[free[m]] < 0:
hasneg = True
# If negatives found, decrease scale size
if hasneg:
scale /= 10
if verb > 1:
print("Negative numbers found in fit."
"\n Correcting initial guesses for realistic mass."
"\n Trying scale of {:.0e}.".format(scale))
# If no negatives found, exit the loop (good fit is found)
else:
nofit = False
if verb > 1:
print("A scale of {:.0e} provided a viable initial guess.".
format(scale))
# Gather the results
fit = []
for m in np.arange(natom):
fit = np.append(fit, result[free[m]])
# Put the result into the final y_init array
y_init[free_id[0]:free_id[natom-1]+1] = fit
# This part of the code is only for debugging purposes
# It rounds the values and checks whether the balance equation is satisfied
# No values are changed and this serves solely as a check
if verb > 1:
print('\nChecks:')
for m in np.arange(natom):
flag = round((sum(a[:,m] * y_init[:])), 2) == round(b[m], 2)
if flag:
if verb > 1:
print('Equation {:d} is satisfied.'.format(m+1))
else:
print('Equation {:d} is NOT satisfied. Check for errors'.format(m+1))
# Put all initial mole numbers in y array
y = np.array(y_init, dtype=np.double)
# Make y_bar (sum of all y values)
y_bar = np.sum(y, dtype=np.double)
# Initialize delta variables to 0. (this signifies the first iteration)
delta = np.zeros(nspec)
delta_bar = np.sum(delta)
return y, y_bar
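# ----------------------------------------------------------------------
# A minimal usage sketch (not part of the original TEA code; it assumes
# TEA's own imports above, readconf and format, resolve). The toy system
# below, species [H, O, H2O] over the elements [H, O], is illustrative only.
if __name__ == "__main__":
    a_demo = np.array([[1.0, 0.0],   # H
                       [0.0, 1.0],   # O
                       [2.0, 1.0]])  # H2O
    b_demo = np.array([2.0 / 3.0, 1.0 / 3.0])
    y_demo, y_bar_demo = balance(a_demo, b_demo, verb=2)
    # The result must satisfy the mass balance sum_i(ai_j * y_i) = b_j
    # with strictly positive mole numbers.
    assert np.allclose(np.dot(a_demo.T, y_demo), b_demo)
    assert (y_demo > 0).all()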
| 45.958678
| 85
| 0.538482
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8,282
| 0.74465
|
02a4beb0015cd6725cf78ab2fb76439c197ecfc1
| 2,073
|
py
|
Python
|
sims/s251/calc-err.py
|
ammarhakim/ammar-simjournal
|
85b64ddc9556f01a4fab37977864a7d878eac637
|
[
"MIT",
"Unlicense"
] | 1
|
2019-12-19T16:21:13.000Z
|
2019-12-19T16:21:13.000Z
|
sims/s251/calc-err.py
|
ammarhakim/ammar-simjournal
|
85b64ddc9556f01a4fab37977864a7d878eac637
|
[
"MIT",
"Unlicense"
] | null | null | null |
sims/s251/calc-err.py
|
ammarhakim/ammar-simjournal
|
85b64ddc9556f01a4fab37977864a7d878eac637
|
[
"MIT",
"Unlicense"
] | 2
|
2020-01-08T06:23:33.000Z
|
2020-01-08T07:06:50.000Z
|
from pylab import *
import tables
def exactSol(X, Y, t):
return exp(-2*t)*sin(X)*cos(Y)
fh = tables.openFile("s251-dg-diffuse-2d_q_1.h5")
q = fh.root.StructGridField
nx, ny, nc = q.shape
dx = 2*pi/nx
Xf = linspace(0, 2*pi-dx, nx)
dy = 2*pi/ny
Yf = linspace(0, 2*pi-dy, ny)
XX, YY = meshgrid(Xf, Yf)
Xhr = linspace(0, 2*pi, 101)
Yhr = linspace(0, 2*pi, 101)
XXhr, YYhr = meshgrid(Xhr, Yhr)
fhr = exactSol(XXhr, YYhr, 1.0)
figure(1)
pcolormesh(Xhr, Yhr, fhr)
colorbar()
figure(2)
pcolormesh(Xf, Yf, q[:,:,0])
colorbar()
# compute error
fex = exactSol(XX, YY, 1.0)
error = abs(fex.transpose()-q[:,:,0]).sum()/(nx*ny)
print("%g %g" % (dx, error))
def evalSum(coeff, fields):
res = 0.0*fields[0]
for i in range(len(coeff)):
res = res + coeff[i]*fields[i]
return res
def projectOnFinerGrid_f24(Xc, Yc, q):
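    # Descriptive note: interpolate 4-node (bilinear) DG data onto a 2x finer
    # mesh; the coefficient sets c1..c4 below are the bilinear basis functions
    # evaluated at the cell quarter-points, e.g. 0.5625 = 0.75 * 0.75.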
dx = Xc[1]-Xc[0]
dy = Yc[1]-Yc[0]
nx = Xc.shape[0]
ny = Yc.shape[0]
# mesh coordinates
Xn = linspace(Xc[0]-0.5*dx, Xc[-1]+0.5*dx, 2*nx+1) # one more
Yn = linspace(Yc[0]-0.5*dy, Yc[-1]+0.5*dy, 2*ny+1) # one more
XXn, YYn = meshgrid(Xn, Yn)
# data
qn = zeros((2*Xc.shape[0], 2*Yc.shape[0]), float)
v1 = q[:,:,0]
v2 = q[:,:,1]
v3 = q[:,:,2]
v4 = q[:,:,3]
vList = [v1,v2,v3,v4]
# node 1
c1 = [0.5625,0.1875,0.0625,0.1875]
qn[0:2*nx:2, 0:2*ny:2] = evalSum(c1, vList)
# node 2
c2 = [0.1875,0.5625,0.1875,0.0625]
qn[1:2*nx:2, 0:2*ny:2] = evalSum(c2, vList)
# node 3
c3 = [0.1875,0.0625,0.1875,0.5625]
qn[0:2*nx:2, 1:2*ny:2] = evalSum(c3, vList)
# node 4
c4 = [0.0625,0.1875,0.5625,0.1875]
qn[1:2*nx:2, 1:2*ny:2] = evalSum(c4, vList)
return XXn, YYn, qn
Xc = linspace(0.5*dx, 2*pi-0.5*dx, nx)
Yc = linspace(0.5*dy, 2*pi-0.5*dy, ny)
Xp, Yp, qp = projectOnFinerGrid_f24(Xc, Yc, q)
figure(1)
subplot(1,2,1)
pcolormesh(Xp, Yp, transpose(qp))
title('RDG t=1')
colorbar(shrink=0.5)
axis('image')
subplot(1,2,2)
pcolormesh(Xhr, Yhr, fhr)
title('Exact t=1')
colorbar(shrink=0.5)
axis('image')
savefig('s251-exact-cmp.png')
show()
| 21.371134
| 65
| 0.578389
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 179
| 0.086348
|
02a600c96645d56c182f0a175380bb6948a7e4b5
| 973
|
py
|
Python
|
PyCharm/Exercicios/Aula12/ex041.py
|
fabiodarice/Python
|
15ec1c7428f138be875111ac98ba38cf2eec1a93
|
[
"MIT"
] | null | null | null |
PyCharm/Exercicios/Aula12/ex041.py
|
fabiodarice/Python
|
15ec1c7428f138be875111ac98ba38cf2eec1a93
|
[
"MIT"
] | null | null | null |
PyCharm/Exercicios/Aula12/ex041.py
|
fabiodarice/Python
|
15ec1c7428f138be875111ac98ba38cf2eec1a93
|
[
"MIT"
] | null | null | null |
# Library imports
from datetime import date
# Program title
print('\033[1;34;40mCLASSIFICAÇÃO DE CATEGORIAS PARA NATAÇÃO\033[m')
# Objects
nascimento = int(input('\033[30mDigite o ano do seu nascimento:\033[m '))
idade = date.today().year - nascimento
mirim = 9
infantil = 14
junior = 19
senior = 20
# Logic
if idade <= mirim:
print('Sua idade é \033[1;33m{} anos\033[m, e sua categoria é a \033[1;34mMIRIM!\033[m'.format(idade))
elif idade <= infantil:
print('Sua idade é \033[1;33m{}\033[m anos, e sua categoria é a \033[1;34mINFANTIL!\033[m'.format(idade))
elif idade <= junior:
print('Sua idade é \033[1;33m{}\033[m anos, e sua categoria é a \033[1;34mJUNIOR!\033[m'.format(idade))
elif idade <= senior:
print('Sua idade é \033[1;33m{}\033[m anos, e sua categoria é a \033[1;34mSÊNIOR!\033[m'.format(idade))
elif idade > senior:
print('Sua idade é \033[1;33m{}\033[m anos, e sua categoria é \033[1;34mMASTER!\033[m'.format(idade))
| 38.92
| 109
| 0.693731
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 601
| 0.605847
|
02a632349d6da6f348ea1c189802c694c33a0241
| 1,681
|
py
|
Python
|
github-bot/harvester_github_bot/config.py
|
futuretea/bot
|
5f1f1a08e0fca6519e0126ff8f0b87fec23a38e3
|
[
"Apache-2.0"
] | null | null | null |
github-bot/harvester_github_bot/config.py
|
futuretea/bot
|
5f1f1a08e0fca6519e0126ff8f0b87fec23a38e3
|
[
"Apache-2.0"
] | null | null | null |
github-bot/harvester_github_bot/config.py
|
futuretea/bot
|
5f1f1a08e0fca6519e0126ff8f0b87fec23a38e3
|
[
"Apache-2.0"
] | null | null | null |
from everett.component import RequiredConfigMixin, ConfigOptions
from everett.manager import ConfigManager, ConfigOSEnv
class BotConfig(RequiredConfigMixin):
required_config = ConfigOptions()
required_config.add_option('flask_loglevel', parser=str, default='info', doc='Set the log level for Flask.')
required_config.add_option('flask_password', parser=str, doc='Password for HTTP authentication in Flask.')
required_config.add_option('flask_username', parser=str, doc='Username for HTTP authentication in Flask.')
required_config.add_option('github_owner', parser=str, default='harvester', doc='Set the owner of the target GitHub '
'repository.')
required_config.add_option('github_repository', parser=str, default='harvester', doc='Set the name of the target '
'GitHub repository.')
required_config.add_option('github_repository_test', parser=str, default='tests', doc='Set the name of the tests '
'GitHub repository.')
required_config.add_option('github_token', parser=str, doc='Set the token of the GitHub machine user.')
required_config.add_option('zenhub_pipeline', parser=str, default='Review', doc='Set the target ZenHub pipeline to '
'handle events for.')
def get_config():
config = ConfigManager(environments=[
ConfigOSEnv()
])
return config.with_options(BotConfig())
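# A usage sketch (illustrative, not part of the original module): with
# everett's ConfigOSEnv backend, values come from the process environment,
# e.g. FLASK_LOGLEVEL=debug.
#
#   config = get_config()
#   loglevel = config('flask_loglevel')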
| 64.653846
| 121
| 0.606782
| 1,423
| 0.84652
| 0
| 0
| 0
| 0
| 0
| 0
| 543
| 0.323022
|
02a66be9396c06d98dcd1e2835505651b29dc2d8
| 618
|
py
|
Python
|
scripts/MCA/1combine.py
|
jumphone/scRef
|
7308d8571c3e46f481c9432857de84fd13955166
|
[
"MIT"
] | 10
|
2018-11-27T09:32:53.000Z
|
2022-03-21T02:42:54.000Z
|
scripts/MCA/1combine.py
|
jumphone/scRef
|
7308d8571c3e46f481c9432857de84fd13955166
|
[
"MIT"
] | null | null | null |
scripts/MCA/1combine.py
|
jumphone/scRef
|
7308d8571c3e46f481c9432857de84fd13955166
|
[
"MIT"
] | 2
|
2018-12-13T18:45:26.000Z
|
2020-06-20T07:18:19.000Z
|
import sys
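# Descriptive note: combine several whitespace-delimited expression tables
# column-wise, keyed by the gene name in the first column; only genes with
# values from every input file are written to the output.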
output = sys.argv[1]
input_list = sys.argv[2:]
EXP={}
header=[]
for input_file in input_list:
fi=open(input_file)
header=header+fi.readline().replace('"','').rstrip().split()
for line in fi:
seq=line.replace('"','').rstrip().split()
if seq[0] in EXP:
EXP[seq[0]]=EXP[seq[0]]+seq[1:]
else:
EXP[seq[0]]=seq[1:]
fi.close()
fo=open(output,'w')
fo.write('\t'.join(header)+'\n')
for gene in EXP:
if len(EXP[gene])==len(header):
fo.write(gene+'\t'+'\t'.join(EXP[gene])+'\n')
fo.close()
| 16.263158
| 65
| 0.509709
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 33
| 0.053398
|
02a7fe68590060c7734febb66561d5e0b21091e7
| 1,152
|
py
|
Python
|
apps/portalbase/macros/wiki/graph/1_main.py
|
jumpscale7/jumpscale_portal
|
8c99265e48f85643f8a52bc40a23f5266fb09231
|
[
"Apache-2.0"
] | 2
|
2016-04-14T14:05:01.000Z
|
2016-04-21T07:20:36.000Z
|
apps/portalbase/macros/wiki/graph/1_main.py
|
Jumpscale/jumpscale6_core
|
0502ddc1abab3c37ed982c142d21ea3955d471d3
|
[
"BSD-2-Clause"
] | 13
|
2016-03-07T12:07:15.000Z
|
2018-02-28T13:11:59.000Z
|
apps/portalbase/macros/wiki/graph/1_main.py
|
Jumpscale/jumpscale6_core
|
0502ddc1abab3c37ed982c142d21ea3955d471d3
|
[
"BSD-2-Clause"
] | 5
|
2016-03-08T07:49:51.000Z
|
2018-10-19T13:57:04.000Z
|
def main(j, args, params, tags, tasklet):
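    # Descriptive note: render the dot source taken from the macro body with
    # Graphviz and cache the PNG under <filesroot>/dot/<md5>; if dot emits
    # warnings, return the dot source in a code block instead.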
params.merge(args)
doc = params.doc
tags = params.tags
out = ""
cmdstr = params.macrostr.split(":", 1)[1].replace("}}", "").strip()
md5 = j.base.byteprocessor.hashMd5(cmdstr)
j.system.fs.createDir(j.system.fs.joinPaths(j.core.portal.active.filesroot, "dot"))
path = j.system.fs.joinPaths(j.core.portal.active.filesroot, "dot", md5)
if not j.system.fs.exists(path + ".png"):
j.system.fs.writeFile(path + ".dot", cmdstr)
cmd = "dot -Tpng %s.dot -o %s.png" % (path, path)
# for i in range(5):
rescode, result = j.system.process.execute(cmd)
# if result.find("warning")==011:
if result.find("warning") != -1:
out = result
out += '\n'
out += "##DOT FILE WAS##:\n"
out += cmdstr
out += "##END OF DOT FILE##\n"
out = "{{code:\n%s\n}}" % out
params.result = out
return params
out = "!/files/dot/%s.png!" % md5
params.result = (out, doc)
return params
def match(j, args, params, tags, tasklet):
return True
| 27.428571
| 87
| 0.539063
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 209
| 0.181424
|
02a84eeec97777f61185766e05077d7532adafbc
| 232
|
py
|
Python
|
pyramda/logic/any_pass.py
|
sergiors/pyramda
|
5bf200888809b1bc946e813e29460f204bccd13e
|
[
"MIT"
] | 124
|
2015-07-30T21:34:25.000Z
|
2022-02-19T08:45:50.000Z
|
pyramda/logic/any_pass.py
|
sergiors/pyramda
|
5bf200888809b1bc946e813e29460f204bccd13e
|
[
"MIT"
] | 37
|
2015-08-31T23:02:20.000Z
|
2022-02-04T04:45:28.000Z
|
pyramda/logic/any_pass.py
|
sergiors/pyramda
|
5bf200888809b1bc946e813e29460f204bccd13e
|
[
"MIT"
] | 20
|
2015-08-04T18:59:09.000Z
|
2021-12-13T08:08:59.000Z
|
from pyramda.function.curry import curry
from pyramda.function.always import always
from pyramda.iterable.reduce import reduce
from .either import either
@curry
def any_pass(ps, v):
return reduce(either, always(False), ps)(v)
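# A minimal usage sketch (illustrative, not part of the library): any_pass
# returns True if any predicate in `ps` passes for `v`.
if __name__ == "__main__":
    is_positive = lambda x: x > 0
    is_even = lambda x: x % 2 == 0
    assert any_pass([is_positive, is_even], -4)      # even, so True
    assert not any_pass([is_positive, is_even], -3)  # neither, so False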
| 23.2
| 47
| 0.784483
| 0
| 0
| 0
| 0
| 75
| 0.323276
| 0
| 0
| 0
| 0
|
02ab52b832209102ceb109d8aa07a587d3c2d55e
| 817
|
py
|
Python
|
bio/scheduler/views.py
|
ZuluPro/bio-directory
|
4cdd3967e97363f59795d7b0fdb85998029370ff
|
[
"BSD-2-Clause"
] | null | null | null |
bio/scheduler/views.py
|
ZuluPro/bio-directory
|
4cdd3967e97363f59795d7b0fdb85998029370ff
|
[
"BSD-2-Clause"
] | null | null | null |
bio/scheduler/views.py
|
ZuluPro/bio-directory
|
4cdd3967e97363f59795d7b0fdb85998029370ff
|
[
"BSD-2-Clause"
] | null | null | null |
from django.shortcuts import render, get_object_or_404
from django.utils.timezone import now
from schedule.models import Calendar
from schedule.periods import Day
from bio.scheduler import models
def actions(request):
actions = models.Action.objects.order_by('-created_on')
period = Day(actions, now())
    # XXX: Hack to avoid getting tomorrow's occurrences
occs = [occ for occ in period.get_occurrences()
if occ.start.date() == now().date()]
return render(request, 'bio/actions.html', {
'meta': actions.model._meta,
'objects': occs,
'calendar': Calendar.objects.get(name='Bio')
})
def action(request, action_id):
action = get_object_or_404(models.Action.objects.filter(id=action_id))
return render(request, 'bio/action.html', {
'action': action
})
| 31.423077
| 74
| 0.684211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 124
| 0.151775
|
02ac373e21b06fce0640079ee711d2d820229c82
| 1,759
|
py
|
Python
|
src/undefined/1/try.py
|
ytyaru0/Python.Sqlite.framework.think.20180305101210
|
babb8f55183776a9cbed486613c4a1a7caa6daf2
|
[
"CC0-1.0"
] | null | null | null |
src/undefined/1/try.py
|
ytyaru0/Python.Sqlite.framework.think.20180305101210
|
babb8f55183776a9cbed486613c4a1a7caa6daf2
|
[
"CC0-1.0"
] | null | null | null |
src/undefined/1/try.py
|
ytyaru0/Python.Sqlite.framework.think.20180305101210
|
babb8f55183776a9cbed486613c4a1a7caa6daf2
|
[
"CC0-1.0"
] | null | null | null |
try:
import MyTable
except NameError as e:
print(e)
import importlib
importlib.import_module('Constraints')
    # Add the Constraints.py module to the module currently in locals()
    # (add Constraints to MyTable, i.e. do `from Constraints import PK,UK,FK,NN,D,C`).
    # Capture the module that raised the exception via the traceback
import sys, traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
last_tb = None
for tb in traceback.extract_tb(exc_traceback):
print(tb)
last_tb = tb
#print(last_tb)
#print(type(last_tb))
#print(dir(last_tb))
print(last_tb.filename)
print(last_tb.line)
print(last_tb.lineno)
print(last_tb.name)
import pathlib
module_path = pathlib.Path(last_tb.filename)
module_name = module_path.name.replace(module_path.suffix, '')
print(module_name)
    # I tried to inject Constraints into the module instance, but it fails
    # because of the "PK is not defined" error.
    # Maybe build the source code as a string instead, prepending
    # `from Constraints import PK,UK,FK,NN,D,C`?
    # Then just exec(source_code)?
#import importlib
#importlib.import_module(module_name)
print(e)
    #print('undefined', e)
#print(type(e))
#print(dir(e))
#print(e.args)
#print(type(e.with_traceback()))
#print(e.with_traceback())
#print(type(e.with_traceback))
#print(dir(e.with_traceback))
    # The file starts with a line like #!python3, but handling it is skipped!
source_code = 'from Constraints import PK,UK,FK,NN,D,C' + '\n'
with pathlib.Path(last_tb.filename).open() as f:
source_code += f.read()
exec(source_code)
    assert module_name in locals()
cls = locals()[module_name]
print(dir(cls))
print(cls.Id)
# name 'PK' is not defined
#print(locals())
#print(locals()['__loader__'])
#print(dir(locals()['__loader__']))
#print(locals()['__loader__'].get_filename())
| 29.316667
| 75
| 0.665151
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,091
| 0.537703
|
02aedbb634ecb4773466e66e6c16d7f09a8368bc
| 22,797
|
py
|
Python
|
trac/wiki/tests/admin.py
|
rjollos/trac
|
2bc0edd96b0eace18aaa8a2fe3cbeebdf1a88214
|
[
"BSD-3-Clause"
] | null | null | null |
trac/wiki/tests/admin.py
|
rjollos/trac
|
2bc0edd96b0eace18aaa8a2fe3cbeebdf1a88214
|
[
"BSD-3-Clause"
] | null | null | null |
trac/wiki/tests/admin.py
|
rjollos/trac
|
2bc0edd96b0eace18aaa8a2fe3cbeebdf1a88214
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018-2020 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
import os.path
import sys
import tempfile
import unittest
from trac.admin.api import console_datetime_format
from trac.admin.console import TracAdmin
from trac.admin.test import TracAdminTestCaseBase
from trac.test import EnvironmentStub, mkdtemp
from trac.tests.contentgen import random_unique_camel, random_paragraph
from trac.util import create_file
from trac.util.datefmt import format_datetime
from trac.wiki.admin import WikiAdmin
from trac.wiki.model import WikiPage
class WikiAdminTestCase(unittest.TestCase):
page_text = 'Link to WikiStart'
def setUp(self):
self.env = EnvironmentStub()
self.env.path = mkdtemp()
self.tmpdir = os.path.join(self.env.path, 'tmp')
os.mkdir(self.tmpdir)
self.filename = os.path.join(self.tmpdir, 'file.txt')
create_file(self.filename, self.page_text)
self.admin = WikiAdmin(self.env)
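        # Descriptive note: build multi-version fixture pages whose readonly
        # flag toggles between versions (latest flag: WritablePage=0,
        # ReadOnlyPage=1), so the import tests can check how the flag is
        # handled on overwrite.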
with self.env.db_transaction:
for name, readonly in (('WritablePage', [0, 1, 0]),
('ReadOnlyPage', [1, 0, 1, 0, 1])):
for r in readonly:
page = WikiPage(self.env, name)
page.text = '[wiki:%s@%d]' % (name, page.version + 1)
page.readonly = r
page.save('trac', '')
def tearDown(self):
self.env.reset_db_and_disk()
def _import_page(self, *args, **kwargs):
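        # Run WikiAdmin.import_page with stdout redirected to os.devnull so
        # its progress output stays out of the test logs.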
with open(os.devnull, 'wb') as devnull:
stdout = sys.stdout
try:
sys.stdout = devnull
self.admin.import_page(*args, **kwargs)
finally:
sys.stdout = stdout
def test_import_page_new(self):
self._import_page(self.filename, 'NewPage')
page = WikiPage(self.env, 'NewPage')
self.assertEqual('NewPage', page.name)
self.assertEqual(1, page.version)
self.assertEqual(self.page_text, page.text)
self.assertEqual(0, page.readonly)
def test_import_page_readonly(self):
page = WikiPage(self.env, 'ReadOnlyPage')
self.assertEqual(5, page.version)
self.assertEqual(1, page.readonly)
self.assertNotEqual(self.page_text, page.text)
self._import_page(self.filename, 'ReadOnlyPage')
page = WikiPage(self.env, 'ReadOnlyPage')
self.assertEqual(6, page.version)
self.assertEqual(1, page.readonly)
self.assertEqual(self.page_text, page.text)
def test_import_page_not_readonly(self):
page = WikiPage(self.env, 'WritablePage')
self.assertEqual(3, page.version)
self.assertEqual(0, page.readonly)
self.assertNotEqual(self.page_text, page.text)
self._import_page(self.filename, 'WritablePage')
page = WikiPage(self.env, 'WritablePage')
self.assertEqual(4, page.version)
self.assertEqual(0, page.readonly)
self.assertEqual(self.page_text, page.text)
def test_import_page_uptodate(self):
page = WikiPage(self.env, 'WritablePage')
self.assertEqual(3, page.version)
self.assertEqual(0, page.readonly)
create_file(self.filename, page.text)
page_text = page.text
self._import_page(self.filename, 'WritablePage')
page = WikiPage(self.env, 'WritablePage')
self.assertEqual(3, page.version)
self.assertEqual(0, page.readonly)
self.assertEqual(page_text, page.text)
def test_import_page_replace(self):
page = WikiPage(self.env, 'WritablePage')
self.assertEqual(3, page.version)
self.assertEqual(0, page.readonly)
self.assertNotEqual(self.page_text, page.text)
self._import_page(self.filename, 'WritablePage', replace=True)
page = WikiPage(self.env, 'WritablePage')
self.assertEqual(3, page.version)
self.assertEqual(0, page.readonly)
self.assertEqual(self.page_text, page.text)
class TracAdminTestCase(TracAdminTestCaseBase):
def setUp(self):
self.env = EnvironmentStub(default_data=True, enable=('trac.*',),
disable=('trac.tests.*',))
self.admin = TracAdmin()
self.admin.env_set('', self.env)
self.tempdir = mkdtemp()
def tearDown(self):
self.env = None
def _insert_page(self, name=None):
page = WikiPage(self.env)
if name is None:
name = random_unique_camel()
page.name = name
page.text = random_paragraph()
page.save('user1', 'Page created.')
return name
def _insert_pages(self, int_or_names):
if isinstance(int_or_names, int):
names = sorted(random_unique_camel()
for _ in range(0, int_or_names))
else:
names = sorted(int_or_names)
return [self._insert_page(n) for n in names]
def _change_page(self, name):
page = WikiPage(self.env, name)
page.text = random_paragraph()
page.save('user2', 'Page changed.')
def _file_content(self, dir_or_path, name=None):
path = dir_or_path if name is None else os.path.join(dir_or_path, name)
with open(path, 'r') as f:
return f.read()
def _write_file(self, path, content=None):
if content is None:
content = random_paragraph()
with open(path, 'w') as f:
f.write(content)
return content
def execute(self, cmd, *args):
argstr = ' '.join('"%s"' % a for a in args)
return super(TracAdminTestCase, self) \
.execute('wiki {} {}'.format(cmd, argstr))
def assertFileContentMatchesPage(self, names):
for n in names:
self.assertEqual(WikiPage(self.env, n).text,
self._file_content(self.tempdir, n))
def test_wiki_dump(self):
names = self._insert_pages(2)
rv, output = self.execute('dump', self.tempdir, *names)
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'name1': names[0],
'name2': names[1],
'path1': os.path.join(self.tempdir, names[0]),
'path2': os.path.join(self.tempdir, names[1]),
})
self.assertFileContentMatchesPage(names)
def test_wiki_dump_all(self):
names = self._insert_pages(2)
rv, output = self.execute('dump', self.tempdir)
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'name1': names[0],
'name2': names[1],
'path1': os.path.join(self.tempdir, names[0]),
'path2': os.path.join(self.tempdir, names[1]),
})
        self.assertEqual(names, sorted(os.listdir(self.tempdir)))
self.assertFileContentMatchesPage(names)
def test_wiki_dump_all_create_dst_dir(self):
names = self._insert_pages(2)
dstdir = os.path.join(self.tempdir, 'subdir')
rv, output = self.execute('dump', dstdir)
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'name1': names[0],
'name2': names[1],
'path1': os.path.join(dstdir, names[0]),
'path2': os.path.join(dstdir, names[1]),
})
        self.assertEqual(names, sorted(os.listdir(dstdir)))
def test_wiki_dump_all_glob(self):
names = self._insert_pages(['PageOne', 'PageTwo', 'ThreePage'])
rv, output = self.execute('dump', self.tempdir, 'Page*')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'name1': names[0],
'name2': names[1],
'path1': os.path.join(self.tempdir, names[0]),
'path2': os.path.join(self.tempdir, names[1]),
})
        self.assertEqual(names[0:2], sorted(os.listdir(self.tempdir)))
self.assertFileContentMatchesPage(names[0:2])
def test_wiki_dump_all_dst_is_file(self):
tempdir = os.path.join(self.tempdir, 'dst')
create_file(tempdir)
rv, output = self.execute('dump', tempdir)
self.assertEqual(2, rv, output)
self.assertExpectedResult(output, {
'dstdir': tempdir,
})
def test_wiki_export(self):
name = self._insert_page()
export_path = os.path.join(self.tempdir, name)
rv, output = self.execute('export', name, export_path)
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'name': name,
'path': export_path,
})
self.assertFileContentMatchesPage([name])
def test_wiki_export_page_not_found(self):
name = random_unique_camel()
export_path = os.path.join(self.tempdir, name)
rv, output = self.execute('export', name, export_path)
self.assertEqual(2, rv, output)
self.assertExpectedResult(output, {
'name': name,
})
def test_wiki_export_file_exists(self):
name = self._insert_page()
export_path = os.path.join(self.tempdir, name)
create_file(export_path)
rv, output = self.execute('export', name, export_path)
self.assertEqual(2, rv, output)
self.assertExpectedResult(output, {
'export_path': export_path,
})
def test_wiki_export_print_to_stdout(self):
name = self._insert_page()
rv, output = self.execute('export', name)
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'content': WikiPage(self.env, name).text,
})
def test_wiki_import(self):
name = random_unique_camel()
import_path = os.path.join(self.tempdir, name)
content = self._write_file(import_path)
rv, output = self.execute('import', name, import_path)
page = WikiPage(self.env, name)
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'name': name,
'path': import_path,
})
self.assertIn(('INFO', '%s imported from %s' % (name, import_path)),
self.env.log_messages)
self.assertEqual(content, page.text)
def test_wiki_import_page_exists(self):
name = self._insert_page()
import_path = os.path.join(self.tempdir, name)
self._write_file(import_path)
rv, output = self.execute('import', name, import_path)
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'name': name,
'path': import_path,
})
self.assertEqual(2, WikiPage(self.env, name).version)
def test_wiki_import_page_up_to_date(self):
name = self._insert_page()
import_path = os.path.join(self.tempdir, name)
self._write_file(import_path, WikiPage(self.env, name).text)
rv, output = self.execute('import', name, import_path)
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'tempdir': self.tempdir,
'name': name,
})
self.assertIn(('INFO', '%s is already up to date' % name),
self.env.log_messages)
self.assertEqual(1, WikiPage(self.env, name).version)
def test_wiki_import_page_name_invalid(self):
name = 'PageOne/../PageTwo'
import_path = os.path.join(self.tempdir, 'PageOne')
self._write_file(import_path)
rv, output = self.execute('import', name, import_path)
self.assertEqual(2, rv, output)
self.assertExpectedResult(output, {
'tempdir': self.tempdir,
'name': name
})
self.assertFalse(WikiPage(self.env, name).exists)
def test_wiki_import_file_not_found(self):
name = random_unique_camel()
import_path = os.path.join(self.tempdir, name)
rv, output = self.execute('import', name, import_path)
page = WikiPage(self.env, name)
self.assertEqual(2, rv, output)
self.assertExpectedResult(output, {
'import_path': import_path,
})
def test_wiki_list(self):
name1 = self._insert_page('PageOne')
name2 = self._insert_page('PageTwo')
self._change_page(name2)
rv, output = self.execute('list')
self.assertEqual(0, rv, output)
fmt = lambda m: format_datetime(m, console_datetime_format)
self.assertExpectedResult(output, {
'page1_modified': fmt(WikiPage(self.env, name1).time),
'page2_modified': fmt(WikiPage(self.env, name2).time),
})
def test_wiki_list_no_pages(self):
rv, output = self.execute('list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_wiki_load(self):
name1 = 'PageOne'
name2 = 'PageTwo'
path1 = os.path.join(self.tempdir, name1)
path2 = os.path.join(self.tempdir, name2)
content1 = random_paragraph()
content2 = random_paragraph()
with open(path1, 'w') as f:
f.write(content1)
with open(path2, 'w') as f:
f.write(content2)
rv, output = self.execute('load', path1, path2)
page1 = WikiPage(self.env, name1)
page2 = WikiPage(self.env, name2)
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'name1': name1,
'name2': name2,
'path1': path1,
'path2': path2,
})
self.assertIn(('INFO', '%s imported from %s' % (name1, path1)),
self.env.log_messages)
self.assertIn(('INFO', '%s imported from %s' % (name2, path2)),
self.env.log_messages)
self.assertEqual(content1, page1.text)
self.assertEqual(content2, page2.text)
self.assertEqual(1, page1.version)
self.assertEqual(1, page2.version)
def test_wiki_load_page_exists(self):
name = self._insert_page()
path = os.path.join(self.tempdir, name)
content = self._write_file(path)
rv, output = self.execute('load', path)
page = WikiPage(self.env, name)
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'name': name,
'path': path,
})
self.assertEqual(content, page.text)
self.assertEqual(2, page.version)
def test_wiki_load_pages_from_dir(self):
name1 = 'PageOne'
name2 = 'PageTwo'
path1 = os.path.join(self.tempdir, name1)
path2 = os.path.join(self.tempdir, name2)
content1 = random_paragraph()
content2 = random_paragraph()
with open(path1, 'w') as f:
f.write(content1)
with open(path2, 'w') as f:
f.write(content2)
os.mkdir(os.path.join(self.tempdir, 'subdir'))
rv, output = self.execute('load', self.tempdir)
page1 = WikiPage(self.env, name1)
page2 = WikiPage(self.env, name2)
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'name1': name1,
'name2': name2,
'path1': path1,
'path2': path2,
})
self.assertEqual(content1, page1.text)
self.assertEqual(content2, page2.text)
self.assertEqual(1, page1.version)
self.assertEqual(1, page2.version)
def test_wiki_load_from_invalid_path(self):
name = random_unique_camel()
path = os.path.join(self.tempdir, name)
rv, output = self.execute('load', path)
self.assertEqual(2, rv, output)
self.assertExpectedResult(output, {
'path': path,
})
self.assertFalse(WikiPage(self.env, name).exists)
def test_wiki_remove(self):
name = self._insert_page()
rv, output = self.execute('remove', name)
self.assertIn(('INFO', 'Deleted page %s' % name),
self.env.log_messages)
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'name': name,
})
self.assertFalse(WikiPage(self.env, name).exists)
def test_wiki_remove_glob(self):
names = self._insert_pages(['PageOne', 'PageTwo', 'PageThree'])
rv, output = self.execute('remove', 'Page*')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
for n in names:
self.assertIn(('INFO', 'Deleted page %s' % n),
self.env.log_messages)
self.assertFalse(WikiPage(self.env, n).exists)
def test_wiki_rename(self):
name1 = self._insert_page()
name2 = random_unique_camel()
rv, output = self.execute('rename', name1, name2)
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'name1': name1,
'name2': name2,
})
self.assertIn(('INFO', 'Renamed page %s to %s' % (name1, name2)),
self.env.log_messages)
self.assertFalse(WikiPage(self.env, name1).exists)
self.assertTrue(WikiPage(self.env, name2).exists)
def test_wiki_rename_name_unchanged(self):
name = self._insert_page()
rv, output = self.execute('rename', name, name)
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
self.assertTrue(WikiPage(self.env, name).exists)
def test_wiki_rename_name_not_specified(self):
name = self._insert_page()
rv, output = self.execute('rename', name)
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_wiki_rename_new_name_invalid(self):
name = self._insert_page()
new_name = 'PageOne/../PageTwo'
rv, output = self.execute('rename', name, new_name)
self.assertEqual(2, rv, output)
self.assertExpectedResult(output, {
'name': new_name,
})
self.assertTrue(WikiPage(self.env, name).exists)
def test_wiki_rename_new_page_exists(self):
names = self._insert_pages(['PageOne', 'PageTwo'])
page1_content = WikiPage(self.env, names[0]).text
page2_content = WikiPage(self.env, names[1]).text
rv, output = self.execute('rename', *names)
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
page1 = WikiPage(self.env, names[0])
page2 = WikiPage(self.env, names[1])
self.assertTrue(page1.exists)
self.assertTrue(page2.exists)
self.assertEqual(page1_content, page1.text)
self.assertEqual(page2_content, page2.text)
def test_wiki_replace(self):
name1 = random_unique_camel()
name2 = random_unique_camel()
path1 = os.path.join(self.tempdir, name1)
path2 = os.path.join(self.tempdir, name2)
content1 = random_paragraph()
content2 = random_paragraph()
self._insert_page(name1)
self._insert_page(name2)
with open(path1, 'w') as f:
f.write(content1)
with open(path2, 'w') as f:
f.write(content2)
rv, output = self.execute('replace', path1, path2)
page1 = WikiPage(self.env, name1)
page2 = WikiPage(self.env, name2)
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'name1': name1,
'name2': name2,
'path1': path1,
'path2': path2,
})
self.assertIn(('INFO', '%s imported from %s' % (name1, path1)),
self.env.log_messages)
self.assertIn(('INFO', '%s imported from %s' % (name2, path2)),
self.env.log_messages)
self.assertEqual(content1, page1.text)
self.assertEqual(content2, page2.text)
self.assertEqual(1, page1.version)
self.assertEqual(1, page2.version)
def test_wiki_replace_new_page(self):
name = random_unique_camel()
path = os.path.join(self.tempdir, name)
content = self._write_file(path)
rv, output = self.execute('replace', path)
page = WikiPage(self.env, name)
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'name': name,
'path': path,
})
self.assertEqual(1, page.version)
self.assertEqual(content, page.text)
def test_wiki_replace_pages_from_dir(self):
names = self._insert_pages(2)
path1 = os.path.join(self.tempdir, names[0])
path2 = os.path.join(self.tempdir, names[1])
content1 = random_paragraph()
content2 = random_paragraph()
with open(path1, 'w') as f:
f.write(content1)
with open(path2, 'w') as f:
f.write(content2)
os.mkdir(os.path.join(self.tempdir, 'subdir'))
rv, output = self.execute('replace', self.tempdir)
page1 = WikiPage(self.env, names[0])
page2 = WikiPage(self.env, names[1])
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'name1': names[0],
'name2': names[1],
'path1': path1,
'path2': path2,
})
self.assertEqual(content1, page1.text)
self.assertEqual(content2, page2.text)
self.assertEqual(1, page1.version)
self.assertEqual(1, page2.version)
def test_wiki_replace_from_invalid_path(self):
name = random_unique_camel()
path = os.path.join(self.tempdir, name)
rv, output = self.execute('replace', path)
self.assertEqual(2, rv, output)
self.assertExpectedResult(output, {
'path': path,
})
self.assertFalse(WikiPage(self.env, name).exists)
def test_wiki_upgrade(self):
rv, output = self.execute('upgrade')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_wiki_upgrade_up_to_date(self):
self.execute('upgrade')
rv, output = self.execute('upgrade')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(WikiAdminTestCase))
suite.addTest(unittest.makeSuite(TracAdminTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| 37.128664
| 79
| 0.605343
| 21,560
| 0.945738
| 0
| 0
| 0
| 0
| 0
| 0
| 2,064
| 0.090538
|
02aee1a4ed761847d16082b08939ec1e2b7eae92
| 2,646
|
py
|
Python
|
cli/cli.py
|
kandrio/toy-chord
|
c35b02f1e7d7ba44e14a86e1944acc8cee3cd5da
|
[
"Apache-2.0"
] | null | null | null |
cli/cli.py
|
kandrio/toy-chord
|
c35b02f1e7d7ba44e14a86e1944acc8cee3cd5da
|
[
"Apache-2.0"
] | null | null | null |
cli/cli.py
|
kandrio/toy-chord
|
c35b02f1e7d7ba44e14a86e1944acc8cee3cd5da
|
[
"Apache-2.0"
] | null | null | null |
import click
import requests
bootstrap_ip = '192.168.0.2'
bootstrap_port = '8000'
base_url = 'http://' + bootstrap_ip + ':' + bootstrap_port
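# Example invocations (illustrative; assumes this file is run as cli.py):
#   python cli.py insert --key mykey --value myvalue
#   python cli.py query --key mykey --host 192.168.0.3 --port 8001
#   python cli.py depart --host 192.168.0.3 --port 8001
#   python cli.py overlay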
@click.group()
def toychord():
"""CLI client for toy-chord."""
pass
@toychord.command()
@click.option('--key', required=True, type=str)
@click.option('--value', required=True, type=str)
@click.option('--host', default=bootstrap_ip, type=str)
@click.option('--port', default=bootstrap_port, type=int)
def insert(key, value, host, port):
"""Make an insert request for a key-value pair, to a specific Node.
    NOTE: The key-value pair may not be inserted into the database
    of the Node that receives the request. It will be inserted into
    the database of the Node that owns the hash ID of
    the key-value pair.
"""
url = 'http://' + host + ':' + str(port) + '/insert'
data = {
'key': key,
'value': value
}
r = requests.post(url, data)
    if r.status_code == 200:
click.echo('The key value pair was successfully inserted!')
else:
click.echo('Something went wrong with inserting the key-value pair.')
@toychord.command()
@click.option('--key', required=True, type=str)
@click.option('--host', default=bootstrap_ip, type=str)
@click.option('--port', default=bootstrap_port, type=int)
def delete(key, host, port):
"""Make a delete request for a key-value pair, to a specific Node.
NOTE: The key-value pair doesn't have to be stored in the database
of the Node that receives the request.
"""
url = 'http://' + host + ':' + str(port) + '/delete'
data = {
'key': key
}
r = requests.post(url, data)
click.echo(r.text)
@toychord.command()
@click.option('--key', required=True, type=str)
@click.option('--host', default=bootstrap_ip, type=str)
@click.option('--port', default=bootstrap_port, type=int)
def query(key, host, port):
"""Query for a key-value pair."""
url = 'http://' + host + ':' + str(port) + '/query'
data = {
'key': key
}
r = requests.post(url, data)
click.echo(r.text)
@toychord.command()
@click.option('--host', required=True, type=str)
@click.option('--port', required=True, type=int)
def depart(host, port):
"""Send a request to a specific Node to depart from toy-chord."""
url = 'http://' + host + ':' + str(port) + '/node/depart'
r = requests.post(url, {})
click.echo(r.text)
@toychord.command()
def overlay():
"""Print the placement of the Nodes in toy-chord."""
url = base_url + '/overlay'
r = requests.get(url)
click.echo(r.text)
if __name__ == '__main__':
toychord()
| 25.941176
| 77
| 0.628118
| 0
| 0
| 0
| 0
| 2,443
| 0.92328
| 0
| 0
| 1,025
| 0.387377
|
02b0025b6adb156b789fef5aff0bf34cd7804353
| 87
|
py
|
Python
|
sportstrackeranalyzer/plugin_handler/__init__.py
|
XeBoris/SportsTrackerAnalyzer
|
f211a9120b9ba91bb04b9742c80d0de7b4143f78
|
[
"MIT"
] | 1
|
2021-02-12T08:00:34.000Z
|
2021-02-12T08:00:34.000Z
|
sportstrackeranalyzer/plugin_handler/__init__.py
|
XeBoris/SportsTrackerAnalyzer
|
f211a9120b9ba91bb04b9742c80d0de7b4143f78
|
[
"MIT"
] | null | null | null |
sportstrackeranalyzer/plugin_handler/__init__.py
|
XeBoris/SportsTrackerAnalyzer
|
f211a9120b9ba91bb04b9742c80d0de7b4143f78
|
[
"MIT"
] | null | null | null |
from sportstrackeranalyzer.plugins.plugin_simple_distances import Plugin_SimpleDistance
| 87
| 87
| 0.942529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
02b0d90057314fc93b4b1b7ec332876605dcadca
| 2,598
|
py
|
Python
|
migrations/versions/a12e86de073c_.py
|
ravenscroftj/harri_gttool
|
11e8e6b5e8c4bbfc62dc15c7d8b099d4a4fa1a5e
|
[
"MIT"
] | null | null | null |
migrations/versions/a12e86de073c_.py
|
ravenscroftj/harri_gttool
|
11e8e6b5e8c4bbfc62dc15c7d8b099d4a4fa1a5e
|
[
"MIT"
] | 4
|
2020-06-18T14:38:26.000Z
|
2021-12-13T19:54:55.000Z
|
migrations/versions/a12e86de073c_.py
|
ravenscroftj/harri_gttool
|
11e8e6b5e8c4bbfc62dc15c7d8b099d4a4fa1a5e
|
[
"MIT"
] | null | null | null |
"""Initial creation of schema
Revision ID: a12e86de073c
Revises:
Create Date: 2018-01-05 13:42:18.768932
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a12e86de073c'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('articles',
sa.Column('article_id', sa.Integer(), nullable=False),
sa.Column('url', sa.String(length=255), nullable=True),
sa.Column('title', sa.String(length=255), nullable=True),
sa.Column('content', sa.Text(collation='utf8_general_ci'), nullable=True),
sa.Column('publish_date', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('article_id')
)
op.create_table('authors',
sa.Column('author_id', sa.Integer(), nullable=False),
sa.Column('fullname', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('author_id')
)
op.create_table('papers',
sa.Column('paper_id', sa.Integer(), nullable=False),
sa.Column('doi', sa.String(length=64), nullable=True),
sa.Column('title', sa.String(length=255, collation='utf8_general_ci'), nullable=True),
sa.Column('abstract', sa.Text(collation='utf8_general_ci'), nullable=True),
sa.Column('pubdate', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('paper_id'),
sa.UniqueConstraint('doi')
)
op.create_table('entities',
sa.Column('entity_id', sa.Integer(), nullable=False),
sa.Column('text', sa.String(length=128, collation='utf8_general_ci'), nullable=True),
sa.Column('type', sa.String(length=32), nullable=True),
sa.Column('start', sa.Integer(), nullable=True),
sa.Column('end', sa.Integer(), nullable=True),
sa.Column('article_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['article_id'], ['articles.article_id'], ),
sa.PrimaryKeyConstraint('entity_id')
)
op.create_table('paper_authors',
sa.Column('paper_id', sa.Integer(), nullable=False),
sa.Column('author_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['author_id'], ['authors.author_id'], ),
sa.ForeignKeyConstraint(['paper_id'], ['papers.paper_id'], ),
sa.PrimaryKeyConstraint('paper_id', 'author_id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('paper_authors')
op.drop_table('entities')
op.drop_table('papers')
op.drop_table('authors')
op.drop_table('articles')
# ### end Alembic commands ###
| 36.591549
| 90
| 0.681678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 856
| 0.329484
|
02b1a17a92fa82b80990e63f3f0d4e50c1738b1c
| 5,106
|
py
|
Python
|
takedown/__init__.py
|
zsxing99/Takedown-script
|
fcd0533ab71a1198651a6e53cd1d58039d4fa7fd
|
[
"MIT"
] | 1
|
2021-01-06T00:23:03.000Z
|
2021-01-06T00:23:03.000Z
|
takedown/__init__.py
|
zsxing99/Takedown-script
|
fcd0533ab71a1198651a6e53cd1d58039d4fa7fd
|
[
"MIT"
] | 4
|
2020-11-09T06:01:25.000Z
|
2020-12-17T06:39:30.000Z
|
takedown/__init__.py
|
zsxing99/Takedown-script
|
fcd0533ab71a1198651a6e53cd1d58039d4fa7fd
|
[
"MIT"
] | null | null | null |
"""
TakeDown v0.1.0
===============
author: Zesheng Xing
email: zsxing@ucdavis.edu
This Python project helps people search content-hosting services for material that potentially violates their copyright.
"""
VERSION = "0.1.0"
DESCRIPTION = "A python script that lets users search GitHub for content that potentially " \
              "violates their copyright and send takedown emails."
CONTRIBUTORS_INFO = "The project is developed by Zesheng Xing and supervised by Joël Porquet-Lupine at UC Davis, 2020."
USAGE = \
"""
Usage: takedown.py command [args...]
where commands include:
find search repositories
python takedown.py find [search_query] [GitHub_token] [-options]
with following args:
[search_query]: required. The text used to search.
[Github_token]: required. The Github token used to raise the rate limit and enable broader search.
[-t target]: optional. The target of the search query. It could be “repo”, “code”. It is “code” by default.
Concatenate them by “+”, eg. “-t code+repo”.
[-i input]: optional. The file path of previous output of takedown find. By providing this path, the output
this time will be compared against the previous one.
[-o output]: optional. The output file path. The result will be printed to the console by default.
[-f format]: optional. The output format. It could be “yaml” or “json”. It is “yaml” by default
or using a configuration file:
python takedown.py find -c <path_to_config_file>
config file args:
required args:
[search_query]: required. The text used to search.
[Github_token]: required. The Github token used to raise the rate limit and enable broader search.
optional args:
[target]: optional. The target of the search query. It could be “repo”, “code”. It is “code” by default.
Concatenate them by “+”, eg. “-t code+repo”.
[input]: optional. The file path of previous output of takedown find. By providing this path,
the output this time will be compared against the previous one.
[output]: optional. The output file path. The result will be printed to the console by default.
[format]: optional. The output format. It could be “yaml” or “json”. It is “yaml” by default
send send emails based on records
python takedown send [domain] [port] [inputs] [-options]
with following args:
[domain]: required. The domain address to connect
[port]: required. port of domain to connect
[inputs]: required. Input files to send email
[-u username]: optional. username of the account. or ask
[-p password]: optional. password of the account. or ask
[-s secure method]: optional. It could be “TLS” or “SSL”, depending on the domain and port connected.
Confirm before using this option.
[-t tags]: optional. Only the records that matches the tag will be sent with an email
[-o output]: optional. The output file path. The result will be printed to the console by default.
[-f format]: optional. The output format. It could be “yaml” or “json”. It is “yaml” by default
[-en email name]: optional. name used to send email. Otherwise username will be used
[-es email subject]: optional. subject of the email. Otherwise default email subject is used
[-ep email preface]: optional. preface of the email. Otherwise default email preface is used
            [-ee email ending]: optional. ending of the email. Otherwise default email ending is used
or using a configuration file:
python takedown.py send -c <path_to_config_file>
config file args:
required parameters:
[domain]: required. Domain used to connect smtp service
[port]: required. Port of domain to connect smtp service
[inputs]: required. Records based to send emails
optional parameters:
[username]: optional. username of the account. or ask
[password]: optional. password of the account. or ask
[secure method]: optional. It could be “TLS” or “SSL”, depending on the domain and port connected.
Confirm before using this option.
[tags]: optional. Only the records that matches the tag will be sent with an email
[output]: optional. The output file path. The result will be printed to the console by default.
[format]: optional. The output format. It could be “yaml” or “json”. It is “yaml” by default
            [email_name]: optional. name used to send email. Otherwise username will be used
[email_subject]: optional. subject of the email. Otherwise default email subject is used
[email_preface]: optional. preface of the email. Otherwise default email preface is used
            [email_ending]: optional. ending of the email. Otherwise default email ending is used
help show instructions and list of options
"""
| 60.785714
| 120
| 0.665883
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,133
| 0.985032
|
02b1f2bbe7cd1537f8a13eba37e53d713c062a3c
| 977
|
py
|
Python
|
test/test_data.py
|
xoriath/py-cab
|
ab02faeaf69578bb9a0874632c610b27a9dd582f
|
[
"MIT"
] | null | null | null |
test/test_data.py
|
xoriath/py-cab
|
ab02faeaf69578bb9a0874632c610b27a9dd582f
|
[
"MIT"
] | null | null | null |
test/test_data.py
|
xoriath/py-cab
|
ab02faeaf69578bb9a0874632c610b27a9dd582f
|
[
"MIT"
] | null | null | null |
import os.path
CABEXTRACT_TEST_DIR = os.path.join(os.path.dirname(__file__), 'test-data', 'cabextract', 'cabs')
CABEXTRACT_BUGS_DIR = os.path.join(os.path.dirname(__file__), 'test-data', 'cabextract', 'bugs')
LIBMSPACK_TEST_DIR = os.path.join(os.path.dirname(__file__), 'test-data', 'libmspack')
def read_cabextract_cab(file_name):
return read_cab(CABEXTRACT_TEST_DIR, file_name)
def read_cabextract_bugs_cab(file_name):
return read_cab(CABEXTRACT_BUGS_DIR, file_name)
def read_libmspack_cab(file_name):
return read_cab(LIBMSPACK_TEST_DIR, file_name)
def read_cab(directory, file_name):
with open(os.path.join(directory, file_name), 'rb') as f:
return f.read()
def read_cabextract_cases(file_name, encoding='utf-8'):
with open(os.path.join(CABEXTRACT_TEST_DIR, file_name), 'r', encoding=encoding, errors='replace') as f:
return [line.rstrip('\n') for line in f.readlines() if not line.startswith('#') and line.strip()]
| 36.185185
| 107
| 0.732856
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 110
| 0.11259
|
02b2af84d188c97a9d4469b8353aba7b18703383
| 141
|
py
|
Python
|
utils/exception.py
|
Lolik-Bolik/Hashing_Algorithms
|
d3ba488cf575fc685d5ea603b1915de4d5fed713
|
[
"MIT"
] | 2
|
2020-12-15T20:26:29.000Z
|
2020-12-15T20:27:26.000Z
|
utils/exception.py
|
Lolik-Bolik/Hashing_Algorithms
|
d3ba488cf575fc685d5ea603b1915de4d5fed713
|
[
"MIT"
] | null | null | null |
utils/exception.py
|
Lolik-Bolik/Hashing_Algorithms
|
d3ba488cf575fc685d5ea603b1915de4d5fed713
|
[
"MIT"
] | null | null | null |
"""
Exceptions module
"""
class CuckooHashMapFullException(Exception):
"""
    Exception raised when the hash map is full.
"""
pass
| 11.75
| 44
| 0.64539
| 112
| 0.794326
| 0
| 0
| 0
| 0
| 0
| 0
| 78
| 0.553191
|
02b2cd966c362b3581d56d85cfd72c1cf6dfa614
| 1,212
|
py
|
Python
|
finetwork/plotter/_centrality_metrics.py
|
annakuchko/FinNetwork
|
4566ff96b33fb5668f9b28f41a94791d1cf9249c
|
[
"MIT"
] | 5
|
2021-12-07T22:14:10.000Z
|
2022-03-30T14:09:15.000Z
|
finetwork/plotter/_centrality_metrics.py
|
annakuchko/FinNetwork
|
4566ff96b33fb5668f9b28f41a94791d1cf9249c
|
[
"MIT"
] | null | null | null |
finetwork/plotter/_centrality_metrics.py
|
annakuchko/FinNetwork
|
4566ff96b33fb5668f9b28f41a94791d1cf9249c
|
[
"MIT"
] | null | null | null |
import networkx as nx
class _CentralityMetrics:
def __init__(self, G, metrics):
self.G = G
self.metrics = metrics
    def _compute_metrics(self):
        metrics = self.metrics
        if metrics == 'degree_centrality':
            c = self.degree_centrality()
        elif metrics == 'betweenness_centrality':
            c = self.betweenness_centrality()
        elif metrics == 'closeness_centrality':
            c = self.closeness_centrality()
        elif metrics == 'eigenvector_centrality':
            c = self.bonachi_eigenvector_centrality()
        else:
            raise ValueError('Unknown metrics: {}'.format(metrics))
        return c
    def degree_centrality(self):
        # nx.degree_centrality has no weight parameter; it is purely degree-based
        centrality = nx.degree_centrality(self.G)
        return centrality
def betweenness_centrality(self):
centrality = nx.betweenness_centrality(self.G, weight='weight')
return centrality
    def closeness_centrality(self):
        # nx.closeness_centrality expects edge weights via the `distance` parameter
        centrality = nx.closeness_centrality(self.G, distance='weight')
        return centrality
def bonachi_eigenvector_centrality(self):
centrality = nx.eigenvector_centrality(self.G, weight='weight')
return centrality
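
# A minimal usage sketch (the toy weighted graph is illustrative, not from the project):
if __name__ == '__main__':
    G = nx.Graph()
    G.add_edge('A', 'B', weight=0.8)
    G.add_edge('B', 'C', weight=0.2)
    metrics = _CentralityMetrics(G, 'betweenness_centrality')
    print(metrics._compute_metrics())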
| 32.756757
| 72
| 0.615512
| 1,185
| 0.977723
| 0
| 0
| 0
| 0
| 0
| 0
| 121
| 0.099835
|
02b429e1598512b88fa03213426e6cd52e56ec98
| 3,558
|
py
|
Python
|
filters/incoming_filters.py
|
juhokokkala/podoco_juhokokkala
|
57709c539168b6aaddfc187b3a3610bef63bd68a
|
[
"MIT"
] | null | null | null |
filters/incoming_filters.py
|
juhokokkala/podoco_juhokokkala
|
57709c539168b6aaddfc187b3a3610bef63bd68a
|
[
"MIT"
] | null | null | null |
filters/incoming_filters.py
|
juhokokkala/podoco_juhokokkala
|
57709c539168b6aaddfc187b3a3610bef63bd68a
|
[
"MIT"
] | null | null | null |
###############################################################################
# Copyright (C) 2016 Juho Kokkala
# This is part of Juho Kokkala's PoDoCo project.
#
# This file is licensed under the MIT License.
###############################################################################
"""
Particle filters for tracking the incoming traffic intensity.
See the files script_test_poisson_1.py and script_test_negbin.py for
usage.
"""
import numpy as np
import resampling # resampling (c) Roger R Labbe Jr (MIT License)
from scipy.special import gammaln
def pf_init(Nrep, params):
"""
Initialize particle filter from MCMC samples.
"""
for key in params.keys():
params[key] = np.tile(params[key], Nrep)
N = params['A_x'].shape[0]
W = np.repeat(1/N, N)
x = np.random.normal(params['base'][0, :],
params['sqrtQ_x'] / np.sqrt((1 - params['A_x']**2)))
return x, params, W
def pf_update_poisson(y, x, params, W):
"""Update weights according to measurement"""
    logW = np.log(W) + y * x - np.exp(x)  # y * log(exp(x)) simplifies to y * x
W = np.exp(logW - np.max(logW))
W = W / sum(W)
return params, W
def pf_step_poisson(y, x, params, W, resample=True):
"""One step (measurement) of the particle filter, Poisson obs. model
(Resample)
Propagate the particles using the prior model,
Update weights
Remove the first elements of baselines
"""
N = W.shape[0]
if resample:
ind = resampling.residual_resample(W)
x = x[ind]
params['base'] = params['base'][:, ind]
params['sqrtQ_x'] = params['sqrtQ_x'][ind]
params['A_x'] = params['A_x'][ind]
W = np.repeat(1/N, N)
x = np.random.normal(params['base'][1, :] + params['A_x'] *
(x - params['base'][0, :]), params['sqrtQ_x'])
params = trim_base(params)
params, W = pf_update_poisson(y, x, params, W)
return x, params, W
def predict_mean(x, params, W):
"""Expected value of the next observation after the update step"""
return np.sum(W * (np.exp(params['base'][1, :] + params['A_x'] *
(x - params['base'][0, :]) + 0.5 * params['sqrtQ_x']**2)))
def trim_base(params):
"""Cuts the first component of base"""
params['base'] = params['base'][1:, :]
return params
def pf_update_negbin(y, x, params, W):
"""Update weights per measurement, NegBin obs. model"""
phi = np.exp(x) / (params['omega'] - 1)
    # include the current weights, mirroring pf_update_poisson (matters when resample=False)
    logW = (np.log(W) + gammaln(y + phi) - gammaln(phi) +
            y * (np.log(params['omega'] - 1) - np.log(params['omega'])) -
            phi * (np.log(params['omega'])))
W = np.exp(logW - np.max(logW))
W = W / sum(W)
return params, W
def pf_step_negbin(y, x, params, W, resample=True):
"""
One step (measurement) of the particle filter, NegBin obs. model
(Resample)
Propagate the particles using the prior model,
Update weights
Remove the first elements of baselines
"""
N = W.shape[0]
if resample:
ind = resampling.residual_resample(W)
x = x[ind]
params['base'] = params['base'][:, ind]
params['sqrtQ_x'] = params['sqrtQ_x'][ind]
params['A_x'] = params['A_x'][ind]
params['omega'] = params['omega'][ind]
W = np.repeat(1/N, N)
x = np.random.normal(params['base'][1, :] + params['A_x'] *
(x - params['base'][0, :]), params['sqrtQ_x'])
params = trim_base(params)
params, W = pf_update_negbin(y, x, params, W)
return x, params, W
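
# A minimal usage sketch with synthetic (not MCMC-derived) parameters; array shapes follow pf_init:
if __name__ == '__main__':
    n = 500
    params = {'A_x': np.full(n, 0.9),                  # AR(1) coefficients, one per particle
              'sqrtQ_x': np.full(n, 0.3),              # process noise standard deviations
              'base': np.full((20, n), np.log(10.0))}  # baseline log-intensity, (time, particle)
    x, params, W = pf_init(1, params)
    for y in [8, 12, 9, 11]:                           # synthetic Poisson counts
        x, params, W = pf_step_poisson(y, x, params, W)
        print(predict_mean(x, params, W))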
| 29.163934
| 79
| 0.559303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,399
| 0.393198
|
02b49f51a158d139c9f2c3154c3099e5edf9d8c5
| 2,610
|
py
|
Python
|
model/semantic_gcn.py
|
AndersonStra/Mucko
|
f630712ea5be5f3ce995958c050cd7b5398d31e0
|
[
"MIT"
] | 2
|
2022-02-23T02:14:49.000Z
|
2022-03-23T13:27:44.000Z
|
model/semantic_gcn.py
|
AndersonStra/Mucko
|
f630712ea5be5f3ce995958c050cd7b5398d31e0
|
[
"MIT"
] | null | null | null |
model/semantic_gcn.py
|
AndersonStra/Mucko
|
f630712ea5be5f3ce995958c050cd7b5398d31e0
|
[
"MIT"
] | 1
|
2022-03-23T13:27:49.000Z
|
2022-03-23T13:27:49.000Z
|
import torch
import torch.nn.functional as F
from torch import nn
import dgl
import networkx as nx
class SemanticGCN(nn.Module):
def __init__(self, config, in_dim, out_dim, rel_dim):
super(SemanticGCN, self).__init__()
self.config = config
self.gcn1 = SemanticGCNLayer(in_dim, out_dim, rel_dim)
def forward(self, bg):
bg = self.gcn1(bg)
return bg
class SemanticGCNLayer(nn.Module):
def __init__(self, in_dims, out_dims, rel_dims):
super(SemanticGCNLayer, self).__init__()
self.node_fc = nn.Linear(in_dims, in_dims)
self.rel_fc = nn.Linear(rel_dims, rel_dims)
self.apply_fc = nn.Linear(in_dims + rel_dims + in_dims, out_dims)
def forward(self, g):
g.apply_nodes(func=self.apply_node)
g.update_all(message_func=self.message, reduce_func=dgl.function.sum('msg', 'h_sum'))
h = g.ndata['h']
h_sum = g.ndata['h_sum']
h = torch.cat([h, h_sum], dim=1) # shape(2*outdim)
h = self.apply_fc(h)
h = F.relu(h)
g.ndata['h'] = h
return g
def apply_node(self, nodes):
h = self.node_fc(nodes.data['h'])
return {'h': h}
def message(self, edges):
        z1 = edges.src['att'] * edges.src['h'] + edges.dst['att'] * edges.dst['h']
z2 = edges.data['att'] * self.rel_fc(edges.data['rel'])
msg = torch.cat([z1, z2], dim=1)
return {'msg': msg}
class GATLayer(nn.Module):
def __init__(self, in_dim, out_dim, rel_dim):
super(GATLayer, self).__init__()
# equation (1)
self.fc = nn.Linear(in_dim, out_dim, bias=False)
# equation (2)
self.attn_fc = nn.Linear(2 * out_dim, 1, bias=False)
def edge_attention(self, edges):
# edge UDF for equation (2)
z2 = torch.cat([edges.src['z'], edges.dst['z']], dim=1)
a = self.attn_fc(z2)
return {'e': F.leaky_relu(a)}
def message_func(self, edges):
# message UDF for equation (3) & (4)
return {'z': edges.src['z'], 'e': edges.data['e']}
def reduce_func(self, nodes):
# reduce UDF for equation (3) & (4)
# equation (3)
alpha = F.softmax(nodes.mailbox['e'], dim=1)
# equation (4)
h = torch.sum(alpha * nodes.mailbox['z'], dim=1)
return {'h': h}
def forward(self, g):
# equation (1)
z = self.fc(g.ndata['h'])
g.ndata['z'] = z
# equation (2)
g.apply_edges(self.edge_attention)
# equation (3) & (4)
g.update_all(self.message_func, self.reduce_func)
return g
| 31.071429
| 93
| 0.576245
| 2,502
| 0.958621
| 0
| 0
| 0
| 0
| 0
| 0
| 317
| 0.121456
|
02b57da73d7345d506383c6f3f8675776637aa80
| 3,808
|
py
|
Python
|
negentropy/scriptparser.py
|
shewitt-au/negentropy
|
40841e3f7d95f9124f4b59b0d591bf16e57ef312
|
[
"MIT"
] | 4
|
2021-07-07T09:49:05.000Z
|
2021-11-14T04:17:11.000Z
|
negentropy/scriptparser.py
|
shewitt-au/negentropy
|
40841e3f7d95f9124f4b59b0d591bf16e57ef312
|
[
"MIT"
] | null | null | null |
negentropy/scriptparser.py
|
shewitt-au/negentropy
|
40841e3f7d95f9124f4b59b0d591bf16e57ef312
|
[
"MIT"
] | 1
|
2020-05-29T18:11:04.000Z
|
2020-05-29T18:11:04.000Z
|
from inspect import cleandoc
from textwrap import indent
from lark import Lark, Transformer
from lark.exceptions import LarkError
from .interval import Interval
from . import errors
def parse(ctx, fname):
with open(fname, "r") as f:
t = f.read()
try:
l = Lark(open(ctx.implfile("script.lark")).read(), parser='lalr', debug=True)
t = l.parse(t)
ScriptTransformer(ctx).transform(t)
except LarkError as e:
etext = indent(str(e), " ")
raise errors.ParserException("Error parsing '{}:\n{}'".format(fname, etext))
class ScriptTransformer(Transformer):
def __init__(self, ctx):
self.ctx = ctx
def decent(self, t):
def handle(decsrc, decdst):
self.ctx.parse_decoderentry(decsrc, decdst)
handle(**dict(t))
def decsrc(self, t):
return ("decsrc", str(t[0]))
def decdst(self, t):
return ("decdst", str(t[0]))
def options(self, t):
self.ctx.parse_options(t[0][1])
def datasource(self, t):
self.ctx.parse_datasource(t[0][1])
def memmap(self, t):
def handle(self, range, mmdecoder, properties={}, mmdataaddr=None):
self.ctx.memtype.parse_add(range, self.ctx.decoder(mmdecoder), properties, mmdataaddr)
self.ctx.memtype.parse_begin()
for e in t[0]:
handle(self, **dict(e))
self.ctx.memtype.parse_end()
def mmbody(self, t):
return t
def mmentry(self, t):
return t
def mmdecoder(self, t):
return ("mmdecoder", str(t[0]))
def mmdataaddr(self, t):
return ("mmdataaddr", t[0])
    def mmfromreset(self, t):
        return -1
def label(self, t):
def handle(self, range, lname, lflags=""):
self.ctx.syms.parse_add(self.ctx, range, lname, 'i' in lflags)
handle(self, **dict(t))
def lflags(self, t):
return ("lflags", str(t[0]))
def lname(self, t):
return ("lname", str(t[0]))
def directive(self, t):
def handle(daddress, dcommand, doaddress=None, dosymbol=None):
self.ctx.directives.parse_add(daddress, dcommand, doaddress, dosymbol)
handle(**dict(t))
def daddress(self, t):
return ("daddress", t[0])
def dcommand(self, t):
return ("dcommand", str(t[0]))
def doaddress(self, t):
return ("doaddress", t[0])
def dosymbol(self, t):
return ('dosymbol', str(t[0]))
def comment(self, t):
def handle(self, caddress, ctext, cpos="^"):
if not ctext:
ctext = "\n"
if cpos=='^':
self.ctx.cmts.add_before(caddress, ctext)
elif cpos=='v':
self.ctx.cmts.add_after(caddress, ctext)
elif cpos=='>':
self.ctx.cmts.add_inline(caddress, ctext)
handle(self, **dict(t))
def caddress(self, t):
return ("caddress", t[0])
def cpos(self, t):
return ("cpos", str(t[0]))
def ctext(self, t):
return ('ctext', str(t[0]))
def properties(self, t):
return ("properties", {str(i[0]) : i[1] for i in t})
def propentry(self, t):
return t
def hexnum(self, t):
return int(t[0][1:], 16)
def decimal(self, t):
return int(t[0])
def boolean(self, t):
return bool(t[0])
def list(self, t):
return list(t)
def name(self, t):
return str(t[0])
def quoted(self, t):
return t[0][1:-1]
def tquoted(self, t):
return cleandoc(t[0][3:-3])
def range(self, t):
ivl = Interval(int(t[0]), int(t[1])) if len(t)==2 else Interval(int(t[0]))
return ("range", ivl)
| 31.471074
| 99
| 0.541754
| 3,212
| 0.843487
| 0
| 0
| 0
| 0
| 0
| 0
| 210
| 0.055147
|
02b58fbea2e6f02fd5c603a709c877e0fd2cae0b
| 567
|
py
|
Python
|
Ex25.py
|
CarlosDouradoPGR/PythonBR-EstruturasDecs
|
727ab33f44c48e2d7026ea85d54791d1885c0bdc
|
[
"MIT"
] | null | null | null |
Ex25.py
|
CarlosDouradoPGR/PythonBR-EstruturasDecs
|
727ab33f44c48e2d7026ea85d54791d1885c0bdc
|
[
"MIT"
] | null | null | null |
Ex25.py
|
CarlosDouradoPGR/PythonBR-EstruturasDecs
|
727ab33f44c48e2d7026ea85d54791d1885c0bdc
|
[
"MIT"
] | null | null | null |
print('Interrogating a suspect: ')
pg1 = str(input("Did you phone the victim? (Y/N)\n").upper().strip())
pg2 = str(input('Were you at the crime scene? (Y/N)\n').upper().strip())
pg3 = str(input('Do you live near the victim? (Y/N)\n').upper().strip())
pg4 = str(input('Did you owe the victim money? (Y/N)\n').upper().strip())
pg5 = str(input('Have you ever worked with the victim? (Y/N)\n').upper().strip())
lst = [pg1, pg2, pg3, pg4, pg5].count('Y')
if 3 > lst >= 2:
    print('Suspect')
elif lst == 3 or lst == 4:
    print('Accomplice')
elif lst == 5:
    print('Murderer')
else:
    print('Innocent')
| 31.5
| 68
| 0.611993
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 234
| 0.408377
|
02b6186616262617c89a1e33d1bf620cc853ca2d
| 334
|
py
|
Python
|
28. Implement strStr()/solution.py
|
alexwhyy/leetcode
|
41664aa48137677d2f98817b9c512d76f13c525f
|
[
"MIT"
] | null | null | null |
28. Implement strStr()/solution.py
|
alexwhyy/leetcode
|
41664aa48137677d2f98817b9c512d76f13c525f
|
[
"MIT"
] | null | null | null |
28. Implement strStr()/solution.py
|
alexwhyy/leetcode
|
41664aa48137677d2f98817b9c512d76f13c525f
|
[
"MIT"
] | null | null | null |
class Solution:
def strStr(self, haystack: str, needle: str) -> int:
if needle == "":
return 0
for i in range(0, len(haystack) - len(needle) + 1):
if haystack[i : i + len(needle)] == needle:
return i
return -1
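
# A quick sanity check (inputs chosen for illustration):
if __name__ == '__main__':
    print(Solution().strStr('hello', 'll'))  # expected: 2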
| 37.111111
| 59
| 0.502994
| 334
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0.005988
|
02b6a5972aef51ad1a07e6ff7ba0827ae6cad8a4
| 2,235
|
py
|
Python
|
t/text_test.py
|
gsnedders/Template-Python
|
4081e4d820c1be0c0448a8dcb79e0703066da099
|
[
"Artistic-2.0"
] | null | null | null |
t/text_test.py
|
gsnedders/Template-Python
|
4081e4d820c1be0c0448a8dcb79e0703066da099
|
[
"Artistic-2.0"
] | 6
|
2015-10-13T13:46:10.000Z
|
2019-06-17T09:39:57.000Z
|
t/text_test.py
|
gsnedders/Template-Python
|
4081e4d820c1be0c0448a8dcb79e0703066da099
|
[
"Artistic-2.0"
] | 3
|
2018-12-03T13:15:21.000Z
|
2019-03-13T09:12:09.000Z
|
from template import Template
from template.test import TestCase, main
class Stringy:
def __init__(self, text):
self.text = text
def asString(self):
return self.text
__str__ = asString
class TextTest(TestCase):
def testText(self):
tt = (("basic", Template()),
("interp", Template({ "INTERPOLATE": 1 })))
vars = self._callsign()
v2 = { "ref": lambda obj: "%s[%s]" % (obj, obj.__class__.__name__),
"sfoo": Stringy("foo"),
"sbar": Stringy("bar") }
vars.update(v2)
self.Expect(DATA, tt, vars)
DATA = r"""
-- test --
This is a text block "hello" 'hello' 1/3 1\4 <html> </html>
$ @ { } @{ } ${ } # ~ ' ! % *foo
$a ${b} $c
-- expect --
This is a text block "hello" 'hello' 1/3 1\4 <html> </html>
$ @ { } @{ } ${ } # ~ ' ! % *foo
$a ${b} $c
-- test --
<table width=50%>©
-- expect --
<table width=50%>©
-- test --
[% foo = 'Hello World' -%]
start
[%
#
# [% foo %]
#
#
-%]
end
-- expect --
start
end
-- test --
pre
[%
# [% PROCESS foo %]
-%]
mid
[% BLOCK foo; "This is foo"; END %]
-- expect --
pre
mid
-- test --
-- use interp --
This is a text block "hello" 'hello' 1/3 1\4 <html> </html>
\$ @ { } @{ } \${ } # ~ ' ! % *foo
$a ${b} $c
-- expect --
This is a text block "hello" 'hello' 1/3 1\4 <html> </html>
$ @ { } @{ } ${ } # ~ ' ! % *foo
alpha bravo charlie
-- test --
<table width=50%>©
-- expect --
<table width=50%>©
-- test --
[% foo = 'Hello World' -%]
start
[%
#
# [% foo %]
#
#
-%]
end
-- expect --
start
end
-- test --
pre
[%
#
# [% PROCESS foo %]
#
-%]
mid
[% BLOCK foo; "This is foo"; END %]
-- expect --
pre
mid
-- test --
[% a = "C'est un test"; a %]
-- expect --
C'est un test
-- test --
[% META title = "C'est un test" -%]
[% component.title -%]
-- expect --
C'est un test
-- test --
[% META title = 'C\'est un autre test' -%]
[% component.title -%]
-- expect --
C'est un autre test
-- test --
[% META title = "C'est un \"test\"" -%]
[% component.title -%]
-- expect --
C'est un "test"
-- test --
[% sfoo %]/[% sbar %]
-- expect --
foo/bar
-- test --
[% s1 = "$sfoo"
s2 = "$sbar ";
s3 = sfoo;
ref(s1);
'/';
ref(s2);
'/';
ref(s3);
-%]
-- expect --
foo[str]/bar [str]/foo[Stringy]
"""
| 14.607843
| 71
| 0.503803
| 486
| 0.21745
| 0
| 0
| 0
| 0
| 0
| 0
| 1,723
| 0.770917
|
02b974ddbd9b73968df839c1e4fdda0cbb8567db
| 761
|
py
|
Python
|
research/destroyer.py
|
carrino/FrisPy
|
db9e59f465ee25d1c037d580c37da8f35b930b50
|
[
"MIT"
] | null | null | null |
research/destroyer.py
|
carrino/FrisPy
|
db9e59f465ee25d1c037d580c37da8f35b930b50
|
[
"MIT"
] | null | null | null |
research/destroyer.py
|
carrino/FrisPy
|
db9e59f465ee25d1c037d580c37da8f35b930b50
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 John Carrino
import math
from pprint import pprint
import matplotlib.pyplot as plt
from frispy import Disc
from frispy import Discs
model = Discs.destroyer
mph_to_mps = 0.44704
v = 70 * mph_to_mps
rot = -v / model.diameter * 1.2
x0 = [6, -3, 25]
a, nose_up, hyzer = x0
disc = Disc(model, {"vx": math.cos(a * math.pi / 180) * v, "dgamma": rot, "vz": math.sin(a * math.pi / 180) * v,
"nose_up": nose_up, "hyzer": hyzer, "gamma": -2})
result = disc.compute_trajectory(20.0, **{"max_step": .2})
times = result.times
t, x, y, z = result.times, result.x, result.y, result.z
plt.plot(x, result.theta)
plt.plot(x, y)
plt.plot(x, z)
#plt.plot(t, x)
#plt.plot(t, y)
#plt.plot(t, z)
pprint(x[-1] * 3.28084) # feet
plt.show()
| 22.382353
| 112
| 0.638633
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 134
| 0.176084
|
02b9d9f46387318db8e53a0fa38e85fa6e870ae5
| 6,200
|
py
|
Python
|
litex/soc/cores/clock/xilinx_common.py
|
suarezvictor/litex
|
d37ef60e70bd0ce8d28079143b6859d8b928395e
|
[
"ADSL"
] | 1
|
2021-12-25T13:49:55.000Z
|
2021-12-25T13:49:55.000Z
|
litex/soc/cores/clock/xilinx_common.py
|
suarezvictor/litex
|
d37ef60e70bd0ce8d28079143b6859d8b928395e
|
[
"ADSL"
] | 1
|
2022-02-11T14:39:50.000Z
|
2022-02-11T22:25:57.000Z
|
litex/soc/cores/clock/xilinx_common.py
|
suarezvictor/litex
|
d37ef60e70bd0ce8d28079143b6859d8b928395e
|
[
"ADSL"
] | 1
|
2021-12-25T13:49:57.000Z
|
2021-12-25T13:49:57.000Z
|
#
# This file is part of LiteX.
#
# Copyright (c) 2018-2020 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex.build.io import DifferentialInput
from litex.soc.interconnect.csr import *
from litex.soc.cores.clock.common import *
# Xilinx / Generic ---------------------------------------------------------------------------------
class XilinxClocking(Module, AutoCSR):
clkfbout_mult_frange = (2, 64+1)
clkout_divide_range = (1, 128+1)
def __init__(self, vco_margin=0):
self.vco_margin = vco_margin
self.reset = Signal()
self.power_down = Signal()
self.locked = Signal()
self.clkin_freq = None
self.vcxo_freq = None
self.nclkouts = 0
self.clkouts = {}
self.config = {}
self.params = {}
def register_clkin(self, clkin, freq):
self.clkin = Signal()
if isinstance(clkin, (Signal, ClockSignal)):
self.comb += self.clkin.eq(clkin)
elif isinstance(clkin, Record):
self.specials += DifferentialInput(clkin.p, clkin.n, self.clkin)
else:
raise ValueError
self.clkin_freq = freq
register_clkin_log(self.logger, clkin, freq)
def create_clkout(self, cd, freq, phase=0, buf="bufg", margin=1e-2, with_reset=True, ce=None):
assert self.nclkouts < self.nclkouts_max
clkout = Signal()
self.clkouts[self.nclkouts] = (clkout, freq, phase, margin)
if with_reset:
self.specials += AsyncResetSynchronizer(cd, ~self.locked)
if buf is None:
self.comb += cd.clk.eq(clkout)
else:
clkout_buf = Signal()
self.comb += cd.clk.eq(clkout_buf)
if buf == "bufg":
self.specials += Instance("BUFG", i_I=clkout, o_O=clkout_buf)
elif buf == "bufr":
self.specials += Instance("BUFR", i_I=clkout, o_O=clkout_buf)
elif buf == "bufgce":
if ce is None:
raise ValueError("BUFGCE requires user to provide a clock enable ce Signal")
self.specials += Instance("BUFGCE", i_I=clkout, o_O=clkout_buf, i_CE=ce)
elif buf == "bufio":
self.specials += Instance("BUFIO", i_I=clkout, o_O=clkout_buf)
else:
raise ValueError("Unsupported clock buffer: {}".format(buf))
create_clkout_log(self.logger, cd.name, freq, margin, self.nclkouts)
self.nclkouts += 1
def compute_config(self):
config = {}
for divclk_divide in range(*self.divclk_divide_range):
config["divclk_divide"] = divclk_divide
for clkfbout_mult in reversed(range(*self.clkfbout_mult_frange)):
all_valid = True
vco_freq = self.clkin_freq*clkfbout_mult/divclk_divide
(vco_freq_min, vco_freq_max) = self.vco_freq_range
if (vco_freq >= vco_freq_min*(1 + self.vco_margin) and
vco_freq <= vco_freq_max*(1 - self.vco_margin)):
for n, (clk, f, p, m) in sorted(self.clkouts.items()):
valid = False
d_ranges = [self.clkout_divide_range]
if getattr(self, "clkout{}_divide_range".format(n), None) is not None:
d_ranges += [getattr(self, "clkout{}_divide_range".format(n))]
for d_range in d_ranges:
for d in clkdiv_range(*d_range):
clk_freq = vco_freq/d
if abs(clk_freq - f) <= f*m:
config["clkout{}_freq".format(n)] = clk_freq
config["clkout{}_divide".format(n)] = d
config["clkout{}_phase".format(n)] = p
valid = True
break
if valid:
break
if not valid:
all_valid = False
else:
all_valid = False
if all_valid:
config["vco"] = vco_freq
config["clkfbout_mult"] = clkfbout_mult
compute_config_log(self.logger, config)
return config
raise ValueError("No PLL config found")
def expose_drp(self):
self.drp_reset = CSR()
self.drp_locked = CSRStatus()
self.drp_read = CSR()
self.drp_write = CSR()
self.drp_drdy = CSRStatus()
self.drp_adr = CSRStorage(7, reset_less=True)
self.drp_dat_w = CSRStorage(16, reset_less=True)
self.drp_dat_r = CSRStatus(16)
# # #
den_pipe = Signal()
dwe_pipe = Signal()
drp_drdy = Signal()
self.params.update(
i_DCLK = ClockSignal(),
i_DWE = dwe_pipe,
i_DEN = den_pipe,
o_DRDY = drp_drdy,
i_DADDR = self.drp_adr.storage,
i_DI = self.drp_dat_w.storage,
o_DO = self.drp_dat_r.status
)
self.sync += [
den_pipe.eq(self.drp_read.re | self.drp_write.re),
dwe_pipe.eq(self.drp_write.re),
If(self.drp_read.re | self.drp_write.re,
self.drp_drdy.status.eq(0)
).Elif(drp_drdy,
self.drp_drdy.status.eq(1)
)
]
self.comb += self.drp_locked.status.eq(self.locked)
self.logger.info("Exposing DRP interface.")
def add_reset_delay(self, cycles):
for i in range(cycles):
reset = Signal()
self.specials += Instance("FDCE", i_C=self.clkin, i_CE=1, i_CLR=0, i_D=self.reset, o_Q=reset)
self.reset = reset
def do_finalize(self):
assert hasattr(self, "clkin")
self.add_reset_delay(cycles=8) # Prevents interlock when reset driven from sys_clk.
| 40.25974
| 105
| 0.528226
| 5,740
| 0.925806
| 0
| 0
| 0
| 0
| 0
| 0
| 633
| 0.102097
|
02ba8cd0ea54e5520bfcd504cff7483bc433ed10
| 3,044
|
py
|
Python
|
my_des/my_des.py
|
ipid/my-des
|
13340481c03113a23263ef824c119b3374028fe2
|
[
"MIT"
] | null | null | null |
my_des/my_des.py
|
ipid/my-des
|
13340481c03113a23263ef824c119b3374028fe2
|
[
"MIT"
] | null | null | null |
my_des/my_des.py
|
ipid/my-des
|
13340481c03113a23263ef824c119b3374028fe2
|
[
"MIT"
] | null | null | null |
__all__ = (
'des_encrypt',
'des_decrypt',
)
from typing import List, Any
from ._tools import *
from ._constant import *
from rich import print
from rich.text import Text
from rich.panel import Panel
def keygen(key: bytes) -> List[List[int]]:
res = []
key = bytes_to_binlist(key)
key = permute_with(key, table=PC_1)
print(f'[red b]Key after PC-1[/]: {binlist_to_str(key, 7)}\n')
c, d = split_half(key)
for i in range(16):
print(f'[white b u]Round {i}[/]')
lshift(c, ROTATION[i])
lshift(d, ROTATION[i])
print(f'[red b]Rotated key[/]: {binlist_to_str(c, 7)} | {binlist_to_str(d, 7)}')
key = permute_with(c + d, table=PC_2)
print(f'[red b]Key after PC-2[/]: {binlist_to_str(key, 6)}')
res.append(key)
print('')
return res
def f_func(x: List[int], *, key: List[int]) -> List[int]:
x = permute_with(x, table=E)
    print(f'[red b]r[/] (Permuted): {binlist_to_str(x)}')
x = binlist_xor(x, key)
print(f'[red b]r xor key[/]: {binlist_to_str(x)}')
res = []
for i, binlist in enumerate(split_every(x, 6)):
num = binlist_to_num(binlist)
res += num_to_binlist(S[i][num], length=4)
return permute_with(res, table=P)
def des_encrypt_core(x: List[int], keys: List[List[int]]) -> List[int]:
print('')
print(Panel(Text('Stage 2. Initial Permutation', 'green bold', justify='center')))
print(f'Plaintext = {binlist_to_str(x)}')
x = permute_with(x, table=IP)
print(Text('↓ After IP: ↓', justify='center'))
print(f'Plaintext = {binlist_to_str(x)}\n')
l, r = split_half(x)
print(Panel(Text('Stage 3. Feistel structure', 'green bold', justify='center')))
for i in range(16):
print(f'[white b u]Round {i}[/]')
print(f'[red b]l[/] = {binlist_to_str(l)}')
print(f'[red b]r[/] = {binlist_to_str(r)}')
r_new = binlist_xor(l, f_func(r, key=keys[i]))
l_new = r
l, r = l_new, r_new
print(f'[red b]Encrypted:[/] {binlist_to_str(l)} {binlist_to_str(r)}\n')
print(Panel(Text('Stage 4. Swap and Reverse IP', 'green bold', justify='center')))
l, r = r, l
    print(f'[red b]Swapped ciphertext[/]: {binlist_to_str(l)} {binlist_to_str(r)}')
after_fp = permute_with(l + r, table=FP)
print(f'[red b]After FP[/]: {binlist_to_str(after_fp)}\n')
return after_fp
def des_encrypt(x: bytes, key: bytes) -> List[int]:
x = bytes_to_binlist(x)
print(f'[red b]Plaintext:[/] {binlist_to_str(x)}')
print(f'[red b]Key:[/] {binlist_to_str(bytes_to_binlist(key))}\n')
print('')
print(Panel(Text('Stage 1. Generate keys', 'green bold', justify='center')))
keys = keygen(key)
ciphertext = des_encrypt_core(x, keys)
print('[white]Finally we got our ciphertext:[/]')
print(binlist_to_str(ciphertext))
return ciphertext
def des_decrypt(x: bytes, key: bytes) -> List[int]:
x, keys = bytes_to_binlist(x), keygen(key)
keys = [*reversed(keys)]
return des_encrypt_core(x, keys)
| 30.138614
| 88
| 0.610053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,048
| 0.343832
|
02baf30e1ca8e8d64e8718657b516c4805fddd84
| 1,080
|
py
|
Python
|
Chromebook/setup.py
|
mahtuag/DistroSetup
|
8fb2b7351ea12163602a9a4c5a7b63fc87f326e2
|
[
"Apache-2.0"
] | 3
|
2020-01-12T11:21:47.000Z
|
2021-09-16T06:43:22.000Z
|
Chromebook/setup.py
|
mahtuag/DistroSetup
|
8fb2b7351ea12163602a9a4c5a7b63fc87f326e2
|
[
"Apache-2.0"
] | 2
|
2020-07-01T20:46:31.000Z
|
2020-07-01T21:10:09.000Z
|
Chromebook/setup.py
|
wingedrhino/DistroSetup
|
65edfda7dbded113bf5f3e6f53b331fc8aeaf1c5
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import subprocess
import apt
import sys
cache = apt.cache.Cache()
cache.update()
cache.open()
packages = [
'git',
'curl',
'wget',
'software-properties-common',
'build-essential',
'automake',
'libtool',
'autoconf',
'pkg-config',
'udev',
'fuse',
'snap',
'snapd',
'zsh',
'byobu',
'python3',
'libsquashfuse0',
    'squashfuse',
    'vim',
    'atop',
'htop',
'iotop',
'nethogs',
'aptitude',
'udisks2',
'parted',
'gparted',
'udisks2-lvm2',
'udisks2-vdo',
'udisks2-zram',
'udisks2-btrfs',
'udisks2-doc',
'default-jdk',
'leiningen',
'clojure',
]
for pkg_name in packages:
pkg = cache[pkg_name]
if pkg.is_installed:
print(f'{pkg_name} is already installed.')
else:
print(f'{pkg_name} will be marked for installation.')
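
# Note: the loop above only reports status. A hedged sketch of actually installing
# (requires root privileges) would call pkg.mark_install() inside the else branch
# and then cache.commit() once after the loop.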
| 18.305085
| 62
| 0.466667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 465
| 0.430556
|
02bb2ad5f8635de13653c1ed22f4978ec39fcfc6
| 377
|
py
|
Python
|
performance_test.py
|
alan-augustine/python_singly_linkedlist
|
f227a4154b22de8a273d319ecdd6329035d5d258
|
[
"MIT"
] | null | null | null |
performance_test.py
|
alan-augustine/python_singly_linkedlist
|
f227a4154b22de8a273d319ecdd6329035d5d258
|
[
"MIT"
] | null | null | null |
performance_test.py
|
alan-augustine/python_singly_linkedlist
|
f227a4154b22de8a273d319ecdd6329035d5d258
|
[
"MIT"
] | null | null | null |
from time import time
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))
from singly_linkedlist.singly_linkedlist import SinglyLinkedList
start = time()
linked_list = SinglyLinkedList()
for i in range(100000):
linked_list.insert_head(111111111111)
end = time()
print("Took {0} seconds".format(start-end))
# linked_list.print_elements()
| 23.5625
| 64
| 0.774536
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 53
| 0.140584
|
02bdc143ddedce60ffc59109acab68856a9f8737
| 561
|
py
|
Python
|
python/leetcode/0001.py
|
bluewaitor/playground
|
330266ce28212dc5e32b0276c896f9ceffd35bf5
|
[
"MIT"
] | null | null | null |
python/leetcode/0001.py
|
bluewaitor/playground
|
330266ce28212dc5e32b0276c896f9ceffd35bf5
|
[
"MIT"
] | null | null | null |
python/leetcode/0001.py
|
bluewaitor/playground
|
330266ce28212dc5e32b0276c896f9ceffd35bf5
|
[
"MIT"
] | null | null | null |
# 1. 两数之和
from typing import List, Optional
class Solution:
def twoSum(self, nums: List[int], target: int) -> Optional[List[int]]:
index_map = {}
for index, value in enumerate(nums):
index_map[value] = index
for index, value in enumerate(nums):
remain = target - value
if remain in index_map and index_map[remain] != index:
return [index, index_map[remain]]
return None
if __name__ == '__main__':
solution = Solution()
print(solution.twoSum([2, 7, 11, 15], 9))
| 28.05
| 74
| 0.593583
| 413
| 0.725835
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 0.047452
|
02bea4753652cd78237dd184ed6e67ea923d42ea
| 454
|
py
|
Python
|
dataprocess/print_msg.py
|
lifelong-robotic-vision/openloris-scene-tools
|
ce6a4839f618bf036d3f3dbae14561bfc7413641
|
[
"MIT"
] | 13
|
2021-03-27T15:49:21.000Z
|
2022-03-19T13:26:30.000Z
|
dataprocess/print_msg.py
|
lifelong-robotic-vision/openloris-scene-tools
|
ce6a4839f618bf036d3f3dbae14561bfc7413641
|
[
"MIT"
] | 4
|
2021-03-30T10:40:43.000Z
|
2022-03-28T01:36:57.000Z
|
dataprocess/print_msg.py
|
lifelong-robotic-vision/openloris-scene-tools
|
ce6a4839f618bf036d3f3dbae14561bfc7413641
|
[
"MIT"
] | 1
|
2022-02-16T13:42:32.000Z
|
2022-02-16T13:42:32.000Z
|
#!/usr/bin/env python2
import rosbag
import sys
filename = sys.argv[1]
topics = sys.argv[2:]
with rosbag.Bag(filename) as bag:
for topic, msg, t in bag.read_messages(topics):
print('%s @%.7f ----------------------------' % (topic, t.to_sec()))
print(msg)
print('Press ENTER to continue')
while True:
try:
raw_input()
break
except EOFError:
pass
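
# Example invocation (the bag file and topic below are placeholders):
#   ./print_msg.py recording.bag /camera/image_raw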
| 25.222222
| 76
| 0.497797
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 86
| 0.189427
|
02beda4568a4663c141bf81401d0595971779e3a
| 1,011
|
py
|
Python
|
alegra/resources/invoice.py
|
okchaty/alegra
|
6c423b23a24650c9121da5f165f6f03669b98468
|
[
"MIT"
] | 1
|
2022-03-31T03:44:50.000Z
|
2022-03-31T03:44:50.000Z
|
alegra/resources/invoice.py
|
okchaty/alegra
|
6c423b23a24650c9121da5f165f6f03669b98468
|
[
"MIT"
] | 4
|
2020-03-24T17:54:03.000Z
|
2021-06-02T00:48:50.000Z
|
alegra/resources/invoice.py
|
okchaty/alegra
|
6c423b23a24650c9121da5f165f6f03669b98468
|
[
"MIT"
] | null | null | null |
from alegra.api_requestor import APIRequestor
from alegra.resources.abstract import CreateableAPIResource
from alegra.resources.abstract import EmailableAPIResource
from alegra.resources.abstract import ListableAPIResource
from alegra.resources.abstract import UpdateableAPIResource
from alegra.resources.abstract import VoidableAPIResource
class Invoice(
CreateableAPIResource,
EmailableAPIResource,
ListableAPIResource,
UpdateableAPIResource,
VoidableAPIResource,
):
OBJECT_NAME = "invoices"
@classmethod
def open(cls, resource_id, user=None, token=None, api_base=None,
api_version=None, **json):
requestor = APIRequestor(
user=user,
token=token,
api_base=api_base,
api_version=api_version,
)
url = cls.class_url() + str(resource_id) + "/open/"
response = requestor.request(
method="post",
url=url,
json=json,
)
return response
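
# A hedged usage sketch (credentials and the invoice id are placeholders):
#   response = Invoice.open(123, user='me@example.com', token='API_TOKEN')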
| 29.735294
| 68
| 0.681503
| 666
| 0.658754
| 0
| 0
| 484
| 0.478734
| 0
| 0
| 24
| 0.023739
|
02bf002db0ba833a4cef03b49b9c37dba336934d
| 244
|
py
|
Python
|
dask_ml/model_selection.py
|
lesteve/dask-ml
|
0aca19c545be5c27bedcfbab5554b4ba39a6d754
|
[
"BSD-3-Clause"
] | 1
|
2020-12-01T13:20:05.000Z
|
2020-12-01T13:20:05.000Z
|
dask_ml/model_selection.py
|
lesteve/dask-ml
|
0aca19c545be5c27bedcfbab5554b4ba39a6d754
|
[
"BSD-3-Clause"
] | null | null | null |
dask_ml/model_selection.py
|
lesteve/dask-ml
|
0aca19c545be5c27bedcfbab5554b4ba39a6d754
|
[
"BSD-3-Clause"
] | null | null | null |
"""Utilities for hyperparameter optimization.
These estimators will operate in parallel. Their scalability depends
on the underlying estimators being used.
"""
from dask_searchcv.model_selection import GridSearchCV, RandomizedSearchCV # noqa
| 34.857143
| 82
| 0.831967
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 166
| 0.680328
|
02c06f0c429f92d5e5a68c4d5f561cf2b85e43c8
| 23
|
py
|
Python
|
code/sample_2-1-9.py
|
KoyanagiHitoshi/AtCoder-Python-Introduction
|
6d014e333a873f545b4d32d438e57cf428b10b96
|
[
"MIT"
] | 1
|
2022-03-29T13:50:12.000Z
|
2022-03-29T13:50:12.000Z
|
code/sample_2-1-9.py
|
KoyanagiHitoshi/AtCoder-Python-Introduction
|
6d014e333a873f545b4d32d438e57cf428b10b96
|
[
"MIT"
] | null | null | null |
code/sample_2-1-9.py
|
KoyanagiHitoshi/AtCoder-Python-Introduction
|
6d014e333a873f545b4d32d438e57cf428b10b96
|
[
"MIT"
] | null | null | null |
x = 5
y = 6
print(x*y)
| 5.75
| 10
| 0.478261
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
02c10165e05312844fa4ea1cd0be76da5bd780bb
| 2,521
|
py
|
Python
|
whatrecord/tests/test_stcmd.py
|
ZLLentz/whatrecord
|
9f15da79e3063a64dbe6bb9678dbf52ebad46680
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2021-09-15T20:32:04.000Z
|
2021-12-17T16:46:10.000Z
|
whatrecord/tests/test_stcmd.py
|
ZLLentz/whatrecord
|
9f15da79e3063a64dbe6bb9678dbf52ebad46680
|
[
"BSD-3-Clause-LBNL"
] | 92
|
2021-04-02T16:42:24.000Z
|
2022-03-31T22:24:52.000Z
|
whatrecord/tests/test_stcmd.py
|
ZLLentz/whatrecord
|
9f15da79e3063a64dbe6bb9678dbf52ebad46680
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2021-04-01T20:48:42.000Z
|
2021-09-08T18:51:34.000Z
|
import pytest
from ..iocsh import IocshRedirect, IocshSplit, split_words
@pytest.mark.parametrize(
"line, expected",
[
pytest.param(
"""dbLoadRecords(a, "b", "c")""",
["dbLoadRecords", "a", "b", "c"],
id="basic_paren"
),
pytest.param(
"""dbLoadRecords a, "b", "c\"""",
["dbLoadRecords", "a", "b", "c"],
id="basic_no_paren"
),
pytest.param(
"""< input_file""",
IocshSplit(
[],
redirects={
0: IocshRedirect(fileno=0, name="input_file", mode="r"),
},
error=None,
),
id="basic_input_redirect",
),
pytest.param(
"""> output_file""",
IocshSplit(
[],
redirects={
1: IocshRedirect(fileno=1, name="output_file", mode="w"),
},
error=None,
),
id="basic_output_redirect",
),
pytest.param(
"""< input_file > output_file""",
IocshSplit(
[],
redirects={
0: IocshRedirect(fileno=0, name="input_file", mode="r"),
1: IocshRedirect(fileno=1, name="output_file", mode="w"),
},
error=None,
),
id="input_output_redirect",
),
pytest.param(
"""2> output_file""",
IocshSplit(
[],
redirects={
2: IocshRedirect(fileno=2, name="output_file", mode="w"),
},
error=None,
),
id="output_fd_num",
),
pytest.param(
"""test > stdout 2> stderr 3> whoknows""",
IocshSplit(
["test"],
redirects={
1: IocshRedirect(fileno=1, name="stdout", mode="w"),
2: IocshRedirect(fileno=2, name="stderr", mode="w"),
3: IocshRedirect(fileno=3, name="whoknows", mode="w"),
},
error=None,
),
id="output_fd_num_more",
),
]
)
def test_split_words(line, expected):
if isinstance(expected, list):
expected = IocshSplit(
argv=expected,
redirects={},
error=None,
)
assert split_words(line) == expected
| 28.977011
| 77
| 0.410948
| 0
| 0
| 0
| 0
| 2,444
| 0.969457
| 0
| 0
| 511
| 0.202697
|
02c18f6d2d3ebb8100e01a783419de97602121b6
| 1,723
|
py
|
Python
|
code/generateElevationFile.py
|
etcluvic/sme.altm
|
ffdb51d380a6b8cd8073d5ef3bd6fd15fa0779ea
|
[
"CC-BY-4.0"
] | null | null | null |
code/generateElevationFile.py
|
etcluvic/sme.altm
|
ffdb51d380a6b8cd8073d5ef3bd6fd15fa0779ea
|
[
"CC-BY-4.0"
] | null | null | null |
code/generateElevationFile.py
|
etcluvic/sme.altm
|
ffdb51d380a6b8cd8073d5ef3bd6fd15fa0779ea
|
[
"CC-BY-4.0"
] | null | null | null |
from bs4 import BeautifulSoup
from datetime import datetime
from lxml import etree
import time
import codecs
import pickle
import os
def printSeparator(character, times):
print(character * times)
if __name__ == '__main__':
doiPrefix = '10.7202' #erudit's doi prefix
myTime = datetime.now().strftime('%Y-%m-%d_%H-%M-%S-%f')
referencedDocs = '/mnt/smeCode/altm/code/out/' + '2017-10-13_22-44-03-672976' + '.xml'
pickleFile = '/mnt/smeCode/parseMe2/code/pickles/keywords.p'
outputPath = '/mnt/smeCode/altm/code/elevation.files/'
outputFile = 'test.xml'
printSeparator('*',50)
print('loading pickle...')
keywords = pickle.load( open( pickleFile, "rb" ) )
print('pickle loaded!')
printSeparator('*',50)
#elevation file
rootElement = etree.Element("elevate")
f = codecs.open(referencedDocs,'r','utf-8')
markup = f.read()
f.close()
soup = BeautifulSoup(markup, "lxml-xml")
documents = soup.find_all('doi')
for d in documents:
doi = d.get_text().split('/')[1]
print(doi)
#print(d.get_text())
if doi in keywords.keys():
print(keywords[doi])
queryElement = etree.SubElement(rootElement, "query")
queryElement.set("text", ' '. join(list(keywords[doi]['terms'])))
docElement = etree.SubElement(queryElement, "doc")
docElement.set("id", doi)
printSeparator('*',50)
printSeparator('*', 50)
    print('Elevation - Saving xml file...')
xmlString = etree.tostring(rootElement, pretty_print=True, encoding='UTF-8')
fh = codecs.open(os.path.join(outputPath, myTime + '.xml'),'w', encoding='utf-8' )
fh.write(xmlString.decode('utf-8'))
fh.close()
    print('done')
printSeparator('*', 50)
print(xmlString)
print('bye')
| 22.671053
| 89
| 0.663958
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 453
| 0.262914
|
02c1ec959e5357766a542721519334ad6dee8666
| 49,071
|
py
|
Python
|
MilightWifiBridge/MilightWifiBridge.py
|
K-Stefan/Milight-Wifi-Bridge-3.0-Python-Library
|
bcaf1e3a67ed56d9cedc3370d4b4d688f5d4b4fb
|
[
"MIT"
] | null | null | null |
MilightWifiBridge/MilightWifiBridge.py
|
K-Stefan/Milight-Wifi-Bridge-3.0-Python-Library
|
bcaf1e3a67ed56d9cedc3370d4b4d688f5d4b4fb
|
[
"MIT"
] | null | null | null |
MilightWifiBridge/MilightWifiBridge.py
|
K-Stefan/Milight-Wifi-Bridge-3.0-Python-Library
|
bcaf1e3a67ed56d9cedc3370d4b4d688f5d4b4fb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Milight 3.0 (LimitlessLED Wifi Bridge v6.0) library: Control wireless lights (Milight 3.0) with Wifi
Note that this library was tested with Milight Wifi iBox v1 and RGBW lights. It should work with any other
lights and bridge using Milight 3.0 / LimitlessLED v6.0 protocol.
Non-exhaustive functionality using the python class or using this file from shell
(launch this python file with '-h' parameter to get more information):
- Initialize the Wifi bridge
- Link/Unlink lights
- Light on/off
- Wifi bridge lamp on/off
- Set night mode
- Set white mode
- Set color (using Milight format)
- Set saturation
- Set brightness
- Set disco mode (9 available)
- Increase/Decrease disco mode speed
- Get Milight wifi bridge MAC address
- ...
Used protocol: http://www.limitlessled.com/dev/ (LimitlessLED Wifi Bridge v6.0 section)
"""
__author__ = 'Quentin Comte-Gaz'
__email__ = "quentin@comte-gaz.com"
__license__ = "MIT License"
__copyright__ = "Copyright Quentin Comte-Gaz (2019)"
__python_version__ = "2.7+ and 3.+"
__version__ = "2.1 (2019/11/09)"
__status__ = "Usable for any project"
import socket
import collections
import sys, getopt
import logging
import binascii
class MilightWifiBridge:
"""Milight 3.0 Wifi Bridge class
Calling setup() function is necessary in order to make this class work properly.
"""
######################### Enums #########################
class eZone:
ALL = 0
ONE = 1
TWO = 2
THREE = 3
FOUR = 4
class eDiscoMode:
DISCO_1 = 1
DISCO_2 = 2
DISCO_3 = 3
DISCO_4 = 4
DISCO_5 = 5
DISCO_6 = 6
DISCO_7 = 7
DISCO_8 = 8
DISCO_9 = 9
class eTemperature:
WARM = 0 # 2700K
WARM_WHITE = 8 # 3000K
COOL_WHITE = 35 # 4000K
DAYLIGHT = 61 # 5000K
COOL_DAYLIGHT = 100 # 6500K
class eColor:
RED = 0xFF
LAVENDER = 0xD9
BLUE = 0xBA
AQUA = 0x85
GREEN = 0x7A
LIME = 0x54
YELLOW = 0x3B
ORANGE = 0x1E
######################### static variables/static functions/internal struct #########################
__START_SESSION_MSG = bytearray([0x20, 0x00, 0x00, 0x00, 0x16, 0x02, 0x62, 0x3A, 0xD5, 0xED, 0xA3, 0x01, 0xAE, 0x08,
0x2D, 0x46, 0x61, 0x41, 0xA7, 0xF6, 0xDC, 0xAF, 0xD3, 0xE6, 0x00, 0x00, 0x1E])
# Response sent by the milight wifi bridge after a start session query
# Keyword arguments:
# responseReceived -- (bool) Response valid
# mac -- (string) MAC address of the wifi bridge
# sessionId1 -- (int) First part of the session ID
# sessionId2 -- (int) Second part of the session ID
__START_SESSION_RESPONSE = collections.namedtuple("StartSessionResponse", "responseReceived mac sessionId1 sessionId2")
__ON_CMD = bytearray([0x31, 0x00, 0x00, 0x08, 0x04, 0x01, 0x00, 0x00, 0x00])
__OFF_CMD = bytearray([0x31, 0x00, 0x00, 0x08, 0x04, 0x02, 0x00, 0x00, 0x00])
__NIGHT_MODE_CMD = bytearray([0x31, 0x00, 0x00, 0x08, 0x04, 0x05, 0x00, 0x00, 0x00])
__WHITE_MODE_CMD = bytearray([0x31, 0x00, 0x00, 0x08, 0x05, 0x64, 0x00, 0x00, 0x00])
__DISCO_MODE_SPEED_UP_CMD = bytearray([0x31, 0x00, 0x00, 0x08, 0x04, 0x03, 0x00, 0x00, 0x00])
__DISCO_MODE_SLOW_DOWN_CMD = bytearray([0x31, 0x00, 0x00, 0x08, 0x04, 0x04, 0x00, 0x00, 0x00])
__LINK_CMD = bytearray([0x3D, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00])
__UNLINK_CMD = bytearray([0x3E, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00])
__WIFI_BRIDGE_LAMP_ON_CMD = bytearray([0x31, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00])
__WIFI_BRIDGE_LAMP_OFF_CMD = bytearray([0x31, 0x00, 0x00, 0x00, 0x03, 0x04, 0x00, 0x00, 0x00])
__WIFI_BRIDGE_LAMP_WHITE_MODE_CMD = bytearray([0x31, 0x00, 0x00, 0x00, 0x03, 0x05, 0x00, 0x00, 0x00])
__WIFI_BRIDGE_LAMP_DISCO_MODE_SPEED_UP_CMD = bytearray([0x31, 0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00])
__WIFI_BRIDGE_LAMP_DISCO_MODE_SLOW_DOWN_CMD = bytearray([0x31, 0x00, 0x00, 0x00, 0x03, 0x01, 0x00, 0x00, 0x00])
@staticmethod
def __getSetBridgeLampColorCmd(color):
"""Give 'Set color for bridge lamp' command
Keyword arguments:
color -- (int or eColor) Color value between 0x00 and 0xFF
examples: 0xFF = Red, 0xD9 = Lavender, 0xBA = Blue, 0x85 = Aqua,
0x7A = Green, 0x54 = Lime, 0x3B = Yellow, 0x1E = Orange
    return: (bytearray) 'Set color for bridge lamp' command
"""
color = int(color)
if color < 0:
color = 0
elif color > 0xFF:
color = 0xFF
color &= 0xFF
return bytearray([0x31, 0x00, 0x00, 0x00, 0x01, color, color, color, color])
@staticmethod
def __getSetColorCmd(color):
"""Give 'Set color' command
Keyword arguments:
color -- (int or eColor) Color value between 0x00 and 0xFF
examples: 0xFF = Red, 0xD9 = Lavender, 0xBA = Blue, 0x85 = Aqua,
0x7A = Green, 0x54 = Lime, 0x3B = Yellow, 0x1E = Orange
return: (bytearray) 'Set color' command
"""
color = int(color)
if color < 0:
color = 0
elif color > 0xFF:
color = 0xFF
color &= 0xFF
return bytearray([0x31, 0x00, 0x00, 0x08, 0x01, color, color, color, color])
@staticmethod
def __getSetDiscoModeForBridgeLampCmd(mode):
"""Give 'Set disco mode for bridge lamp' command
Keyword arguments:
mode -- (int) Disco mode between 1 and 9
return: (bytearray) 'Set disco mode for bridge lamp' command
"""
mode = int(mode)
if mode < 1:
mode = 1
elif mode > 9:
mode = 9
mode &= 0xFF
return bytearray([0x31, 0x00, 0x00, 0x00, 0x04, mode, 0x00, 0x00, 0x00])
@staticmethod
def __getSetDiscoModeCmd(mode):
"""Give 'Set disco mode' command
Keyword arguments:
mode -- (int) Disco mode between 1 and 9
return: (bytearray) 'Set disco mode' command
"""
mode = int(mode)
if mode < 1:
mode = 1
elif mode > 9:
mode = 9
mode &= 0xFF
return bytearray([0x31, 0x00, 0x00, 0x08, 0x06, mode, 0x00, 0x00, 0x00])
@staticmethod
def __getSetBrightnessForBridgeLampCmd(brightness):
"""Give 'Set brightness for bridge lamp' command
Keyword arguments:
brightness -- (int) Brightness percentage between 0 and 100
return: (bytearray) 'Set brightness for bridge lamp' command
"""
brightness = int(brightness)
if brightness < 0:
brightness = 0
elif brightness > 100:
brightness = 100
brightness &= 0xFF
return bytearray([0x31, 0x00, 0x00, 0x00, 0x02, brightness, 0x00, 0x00, 0x00])
@staticmethod
def __getSetBrightnessCmd(brightness):
"""Give 'Set brightness' command
Keyword arguments:
brightness -- (int) Brightness percentage between 0 and 100
return: (bytearray) 'Set brightness' command
"""
brightness = int(brightness)
if brightness < 0:
brightness = 0
elif brightness > 100:
brightness = 100
brightness &= 0xFF
return bytearray([0x31, 0x00, 0x00, 0x08, 0x03, brightness, 0x00, 0x00, 0x00])
@staticmethod
def __getSetSaturationCmd(saturation):
"""Give 'Set saturation' command
Keyword arguments:
saturation -- (int) Saturation percentage between 0 and 100
return: (bytearray) 'Set saturation' command
"""
saturation = int(saturation)
if saturation < 0:
saturation = 0
elif saturation > 100:
saturation = 100
saturation &= 0xFF
return bytearray([0x31, 0x00, 0x00, 0x08, 0x02, saturation, 0x00, 0x00, 0x00])
@staticmethod
def __getSetTemperatureCmd(temperature):
"""Give 'Set temperature' command
Keyword arguments:
temperature -- (int) Temperature percentage between 0 and 100
0% <=> Warm white (2700K)
100% <=> Cool white (6500K)
return: (bytearray) 'Set temperature' command
"""
temperature = int(temperature)
if temperature < 0:
temperature = 0
elif temperature > 100:
temperature = 100
temperature &= 0xFF
return bytearray([0x31, 0x00, 0x00, 0x08, 0x05, temperature, 0x00, 0x00, 0x00])
@staticmethod
def __calculateCheckSum(command, zoneId):
"""Calculate request checksum
Note: Request checksum is equal to SUM(all command bytes and of the zone number) & 0xFF
Keyword arguments:
command -- (bytearray) Command
zoneId -- (int) Zone ID
return: (int) Request checksum
"""
checkSum = 0
for byteCommand in command:
checkSum += byteCommand
checkSum += zoneId
return (checkSum & 0xFF)
@staticmethod
def __getStringFromUnicode(value):
try:
return ord(value)
except Exception:
return value
################################### INIT ####################################
def __init__(self):
"""Class must be initialized with setup()"""
self.close()
################################### SETUP ####################################
def close(self):
"""Close connection with Milight wifi bridge"""
self.__initialized = False
self.__sequence_number = 0
try:
self.__sock.shutdown(socket.SHUT_RDWR)
self.__sock.close()
logging.debug("Socket closed")
# If close before initialization, better handle attribute error
except AttributeError:
pass
def setup(self, ip, port=5987, timeout_sec=5.0):
"""Initialize the class (can be launched multiple time if setup changed or module crashed)
Keyword arguments:
ip -- (string) IP to communication with the Milight wifi bridge
port -- (int, optional) UDP port to communication with the Milight wifi bridge
timeout_sec -- (int, optional) Timeout in sec for Milight wifi bridge to answer commands
return: (bool) Milight wifi bridge initialized
"""
# Close potential previous Milight wifi bridge session
self.close()
# Create new milight wifi bridge session
try:
self.__sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
self.__ip = ip
self.__port = port
self.__sock.connect((self.__ip, self.__port))
self.__sock.settimeout(timeout_sec)
self.__initialized = True
logging.debug("UDP connection initialized with ip {} and port {}".format(str(ip), str(port)))
except (socket.error, socket.herror, socket.gaierror, socket.timeout) as err:
logging.error("Impossible to initialize the UDP connection with ip {} and port {}: {}".format(str(ip), str(port), str(err)))
return self.__initialized
######################### INTERNAL UTILITY FUNCTIONS #########################
def __startSession(self):
"""Send start session request and return start session information
return: (MilightWifiBridge.__START_SESSION_RESPONSE) Start session information containing response received,
mac address and session IDs
"""
# Send start session request
data_to_send = MilightWifiBridge.__START_SESSION_MSG
logging.debug("Sending frame '{}' to {}:{}".format(str(binascii.hexlify(data_to_send)),
str(self.__ip), str(self.__port)))
self.__sock.send(data_to_send)
response = MilightWifiBridge.__START_SESSION_RESPONSE(responseReceived=False, mac="", sessionId1=-1, sessionId2=-1)
try:
# Receive start session response
data = self.__sock.recvfrom(1024)[0]
if len(data) == 22:
# Parse valid start session response
response = MilightWifiBridge.__START_SESSION_RESPONSE(responseReceived=True,
mac=str("{}:{}:{}:{}:{}:{}".format(format(MilightWifiBridge.__getStringFromUnicode(data[7]), 'x'),
format(MilightWifiBridge.__getStringFromUnicode(data[8]), 'x'),
format(MilightWifiBridge.__getStringFromUnicode(data[9]), 'x'),
format(MilightWifiBridge.__getStringFromUnicode(data[10]), 'x'),
format(MilightWifiBridge.__getStringFromUnicode(data[11]), 'x'),
format(MilightWifiBridge.__getStringFromUnicode(data[12]), 'x'))),
sessionId1=int(MilightWifiBridge.__getStringFromUnicode(data[19])),
sessionId2=int(MilightWifiBridge.__getStringFromUnicode(data[20])))
logging.debug("Start session (mac address: {}, session ID 1: {}, session ID 2: {})"
.format(str(response.mac), str(response.sessionId1), str(response.sessionId2)))
else:
logging.warning("Invalid start session response size")
except socket.timeout:
logging.warning("Timed out for start session response")
return response
def __sendRequest(self, command, zoneId):
"""Send command to a specific zone and get response (ACK from the wifi bridge)
Keyword arguments:
command -- (bytearray) Command
zoneId -- (int) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = False
# Send request only if valid parameters
if len(bytearray(command)) == 9:
if int(zoneId) >= 0 and int(zoneId) <= 4:
startSessionResponse = self.__startSession()
if startSessionResponse.responseReceived:
# For each request, increment the sequence number (even if the session ID is regenerated)
# Sequence number must be between 0x01 and 0xFF
self.__sequence_number = (self.__sequence_number + 1) & 0xFF
if self.__sequence_number == 0:
self.__sequence_number = 1
# Prepare request frame to send
bytesToSend = bytearray([0x80, 0x00, 0x00, 0x00, 0x11, startSessionResponse.sessionId1,
startSessionResponse.sessionId2, 0x00, int(self.__sequence_number), 0x00])
bytesToSend += bytearray(command)
bytesToSend += bytearray([int(zoneId), 0x00])
bytesToSend += bytearray([int(MilightWifiBridge.__calculateCheckSum(bytearray(command), int(zoneId)))])
# Send request frame
logging.debug("Sending request with command '{}' with session ID 1 '{}', session ID 2 '{}' and sequence number '{}'"
.format(str(binascii.hexlify(command)), str(startSessionResponse.sessionId1),
str(startSessionResponse.sessionId2), str(self.__sequence_number)))
self.__sock.send(bytesToSend)
try:
# Receive response frame
data = self.__sock.recvfrom(64)[0]
if len(data) == 8:
if int(MilightWifiBridge.__getStringFromUnicode(data[6])) == self.__sequence_number:
returnValue = True
logging.debug("Received valid response for previously sent request")
else:
logging.warning("Invalid sequence number ack {} instead of {}".format(str(data[6]),
self.__sequence_number))
else:
logging.warning("Invalid response size {} instead of 8".format(str(len(data))))
except socket.timeout:
logging.warning("Timed out for response")
else:
logging.warning("Start session failed")
else:
logging.error("Invalid zone {} (must be between 0 and 4)".format(str(zoneId)))
else:
logging.error("Invalid command size {} instead of 9".format(str(len(bytearray(command)))))
return returnValue
######################### PUBLIC FUNCTIONS #########################
def turnOn(self, zoneId):
"""Request 'Light on' to a zone
Keyword arguments:
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__ON_CMD, zoneId)
logging.debug("Turn on zone {}: {}".format(str(zoneId), str(returnValue)))
return returnValue
def turnOff(self, zoneId):
"""Request 'Light off' to a zone
Keyword arguments:
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__OFF_CMD, zoneId)
logging.debug("Turn off zone {}: {}".format(str(zoneId), str(returnValue)))
return returnValue
def turnOnWifiBridgeLamp(self):
"""Request 'Wifi bridge lamp on' to a zone
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__WIFI_BRIDGE_LAMP_ON_CMD, 0x01)
logging.debug("Turn on wifi bridge lamp: {}".format(str(returnValue)))
return returnValue
def turnOffWifiBridgeLamp(self):
"""Request 'Wifi bridge lamp off'
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__WIFI_BRIDGE_LAMP_OFF_CMD, 0x01)
logging.debug("Turn off wifi bridge lamp: {}".format(str(returnValue)))
return returnValue
def setNightMode(self, zoneId):
"""Request 'Night mode' to a zone
Keyword arguments:
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__NIGHT_MODE_CMD, zoneId)
logging.debug("Set night mode to zone {}: {}".format(str(zoneId), str(returnValue)))
return returnValue
def setWhiteMode(self, zoneId):
"""Request 'White mode' to a zone
Keyword arguments:
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__WHITE_MODE_CMD, zoneId)
logging.debug("Set white mode to zone {}: {}".format(str(zoneId), str(returnValue)))
return returnValue
def setWhiteModeBridgeLamp(self):
"""Request 'White mode' to the bridge lamp
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__WIFI_BRIDGE_LAMP_WHITE_MODE_CMD, 0x01)
logging.debug("Set white mode to wifi bridge: {}".format(str(returnValue)))
return returnValue
def setDiscoMode(self, discoMode, zoneId):
"""Request 'Set disco mode' to a zone
Keyword arguments:
discoMode -- (int or MilightWifiBridge.eDiscoMode) Disco mode (9 modes available)
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__getSetDiscoModeCmd(discoMode), zoneId)
logging.debug("Set disco mode {} to zone {}: {}".format(str(discoMode), str(zoneId), str(returnValue)))
return returnValue
def setDiscoModeBridgeLamp(self, discoMode):
"""Request 'Set disco mode' to the bridge lamp
Keyword arguments:
discoMode -- (int or MilightWifiBridge.eDiscoMode) Disco mode (9 modes available)
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__getSetDiscoModeForBridgeLampCmd(discoMode), 0x01)
logging.debug("Set disco mode {} to wifi bridge: {}".format(str(discoMode), str(returnValue)))
return returnValue
def speedUpDiscoMode(self, zoneId):
"""Request 'Disco mode speed up' to a zone
Keyword arguments:
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__DISCO_MODE_SPEED_UP_CMD, zoneId)
logging.debug("Speed up disco mode to zone {}: {}".format(str(zoneId), str(returnValue)))
return returnValue
def speedUpDiscoModeBridgeLamp(self):
"""Request 'Disco mode speed up' to the wifi bridge
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__WIFI_BRIDGE_LAMP_DISCO_MODE_SPEED_UP_CMD, 0x01)
logging.debug("Speed up disco mode to wifi bridge: {}".format(str(returnValue)))
return returnValue
def slowDownDiscoMode(self, zoneId):
"""Request 'Disco mode slow down' to a zone
Keyword arguments:
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__DISCO_MODE_SLOW_DOWN_CMD, zoneId)
logging.debug("Slow down disco mode to zone {}: {}".format(str(zoneId), str(returnValue)))
return returnValue
def slowDownDiscoModeBridgeLamp(self):
"""Request 'Disco mode slow down' to wifi bridge
Keyword arguments:
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__WIFI_BRIDGE_LAMP_DISCO_MODE_SLOW_DOWN_CMD, 0x01)
logging.debug("Slow down disco mode to wifi bridge: {}".format(str(returnValue)))
return returnValue
def link(self, zoneId):
"""Request 'Link' to a zone
Keyword arguments:
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__LINK_CMD, zoneId)
logging.debug("Link zone {}: {}".format(str(zoneId), str(returnValue)))
return returnValue
def unlink(self, zoneId):
"""Request 'Unlink' to a zone
Keyword arguments:
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__UNLINK_CMD, zoneId)
logging.debug("Unlink zone {}: {}".format(str(zoneId), str(returnValue)))
return returnValue
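# Note: link()/unlink() only take effect if the bulb was switched on manually
# at most ~3 seconds before the request (see the CLI help text further down).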
def setColor(self, color, zoneId):
"""Request 'Set color' to a zone
Keyword arguments:
color -- (int or eColor) Color (between 0x00 and 0xFF)
examples: 0xFF = Red, 0xD9 = Lavender, 0xBA = Blue, 0x85 = Aqua,
0x7A = Green, 0x54 = Lime, 0x3B = Yellow, 0x1E = Orange
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__getSetColorCmd(color), zoneId)
logging.debug("Set color {} to zone {}: {}".format(str(color), str(zoneId), str(returnValue)))
return returnValue
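# Usage sketch (color values taken from the docstring above; 'milight' is an
# already set up MilightWifiBridge instance):
#   milight.setColor(color=0xBA, zoneId=1)  # blue on zone 1
#   milight.setColor(color=0x7A, zoneId=0)  # green on all zones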
def setColorBridgeLamp(self, color):
"""Request 'Set color' to wifi bridge
Keyword arguments:
color -- (int or eColor) Color (between 0x00 and 0xFF)
examples: 0xFF = Red, 0xD9 = Lavender, 0xBA = Blue, 0x85 = Aqua,
0x7A = Green, 0x54 = Lime, 0x3B = Yellow, 0x1E = Orange
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__getSetBridgeLampColorCmd(color), 0x01)
logging.debug("Set color {} to wifi bridge: {}".format(str(color), str(returnValue)))
return returnValue
def setBrightness(self, brightness, zoneId):
"""Request 'Set brightness' to a zone
Keyword arguments:
brightness -- (int) Brightness in percentage (between 0 and 100)
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__getSetBrightnessCmd(brightness), zoneId)
logging.debug("Set brightness {}% to zone {}: {}".format(str(brightness), str(zoneId), str(returnValue)))
return returnValue
def setBrightnessBridgeLamp(self, brightness):
"""Request 'Set brightness' to the wifi bridge
Keyword arguments:
brightness -- (int) Brightness in percentage (between 0 and 100)
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__getSetBrightnessForBridgeLampCmd(brightness), 0x01)
logging.debug("Set brightness {}% to the wifi bridge: {}".format(str(brightness), str(returnValue)))
return returnValue
def setSaturation(self, saturation, zoneId):
"""Request 'Set saturation' to a zone
Keyword arguments:
saturation -- (int) Saturation in percentage (between 0 and 100)
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__getSetSaturationCmd(saturation), zoneId)
logging.debug("Set saturation {}% to zone {}: {}".format(str(saturation), str(zoneId), str(returnValue)))
return returnValue
def setTemperature(self, temperature, zoneId):
"""Request 'Set temperature' to a zone
Keyword arguments:
temperature -- (int or MilightWifiBridge.eTemperature) Temperature in percentage (between 0 and 100)
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__getSetTemperatureCmd(temperature), zoneId)
logging.debug("Set temperature {}% ({} kelvin) to zone {}: {}"
.format(str(temperature), str(int(2700 + 38*temperature)), str(zoneId), str(returnValue)))
return returnValue
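# The 0-100% temperature scale maps linearly onto roughly 2700 K (warm white)
# up to 6500 K (cold white), as the debug line above computes:
# kelvin = 2700 + 38 * percent, e.g. setTemperature(50, zone) targets about
# 2700 + 38*50 = 4600 K.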
def getMacAddress(self):
"""Request the MAC address of the milight wifi bridge
return: (string) MAC address of the wifi bridge (empty if an error occurred)
"""
returnValue = self.__startSession().mac
logging.debug("Get MAC address: {}".format(str(returnValue)))
return returnValue
################################# HELP FUNCTION ################################
def __help(func="", filename=__file__):
"""Show help on how to use command line milight wifi bridge functions
Keyword arguments:
func -- (string, optional) Command line function requiring help, none will show all function
filename -- (string, optional) File name of the python script implementing the commands
"""
func = func.lower()
# Help
if func in ("h", "help"):
print("Give information to use all or specific milight wifi bridge commands\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" -h [command (default: none)]\r\n"
+filename+" --help [command (default: none)]\r\n"
+"\r\n"
+"Example:\r\n"
+filename+" -h \r\n"
+filename+" -h turnOn \r\n"
+filename+" --help \r\n"
+filename+" --help link")
return
elif func == "":
print("HELP (-h, --help): Give information to use all or specific milight wifi bridge commands")
# Ip
if func in ("i", "ip"):
print("Specify milight wifi bridge IP (mandatory to use any command)\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" -i [ip]\r\n"
+filename+" --ip [ip]\r\n"
+"\r\n"
+"Example:\r\n"
+filename+" -i 192.168.1.23\r\n"
+filename+" --ip 192.168.1.23\r\n")
return
elif func == "":
print("IP (-i, --ip): Specify milight wifi bridge IP (mandatory to use any command)")
# Port
if func in ("p", "port"):
print("Specify milight wifi bridge port\r\n"
+"\r\n"
+"Default value (if not called): 5987\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" -port [port]\r\n"
+filename+" --port [port]\r\n"
+"\r\n"
+"Example:\r\n"
+filename+" -p 1234\r\n"
+filename+" --port 1234\r\n")
return
elif func == "":
print("PORT (-p, --port): Specify milight wifi bridge port (default value: 5987)")
# Timeout
if func in ("t", "timeout"):
print("Specify timeout for communication with the wifi bridge (in sec)\r\n"
+"\r\n"
+"Default value (if not called): 5.0\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" -t [timeout]\r\n"
+filename+" --timeout [timeout]\r\n"
+"\r\n"
+"Example:\r\n"
+filename+" -t 1\r\n"
+filename+" --timeout 1\r\n")
return
elif func == "":
print("TIMEOUT (-t, --timeout): Specify timeout for communication with the wifi bridge in sec (default value: 5.0sec)")
# Zone
if func in ("z", "zone"):
print("Specify milight light zone to control\r\n"
+"\r\n"
+"Default value (if not called): 0\r\n"
+"\r\n"
+"Possible values: 0 for all zone or zone 1 to 4\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" -z [zone]\r\n"
+filename+" --zone [zone]\r\n"
+"\r\n"
+"Example:\r\n"
+filename+" -z 1\r\n"
+filename+" --zone 1\r\n")
return
elif func == "":
print("ZONE (-z, --zone): Specify milight light zone to control (default value: All zone)")
# Get MAC address
if func in ("m", "getmacaddress"):
print("Get the milight wifi bridge mac address\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 -m\r\n"
+filename+" --ip 192.168.1.23 --getMacAddress\r\n")
return
elif func == "":
print("GET MAC ADDRESS (-m, --getMacAddress): Get the milight wifi bridge mac address")
# Link
if func in ("l", "link"):
print("Link lights to a specific zone\r\n"
+"\r\n"
+"Note: In order to make this work, the light must be switch on manually max 3sec before this command\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 --zone 1 -l\r\n"
+filename+" --ip 192.168.1.23 --zone 1 --link\r\n")
return
elif func == "":
print("LINK (-l, --link): Link lights to a specific zone")
# Unlink
if func in ("u", "unlink"):
print("Unlink lights\r\n"
+"\r\n"
+"Note: In order to make this work, the light must be switch on manually max 3sec before this command\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 -u\r\n"
+filename+" --ip 192.168.1.23 --unlink\r\n")
return
elif func == "":
print("UNLINK (-u, --unlink): Unlink lights")
# Turn lights ON
if func in ("o", "turnon"):
print("Turn lights on\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 --zone 1 -o\r\n"
+filename+" --ip 192.168.1.23 --zone 1 --turnOn\r\n")
return
elif func == "":
print("TURN ON (-o, --turnOn): Turn lights on")
# Turn lights OFF
if func in ("f", "turnoff"):
print("Turn lights off\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 --zone 1 -f\r\n"
+filename+" --ip 192.168.1.23 --zone 1 --turnOff\r\n")
return
elif func == "":
print("TURN OFF (-o, --turnOff): Turn lights off")
# Turn wifi bridge lamp ON
if func in ("x", "turnonwifibridgelamp"):
print("Turn wifi bridge lamp on\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 -x\r\n"
+filename+" --ip 192.168.1.23 --turnOnWifiBridgeLamp\r\n")
return
elif func == "":
print("TURN WIFI BRIDGE LAMP ON (-x, --turnOnWifiBridgeLamp): Turn wifi bridge lamp on")
# Turn wifi bridge lamp OFF
if func in ("y", "turnoffwifibridgelamp"):
print("Turn wifi bridge lamp off\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 -y\r\n"
+filename+" --ip 192.168.1.23 --turnOffWifiBridgeLamp\r\n")
return
elif func == "":
print("TURN WIFI BRIDGE LAMP OFF (-y, --turnOffWifiBridgeLamp): Turn wifi bridge lamp off")
# Set night mode
if func in ("n", "setnightmode"):
print("Set night mode\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 --zone 1 -n\r\n"
+filename+" --ip 192.168.1.23 --zone 1 --setNightMode\r\n")
return
elif func == "":
print("SET NIGHT MODE (-n, --setNightMode): Set night mode")
# Set white mode
if func in ("w", "setwhitemode"):
print("Set white mode\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 --zone 1 -w\r\n"
+filename+" --ip 192.168.1.23 --zone 1 --setWhiteMode\r\n")
return
elif func == "":
print("SET WHITE MODE (-w, --setWhiteMode): Set white mode")
# Set white mode for bridge lamp
if func in ("j", "setwhitemodebridgelamp"):
print("Set white mode\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 -j\r\n"
+filename+" --ip 192.168.1.23 --setWhiteModeBridgeLamp\r\n")
return
elif func == "":
print("SET WHITE MODE ON BRIDGE LAMP (-j, --setWhiteModeBridgeLamp): Set white mode on bridge lamp")
# Speed up disco mode for bridge lamp
if func in ("k", "speedupdiscomodebridgelamp"):
print("Speed up disco mode for bridge lamp\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 -k\r\n"
+filename+" --ip 192.168.1.23 --speedUpDiscoModeBridgeLamp\r\n")
return
elif func == "":
print("SPEED UP DISCO MODE FOR BRIDGE LAMP (-k, --speedUpDiscoModeBridgeLamp): Speed up disco mode for bridge lamp")
# Slow down disco mode for bridge lamp
if func in ("q", "slowdowndiscomodebridgelamp"):
print("Slow down disco mode for bridge lamp\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 -q\r\n"
+filename+" --ip 192.168.1.23 --slowDownDiscoModeBridgeLamp\r\n")
return
elif func == "":
print("SLOW DOWN DISCO MODE FOR BRIDGE LAMP (-q, --slowDownDiscoModeBridgeLamp): Slow down disco mode for bridge lamp")
# Speed up disco mode
if func in ("a", "speedupdiscomode"):
print("Speed up disco mode\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 --zone 1 -a\r\n"
+filename+" --ip 192.168.1.23 --zone 1 --speedUpDiscoMode\r\n")
return
elif func == "":
print("SPEED UP DISCO MODE (-a, --speedUpDiscoMode): Speed up disco mode")
# Slow down disco mode
if func in ("g", "slowdowndiscomode"):
print("Slow down disco mode\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 --zone 1 -g\r\n"
+filename+" --ip 192.168.1.23 --zone 1 --slowDownDiscoMode\r\n")
return
elif func == "":
print("SLOW DOWN DISCO MODE (-g, --slowDownDiscoMode): Slow down disco mode")
# Set specific color
if func in ("c", "setcolor"):
print("Set specific color (between 0 and 255)\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 --zone 1 -c 255\r\n"
+filename+" --ip 192.168.1.23 --zone 1 --setColor 255\r\n")
return
elif func == "":
print("SET COLOR (-c, --setColor): Set specific color (between 0 and 255)")
# Set brightness
if func in ("b", "setbrightness"):
print("Set brightness (in %)\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 --zone 1 -b 50\r\n"
+filename+" --ip 192.168.1.23 --zone 1 --setBrightness 50\r\n")
return
elif func == "":
print("SET BRIGHTNESS (-b, --setBrightness): Set brightness (in %)")
# Set specific color for bridge lamp
if func in ("r", "setcolorbridgelamp"):
print("Set specific color for the bridge lamp (between 0 and 255)\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 -r 255\r\n"
+filename+" --ip 192.168.1.23 --setColorBridgeLamp 255\r\n")
return
elif func == "":
print("SET COLOR FOR THE BRIDGE LAMP (-r, --setColorBridgeLamp): Set specific color for the bridge lamp (between 0 and 255)")
# Set brightness for bridge lamp
if func in ("v", "setbrightnessbridgelamp"):
print("Set brightness for the bridge lamp (in %)\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 -v 50\r\n"
+filename+" --ip 192.168.1.23 --setBrightnessBridgeLamp 50\r\n")
return
elif func == "":
print("SET BRIGHTNESS FOR THE BRIDGE LAMP (-v, --setBrightnessBridgeLamp): Set brightness for the bridge lamp(in %)")
# Set saturation
if func in ("s", "setsaturation"):
print("Set saturation (in %)\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 --zone 1 -s 50\r\n"
+filename+" --ip 192.168.1.23 --zone 1 --setSaturation 50\r\n")
return
elif func == "":
print("SET SATURATION (-s, --setSaturation): Set saturation (in %)")
# Set temperature
if func in ("s", "settemperature"):
print("Set temperature (in %)\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 --zone 1 -e 50\r\n"
+filename+" --ip 192.168.1.23 --zone 1 --setTemperature 50\r\n")
return
elif func == "":
print("SET TEMPERATURE (-e, --setTemperature): Set temperature (in %)")
# Set disco mode
if func in ("d", "setdiscomode"):
print("Set disco mode (between 1 and 9)\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 --zone 1 -d 5\r\n"
+filename+" --ip 192.168.1.23 --zone 1 --setDiscoMode 5\r\n")
return
elif func == "":
print("SET DISCO MODE (-d, --setDiscoMode): Set disco mode (between 1 and 9)")
# Set disco mode for bridge lamp
if func in ("d", "setdiscomodebridgelamp"):
print("Set disco mode for bridge lamp (between 1 and 9)\r\n"
+"\r\n"
+"Usage:\r\n"
+filename+" --ip 192.168.1.23 -1 5\r\n"
+filename+" --ip 192.168.1.23 --setDiscoModeBridgeLamp 5\r\n")
return
elif func == "":
print("SET DISCO MODE FOR BRIDGE LAMP (-1, --setDiscoModeBridgeLamp): Set disco mode for bridge lamp (between 1 and 9)")
# Add use case examples:
if func == "":
print("\r\n"
+"Some examples (if ip '192.168.1.23', port is 5987):\r\n"
+" - Get the mac address: "+filename+" --ip 192.168.1.23 --port 5987 --getMacAddress\r\n"
+" - Set disco mode 5 in light zone 1: "+filename+" --ip 192.168.1.23 --port 5987 --zone 1 --setDiscoMode 5\r\n"
+" - Light on zone 1: "+filename+" --ip 192.168.1.23 --port 5987 --zone 1 --lightOn\r\n"
+" - Light off zone 1: "+filename+" --ip 192.168.1.23 --port 5987 --zone 1 --lightOff\r\n"
+" - Light on and set with light in zone 1: "+filename+" --ip 192.168.1.23 --port 5987 --zone 1 --lightOn --setWhiteMode\r\n"
+" - Light on all zone: "+filename+" --ip 192.168.1.23 --port 5987 --zone 0 --lightOn\r\n"
+" - Light off all zone: "+filename+" --ip 192.168.1.23 --port 5987 --zone 0 --lightOff")
################################# MAIN FUNCTION ###############################
def main(parsed_args = sys.argv[1:]):
"""Shell Milight utility function"""
# Set the log level (no log will be shown if "logging.CRITICAL" is used)
logger = logging.getLogger()
logger.setLevel(logging.CRITICAL) #Other parameters: logging.DEBUG, logging.WARNING, logging.ERROR
ip = "" # No default IP, must be specified by the user
port = 5987 # Default milight 3.0 port
zone = 0 # By default, all zones are controlled
timeout = 5.0 # By default, wait a maximum of 5 sec
# Get options
try:
opts, args = getopt.getopt(parsed_args, "i:p:t:z:hmluofx23ynwagc:b:s:e:d:jkqr:v:1:",
["ip=", "port=", "timeout=", "zone=", "help", "debug", "nodebug",
"getMacAddress", "link", "unlink", "turnOn", "turnOff", "turnOnWifiBridgeLamp",
"turnOffWifiBridgeLamp", "setNightMode", "setWhiteMode", "speedUpDiscoMode", "slowDownDiscoMode",
"setColor=", "setBrightness=", "setSaturation=", "setTemperature=", "setDiscoMode=",
"setWhiteModeBridgeLamp", "speedUpDiscoModeBridgeLamp", "slowDownDiscoModeBridgeLamp",
"setColorBridgeLamp=", "setBrightnessBridgeLamp=", "setDiscoModeBridgeLamp="])
except getopt.GetoptError as err:
print("[ERROR] "+str(err))
__help()
sys.exit(1)
# Show help (if requested)
for o, a in opts:
if o in ("-h", "--help"):
if len(args) >= 1:
__help(args[0])
else:
__help()
sys.exit(0)
elif o in ("-l", "--debug"):
print("Debugging...")
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
elif o in ("-z", "--nodebug"):
logger = logging.getLogger()
logger.setLevel(logging.CRITICAL)
# Get base parameters
for o, a in opts:
if o in ("-i", "--ip"):
ip = str(a)
continue
if o in ("-p", "--port"):
port = int(a)
continue
if o in ("-t", "--timeout"):
timeout = float(a)  # allow fractional seconds (default is 5.0)
continue
if o in ("-z", "--zone"):
zone = int(a)
continue
# Check base parameters
if ip == "":
print("[ERROR] You need to specify the ip...\r\n")
__help("ip")
sys.exit(1)
if zone < 0 or zone > 4:
print("[ERROR] You need to specify a valid zone ID (between 0 and 4)\r\n")
__help("zone")
sys.exit(1)
if timeout <= 0:
print("[ERROR] You need to specify a valid timeout (more than 0sec)\r\n")
__help("timeout")
sys.exit(1)
if port <= 0:
print("[ERROR] You need to specify a valid port (more than 0)\r\n")
__help("port")
sys.exit(1)
# Show base parameters
print("Ip: "+str(ip))
print("Zone: "+str(zone))
print("Timeout: "+str(timeout))
print("Port: "+str(port))
# Initialize Milight bridge
milight = MilightWifiBridge()
milight.close()
is_init = milight.setup(ip, port, timeout)
logging.debug("Milight bridge connection initialized with ip {}:{} : {}".format(ip, port, is_init))
if not is_init:
print("[ERROR] Initialization failed, re-check the ip (and the port), use '-h' to get more information.")
sys.exit(2)
# Execute requested commands in the requested order
returnValue = True
atLeastOneRequestDone = False
for o, a in opts:
if o in ("-m", "--getMacAddress"):
atLeastOneRequestDone = True
macAddress = milight.getMacAddress()
returnValue &= (macAddress != "")
if macAddress != "":
print("Mac address: "+str(macAddress))
else:
print("Failed to get mac address")
elif o in ("-l", "--link"):
atLeastOneRequestDone = True
res = milight.link(zoneId=zone)
returnValue &= res
print("Link zone "+str(zone)+": "+str(res))
elif o in ("-u", "--unlink"):
atLeastOneRequestDone = True
res = milight.unlink(zoneId=zone)
returnValue &= res
print("Unlink zone "+str(zone)+": "+str(res))
elif o in ("-o", "--turnOn"):
atLeastOneRequestDone = True
res = milight.turnOn(zoneId=zone)
returnValue &= res
print("Turn on zone "+str(zone)+": "+str(res))
elif o in ("-f", "--turnOff"):
atLeastOneRequestDone = True
res = milight.turnOff(zoneId=zone)
returnValue &= res
print("Turn off zone "+str(zone)+": "+str(res))
elif o in ("-x", "--turnOnWifiBridgeLamp"):
atLeastOneRequestDone = True
res = milight.turnOnWifiBridgeLamp()
returnValue &= res
print("Turn on wifi bridge lamp: "+str(res))
elif o in ("-y", "--turnOffWifiBridgeLamp"):
atLeastOneRequestDone = True
res = milight.turnOffWifiBridgeLamp()
returnValue &= res
print("Turn off wifi bridge lamp: "+str(res))
elif o in ("-j", "--setWhiteModeBridgeLamp"):
atLeastOneRequestDone = True
res = milight.setWhiteModeBridgeLamp()
returnValue &= res
print("Set white mode to wifi bridge: "+str(res))
elif o in ("-k", "--speedUpDiscoModeBridgeLamp"):
atLeastOneRequestDone = True
res = milight.speedUpDiscoModeBridgeLamp()
returnValue &= res
print("Speed up disco mode to wifi bridge: "+str(res))
elif o in ("-q", "--slowDownDiscoModeBridgeLamp"):
atLeastOneRequestDone = True
res = milight.slowDownDiscoModeBridgeLamp()
returnValue &= res
print("Slow down disco mode to wifi bridge: "+str(res))
elif o in ("-r", "--setColorBridgeLamp"):
userColor = int(a)
if userColor < 0 or userColor > 255:
print("[ERROR] Color must be between 0 and 255")
sys.exit(2)
atLeastOneRequestDone = True
res = milight.setColorBridgeLamp(color=userColor)
returnValue &= res
print("Set color "+str(userColor)+" to wifi bridge: "+str(res))
elif o in ("-v", "--setBrightnessBridgeLamp"):
userBrightness = int(a)
if userBrightness < 0 or userBrightness > 100:
print("[ERROR] Brightness must be between 0 and 100 (in %)")
sys.exit(2)
atLeastOneRequestDone = True
res = milight.setBrightnessBridgeLamp(brightness=userBrightness)
returnValue &= res
print("Set brightness "+str(userBrightness)+"% to the wifi bridge: "+str(res))
elif o in ("-1", "--setDiscoModeBridgeLamp"):
mode = int(a)
if mode < 1 or mode > 9:
print("[ERROR] Disco mode must be between 1 and 9")
sys.exit(2)
atLeastOneRequestDone = True
res = milight.setDiscoModeBridgeLamp(discoMode=mode)
returnValue &= res
print("Set disco mode "+str(mode)+" to wifi bridge: "+str(res))
elif o in ("-n", "--setNightMode"):
atLeastOneRequestDone = True
res = milight.setNightMode(zoneId=zone)
returnValue &= res
print("Set night mode to zone "+str(zone)+": "+str(res))
elif o in ("-w", "--setWhiteMode"):
atLeastOneRequestDone = True
res = milight.setWhiteMode(zoneId=zone)
returnValue &= res
print("Set white mode to zone "+str(zone)+": "+str(res))
elif o in ("-a", "--speedUpDiscoMode"):
atLeastOneRequestDone = True
res = milight.speedUpDiscoMode(zoneId=zone)
returnValue &= res
print("Speed up disco mode to zone "+str(zone)+": "+str(res))
elif o in ("-g", "--slowDownDiscoMode"):
atLeastOneRequestDone = True
res = milight.slowDownDiscoMode(zoneId=zone)
returnValue &= res
print("Slow down disco mode to zone "+str(zone)+": "+str(res))
elif o in ("-d", "--setDiscoMode"):
mode = int(a)
if mode < 1 or mode > 9:
print("[ERROR] Disco mode must be between 1 and 9")
sys.exit(2)
atLeastOneRequestDone = True
res = milight.setDiscoMode(discoMode=mode, zoneId=zone)
returnValue &= res
print("Set disco mode "+str(mode)+" to zone "+str(zone)+": "+str(res))
elif o in ("-c", "--setColor"):
userColor = int(a)
if userColor < 0 or userColor > 255:
print("[ERROR] Color must be between 0 and 255")
sys.exit(2)
atLeastOneRequestDone = True
res = milight.setColor(color=userColor, zoneId=zone)
returnValue &= res
print("Set color "+str(userColor)+" to zone "+str(zone)+": "+str(res))
elif o in ("-b", "--setBrightness"):
userBrightness = int(a)
if userBrightness < 0 or userBrightness > 100:
print("[ERROR] Brightness must be between 0 and 100 (in %)")
sys.exit(2)
atLeastOneRequestDone = True
res = milight.setBrightness(brightness=userBrightness, zoneId=zone)
returnValue &= res
print("Set brightness "+str(userBrightness)+"% to zone "+str(zone)+": "+str(res))
elif o in ("-s", "--setSaturation"):
userSaturation = int(a)
if userSaturation < 0 or userSaturation > 100:
print("[ERROR] Saturation must be between 0 and 100 (in %)")
sys.exit(2)
atLeastOneRequestDone = True
res = milight.setSaturation(saturation=userSaturation, zoneId=zone)
returnValue &= res
print("Set saturation "+str(userSaturation)+"% to zone "+str(zone)+": "+str(res))
elif o in ("-e", "--setTemperature"):
userTemperature = int(a)
if userTemperature < 0 or userTemperature > 100:
print("[ERROR] Temperature must be between 0 and 100 (in %)")
sys.exit(2)
atLeastOneRequestDone = True
res = milight.setTemperature(temperature=userTemperature, zoneId=zone)
returnValue &= res
print("Set temperature "+str(userTemperature)+"% to zone "+str(zone)+": "+str(res))
# In case an error occurred in any of the requests, stop the program
if not returnValue:
break
if not atLeastOneRequestDone:
print("[ERROR] You must call one action, use '-h' to get more information.")
sys.exit(1)
if not returnValue:
print("[ERROR] Request failed")
sys.exit(1)
if atLeastOneRequestDone and returnValue:
sys.exit(0)
if __name__ == '__main__':
main()
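# Library usage sketch (bypassing the CLI) -- the ip/port/zone values below
# are placeholders for your own setup:
#   milight = MilightWifiBridge()
#   if milight.setup("192.168.1.23", 5987, 5.0):
#     milight.turnOn(zoneId=1)
#     milight.setWhiteMode(zoneId=1)
#     milight.setBrightness(brightness=75, zoneId=1)
#     milight.close()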
02c2fced0a87e7a137014b222d4278f9278017f9 | 2,602 | py | Python | Homework/HW5/Test.py | zhufyaxel/ML_SaltyFish | 84b839fa236c471e1fa8600093f0096ff79e4097 | ["MIT"]
import os
import numpy as np
import pandas as pd
from Base import Train, Predict
def getTest(boolNormalize, boolDeep, boolBias, strProjectFolder):
if boolNormalize:
if boolDeep:
strOutputPath = "02-Output/" + "Deep" + "Normal"
else:
if boolBias:
strOutputPath = "02-Output/" + "Bias" + "Normal"
else:
strOutputPath = "02-Output/" + "unBias" + "Normal"
else:
if boolDeep:
strOutputPath = "02-Output/" + "Deep"
else:
if boolBias:
strOutputPath = "02-Output/" + "Bias"
else:
strOutputPath = "02-Output/" + "unBias"
strOutputPath = strOutputPath + "Test"
DataTrain = pd.read_csv(os.path.join(strProjectFolder, "01-Data/Train.csv"))
DataTest = pd.read_csv(os.path.join(strProjectFolder, "01-Data/Test.csv"))
submisson = pd.read_csv(os.path.join(strProjectFolder, "01-Data/SampleSubmisson.csv"))
DataTrain = DataTrain.sample(frac=1)
intUserSize = len(DataTrain["UserID"].drop_duplicates())
intMovieSize = len(DataTrain["MovieID"].drop_duplicates())
arrayUsers = DataTrain["UserID"].values
arrayMovies = DataTrain["MovieID"].values
arrayRate = DataTrain["Rating"].values
arrayTestUsers = DataTest["UserID"].values
arrayTestMovies = DataTest["MovieID"].values
intLatentSize = 32
if boolNormalize:
arrayRateAvg = np.mean(arrayRate)
arrayRateStd = np.std(arrayRate)
arrayRate = (arrayRate - arrayRateAvg)/arrayRateStd
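# Ratings are z-scored here; after prediction they are mapped back with
# arrayPredict * arrayRateStd + arrayRateAvg (see below), so the submission
# stays on the original rating scale.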
Train.getTrain(arrayTrainUser=arrayUsers, arrayTrainMovie=arrayMovies, arrayTrainRate=arrayRate
, arrayValidUser=arrayUsers, arrayValidMovie=arrayMovies, arrayValidRate=arrayRate
, intUserSize=intUserSize
, intMovieSize=intMovieSize
, intLatentSize=intLatentSize
, boolBias=boolBias
, boolDeep=boolDeep
, strProjectFolder=strProjectFolder, strOutputPath=strOutputPath)
arrayPredict = Predict.makePredict(arrayTestUsers, arrayTestMovies, strProjectFolder, strOutputPath)
if boolNormalize:
arrayPredict = (arrayPredict * arrayRateStd) + arrayRateAvg
submisson["Rating"] = pd.DataFrame(arrayPredict)
submisson.to_csv(os.path.join(strProjectFolder, strOutputPath + "submission.csv"), index=False)
if __name__ == "__main__":
strProjectFolder = os.path.dirname(__file__)
getTest(boolNormalize=True, boolDeep=False, boolBias=True, strProjectFolder=strProjectFolder)
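    # Other configurations are a matter of flipping the switches, e.g. (a sketch):
    #   getTest(boolNormalize=False, boolDeep=True, boolBias=False, strProjectFolder=strProjectFolder)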