hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0ff06848217ed3681bc4876e8ad79a90cb96f248 | 1,278 | py | Python | bot.py | Gagan-10/pyro_frwd_bot | e8b56cbf6284dd40800db2aec0bcc9f6d9419f15 | [
"Apache-2.0"
] | null | null | null | bot.py | Gagan-10/pyro_frwd_bot | e8b56cbf6284dd40800db2aec0bcc9f6d9419f15 | [
"Apache-2.0"
] | null | null | null | bot.py | Gagan-10/pyro_frwd_bot | e8b56cbf6284dd40800db2aec0bcc9f6d9419f15 | [
"Apache-2.0"
] | 1 | 2021-07-04T04:33:22.000Z | 2021-07-04T04:33:22.000Z | import logging
import asyncio
import os
from pyrogram import Client, filters
logging.basicConfig(
format='%(levelname)5s - %(name)s - %(message)s',
level=0
)
LOGGER = logging.getLogger("root")
LOGGER.setLevel(logging._nameToLevel[os.environ.get("log_level", "NOTSET").upper()])
string_session = os.environ.get("string_session")
api_id = os.environ.get("api_id")
api_hash = os.environ.get("api_hash")
group_a = int(os.environ.get("group_a"))
group_b = int(os.environ.get("group_b"))
password = os.environ.get("password", None)
client = Client(
string_session,
int(api_id),
api_hash,
password=password
)
basic_filters = filters.group & ~filters.edited & ~filters.service
@client.on_message(filters.chat(group_a) & basic_filters)
@client.on_message(filters.chat(group_b) & basic_filters)
if __name__ == "__main__":
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = asyncio.get_event_loop()
loop.run_until_complete(client.run())
| 25.56 | 89 | 0.731612 | import logging
import asyncio
import os
from pyrogram import Client, filters
logging.basicConfig(
format='%(levelname)5s - %(name)s - %(message)s',
level=0
)
LOGGER = logging.getLogger("root")
LOGGER.setLevel(logging._nameToLevel[os.environ.get("log_level", "NOTSET").upper()])
string_session = os.environ.get("string_session")
api_id = os.environ.get("api_id")
api_hash = os.environ.get("api_hash")
group_a = int(os.environ.get("group_a"))
group_b = int(os.environ.get("group_b"))
password = os.environ.get("password", None)
client = Client(
string_session,
int(api_id),
api_hash,
password=password
)
basic_filters = filters.group & ~filters.edited & ~filters.service
@client.on_message(filters.chat(group_a) & basic_filters)
async def group_a_to_group_b(client, event):
await client.forward_messages(group_b, event.chat.id, event.message_id, as_copy=True)
@client.on_message(filters.chat(group_b) & basic_filters)
async def group_b_to_group_a(client, event):
await client.forward_messages(group_a, event.chat.id, event.message_id, as_copy=True)
if __name__ == "__main__":
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = asyncio.get_event_loop()
loop.run_until_complete(client.run())
| 226 | 0 | 44 |
470758ee21ff2a22cbd1ecbce083c86c68da23c1 | 1,374 | py | Python | octavia/tests/unit/image/drivers/noop_driver/test_driver.py | zhangi/octavia | e68c851fecf55e1b5ffe7d5b849f729626af28a3 | [
"Apache-2.0"
] | 129 | 2015-06-23T08:06:23.000Z | 2022-03-31T12:38:20.000Z | octavia/tests/unit/image/drivers/noop_driver/test_driver.py | zhangi/octavia | e68c851fecf55e1b5ffe7d5b849f729626af28a3 | [
"Apache-2.0"
] | 6 | 2016-05-20T11:05:27.000Z | 2021-03-23T06:05:52.000Z | octavia/tests/unit/image/drivers/noop_driver/test_driver.py | zhangi/octavia | e68c851fecf55e1b5ffe7d5b849f729626af28a3 | [
"Apache-2.0"
] | 166 | 2015-07-15T16:24:05.000Z | 2022-03-02T20:54:36.000Z | # Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils import uuidutils
from octavia.image.drivers.noop_driver import driver
import octavia.tests.unit.base as base
CONF = cfg.CONF
| 34.35 | 75 | 0.706696 | # Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils import uuidutils
from octavia.image.drivers.noop_driver import driver
import octavia.tests.unit.base as base
CONF = cfg.CONF
class TestNoopImageDriver(base.TestCase):
def setUp(self):
super(TestNoopImageDriver, self).setUp()
self.driver = driver.NoopImageDriver()
def test_get_image_id_by_tag(self):
image_tag = 'amphora'
image_owner = uuidutils.generate_uuid()
image_id = self.driver.get_image_id_by_tag(image_tag, image_owner)
self.assertEqual((image_tag, image_owner, 'get_image_id_by_tag'),
self.driver.driver.imageconfig[(
image_tag, image_owner
)])
self.assertEqual(1, image_id)
| 509 | 20 | 77 |
6dfa6921805aafb1c080c1db6f47b6e22b6e52d6 | 7,364 | py | Python | Scripts/RemoveVerticalOffset.py | broxtopd/SFM-Processing | 2d6f4fc5748d0db212842a006d4ccce3ae0e84dc | [
"MIT"
] | null | null | null | Scripts/RemoveVerticalOffset.py | broxtopd/SFM-Processing | 2d6f4fc5748d0db212842a006d4ccce3ae0e84dc | [
"MIT"
] | null | null | null | Scripts/RemoveVerticalOffset.py | broxtopd/SFM-Processing | 2d6f4fc5748d0db212842a006d4ccce3ae0e84dc | [
"MIT"
] | null | null | null | import sys, os, shutil
import subprocess
from osgeo import gdal
from osgeo.gdalconst import *
import tempfile
import numpy as np
sys.path.insert(1, os.path.dirname(os.path.realpath(__file__)) + '/../')
from GeoRefPars import GeoRefPars
# Removes vertical offset for an for a ground point cloud with uncertain georeferncing by comparing its elevation with
# that from reference ground point cloud (requires that the clouds match up reasonably closely in the horizontal)
#
# Usage: RemoveVerticalOffset.py <options> <Input Cloud> <Reference Cloud> <Suffix>
#
# Input Cloud: Path to input ground point cloud
# Refernce Cloud: Path to the reference ground point cloud
# Suffix: suffix to be added to the outputted las file (Warning, if set to "None", will overwrite the input file!)
# Options:
# -a, --additional_clouds: Specfies additional clouds (separated by commas) to perform the same adjustment for (for example,
# to adjust a point cloud containing canopy points using the same adjustment that is applied to the ground point cloud)
#
# Note that in addition to the dependencies listed above, this code assumes that the gdal and Fusion command line tools are
# installed and are accessable via the command line (e.g. on the system path), as this script makes subprocess calls to them
#
# Created by Patrick Broxton
# Updated 6/30/2020
# Read the georeferencing information and fusion parameters
(crs, fusion_parameters, cellsize) = GeoRefPars()
# Optional parameters
def optparse_init():
"""Prepare the option parser for input (argv)"""
from optparse import OptionParser, OptionGroup
usage = 'Usage: %prog [options] input_file(s) [output]'
p = OptionParser(usage)
p.add_option('-a', '--additional_clouds', dest='additional_clouds', help='Additional clouds to apply the correction to')
return p
if __name__ == '__main__':
# Parse the command line arguments
argv = gdal.GeneralCmdLineProcessor( sys.argv )
parser = optparse_init()
options,args = parser.parse_args(args=argv[1:])
incloud_ground = args[0] # Input ground point cloud
ref_cloud_ground = args[1] # Reference ground surface file
out_suffix = args[2] # Output file suffix (for saved files)
additional_clouds = options.additional_clouds
# Check for the existance of the input and reference clouds
path_errors = False
if not os.path.exists(incloud_ground):
print('Error: ' + incloud_ground + ' does not exist!')
path_errors = True
if not os.path.exists(ref_cloud_ground):
print('Error: ' + ref_cloud_ground + ' does not exist!')
path_errors = True
if path_errors == True:
sys.exit()
# Create a temporary working directory`
working_dir = tempfile.mktemp()
if not os.path.exists(working_dir):
os.makedirs(working_dir)
# Create Surface files for the SFM ground point cloud in the temporary directory (using Fusion's GridSurfaceCreate program)
cmd = 'GridSurfaceCreate "' + working_dir + '/surf_sfm.dtm" ' + str(cellsize) + ' ' + fusion_parameters + ' "' + incloud_ground + '"'
print(cmd)
subprocess.call(cmd, shell=True)
# Convert from dtm format to asc format (so it can be read by gdal)
cmd = 'DTM2ASCII "' + working_dir + '/surf_sfm.dtm" "' + working_dir + '/surf_sfm.asc"'
print(cmd)
subprocess.call(cmd, shell=True)
# Assign georeferencing information using a gdal virtual raster layer
cmd = 'gdalbuildvrt -a_srs "EPSG:' + str(crs) + '" "' + working_dir + '/surf_sfm.vrt" "' + working_dir + '/surf_sfm.asc"'
print(cmd)
subprocess.call(cmd, shell=True)
# Open the dataset
inDs = gdal.Open(working_dir + '/surf_sfm.vrt')
if inDs is None:
print('Could not open ' + working_dir + '/surf_sfm.vrt')
sys.exit(1)
# Get raster characteristics
width = inDs.RasterXSize
height = inDs.RasterYSize
gt = inDs.GetGeoTransform()
ulx = gt[0]
lry = gt[3] + width*gt[4] + height*gt[5]
lrx = gt[0] + width*gt[1] + height*gt[2]
uly = gt[3]
dx = gt[1]
dy = -gt[5]
# Read the ground surface file
band = inDs.GetRasterBand(inDs.RasterCount)
sfm_z = band.ReadAsArray()
sfm_z[sfm_z == band.GetNoDataValue()] = np.nan
inDs = None
# Create Surface files for the reference point cloud in the temporary directory (using Fusion's GridSurfaceCreate program)
cmd = 'GridSurfaceCreate "' + working_dir + '/surf_lidar.dtm" ' + str(cellsize) + ' ' + fusion_parameters + ' "' + ref_cloud_ground + '"'
print(cmd)
subprocess.call(cmd, shell=True)
# Convert from dtm format to asc format (so it can be read by gdal)
cmd = 'DTM2ASCII "' + working_dir + '/surf_lidar.dtm" "' + working_dir + '/surf_lidar.asc"'
print(cmd)
subprocess.call(cmd, shell=True)
# Assign georeferencing information using a gdal virtual raster layer
te_str = str(ulx) + ' ' + str(lry) + ' ' + str(lrx) + ' ' + str(uly)
cmd = 'gdalbuildvrt -te ' + te_str + ' -a_srs "EPSG:' + str(crs) + '" "' + working_dir + '/surf_lidar.vrt" "' + working_dir + '/surf_lidar.asc"'
print(cmd)
subprocess.call(cmd, shell=True)
# Open the dataset
inDs2 = gdal.Open(working_dir + '/surf_lidar.vrt')
if inDs2 is None:
print('Could not open ' + working_dir + '/surf_lidar.vrt')
sys.exit(1)
# Read the reference cloud
band = inDs2.GetRasterBand(inDs2.RasterCount)
lidar_z = band.ReadAsArray()
lidar_z[lidar_z == band.GetNoDataValue()] = np.nan
inDs2 = None
# Figure out the average difference
diff = sfm_z - lidar_z
vcorr = np.nanmean(diff)
# Name the output file (depending on whether a suffix to be added)
if out_suffix == 'None':
outcloud = incloud_ground[:-4] + '.laz'
else:
outcloud = incloud_ground[:-4] + '_' + out_suffix + '.laz'
# Apply the vertical offset to the input point cloud (using Fusion's clipdata program)
cmd = 'clipdata /height /biaselev:' + str(-vcorr) + ' "' + incloud_ground + '" "' + outcloud + '" ' + str(ulx) + ' ' + str(lry) + ' ' + str(lrx) + ' ' + str(uly)
print(cmd)
subprocess.call(cmd, shell=True)
# Apply the same vertical offset to any additional point clouds
if additional_clouds != None:
AdditionalClouds = additional_clouds.split(',')
for incloud in AdditionalClouds:
# If necissary, apply a suffix to these additional clouds
if out_suffix == 'None':
outcloud = incloud[:-4] + '.laz'
else:
outcloud = incloud[:-4] + '_' + out_suffix + '.laz'
# Apply the vertical offset to each additional cloud
cmd = 'clipdata /height /biaselev:' + str(-vcorr) + ' "' + incloud + '" "' + outcloud + '" ' + str(ulx) + ' ' + str(lry) + ' ' + str(lrx) + ' ' + str(uly)
print(cmd)
subprocess.call(cmd, shell=True)
# Remove the temporary directory (including any orphaned files)
if os.path.exists(working_dir):
shutil.rmtree(working_dir) | 44.095808 | 168 | 0.63566 | import sys, os, shutil
import subprocess
from osgeo import gdal
from osgeo.gdalconst import *
import tempfile
import numpy as np
sys.path.insert(1, os.path.dirname(os.path.realpath(__file__)) + '/../')
from GeoRefPars import GeoRefPars
# Removes vertical offset for an for a ground point cloud with uncertain georeferncing by comparing its elevation with
# that from reference ground point cloud (requires that the clouds match up reasonably closely in the horizontal)
#
# Usage: RemoveVerticalOffset.py <options> <Input Cloud> <Reference Cloud> <Suffix>
#
# Input Cloud: Path to input ground point cloud
# Refernce Cloud: Path to the reference ground point cloud
# Suffix: suffix to be added to the outputted las file (Warning, if set to "None", will overwrite the input file!)
# Options:
# -a, --additional_clouds: Specfies additional clouds (separated by commas) to perform the same adjustment for (for example,
# to adjust a point cloud containing canopy points using the same adjustment that is applied to the ground point cloud)
#
# Note that in addition to the dependencies listed above, this code assumes that the gdal and Fusion command line tools are
# installed and are accessable via the command line (e.g. on the system path), as this script makes subprocess calls to them
#
# Created by Patrick Broxton
# Updated 6/30/2020
# Read the georeferencing information and fusion parameters
(crs, fusion_parameters, cellsize) = GeoRefPars()
# Optional parameters
def optparse_init():
"""Prepare the option parser for input (argv)"""
from optparse import OptionParser, OptionGroup
usage = 'Usage: %prog [options] input_file(s) [output]'
p = OptionParser(usage)
p.add_option('-a', '--additional_clouds', dest='additional_clouds', help='Additional clouds to apply the correction to')
return p
if __name__ == '__main__':
# Parse the command line arguments
argv = gdal.GeneralCmdLineProcessor( sys.argv )
parser = optparse_init()
options,args = parser.parse_args(args=argv[1:])
incloud_ground = args[0] # Input ground point cloud
ref_cloud_ground = args[1] # Reference ground surface file
out_suffix = args[2] # Output file suffix (for saved files)
additional_clouds = options.additional_clouds
# Check for the existance of the input and reference clouds
path_errors = False
if not os.path.exists(incloud_ground):
print('Error: ' + incloud_ground + ' does not exist!')
path_errors = True
if not os.path.exists(ref_cloud_ground):
print('Error: ' + ref_cloud_ground + ' does not exist!')
path_errors = True
if path_errors == True:
sys.exit()
# Create a temporary working directory`
working_dir = tempfile.mktemp()
if not os.path.exists(working_dir):
os.makedirs(working_dir)
# Create Surface files for the SFM ground point cloud in the temporary directory (using Fusion's GridSurfaceCreate program)
cmd = 'GridSurfaceCreate "' + working_dir + '/surf_sfm.dtm" ' + str(cellsize) + ' ' + fusion_parameters + ' "' + incloud_ground + '"'
print(cmd)
subprocess.call(cmd, shell=True)
# Convert from dtm format to asc format (so it can be read by gdal)
cmd = 'DTM2ASCII "' + working_dir + '/surf_sfm.dtm" "' + working_dir + '/surf_sfm.asc"'
print(cmd)
subprocess.call(cmd, shell=True)
# Assign georeferencing information using a gdal virtual raster layer
cmd = 'gdalbuildvrt -a_srs "EPSG:' + str(crs) + '" "' + working_dir + '/surf_sfm.vrt" "' + working_dir + '/surf_sfm.asc"'
print(cmd)
subprocess.call(cmd, shell=True)
# Open the dataset
inDs = gdal.Open(working_dir + '/surf_sfm.vrt')
if inDs is None:
print('Could not open ' + working_dir + '/surf_sfm.vrt')
sys.exit(1)
# Get raster characteristics
width = inDs.RasterXSize
height = inDs.RasterYSize
gt = inDs.GetGeoTransform()
ulx = gt[0]
lry = gt[3] + width*gt[4] + height*gt[5]
lrx = gt[0] + width*gt[1] + height*gt[2]
uly = gt[3]
dx = gt[1]
dy = -gt[5]
# Read the ground surface file
band = inDs.GetRasterBand(inDs.RasterCount)
sfm_z = band.ReadAsArray()
sfm_z[sfm_z == band.GetNoDataValue()] = np.nan
inDs = None
# Create Surface files for the reference point cloud in the temporary directory (using Fusion's GridSurfaceCreate program)
cmd = 'GridSurfaceCreate "' + working_dir + '/surf_lidar.dtm" ' + str(cellsize) + ' ' + fusion_parameters + ' "' + ref_cloud_ground + '"'
print(cmd)
subprocess.call(cmd, shell=True)
# Convert from dtm format to asc format (so it can be read by gdal)
cmd = 'DTM2ASCII "' + working_dir + '/surf_lidar.dtm" "' + working_dir + '/surf_lidar.asc"'
print(cmd)
subprocess.call(cmd, shell=True)
# Assign georeferencing information using a gdal virtual raster layer
te_str = str(ulx) + ' ' + str(lry) + ' ' + str(lrx) + ' ' + str(uly)
cmd = 'gdalbuildvrt -te ' + te_str + ' -a_srs "EPSG:' + str(crs) + '" "' + working_dir + '/surf_lidar.vrt" "' + working_dir + '/surf_lidar.asc"'
print(cmd)
subprocess.call(cmd, shell=True)
# Open the dataset
inDs2 = gdal.Open(working_dir + '/surf_lidar.vrt')
if inDs2 is None:
print('Could not open ' + working_dir + '/surf_lidar.vrt')
sys.exit(1)
# Read the reference cloud
band = inDs2.GetRasterBand(inDs2.RasterCount)
lidar_z = band.ReadAsArray()
lidar_z[lidar_z == band.GetNoDataValue()] = np.nan
inDs2 = None
# Figure out the average difference
diff = sfm_z - lidar_z
vcorr = np.nanmean(diff)
# Name the output file (depending on whether a suffix to be added)
if out_suffix == 'None':
outcloud = incloud_ground[:-4] + '.laz'
else:
outcloud = incloud_ground[:-4] + '_' + out_suffix + '.laz'
# Apply the vertical offset to the input point cloud (using Fusion's clipdata program)
cmd = 'clipdata /height /biaselev:' + str(-vcorr) + ' "' + incloud_ground + '" "' + outcloud + '" ' + str(ulx) + ' ' + str(lry) + ' ' + str(lrx) + ' ' + str(uly)
print(cmd)
subprocess.call(cmd, shell=True)
# Apply the same vertical offset to any additional point clouds
if additional_clouds != None:
AdditionalClouds = additional_clouds.split(',')
for incloud in AdditionalClouds:
# If necissary, apply a suffix to these additional clouds
if out_suffix == 'None':
outcloud = incloud[:-4] + '.laz'
else:
outcloud = incloud[:-4] + '_' + out_suffix + '.laz'
# Apply the vertical offset to each additional cloud
cmd = 'clipdata /height /biaselev:' + str(-vcorr) + ' "' + incloud + '" "' + outcloud + '" ' + str(ulx) + ' ' + str(lry) + ' ' + str(lrx) + ' ' + str(uly)
print(cmd)
subprocess.call(cmd, shell=True)
# Remove the temporary directory (including any orphaned files)
if os.path.exists(working_dir):
shutil.rmtree(working_dir) | 0 | 0 | 0 |
16d6b0504a61f587a159bffc9e40a94165c3807a | 13,916 | py | Python | tests/plugins/test_openid.py | taus-semmle/kinto | a4cd7c6413d1d7809fe02670c0224959390dc25d | [
"Apache-2.0"
] | null | null | null | tests/plugins/test_openid.py | taus-semmle/kinto | a4cd7c6413d1d7809fe02670c0224959390dc25d | [
"Apache-2.0"
] | null | null | null | tests/plugins/test_openid.py | taus-semmle/kinto | a4cd7c6413d1d7809fe02670c0224959390dc25d | [
"Apache-2.0"
] | null | null | null | import unittest
from unittest import mock
from kinto.core.testing import DummyRequest
from kinto.plugins.openid import OpenIDConnectPolicy
from kinto.plugins.openid.utils import fetch_openid_config
from .. import support
| 43.4875 | 99 | 0.659529 | import unittest
from unittest import mock
from kinto.core.testing import DummyRequest
from kinto.plugins.openid import OpenIDConnectPolicy
from kinto.plugins.openid.utils import fetch_openid_config
from .. import support
def get_openid_configuration(url):
base_url = url.replace("/.well-known/openid-configuration", "")
m = mock.Mock()
m.json.return_value = {
"issuer": "{base_url} issuer".format(base_url=base_url),
"authorization_endpoint": "{base_url}/authorize".format(base_url=base_url),
"userinfo_endpoint": "{base_url}/oauth/user".format(base_url=base_url),
"token_endpoint": "{base_url}/oauth/token".format(base_url=base_url),
}
return m
class OpenIDWebTest(support.BaseWebTest, unittest.TestCase):
@classmethod
def make_app(cls, *args, **kwargs):
with mock.patch("kinto.plugins.openid.requests.get") as get:
get.side_effect = get_openid_configuration
return super(OpenIDWebTest, cls).make_app(*args, **kwargs)
@classmethod
def get_app_settings(cls, extras=None):
settings = super().get_app_settings(extras)
openid_policy = "kinto.plugins.openid.OpenIDConnectPolicy"
settings["includes"] = "kinto.plugins.openid"
settings["multiauth.policies"] = "auth0 google"
settings["multiauth.policy.auth0.use"] = openid_policy
settings["multiauth.policy.auth0.issuer"] = "https://auth.mozilla.auth0.com"
settings["multiauth.policy.auth0.client_id"] = "abc"
settings["multiauth.policy.auth0.client_secret"] = "xyz"
settings["multiauth.policy.google.use"] = openid_policy
settings["multiauth.policy.google.issuer"] = "https://accounts.google.com"
settings["multiauth.policy.google.client_id"] = "123"
settings["multiauth.policy.google.client_secret"] = "789"
settings["multiauth.policy.google.userid_field"] = "email"
return settings
def test_openid_multiple_providers(self):
resp = self.app.get("/")
capabilities = resp.json["capabilities"]
providers = capabilities["openid"]["providers"]
assert len(providers) == 2
class OpenIDWithoutPolicyTest(support.BaseWebTest, unittest.TestCase):
@classmethod
def get_app_settings(cls, extras=None):
settings = super().get_app_settings(extras)
settings["includes"] = "kinto.plugins.openid"
return settings
def test_openid_capability_is_not_added(self):
resp = self.app.get("/")
capabilities = resp.json["capabilities"]
assert "openid" not in capabilities
class OpenIDOnePolicyTest(support.BaseWebTest, unittest.TestCase):
@classmethod
def get_app_settings(cls, extras=None):
settings = super().get_app_settings(extras)
openid_policy = "kinto.plugins.openid.OpenIDConnectPolicy"
settings["includes"] = "kinto.plugins.openid"
settings["multiauth.policies"] = "google"
settings["multiauth.policy.auth0.use"] = openid_policy
settings["multiauth.policy.auth0.issuer"] = "https://auth.mozilla.auth0.com"
settings["multiauth.policy.auth0.client_id"] = "abc"
settings["multiauth.policy.auth0.client_secret"] = "xyz"
settings["multiauth.policy.google.use"] = openid_policy
settings["multiauth.policy.google.issuer"] = "https://accounts.google.com"
settings["multiauth.policy.google.client_id"] = "123"
settings["multiauth.policy.google.client_secret"] = "789"
settings["multiauth.policy.google.userid_field"] = "email"
return settings
def test_openid_one_provider(self):
resp = self.app.get("/")
capabilities = resp.json["capabilities"]
providers = capabilities["openid"]["providers"]
assert len(providers) == 1
class HelloViewTest(OpenIDWebTest):
def test_openid_capability_if_enabled(self):
resp = self.app.get("/")
capabilities = resp.json["capabilities"]
assert "openid" in capabilities
assert len(capabilities["openid"]["providers"]) == 2
assert "userinfo_endpoint" in capabilities["openid"]["providers"][0]
assert "auth_path" in capabilities["openid"]["providers"][0]
def test_openid_in_openapi(self):
resp = self.app.get("/__api__")
assert "auth0" in resp.json["securityDefinitions"]
auth = resp.json["securityDefinitions"]["auth0"]["authorizationUrl"]
assert auth == "https://auth.mozilla.auth0.com/authorize"
class PolicyTest(unittest.TestCase):
def setUp(self):
mocked = mock.patch("kinto.plugins.openid.requests.get")
self.mocked_get = mocked.start()
self.addCleanup(mocked.stop)
self.policy = OpenIDConnectPolicy(issuer="https://idp", client_id="abc")
self.request = DummyRequest()
self.request.registry.cache.get.return_value = None
mocked = mock.patch.object(self.policy, "_verify_token")
self.verify = mocked.start()
self.addCleanup(mocked.stop)
self.verify.return_value = {"sub": "userid"}
def test_returns_none_if_no_authorization(self):
assert self.policy.unauthenticated_userid(self.request) is None
def test_returns_header_type_in_forget(self):
h = self.policy.forget(self.request)
assert "Bearer " in h[0][1]
def test_header_type_can_be_configured(self):
self.policy.header_type = "bearer+oidc"
h = self.policy.forget(self.request)
assert "bearer+oidc " in h[0][1]
def test_returns_none_if_no_authorization_prefix(self):
self.request.headers["Authorization"] = "avrbnnbrbr"
assert self.policy.unauthenticated_userid(self.request) is None
def test_returns_none_if_bad_prefix(self):
self.request.headers["Authorization"] = "Basic avrbnnbrbr"
assert self.policy.unauthenticated_userid(self.request) is None
def test_can_specify_only_opaque_access_token(self):
self.request.headers["Authorization"] = "Bearer xyz"
assert self.policy.unauthenticated_userid(self.request) == "userid"
self.verify.assert_called_with("xyz")
def test_returns_none_if_no_cache_and_invalid_access_token(self):
self.request.headers["Authorization"] = "Bearer xyz"
self.request.registry.cache.get.return_value = None
self.verify.return_value = None
assert self.policy.unauthenticated_userid(self.request) is None
assert not self.request.registry.cache.set.called
def test_payload_is_read_from_cache(self):
self.request.headers["Authorization"] = "Bearer xyz"
self.request.registry.cache.get.return_value = {"sub": "me"}
assert self.policy.unauthenticated_userid(self.request) == "me"
def test_payload_is_stored_in_cache(self):
self.request.headers["Authorization"] = "Bearer xyz"
assert self.policy.unauthenticated_userid(self.request) == "userid"
assert self.request.registry.cache.set.called
def test_payload_is_read_from_cache_but_differently_by_access_token(self):
# State to keep track of cache keys queried.
cache_keys_used = []
def mocked_cache_get(cache_key):
# This makes sure the same cache key is not used twice
assert cache_key not in cache_keys_used
cache_keys_used.append(cache_key)
if len(cache_keys_used) == 1:
return {"sub": "me"}
elif len(cache_keys_used) == 2:
return {"sub": "you"}
self.request.registry.cache.get.side_effect = mocked_cache_get
self.request.headers["Authorization"] = "Bearer xyz"
assert self.policy.unauthenticated_userid(self.request) == "me"
# Change the Authorization header the second time
self.request.headers["Authorization"] = "Bearer abc"
assert self.policy.unauthenticated_userid(self.request) == "you"
class VerifyTokenTest(unittest.TestCase):
@classmethod
def setUpClass(self):
# Populate OpenID config cache.
with mock.patch("kinto.plugins.openid.utils.requests.get") as m:
m.return_value.json.return_value = {
"userinfo_endpoint": "http://uinfo",
"jwks_uri": "https://jwks",
}
fetch_openid_config("https://fxa")
def setUp(self):
mocked = mock.patch("kinto.plugins.openid.requests.get")
self.mocked_get = mocked.start()
self.addCleanup(mocked.stop)
self.policy = OpenIDConnectPolicy(issuer="https://fxa", client_id="abc")
def test_fetches_userinfo_if_id_token_is_none(self):
self.mocked_get.return_value.json.side_effect = [{"sub": "me"}]
payload = self.policy._verify_token(access_token="abc")
assert payload["sub"] == "me"
def test_returns_none_if_fetching_userinfo_fails(self):
self.mocked_get.return_value.raise_for_status.side_effect = ValueError
payload = self.policy._verify_token(access_token="abc")
assert payload is None
class LoginViewTest(OpenIDWebTest):
def test_returns_400_if_parameters_are_missing_or_bad(self):
self.app.get("/openid/auth0/login", status=400)
self.app.get("/openid/auth0/login", params={"callback": "http://no-scope"}, status=400)
self.app.get(
"/openid/auth0/login", params={"callback": "bad", "scope": "openid"}, status=400
)
def test_returns_400_if_provider_is_unknown(self):
self.app.get("/openid/fxa/login", status=400)
def test_returns_400_if_email_is_not_in_scope_when_userid_field_is_email(self):
scope = "openid"
cb = "http://ui"
self.app.get("/openid/auth0/login", params={"callback": cb, "scope": scope}, status=307)
# See config above (email is userid field)
self.app.get("/openid/google/login", params={"callback": cb, "scope": scope}, status=400)
def test_returns_400_if_prompt_is_not_recognized(self):
scope = "openid"
cb = "http://ui"
self.app.get(
"/openid/auth0/login",
params={"callback": cb, "scope": scope, "prompt": "junk"},
status=400,
)
def test_redirects_to_the_identity_provider(self):
params = {"callback": "http://ui", "scope": "openid"}
resp = self.app.get("/openid/auth0/login", params=params, status=307)
location = resp.headers["Location"]
assert "auth0.com/authorize?" in location
assert "%2Fv1%2Fopenid%2Fauth0%2Ftoken" in location
assert "scope=openid" in location
assert "client_id=abc" in location
def test_redirects_to_the_identity_provider_with_prompt_none(self):
params = {"callback": "http://ui", "scope": "openid", "prompt": "none"}
resp = self.app.get("/openid/auth0/login", params=params, status=307)
location = resp.headers["Location"]
assert "auth0.com/authorize?" in location
assert "%2Fv1%2Fopenid%2Fauth0%2Ftoken" in location
assert "scope=openid" in location
assert "client_id=abc" in location
assert "prompt=none" in location
def test_callback_is_stored_in_cache(self):
params = {"callback": "http://ui", "scope": "openid"}
with mock.patch("kinto.plugins.openid.views.random_bytes_hex") as m:
m.return_value = "key"
self.app.get("/openid/auth0/login", params=params, status=307)
cached = self.app.app.registry.cache.get("openid:state:key")
assert cached == "http://ui"
class TokenViewTest(OpenIDWebTest):
def test_returns_400_if_parameters_are_missing_or_bad(self):
self.app.get("/openid/auth0/token", status=400)
self.app.get("/openid/auth0/token", params={"code": "abc"}, status=400)
self.app.get("/openid/auth0/token", params={"state": "abc"}, status=400)
def test_returns_400_if_provider_is_unknown(self):
self.app.get("/openid/fxa/token", status=400)
def test_returns_400_if_state_is_invalid(self):
self.app.get("/openid/auth0/token", params={"code": "abc", "state": "abc"}, status=400)
def test_code_is_traded_using_client_secret(self):
self.app.app.registry.cache.set("openid:state:key", "http://ui", ttl=100)
with mock.patch("kinto.plugins.openid.views.requests.post") as m:
m.return_value.text = '{"access_token": "token"}'
self.app.get("/openid/auth0/token", params={"code": "abc", "state": "key"})
m.assert_called_with(
"https://auth.mozilla.auth0.com/oauth/token",
data={
"code": "abc",
"client_id": "abc",
"client_secret": "xyz",
"redirect_uri": "http://localhost/v1/openid/auth0/token",
"grant_type": "authorization_code",
},
)
def test_state_cannot_be_reused(self):
self.app.app.registry.cache.set("openid:state:key", "http://ui", ttl=100)
with mock.patch("kinto.plugins.openid.views.requests.post") as m:
m.return_value.text = '{"access_token": "token"}'
self.app.get("/openid/auth0/token", params={"code": "abc", "state": "key"})
self.app.get("/openid/auth0/token", params={"code": "abc", "state": "key"}, status=400)
def test_redirects_to_callback_using_authorization_response(self):
self.app.app.registry.cache.set("openid:state:key", "http://ui/#token=", ttl=100)
with mock.patch("kinto.plugins.openid.views.requests.post") as m:
m.return_value.text = '{"access_token": "token"}'
resp = self.app.get(
"/openid/auth0/token", params={"code": "abc", "state": "key"}, status=307
)
location = resp.headers["Location"]
assert location == "http://ui/#token=eyJhY2Nlc3NfdG9rZW4iOiAidG9rZW4ifQ%3D%3D"
| 12,191 | 588 | 905 |
0d69c6f9e4c0b06d6484e7fbc6b6d1882eed3358 | 8,699 | py | Python | bcs-ui/backend/tests/helm/app/test_utils.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | 1 | 2021-11-16T08:15:13.000Z | 2021-11-16T08:15:13.000Z | bcs-ui/backend/tests/helm/app/test_utils.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | null | null | null | bcs-ui/backend/tests/helm/app/test_utils.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from backend.helm.app.utils import remove_updater_creator_from_manifest
FAKE_MANIFEST_YAML = """
apiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: test12-redis\n labels:\n app: bk-redis\n chart: bk-redis-0.1.29\n release: test12\n heritage: Helm\n io.tencent.paas.source_type: helm\n io.tencent.paas.projectid: xxx\n io.tencent.bcs.clusterid: BCS-K8S-00000\n io.tencent.bcs.namespace: test-tes123\n io.tencent.bcs.controller.type: Deployment\n io.tencent.bcs.controller.name: test12-redis\n annotations:\n io.tencent.paas.version: 0.1.29\n io.tencent.bcs.clusterid: BCS-K8S-00000\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: bk-redis\n release: test12\n template:\n metadata:\n labels:\n app: bk-redis\n release: test12\n app-name: test-db\n io.tencent.paas.source_type: helm\n io.tencent.paas.projectid: xxx\n io.tencent.bcs.clusterid: BCS-K8S-00000\n io.tencent.bcs.namespace: test-tes123\n io.tencent.bcs.controller.type: Deployment\n io.tencent.bcs.controller.name: test12-redis\n spec:\n containers:\n - name: bk-redis\n image: /paas/test/test:latest\n imagePullPolicy: IfNotPresent\n env:\n - name: test\n value: test\n - name: test\n value: test123\n - name: test\n value: ieod\n - name: test\n value: test\n - name: test\n value: \"80\"\n - name: test\n value: \"true\"\n - name: test\n value: test\n - name: io_tencent_bcs_namespace\n value: test-tes123\n - name: io_tencent_bcs_custom_labels\n value: '{}'\n command:\n - bash -c\n ports:\n - name: http\n containerPort: 80\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /\n port: http\n readinessProbe:\n httpGet:\n path: /\n port: http\n resources: {}\n imagePullSecrets:\n - name: paas.image.registry.test-tes123\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n name: test12-db-migrate\n labels:\n io.tencent.paas.source_type: helm\n io.tencent.paas.projectid: xxx\n io.tencent.bcs.clusterid: BCS-K8S-00000\n io.tencent.bcs.namespace: test-tes123\n io.tencent.bcs.controller.type: Job\n io.tencent.bcs.controller.name: test12-db-migrate\n annotations:\n io.tencent.paas.version: 0.1.29\n 
io.tencent.bcs.clusterid: BCS-K8S-00000\nspec:\n backoffLimit: 0\n template:\n metadata:\n name: test12\n labels:\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: test12\n helm.sh/chart: bk-redis-0.1.29\n io.tencent.paas.source_type: helm\n io.tencent.paas.projectid: xxx\n io.tencent.bcs.clusterid: BCS-K8S-00000\n io.tencent.bcs.namespace: test-tes123\n io.tencent.bcs.controller.type: Job\n io.tencent.bcs.controller.name: test12-db-migrate\n spec:\n restartPolicy: Never\n containers:\n - name: pre-install-job\n image: /paas/test/test:latest\n command:\n - /bin/bash\n - -c\n args:\n - python manage.py migrate\n env:\n - name: test\n value: test\n - name: test\n value: test\n - name: test\n value: test\n - name: test\n value: \"80\"\n - name: test\n value: \"true\"\n - name: test\n value: test\n - name: io_tencent_bcs_namespace\n value: test-tes123\n - name: io_tencent_bcs_custom_labels\n value: '{}'\n imagePullPolicy: Always\n imagePullSecrets:\n - name: paas.image.registry.test-tes123\n
""" # noqa
EXPECTED_MANIFEST_YAML = """
apiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: test12-redis\n labels:\n app: bk-redis\n chart: bk-redis-0.1.29\n release: test12\n heritage: Helm\n io.tencent.paas.source_type: helm\n io.tencent.paas.projectid: xxx\n io.tencent.bcs.clusterid: BCS-K8S-00000\n io.tencent.bcs.namespace: test-tes123\n io.tencent.bcs.controller.type: Deployment\n io.tencent.bcs.controller.name: test12-redis\n annotations:\n io.tencent.paas.version: 0.1.29\n io.tencent.bcs.clusterid: BCS-K8S-00000\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: bk-redis\n release: test12\n template:\n metadata:\n labels:\n app: bk-redis\n release: test12\n app-name: test-db\n io.tencent.paas.source_type: helm\n io.tencent.paas.projectid: xxx\n io.tencent.bcs.clusterid: BCS-K8S-00000\n io.tencent.bcs.namespace: test-tes123\n io.tencent.bcs.controller.type: Deployment\n io.tencent.bcs.controller.name: test12-redis\n spec:\n containers:\n - name: bk-redis\n image: /paas/test/test:latest\n imagePullPolicy: IfNotPresent\n env:\n - name: test\n value: test\n - name: test\n value: test123\n - name: test\n value: ieod\n - name: test\n value: test\n - name: test\n value: \"80\"\n - name: test\n value: \"true\"\n - name: test\n value: test\n - name: io_tencent_bcs_namespace\n value: test-tes123\n - name: io_tencent_bcs_custom_labels\n value: '{}'\n command:\n - bash -c\n ports:\n - name: http\n containerPort: 80\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /\n port: http\n readinessProbe:\n httpGet:\n path: /\n port: http\n resources: {}\n imagePullSecrets:\n - name: paas.image.registry.test-tes123\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n name: test12-db-migrate\n labels:\n io.tencent.paas.source_type: helm\n io.tencent.paas.projectid: xxx\n io.tencent.bcs.clusterid: BCS-K8S-00000\n io.tencent.bcs.namespace: test-tes123\n io.tencent.bcs.controller.type: Job\n io.tencent.bcs.controller.name: test12-db-migrate\n annotations:\n io.tencent.paas.version: 0.1.29\n 
io.tencent.bcs.clusterid: BCS-K8S-00000\nspec:\n backoffLimit: 0\n template:\n metadata:\n name: test12\n labels:\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: test12\n helm.sh/chart: bk-redis-0.1.29\n io.tencent.paas.source_type: helm\n io.tencent.paas.projectid: xxx\n io.tencent.bcs.clusterid: BCS-K8S-00000\n io.tencent.bcs.namespace: test-tes123\n io.tencent.bcs.controller.type: Job\n io.tencent.bcs.controller.name: test12-db-migrate\n spec:\n restartPolicy: Never\n containers:\n - name: pre-install-job\n image: /paas/test/test:latest\n command:\n - /bin/bash\n - -c\n args:\n - python manage.py migrate\n env:\n - name: test\n value: test\n - name: test\n value: test\n - name: test\n value: test\n - name: test\n value: \"80\"\n - name: test\n value: \"true\"\n - name: test\n value: test\n - name: io_tencent_bcs_namespace\n value: test-tes123\n - name: io_tencent_bcs_custom_labels\n value: '{}'\n imagePullPolicy: Always\n imagePullSecrets:\n - name: paas.image.registry.test-tes123\n
""" # noqa
| 271.84375 | 3,756 | 0.620071 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from backend.helm.app.utils import remove_updater_creator_from_manifest
FAKE_MANIFEST_YAML = """
apiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: test12-redis\n labels:\n app: bk-redis\n chart: bk-redis-0.1.29\n release: test12\n heritage: Helm\n io.tencent.paas.source_type: helm\n io.tencent.paas.projectid: xxx\n io.tencent.bcs.clusterid: BCS-K8S-00000\n io.tencent.bcs.namespace: test-tes123\n io.tencent.bcs.controller.type: Deployment\n io.tencent.bcs.controller.name: test12-redis\n annotations:\n io.tencent.paas.version: 0.1.29\n io.tencent.bcs.clusterid: BCS-K8S-00000\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: bk-redis\n release: test12\n template:\n metadata:\n labels:\n app: bk-redis\n release: test12\n app-name: test-db\n io.tencent.paas.source_type: helm\n io.tencent.paas.projectid: xxx\n io.tencent.bcs.clusterid: BCS-K8S-00000\n io.tencent.bcs.namespace: test-tes123\n io.tencent.bcs.controller.type: Deployment\n io.tencent.bcs.controller.name: test12-redis\n spec:\n containers:\n - name: bk-redis\n image: /paas/test/test:latest\n imagePullPolicy: IfNotPresent\n env:\n - name: test\n value: test\n - name: test\n value: test123\n - name: test\n value: ieod\n - name: test\n value: test\n - name: test\n value: \"80\"\n - name: test\n value: \"true\"\n - name: test\n value: test\n - name: io_tencent_bcs_namespace\n value: test-tes123\n - name: io_tencent_bcs_custom_labels\n value: '{}'\n command:\n - bash -c\n ports:\n - name: http\n containerPort: 80\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /\n port: http\n readinessProbe:\n httpGet:\n path: /\n port: http\n resources: {}\n imagePullSecrets:\n - name: paas.image.registry.test-tes123\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n name: test12-db-migrate\n labels:\n io.tencent.paas.source_type: helm\n io.tencent.paas.projectid: xxx\n io.tencent.bcs.clusterid: BCS-K8S-00000\n io.tencent.bcs.namespace: test-tes123\n io.tencent.bcs.controller.type: Job\n io.tencent.bcs.controller.name: test12-db-migrate\n annotations:\n io.tencent.paas.version: 0.1.29\n 
io.tencent.bcs.clusterid: BCS-K8S-00000\nspec:\n backoffLimit: 0\n template:\n metadata:\n name: test12\n labels:\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: test12\n helm.sh/chart: bk-redis-0.1.29\n io.tencent.paas.source_type: helm\n io.tencent.paas.projectid: xxx\n io.tencent.bcs.clusterid: BCS-K8S-00000\n io.tencent.bcs.namespace: test-tes123\n io.tencent.bcs.controller.type: Job\n io.tencent.bcs.controller.name: test12-db-migrate\n spec:\n restartPolicy: Never\n containers:\n - name: pre-install-job\n image: /paas/test/test:latest\n command:\n - /bin/bash\n - -c\n args:\n - python manage.py migrate\n env:\n - name: test\n value: test\n - name: test\n value: test\n - name: test\n value: test\n - name: test\n value: \"80\"\n - name: test\n value: \"true\"\n - name: test\n value: test\n - name: io_tencent_bcs_namespace\n value: test-tes123\n - name: io_tencent_bcs_custom_labels\n value: '{}'\n imagePullPolicy: Always\n imagePullSecrets:\n - name: paas.image.registry.test-tes123\n
""" # noqa
EXPECTED_MANIFEST_YAML = """
apiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: test12-redis\n labels:\n app: bk-redis\n chart: bk-redis-0.1.29\n release: test12\n heritage: Helm\n io.tencent.paas.source_type: helm\n io.tencent.paas.projectid: xxx\n io.tencent.bcs.clusterid: BCS-K8S-00000\n io.tencent.bcs.namespace: test-tes123\n io.tencent.bcs.controller.type: Deployment\n io.tencent.bcs.controller.name: test12-redis\n annotations:\n io.tencent.paas.version: 0.1.29\n io.tencent.bcs.clusterid: BCS-K8S-00000\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: bk-redis\n release: test12\n template:\n metadata:\n labels:\n app: bk-redis\n release: test12\n app-name: test-db\n io.tencent.paas.source_type: helm\n io.tencent.paas.projectid: xxx\n io.tencent.bcs.clusterid: BCS-K8S-00000\n io.tencent.bcs.namespace: test-tes123\n io.tencent.bcs.controller.type: Deployment\n io.tencent.bcs.controller.name: test12-redis\n spec:\n containers:\n - name: bk-redis\n image: /paas/test/test:latest\n imagePullPolicy: IfNotPresent\n env:\n - name: test\n value: test\n - name: test\n value: test123\n - name: test\n value: ieod\n - name: test\n value: test\n - name: test\n value: \"80\"\n - name: test\n value: \"true\"\n - name: test\n value: test\n - name: io_tencent_bcs_namespace\n value: test-tes123\n - name: io_tencent_bcs_custom_labels\n value: '{}'\n command:\n - bash -c\n ports:\n - name: http\n containerPort: 80\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /\n port: http\n readinessProbe:\n httpGet:\n path: /\n port: http\n resources: {}\n imagePullSecrets:\n - name: paas.image.registry.test-tes123\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n name: test12-db-migrate\n labels:\n io.tencent.paas.source_type: helm\n io.tencent.paas.projectid: xxx\n io.tencent.bcs.clusterid: BCS-K8S-00000\n io.tencent.bcs.namespace: test-tes123\n io.tencent.bcs.controller.type: Job\n io.tencent.bcs.controller.name: test12-db-migrate\n annotations:\n io.tencent.paas.version: 0.1.29\n 
io.tencent.bcs.clusterid: BCS-K8S-00000\nspec:\n backoffLimit: 0\n template:\n metadata:\n name: test12\n labels:\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: test12\n helm.sh/chart: bk-redis-0.1.29\n io.tencent.paas.source_type: helm\n io.tencent.paas.projectid: xxx\n io.tencent.bcs.clusterid: BCS-K8S-00000\n io.tencent.bcs.namespace: test-tes123\n io.tencent.bcs.controller.type: Job\n io.tencent.bcs.controller.name: test12-db-migrate\n spec:\n restartPolicy: Never\n containers:\n - name: pre-install-job\n image: /paas/test/test:latest\n command:\n - /bin/bash\n - -c\n args:\n - python manage.py migrate\n env:\n - name: test\n value: test\n - name: test\n value: test\n - name: test\n value: test\n - name: test\n value: \"80\"\n - name: test\n value: \"true\"\n - name: test\n value: test\n - name: io_tencent_bcs_namespace\n value: test-tes123\n - name: io_tencent_bcs_custom_labels\n value: '{}'\n imagePullPolicy: Always\n imagePullSecrets:\n - name: paas.image.registry.test-tes123\n
""" # noqa
def test_remove_updater_creator():
updater_creator = ["io.tencent.paas.updator: admin", "io.tencent.paas.creator: admin"]
mf = remove_updater_creator_from_manifest(FAKE_MANIFEST_YAML)
for key in updater_creator:
assert key not in mf
assert mf == EXPECTED_MANIFEST_YAML
| 271 | 0 | 23 |
2c04ea16448ad25374f173a5b3f0f6650c13fa5c | 112 | py | Python | lesson10/legb3.py | drednout/letspython | 9747442d63873b5f71e2c15ed5528bd98ad5ac31 | [
"BSD-2-Clause"
] | 1 | 2015-11-26T15:53:58.000Z | 2015-11-26T15:53:58.000Z | lesson10/legb3.py | drednout/letspython | 9747442d63873b5f71e2c15ed5528bd98ad5ac31 | [
"BSD-2-Clause"
] | null | null | null | lesson10/legb3.py | drednout/letspython | 9747442d63873b5f71e2c15ed5528bd98ad5ac31 | [
"BSD-2-Clause"
] | null | null | null | x = 1
f()
print("globally, x={}".format(x))
| 11.2 | 33 | 0.473214 | x = 1
def f():
global x
x = 2
print("in f, x={}".format(x))
f()
print("globally, x={}".format(x))
| 44 | 0 | 23 |
a3aa05e597d13f67acaa82a3abc3d35f823b65f0 | 518 | py | Python | CS305_Computer-Network/Lab4-socket/Echo-Server.py | Eveneko/SUSTech-Courses | 0420873110e91e8d13e6e85a974f1856e01d28d6 | [
"MIT"
] | 4 | 2020-11-11T11:56:57.000Z | 2021-03-11T10:05:09.000Z | CS305_Computer-Network/Lab4-socket/Echo-Server.py | Eveneko/SUSTech-Courses | 0420873110e91e8d13e6e85a974f1856e01d28d6 | [
"MIT"
] | null | null | null | CS305_Computer-Network/Lab4-socket/Echo-Server.py | Eveneko/SUSTech-Courses | 0420873110e91e8d13e6e85a974f1856e01d28d6 | [
"MIT"
] | 3 | 2021-01-07T04:14:11.000Z | 2021-04-27T13:41:36.000Z | import socket
if __name__ == "__main__":
try:
echo()
except KeyboardInterrupt:
exit() | 23.545455 | 60 | 0.5 | import socket
def echo():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('127.0.0.1', 5555))
sock.listen(10)
while True:
conn, address = sock.accept()
while True:
data = conn.recv(2048)
if data and data != b'exit':
conn.send(data)
print(data)
else:
conn.close()
break
if __name__ == "__main__":
try:
echo()
except KeyboardInterrupt:
exit() | 385 | 0 | 22 |
867169c5cb95c528d91ff9a8929f06c50a130b6c | 147 | py | Python | python/helpers/pydev/tests_pydevd_python/my_extensions/pydevd_plugins/extensions/__init__.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/helpers/pydev/pydevd_plugins/extensions/types/__init__.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/helpers/pydev/pydevd_plugins/extensions/types/__init__.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | try:
__import__('pkg_resources').declare_namespace(__name__)
except:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
| 24.5 | 59 | 0.761905 | try:
__import__('pkg_resources').declare_namespace(__name__)
except:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
| 0 | 0 | 0 |
ee70cd3ee753442147507e1cc07ddb76132eb92d | 2,056 | py | Python | Materials.py | manveti/shipgen | c008f1c4bb3be7c67d4634c458aa7dfac48df0b5 | [
"MIT"
] | 1 | 2022-03-15T00:11:09.000Z | 2022-03-15T00:11:09.000Z | Materials.py | hanok2/shipgen | c008f1c4bb3be7c67d4634c458aa7dfac48df0b5 | [
"MIT"
] | null | null | null | Materials.py | hanok2/shipgen | c008f1c4bb3be7c67d4634c458aa7dfac48df0b5 | [
"MIT"
] | 1 | 2022-03-15T00:11:10.000Z | 2022-03-15T00:11:10.000Z | import os.path
import ConfigFile
from Constants import *
BLOCK = 'block'
SLOPE = 'slope'
CORNER = 'corner'
EXTERIOR_DEFAULT = "Light Armor"
EXTERIOR_CONFIG = {
TYPE_SM: {
BLOCK: "1 2",
SLOPE: "1 2",
CORNER: "1 2",
},
TYPE_LG: {
BLOCK: "1 2",
SLOPE: "1 2",
CORNER: "1 2",
},
}
INTERIOR_DEFAULT = "Interior Wall"
INTERIOR_CONFIG = {
TYPE_LG: {
BLOCK: "1 1",
},
}
initialized = False
materials = {}
| 24.771084 | 107 | 0.664883 | import os.path
import ConfigFile
from Constants import *
BLOCK = 'block'
SLOPE = 'slope'
CORNER = 'corner'
EXTERIOR_DEFAULT = "Light Armor"
EXTERIOR_CONFIG = {
TYPE_SM: {
BLOCK: "1 2",
SLOPE: "1 2",
CORNER: "1 2",
},
TYPE_LG: {
BLOCK: "1 2",
SLOPE: "1 2",
CORNER: "1 2",
},
}
INTERIOR_DEFAULT = "Interior Wall"
INTERIOR_CONFIG = {
TYPE_LG: {
BLOCK: "1 1",
},
}
initialized = False
materials = {}
class Material:
def __init__(self, materialName, configDict):
self.name = materialName
self.mass = {}
self.toughness = {}
for size in SIZES:
if (size in configDict):
self.mass[size] = {}
self.toughness[size] = {}
for key in [BLOCK, SLOPE, CORNER]:
if (key in configDict[size]):
blockConfig = [float(x) for x in configDict[size].get(key, "").split() if x]
if (blockConfig):
if (size not in self.mass):
self.mass[size] = {}
self.toughness[size] = {}
self.mass[size][key] = blockConfig.pop(0)
if (blockConfig):
self.toughness[size][key] = blockConfig.pop(0)
elif (key in self.mass.get(size, {})):
self.toughness[size][key] = self.mass[size][key]
def init():
global initialized
if (initialized):
return
configPath = os.path.join(os.path.dirname(__file__), "data", "materials.cfg")
configDict = ConfigFile.readFile(configPath)
materials[EXTERIOR_DEFAULT] = Material(EXTERIOR_DEFAULT, EXTERIOR_CONFIG)
materials[INTERIOR_DEFAULT] = Material(INTERIOR_DEFAULT, INTERIOR_CONFIG)
for materialName in configDict.keys():
if (type(configDict[materialName]) != type({})):
continue
materials[materialName] = Material(materialName, configDict[materialName])
initialized = True
def toughestMaterial(m1, m2, size=TYPE_LG, block=BLOCK):
if ((m1 not in materials) or (size not in materials[m1].toughness)):
return m2
if ((m2 not in materials) or (size not in materials[m2].toughness)):
return m1
if (materials[m1].toughness.get(size, {}).get(block) >= materials[m2].toughness.get(size, {}).get(block)):
return m1
return m2
| 1,545 | -6 | 92 |
e7a3578c0980fbe822c28ac5ab0f35f4a6a6f1d2 | 4,703 | py | Python | src/cogs/unsplash.py | pure-cheekbones/hot-bot-pol-pot | 107082318659e402261bbccacaccc7a701c2d8ba | [
"MIT"
] | 3 | 2021-08-29T07:45:30.000Z | 2021-08-29T21:10:18.000Z | src/cogs/unsplash.py | pure-cheekbones/hot-bot-pol-pot | 107082318659e402261bbccacaccc7a701c2d8ba | [
"MIT"
] | null | null | null | src/cogs/unsplash.py | pure-cheekbones/hot-bot-pol-pot | 107082318659e402261bbccacaccc7a701c2d8ba | [
"MIT"
] | 2 | 2021-10-04T15:07:41.000Z | 2022-01-07T17:42:37.000Z | from os import environ
from typing import ContextManager, Optional, Union
from aiohttp import request
from discord.ext.commands.core import guild_only
from src.data_clusters.configuration.server_vars import server
from discord import Member, Reaction, Embed, channel, colour
from discord.ext.commands import Cog, bot, command, cooldown, BucketType
from DiscordUtils.Pagination import CustomEmbedPaginator
| 33.35461 | 124 | 0.575165 | from os import environ
from typing import ContextManager, Optional, Union
from aiohttp import request
from discord.ext.commands.core import guild_only
from src.data_clusters.configuration.server_vars import server
from discord import Member, Reaction, Embed, channel, colour
from discord.ext.commands import Cog, bot, command, cooldown, BucketType
from DiscordUtils.Pagination import CustomEmbedPaginator
async def pic_fetcher(
query, orientation: str, pic_count: Union[int, str], api_endpoint
):
fetcher_pics_list = list()
async with request(
"GET",
url=api_endpoint,
params={
"query": query,
"orientation": orientation,
"client_id": server["unsplash_access_key"],
"count": str(pic_count),
"content_filter": "low",
},
) as response:
if response.status in (401, 405, 404):
return None
elif response.status == 200: # ok-request
rtrvd_data = await response.json()
for pic_num in range(int(pic_count)):
fetcher_pics_list.append(
{
"color": int((rtrvd_data[pic_num]["color"])[1:], 16),
"resolution": f"{rtrvd_data[pic_num]['width']}x{rtrvd_data[pic_num]['height']}",
"description": rtrvd_data[pic_num]["description"],
"url_full": rtrvd_data[pic_num]["urls"]["full"],
"url_regular": rtrvd_data[pic_num]["urls"]["regular"],
"url_small": rtrvd_data[pic_num]["urls"]["small"],
"user_name": rtrvd_data[pic_num]["user"]["name"],
"user_link": rtrvd_data[pic_num]["user"]["links"]["html"],
}
)
return fetcher_pics_list
def wall_embeds(list_of_json: Optional[list]):
if not list_of_json:
return None
wall_embeds_list = list()
for wall in list_of_json:
wall_embed = Embed(
title=f"Unsplash Wallpapers",
colour=wall["color"],
description=f"{wall['description'] or 'No Description'}\n{wall['resolution']}",
)
wall_embed.set_author(
name=f"by {wall['user_name']}",
url=f"{wall['user_link']}",
icon_url=r"https://user-images.githubusercontent.com/5659117/53183813-c7a2f900-35da-11e9-8c41-b1e399dc3a6c.png",
)
wall_embed.set_image(url=wall["url_regular"])
wall_embed.add_field(
inline=False,
name="download",
value=f"[full]({wall['url_full']}) | [regular]({wall['url_regular']}) | [small]({wall['url_small']})",
)
wall_embed.set_footer(
text=f"{list_of_json.index(wall)+1} of {len(list_of_json)} wallpapers"
)
wall_embeds_list.append(wall_embed)
return wall_embeds_list
class UnsplashEmbeds:
def __init__(
self, query: str = "", orientation: str = "", pic_count: int = 30
) -> None:
self.query = query
self.orientation = orientation
self.pic_count = pic_count
self.api_endpoint = "https://api.unsplash.com/photos/random/"
async def pics(self):
self.pics_list = await pic_fetcher(
query=self.query,
orientation=self.orientation,
api_endpoint=self.api_endpoint,
pic_count=self.pic_count,
)
return self.pics_list
class Unsplash(Cog):
def __init__(self, bot) -> None:
self.bot = bot
@Cog.listener()
async def on_ready(self):
if not self.bot.ready:
self.bot.cogs_ready.ready_up("unsplash")
else:
print("unsplash cog loaded")
@command(
name="unsplash",
aliases=["wall", "wallpaper", "splash", "background"],
brief="fetch wallpapers from unsplash",
)
@guild_only()
@cooldown(rate=3, per=600, type=BucketType.user)
async def unsplash(self, ctx, *, search_term=""):
wallpapers = UnsplashEmbeds(query=search_term)
wall_list = await wallpapers.pics()
embed_list = wall_embeds(wall_list)
paginator = CustomEmbedPaginator(ctx, remove_reactions=True, timeout=180)
pag_reacts = [
("⏮️", "first"),
("⏪", "back"),
("🔐", "lock"),
("⏩", "next"),
("⏭️", "last"),
]
for emj, cmd in pag_reacts:
paginator.add_reaction(emoji=emj, command=cmd)
try:
await paginator.run(embed_list)
except:
await ctx.send("No results found.")
def setup(bot):
bot.add_cog(Unsplash(bot))
| 3,813 | 326 | 168 |
8fef6ae80ef2401159b86038263f12464f4fb621 | 269 | py | Python | nexusdash2/nexusdash/__init__.py | fmichalo/n9k-programmability | 3a359df5f048ea8c7695e47e9014ffdfe03835f4 | [
"Apache-2.0"
] | 2 | 2015-02-03T20:35:11.000Z | 2021-06-01T04:08:41.000Z | nexusdash2/nexusdash/__init__.py | fmichalo/n9k-programmability | 3a359df5f048ea8c7695e47e9014ffdfe03835f4 | [
"Apache-2.0"
] | null | null | null | nexusdash2/nexusdash/__init__.py | fmichalo/n9k-programmability | 3a359df5f048ea8c7695e47e9014ffdfe03835f4 | [
"Apache-2.0"
] | null | null | null | # http://docs.celeryproject.org/en/latest/django/first-steps-with-django.html
from __future__ import absolute_import
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import app as celery_app | 44.833333 | 78 | 0.791822 | # http://docs.celeryproject.org/en/latest/django/first-steps-with-django.html
from __future__ import absolute_import
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import app as celery_app | 0 | 0 | 0 |
f0116cd97e8d55449ce07c760c277edda7cd215f | 3,031 | py | Python | chatty_goose/cqr/ntr.py | jacklin64/chatty-goose | acb49fad6acdfb433aabc03a8be36f29bfcfc761 | [
"Apache-2.0"
] | 24 | 2021-03-08T09:53:59.000Z | 2022-03-17T06:47:06.000Z | chatty_goose/cqr/ntr.py | jacklin64/chatty-goose | acb49fad6acdfb433aabc03a8be36f29bfcfc761 | [
"Apache-2.0"
] | 10 | 2021-03-08T13:35:54.000Z | 2021-11-15T03:32:37.000Z | chatty_goose/cqr/ntr.py | jacklin64/chatty-goose | acb49fad6acdfb433aabc03a8be36f29bfcfc761 | [
"Apache-2.0"
] | 8 | 2021-03-03T00:37:18.000Z | 2021-08-01T00:50:47.000Z | import logging
import time
import torch
from typing import Optional
from chatty_goose.settings import NtrSettings
from spacy.lang.en import English
from transformers import T5ForConditionalGeneration, T5Tokenizer
from .cqr import ConversationalQueryRewriter
__all__ = ["Ntr"]
class Ntr(ConversationalQueryRewriter):
"""Neural Transfer Reformulation using a trained T5 model"""
| 33.677778 | 105 | 0.628835 | import logging
import time
import torch
from typing import Optional
from chatty_goose.settings import NtrSettings
from spacy.lang.en import English
from transformers import T5ForConditionalGeneration, T5Tokenizer
from .cqr import ConversationalQueryRewriter
__all__ = ["Ntr"]
class Ntr(ConversationalQueryRewriter):
"""Neural Transfer Reformulation using a trained T5 model"""
def __init__(self, settings: NtrSettings = NtrSettings(), device: str = None):
super().__init__("Ntr", verbose=settings.verbose)
# Model settings
self.max_length = settings.max_length
self.num_beams = settings.num_beams
self.early_stopping = settings.early_stopping
device = device or ("cuda" if torch.cuda.is_available() else "cpu")
self.device = torch.device(device)
if self.verbose:
logging.info(f"Initializing T5 using model {settings.model_name}...")
self.model = (
T5ForConditionalGeneration.from_pretrained(settings.model_name)
.to(device)
.eval()
)
self.tokenizer = T5Tokenizer.from_pretrained(settings.model_name)
self.nlp = English()
self.history_query = []
self.history = []
def rewrite(self, query: str, context: Optional[str] = None, response_num: Optional[int] = 0) -> str:
start_time = time.time()
# If the passage from canonical result (context) is provided, it is added to history.
# Since canonical passage can be large and there is limit on length of tokens,
# only one passage for the new query is used at a time.
# if len(self.history) >= 2 and self.has_canonical_context:
# self.history.pop(-2)
# self.has_canonical_context = False
self.history_query += [query]
self.history += [query]
# Build input sequence from query and history
if response_num!=0:
src_text = " ||| ".join(self.history_query[:-response_num] + self.history[-2*response_num:])
else:
src_text = " ||| ".join(self.history_query)
src_text = " ".join([tok.text for tok in self.nlp(src_text)])
input_ids = self.tokenizer(
src_text, return_tensors="pt", add_special_tokens=True
).input_ids.to(self.device)
# Generate new sequence
output_ids = self.model.generate(
input_ids,
max_length=self.max_length,
num_beams=self.num_beams,
early_stopping=self.early_stopping,
)
# Decode output
rewrite_text = self.tokenizer.decode(
output_ids[0, 0:],
clean_up_tokenization_spaces=True,
skip_special_tokens=True,
)
if context:
self.history += [context]
self.total_latency += time.time() - start_time
return rewrite_text
def reset_history(self):
super().reset_history()
self.history = []
self.history_query = []
| 2,564 | 0 | 81 |
72e17695fa6812a78b90659f0bbee5a5b14b0e64 | 9,962 | py | Python | Bot/tasks/im2txt.py | Frikallo/YAKbot | bc798fe4ead1f6a3e4828960ea77e2a8f07b5fdc | [
"MIT"
] | 1 | 2022-02-21T00:02:57.000Z | 2022-02-21T00:02:57.000Z | Bot/tasks/im2txt.py | Frikallo/YAKbot | bc798fe4ead1f6a3e4828960ea77e2a8f07b5fdc | [
"MIT"
] | 1 | 2022-01-12T19:41:39.000Z | 2022-01-14T03:56:56.000Z | Bot/tasks/im2txt.py | Frikallo/BATbot | bc798fe4ead1f6a3e4828960ea77e2a8f07b5fdc | [
"MIT"
] | null | null | null | import collections
import torch
import PIL
import pytorch_lightning as pl
import numpy as np
import torchvision.transforms.functional as F
import webdataset as wds
from pathlib import Path
from torchvision import transforms as T
from random import randint, choice
from torch.utils.data import DataLoader
from PIL import Image
from io import BytesIO
def web_dataset_helper(path):
"""
https://github.com/tgisaturday/dalle-lightning/blob/master/pl_dalle/loader.py
"""
if Path(path).is_dir():
DATASET = [
str(p) for p in Path(path).glob("**/*") if ".tar" in str(p).lower()
] # .name
assert (
len(DATASET) > 0
), "The directory ({}) does not contain any WebDataset/.tar files.".format(path)
print(
"Found {} WebDataset .tar(.gz) file(s) under given path {}!".format(
len(DATASET), path
)
)
elif ("http://" in path.lower()) | ("https://" in path.lower()):
DATASET = f"pipe:curl -L -s {path} || true"
print("Found {} http(s) link under given path!".format(len(DATASET), path))
elif "gs://" in path.lower():
DATASET = f"pipe:gsutil cat {path} || true"
print("Found {} GCS link under given path!".format(len(DATASET), path))
elif ".tar" in path:
DATASET = path
print("Found WebDataset .tar(.gz) file under given path {}!".format(path))
else:
raise Exception(
"No folder, no .tar(.gz) and no url pointing to tar files provided under {}.".format(
path
)
)
return DATASET
def build_table(
    x,
    perceiver,
    tokenize,
    indices,
    indices_data,
    device,
    knn,
    y=None,
    ctx=None,
    is_image=True,
    return_images=False,
):
    """Build one " || "-delimited retrieval prompt per input sample.

    Each embedding produced by ``perceiver.encode_image`` is L2-normalized and
    matched against every index in *indices*; the *knn* nearest entries from
    the paired *indices_data* are concatenated into one string per sample.
    When *y* is given its entries are appended; *return_images* additionally
    returns the normalized embeddings.  (``tokenize``/``device``/``ctx``/
    ``is_image`` are accepted for interface compatibility and unused here.)
    """
    rows = [" || " for _ in range(len(x))]
    x = perceiver.encode_image(x).float()
    x /= x.norm(dim=-1, keepdim=True)
    for index, captions in zip(indices, indices_data):
        # search() returns (distances, ids); keep the neighbor ids only.
        neighbor_ids = index.search(x.cpu().numpy(), knn)[1]
        for row_no in range(len(x)):
            for neighbor in neighbor_ids[row_no]:
                rows[row_no] += captions[neighbor] + " | "
    rows = [row[:-1] + "|| " for row in rows]
    if y:
        rows = [rows[row_no] + y[row_no] for row_no in range(len(x))]
    if return_images:
        return rows, x
    return rows
| 33.317726 | 97 | 0.56043 | import collections
import torch
import PIL
import pytorch_lightning as pl
import numpy as np
import torchvision.transforms.functional as F
import webdataset as wds
from pathlib import Path
from torchvision import transforms as T
from random import randint, choice
from torch.utils.data import DataLoader
from PIL import Image
from io import BytesIO
def web_dataset_helper(path):
    """
    Resolve *path* into a WebDataset source specification.

    Adapted from
    https://github.com/tgisaturday/dalle-lightning/blob/master/pl_dalle/loader.py

    Args:
        path: a directory containing .tar shards, an http(s):// or gs:// URL,
            or a direct path to a .tar(.gz) file.

    Returns:
        A list of shard paths (directory case) or a string / pipe command
        consumable by ``webdataset.WebDataset``.

    Raises:
        AssertionError: if a directory contains no .tar files.
        Exception: if *path* matches none of the supported forms.
    """
    if Path(path).is_dir():
        # Collect every .tar(.gz) shard below the directory.
        DATASET = [
            str(p) for p in Path(path).glob("**/*") if ".tar" in str(p).lower()
        ]
        assert (
            len(DATASET) > 0
        ), "The directory ({}) does not contain any WebDataset/.tar files.".format(path)
        print(
            "Found {} WebDataset .tar(.gz) file(s) under given path {}!".format(
                len(DATASET), path
            )
        )
    elif ("http://" in path.lower()) | ("https://" in path.lower()):
        # Stream the remote archive through curl.
        DATASET = f"pipe:curl -L -s {path} || true"
        # Fixed: the original printed len(DATASET) — the character count of the
        # pipe command string — into a one-placeholder format with two args.
        print("Found http(s) link under given path {}!".format(path))
    elif "gs://" in path.lower():
        # Stream from Google Cloud Storage via gsutil.
        DATASET = f"pipe:gsutil cat {path} || true"
        print("Found GCS link under given path {}!".format(path))
    elif ".tar" in path:
        DATASET = path
        print("Found WebDataset .tar(.gz) file under given path {}!".format(path))
    else:
        raise Exception(
            "No folder, no .tar(.gz) and no url pointing to tar files provided under {}.".format(
                path
            )
        )
    return DATASET
class Dataset(torch.utils.data.Dataset):
    """Folder-backed im2txt dataset: pairs ``<stem>.txt`` captions with
    ``<stem>.{png,jpg,jpeg,bmp}`` images sharing the same stem."""

    def __init__(self, folder: str, image_size=224, resize_ratio=0.75, transform=None):
        """
        Args:
            folder (str): Folder containing images and text files.
            image_size (int, optional): Accepted for interface compatibility;
                not used by this class (transforms are passed in pre-built).
            resize_ratio (float, optional): Minimum percentage of image
                contained by resize.
            transform: Callable applied to each PIL image.
        """
        super().__init__()
        root = Path(folder)
        captions = {p.stem: p for p in root.glob("**/*.txt")}
        images = {}
        for pattern in ("**/*.png", "**/*.jpg", "**/*.jpeg", "**/*.bmp"):
            for p in root.glob(pattern):
                images[p.stem] = p
        # Keep only stems that have both an image and a caption file.
        shared = captions.keys() & images.keys()
        self.keys = list(shared)
        self.text_files = {stem: p for stem, p in captions.items() if stem in shared}
        self.image_files = {stem: p for stem, p in images.items() if stem in shared}
        self.resize_ratio = resize_ratio
        self.image_transform = transform

    def __len__(self):
        """Number of (image, caption) pairs."""
        return len(self.keys)

    def sequential_sample(self, ind):
        """Return the next item, wrapping around at the end of the dataset."""
        next_ind = 0 if ind >= self.__len__() - 1 else ind + 1
        return self.__getitem__(next_ind)

    def skip_sample(self, ind):
        """Fallback used when a sample is unreadable: serve its successor."""
        return self.sequential_sample(ind=ind)

    def __getitem__(self, ind):
        """Return ``(transformed_image, random_caption_line)`` for index *ind*."""
        key = self.keys[ind]
        text_file = self.text_files[key]
        image_file = self.image_files[key]
        try:
            descriptions = text_file.read_text().split("\n")
        except UnicodeDecodeError:
            return self.skip_sample(ind)
        descriptions = [line for line in descriptions if len(line) > 0]
        try:
            # One caption file may hold several lines; sample one at random.
            description = choice(descriptions)
        except IndexError:
            print(f"An exception occurred trying to load file {text_file}.")
            print(f"Skipping index {ind}")
            return self.skip_sample(ind)
        try:
            image_tensor = self.image_transform(PIL.Image.open(image_file))
        except (PIL.UnidentifiedImageError, OSError):
            print(f"An exception occurred trying to load file {image_file}.")
            print(f"Skipping index {ind}")
            return self.skip_sample(ind)
        return image_tensor, description
class DataModule(pl.LightningDataModule):
    """Lightning data module for the im2txt task.

    Wraps either the folder-based ``Dataset`` or a WebDataset .tar pipeline.
    The Normalize mean/std constants below look like the CLIP preprocessing
    values — confirm against the upstream model.
    """
    def __init__(
        self,
        train_datadir,
        dev_datadir,
        batch_size=64,
        image_size=224,
        resize_ratio=0.75,
        web_dataset=False,
        wds_keys="img,cap",
        world_size=1,
        dataset_size=[int(1e9)],
        nworkers=0,
    ):
        # NOTE(review): mutable default for dataset_size — it is only read
        # here, so the shared list is harmless in practice.
        super().__init__()
        self.train_datadir = train_datadir
        self.dev_datadir = dev_datadir
        self.batch_size = batch_size
        self.image_size = image_size
        self.resize_ratio = resize_ratio
        self.web_dataset = web_dataset
        self.wds_keys = wds_keys
        self.world_size = world_size
        # A single size applies to both splits; two sizes split train/val.
        if len(dataset_size) == 1:
            self.train_dataset_size = dataset_size[0]
            self.val_dataset_size = dataset_size[0]
        else:
            self.train_dataset_size = dataset_size[0]
            self.val_dataset_size = dataset_size[1]
        self.nworkers = nworkers
        # Train transform: random resized crop with fixed 1:1 aspect ratio.
        self.transform_train = T.Compose(
            [
                T.Lambda(self.fix_img),
                T.RandomResizedCrop(
                    image_size, scale=(self.resize_ratio, 1.0), ratio=(1.0, 1.0)
                ),
                T.ToTensor(),
                T.Normalize(
                    (0.48145466, 0.4578275, 0.40821073),
                    (0.26862954, 0.26130258, 0.27577711),
                ),
            ]
        )
        # Val transform: deterministic resize + center crop.
        self.transform_val = T.Compose(
            [
                T.Resize(image_size, interpolation=T.InterpolationMode.BICUBIC),
                T.CenterCrop(image_size),
                T.Lambda(self.fix_img),
                T.ToTensor(),
                T.Normalize(
                    (0.48145466, 0.4578275, 0.40821073),
                    (0.26862954, 0.26130258, 0.27577711),
                ),
            ]
        )
    def imagetransform(self, b):
        """Decode raw image bytes from a WebDataset sample into a PIL image."""
        return Image.open(BytesIO(b))
    def decode(self, s):
        """Decode caption bytes and return one random non-empty line."""
        s = s.decode("utf-8")
        s = s.split("\n")
        s = list(filter(lambda t: len(t) > 0, s))
        return choice(s)
    def fix_img(self, img):
        """Force RGB mode so ToTensor/Normalize always see 3 channels."""
        return img.convert("RGB") if img.mode != "RGB" else img
    def setup(self, stage=None):
        """Build self.train / self.valid (WebDataset pipeline or folder Dataset)."""
        if self.web_dataset:
            DATASET_TRAIN = web_dataset_helper(self.train_datadir)
            DATASET_VAL = web_dataset_helper(self.dev_datadir)
            # wds_keys is "imgkey,captionkey" inside each .tar sample.
            myimg, mycap = (self.wds_keys.split(",")[0], self.wds_keys.split(",")[1])
            train_image_text_mapping = {myimg: self.imagetransform, mycap: self.decode}
            train_image_mapping = {myimg: self.transform_train}
            val_image_text_mapping = {myimg: self.imagetransform, mycap: self.decode}
            val_image_mapping = {myimg: self.transform_val}
            self.train = (
                wds.WebDataset(DATASET_TRAIN)
                .map_dict(**train_image_text_mapping)
                .map_dict(**train_image_mapping)
                .to_tuple(myimg, mycap)
                .batched(self.batch_size, partial=False)
            )
            self.valid = (
                wds.WebDataset(DATASET_VAL)
                .map_dict(**val_image_text_mapping)
                .map_dict(**val_image_mapping)
                .to_tuple(myimg, mycap)
                .batched(self.batch_size, partial=False)
            )
        else:
            self.train = Dataset(
                folder=self.train_datadir,
                transform=self.transform_train,
                image_size=self.image_size,
                resize_ratio=self.resize_ratio,
            )
            self.valid = Dataset(
                folder=self.dev_datadir,
                transform=self.transform_val,
                image_size=self.image_size,
                resize_ratio=self.resize_ratio,
            )
    def train_dataloader(self):
        """Train loader; for WebDataset, an endlessly-repeating WebLoader
        sliced to a fixed number of batches per epoch."""
        if self.web_dataset:
            dl = wds.WebLoader(self.train, batch_size=None, shuffle=False)
            # Batches were pre-formed by .batched(); compute epoch length.
            number_of_batches = self.train_dataset_size // (
                self.batch_size * self.world_size
            )
            dl = dl.repeat(9999999999).slice(number_of_batches)
            dl.length = number_of_batches
            return dl
        else:
            return DataLoader(
                self.train,
                batch_size=self.batch_size,
                shuffle=True,
                num_workers=self.nworkers,
                pin_memory=True,
            )
    def val_dataloader(self):
        """Validation loader (never shuffled)."""
        if self.web_dataset:
            dl = wds.WebLoader(self.valid, batch_size=None, shuffle=False)
            number_of_batches = self.val_dataset_size // (
                self.batch_size * self.world_size
            )
            dl = dl.repeat(9999999999).slice(number_of_batches)
            dl.length = number_of_batches
            return dl
        else:
            return DataLoader(
                self.valid,
                batch_size=self.batch_size,
                shuffle=False,
                num_workers=self.nworkers,
                pin_memory=True,
            )
def build_table(
    x,
    perceiver,
    tokenize,
    indices,
    indices_data,
    device,
    knn,
    y=None,
    ctx=None,
    is_image=True,
    return_images=False,
):
    """Build one " || "-delimited retrieval prompt per input sample.

    Each embedding produced by ``perceiver.encode_image`` is L2-normalized and
    matched against every index in *indices*; the *knn* nearest entries from
    the paired *indices_data* are concatenated into one string per sample.
    When *y* is given its entries are appended; *return_images* additionally
    returns the normalized embeddings.  (``tokenize``/``device``/``ctx``/
    ``is_image`` are accepted for interface compatibility and unused here.)
    """
    rows = [" || " for _ in range(len(x))]
    x = perceiver.encode_image(x).float()
    x /= x.norm(dim=-1, keepdim=True)
    for index, captions in zip(indices, indices_data):
        # search() returns (distances, ids); keep the neighbor ids only.
        neighbor_ids = index.search(x.cpu().numpy(), knn)[1]
        for row_no in range(len(x)):
            for neighbor in neighbor_ids[row_no]:
                rows[row_no] += captions[neighbor] + " | "
    rows = [row[:-1] + "|| " for row in rows]
    if y:
        rows = [rows[row_no] + y[row_no] for row_no in range(len(x))]
    if return_images:
        return rows, x
    return rows
| 6,028 | 1,309 | 234 |
edea7506d46522a734417e4f7b59ebf848d9ed33 | 5,111 | py | Python | python/londiste.py | Neroe4eDev/Londiste | 933dfb255736ef68a04c2c322f9887033e2adfd0 | [
"0BSD",
"Unlicense"
] | null | null | null | python/londiste.py | Neroe4eDev/Londiste | 933dfb255736ef68a04c2c322f9887033e2adfd0 | [
"0BSD",
"Unlicense"
] | null | null | null | python/londiste.py | Neroe4eDev/Londiste | 933dfb255736ef68a04c2c322f9887033e2adfd0 | [
"0BSD",
"Unlicense"
] | null | null | null | #! /usr/bin/env python
"""Londiste launcher.
"""
import sys, os, optparse, signal, skytools
# python 2.3 will try londiste.py first...
import sys, os.path
if os.path.exists(os.path.join(sys.path[0], 'londiste.py')) \
and not os.path.isdir(os.path.join(sys.path[0], 'londiste')):
del sys.path[0]
from londiste import *
__all__ = ['Londiste']
command_usage = """
%prog [options] INI CMD [subcmd args]
commands:
replay replay events to subscriber
provider install installs modules, creates queue
provider add TBL ... add table to queue
provider remove TBL ... remove table from queue
provider tables show all tables on provider
provider add-seq SEQ ... add sequence to provider
provider remove-seq SEQ ... remove sequence from provider
provider seqs show all sequences on provider
subscriber install installs schema
subscriber add TBL ... add table to subscriber
subscriber remove TBL ... remove table from subscriber
subscriber add-seq SEQ ... add table to subscriber
subscriber remove-seq SEQ ... remove table from subscriber
subscriber tables list tables subscriber has attached to
subscriber seqs list sequences subscriber is interested
subscriber missing list tables subscriber has not yet attached to
subscriber check compare table structure on both sides
subscriber resync TBL ... do full copy again
subscriber fkeys [pending|active] show fkeys on tables
subscriber triggers [pending|active] show triggers on tables
subscriber restore-triggers TBL [TGNAME ..] restore pending triggers
subscriber register register consumer on provider's queue
subscriber unregister unregister consumer on provider's queue
compare [TBL ...] compare table contents on both sides
repair [TBL ...] repair data on subscriber
copy [internal command - copy table logic]
"""
if __name__ == '__main__':
script = Londiste(sys.argv[1:])
script.start()
| 37.580882 | 82 | 0.599296 | #! /usr/bin/env python
"""Londiste launcher.
"""
import sys, os, optparse, signal, skytools
# python 2.3 will try londiste.py first...
import sys, os.path
if os.path.exists(os.path.join(sys.path[0], 'londiste.py')) \
and not os.path.isdir(os.path.join(sys.path[0], 'londiste')):
del sys.path[0]
from londiste import *
__all__ = ['Londiste']
command_usage = """
%prog [options] INI CMD [subcmd args]
commands:
replay replay events to subscriber
provider install installs modules, creates queue
provider add TBL ... add table to queue
provider remove TBL ... remove table from queue
provider tables show all tables on provider
provider add-seq SEQ ... add sequence to provider
provider remove-seq SEQ ... remove sequence from provider
provider seqs show all sequences on provider
subscriber install installs schema
subscriber add TBL ... add table to subscriber
subscriber remove TBL ... remove table from subscriber
subscriber add-seq SEQ ... add table to subscriber
subscriber remove-seq SEQ ... remove table from subscriber
subscriber tables list tables subscriber has attached to
subscriber seqs list sequences subscriber is interested
subscriber missing list tables subscriber has not yet attached to
subscriber check compare table structure on both sides
subscriber resync TBL ... do full copy again
subscriber fkeys [pending|active] show fkeys on tables
subscriber triggers [pending|active] show triggers on tables
subscriber restore-triggers TBL [TGNAME ..] restore pending triggers
subscriber register register consumer on provider's queue
subscriber unregister unregister consumer on provider's queue
compare [TBL ...] compare table contents on both sides
repair [TBL ...] repair data on subscriber
copy [internal command - copy table logic]
"""
class Londiste(skytools.DBScript):
def __init__(self, args):
skytools.DBScript.__init__(self, 'londiste', args)
if self.options.rewind or self.options.reset:
self.script = Replicator(args)
return
if len(self.args) < 2:
print "need command"
sys.exit(1)
cmd = self.args[1]
if cmd =="provider":
script = ProviderSetup(args)
elif cmd == "subscriber":
script = SubscriberSetup(args)
elif cmd == "replay":
method = self.cf.get('method', 'direct')
if method == 'direct':
script = Replicator(args)
elif method == 'file_write':
script = FileWrite(args)
elif method == 'file_write':
script = FileWrite(args)
else:
print "unknown method, quitting"
sys.exit(1)
elif cmd == "copy":
script = CopyTable(args)
elif cmd == "compare":
script = Comparator(args)
elif cmd == "repair":
script = Repairer(args)
elif cmd == "upgrade":
script = UpgradeV2(args)
else:
print "Unknown command '%s', use --help for help" % cmd
sys.exit(1)
self.script = script
def start(self):
self.script.start()
def init_optparse(self, parser=None):
p = skytools.DBScript.init_optparse(self, parser)
p.set_usage(command_usage.strip())
g = optparse.OptionGroup(p, "expert options")
g.add_option("--all", action="store_true",
help = "add: include all possible tables")
g.add_option("--force", action="store_true",
help = "add: ignore table differences, repair: ignore lag")
g.add_option("--expect-sync", action="store_true", dest="expect_sync",
help = "add: no copy needed", default=False)
g.add_option("--skip-truncate", action="store_true", dest="skip_truncate",
help = "add: keep old data", default=False)
g.add_option("--rewind", action="store_true",
help = "replay: sync queue pos with subscriber")
g.add_option("--reset", action="store_true",
help = "replay: forget queue pos on subscriber")
p.add_option_group(g)
return p
def send_signal(self, sig):
""" Londiste can launch other process for copy, so manages it here """
if sig in (signal.SIGTERM, signal.SIGINT):
# kill copy process if it exists before stopping
copy_pidfile = self.pidfile + ".copy"
if os.path.isfile(copy_pidfile):
self.log.info("Signaling running COPY first")
skytools.signal_pidfile(copy_pidfile, signal.SIGTERM)
# now resort to DBScript send_signal()
skytools.DBScript.send_signal(self, sig)
if __name__ == '__main__':
script = Londiste(sys.argv[1:])
script.start()
| 2,269 | 641 | 23 |
5e6773c308304439a03274941f1457d2c57822f4 | 12,241 | py | Python | InfotecsPython/script.py | JesusProfile/colloquium | 206b27c5ea553350ac7a7c5e1c3d9797add47be5 | [
"MIT"
] | null | null | null | InfotecsPython/script.py | JesusProfile/colloquium | 206b27c5ea553350ac7a7c5e1c3d9797add47be5 | [
"MIT"
] | null | null | null | InfotecsPython/script.py | JesusProfile/colloquium | 206b27c5ea553350ac7a7c5e1c3d9797add47be5 | [
"MIT"
] | null | null | null | # python3
import json
import socket
import sys
from email.parser import Parser
from functools import lru_cache
from urllib.parse import parse_qs, urlparse, urlencode, quote
import re # for regulars
MAX_LINE = 64 * 1024
MAX_HEADERS = 100
if __name__ == '__main__':
serv = Server()
try:
serv.serve_forever()
except KeyboardInterrupt:
pass
| 33.536986 | 97 | 0.535087 | # python3
import json
import socket
import sys
from email.parser import Parser
from functools import lru_cache
from urllib.parse import parse_qs, urlparse, urlencode, quote
import re # for regulars
MAX_LINE = 64 * 1024
MAX_HEADERS = 100
class Server:
    """Minimal single-threaded HTTP/1.1 server exposing the towns API
    backed by a geonames-style text file via ``workerTxt``."""

    def __init__(self):
        self._host = '127.0.0.1'
        self._port = 8000
        self._worker = workerTxt("RU.txt")

    def serve_forever(self):
        """Accept clients in an endless loop and serve each one sequentially."""
        serv_sock = socket.socket(
            socket.AF_INET,
            socket.SOCK_STREAM,
            proto=0)
        try:
            serv_sock.bind((self._host, self._port))
            serv_sock.listen()
            while True:
                conn, _ = serv_sock.accept()
                try:
                    self.serve_client(conn)
                except Exception as e:
                    # One bad client must not bring the server down.
                    print('Client serving failed', e)
        finally:
            serv_sock.close()

    def serve_client(self, conn):
        """Parse one request from *conn*, dispatch it and send the response."""
        print("Serving client")
        req = None  # Fixed: must exist for the cleanup below even when parsing fails.
        try:
            req = self.parse_request(conn)
            resp = self.handle_request(req)
            self.send_response(conn, resp)
        except ConnectionResetError:
            conn = None
        except Exception as e:
            self.send_error(conn, e)
        if conn:
            if req:
                req.rfile.close()
            conn.close()

    def parse_request(self, conn):
        """Read request line + headers from the socket and build a Request.

        Raises HTTPError(400) when Host is missing, HTTPError(404) when the
        Host does not match this server.
        """
        rfile = conn.makefile('rb')
        method, target, ver = self.parse_request_line(rfile)
        headers = self.parse_headers(rfile)
        host = headers.get('Host')
        if not host:
            raise HTTPError(400, 'Bad request',
                            'Host header is missing')
        if host not in (self._host,
                        f'{self._host}:{self._port}'):
            raise HTTPError(404, 'Not found')
        return Request(method, target, ver, headers, rfile)

    def parse_request_line(self, rfile):
        """Parse "METHOD TARGET VERSION"; only HTTP/1.1 is accepted."""
        raw = rfile.readline(MAX_LINE + 1)
        if len(raw) > MAX_LINE:
            raise HTTPError(400, 'Bad request',
                            'Request line is too long')
        req_line = str(raw, 'iso-8859-1')
        words = req_line.split()
        # Re-decode the target as UTF-8 so non-ASCII (e.g. Cyrillic) query
        # values survive; the rest of the line stays latin-1.
        words[1] = str(raw, 'utf-8').split()[1]
        if len(words) != 3:
            raise HTTPError(400, 'Bad request',
                            'Malformed request line')
        method, target, ver = words
        if ver != 'HTTP/1.1':
            raise HTTPError(505, 'HTTP Version Not Supported')
        return method, target, ver

    def parse_headers(self, rfile):
        """Read raw header lines up to the blank line and parse them."""
        headers = []
        while True:
            line = rfile.readline(MAX_LINE + 1)
            if len(line) > MAX_LINE:
                raise HTTPError(494, 'Request header too large')
            if line in (b'\r\n', b'\n', b''):
                break
            headers.append(line)
            if len(headers) > MAX_HEADERS:
                raise HTTPError(494, 'Too many headers')
        sheaders = b''.join(headers).decode('iso-8859-1')
        return Parser().parsestr(sheaders)

    def handle_request(self, req):
        """Route by (path, method); unknown routes get 404."""
        if req.path == '/towns' and req.method == 'GET':
            print("Got signal to handle_get_n_towns_from")
            return self.handle_get_n_towns_from(req)
        if req.path == '/towns/north' and req.method == 'GET':
            print("Got signal to handle_get_norther_town")
            return self.handle_get_norther_town(req)
        if req.path == '/towns/id' and req.method == 'GET':
            print("Got signal to handle_get_town_by_id")
            return self.handle_get_town_by_id(req)
        raise HTTPError(404, 'Not found')

    def send_response(self, conn, resp):
        """Serialize status line, headers and body onto the socket."""
        wfile = conn.makefile('wb')
        status_line = f'HTTP/1.1 {resp.status} {resp.reason}\r\n'
        wfile.write(status_line.encode('iso-8859-1'))
        if resp.headers:
            for (key, value) in resp.headers:
                header_line = f'{key}: {value}\r\n'
                wfile.write(header_line.encode('iso-8859-1'))
        wfile.write(b'\r\n')
        if resp.body:
            wfile.write(resp.body)
        wfile.flush()
        wfile.close()

    def send_error(self, conn, err):
        """Send *err* as an HTTP error response; non-HTTPErrors become 500."""
        try:
            status = err.status
            reason = err.reason
            body = (err.body or err.reason).encode('utf-8')
        except Exception:
            status = 500
            reason = b'Internal Server Error'
            body = b'Internal Server Error'
        resp = Response(status, reason,
                        [('Content-Length', len(body))],
                        body)
        self.send_response(conn, resp)

    def handle_get_town_by_id(self, req):
        """GET /towns/id?id=N — render the town line for the given id."""
        print("Start handle_get_town_by_id")
        accept = req.headers.get('Accept')
        if 'text/html' in accept:
            contentType = 'text/html; charset=utf-8'
            # Fixed: Request exposes .query; .query_ru does not exist and
            # raised AttributeError on every call.
            data = {'id': req.query['id'][0]}
            print("Data:\n", data)
            text = self._worker.get_town_by_id(int(data['id']))
            print(text)
            body = '<html><head></head><body>'
            body += f'#{text}'
            body += '</body></html>'
        else:
            print("Error in Accept")
            return Response(406, 'Not Acceptable')
        body = body.encode('utf-8')
        headers = [('Content-Type', contentType),
                   ('Content-Length', len(body))]
        print("End handle_get_town_by_id")
        return Response(200, 'OK', headers, body)

    def handle_get_n_towns_from(self, req):
        """GET /towns?id=N&n=K — list K towns starting at id N."""
        print("Start handle_get_n_towns_from")
        accept = req.headers.get('Accept')
        if 'text/html' in accept:
            contentType = 'text/html; charset=utf-8'
            data = {'id': req.query['id'][0],
                    'n': req.query['n'][0]}
            print("Data:\n", data)
            text = self._worker.get_n_towns_from(int(data['id']), int(data['n']))
            print(text)
            body = '<html><head></head><body>'
            body += f'<div>Города ({len(text)})</div>'
            body += '<ul>'
            for line in text:
                body += f'<li>#{line}</li>'
            body += '</ul>'
            body += '</body></html>'
        else:
            print("Error in Accept")
            return Response(406, 'Not Acceptable')
        body = body.encode('utf-8')
        headers = [('Content-Type', contentType),
                   ('Content-Length', len(body))]
        print("End handle_get_n_towns_from")
        return Response(200, 'OK', headers, body)

    def handle_get_norther_town(self, req):
        """GET /towns/north?first=A&second=B — compare two towns by latitude."""
        print("Start handle_get_norther_town")
        accept = req.headers.get('Accept')
        if 'text/html' in accept:
            contentType = 'text/html; charset=utf-8'
            print("Want to get data")
            data = {'first': req.query['first'][0],
                    'second': req.query['second'][0]}
            print("Data:\n", data)
            text = self._worker.get_norther_town(data['first'], data['second'])
            body = '<html><head></head><body>'
            # The worker returns a plain string on error, a dict on success.
            if isinstance(text, str):
                body += f'<div>{text}</div>'
            else:
                body += f'<div>{text["difference"]}</div>'
                body += '<ul>'
                body += f'<li>Север: {text["north"]}</li>'
                body += f'<li>Юг: {text["south"]}</li>'
                body += '</ul>'
            body += '</body></html>'
        else:
            print("Error in Accept")
            return Response(406, 'Not Acceptable')
        body = body.encode('utf-8')
        headers = [('Content-Type', contentType),
                   ('Content-Length', len(body))]
        print("End handle_get_norther_town")
        return Response(200, 'OK', headers, body)
class Request:
    """Parsed HTTP request: request line, headers and the body stream."""

    def __init__(self, method, target, version, headers, rfile):
        self.method = method
        self.target = target
        self.version = version
        self.headers = headers
        self.rfile = rfile

    @property
    def path(self):
        """URL path component of the request target."""
        return self.url.path

    @property
    def query(self):
        """Query string parsed into a dict of value lists (cached per instance)."""
        # Cache on the instance instead of lru_cache: lru_cache on a method
        # keys on `self` and keeps every Request alive for the cache lifetime.
        if not hasattr(self, '_query'):
            self._query = parse_qs(self.url.query)
        return self._query

    @property
    def url(self):
        """Parsed request target (cached per instance)."""
        if not hasattr(self, '_url'):
            self._url = urlparse(self.target)
        return self._url

    def body(self):
        """Read and return the request body, or None without Content-Length."""
        size = self.headers.get('Content-Length')
        if not size:
            return None
        # Fixed: header values are strings; file.read() needs an int count.
        return self.rfile.read(int(size))
class Response:
    """Container for an outgoing HTTP response.

    Attributes:
        status: numeric HTTP status code (e.g. 200).
        reason: reason phrase for the status line.
        headers: optional list of (name, value) pairs, written verbatim.
        body: optional response body as bytes.
    """
    def __init__(self, status, reason, headers=None, body=None):
        self.status = status
        self.reason = reason
        self.headers = headers
        self.body = body
class HTTPError(Exception):
    """Exception carrying an HTTP status/reason/body for Server.send_error()."""

    def __init__(self, status, reason, body=None):
        # Fixed: the original called bare `super()` — a no-op expression —
        # instead of actually initializing the Exception base class.
        super().__init__()
        self.status = status
        self.reason = reason
        self.body = body
class workerTxt:
    """Lookup helpers over a geonames-style tab-separated towns file.

    Column layout used here (geonames dump): [0] id, [3] comma-separated
    alternate names, [4] latitude, [14] population, [-2] timezone.
    """

    def __init__(self, filename):
        self.filename = filename

    @staticmethod
    def _population(info):
        """Population column as an int; 0 when empty or malformed."""
        try:
            return int(info[14])
        except (ValueError, IndexError):
            return 0

    def get_town_by_id(self, id):
        """Return the line for *id* without the id column, or "No such town"."""
        prefix = str(id) + '\t'
        with open(self.filename) as file:
            for line in file:
                if line.startswith(prefix):
                    # Fixed: the original used str.replace, which also removed
                    # later occurrences of "<id>\t" inside the same line.
                    return line[len(prefix):]
        return "No such town"

    def get_n_towns_from(self, id, count):
        """Return up to *count* town lines starting at town *id*, wrapping to
        the top of the file; "No such town" when *id* is absent."""
        prefix = str(id) + '\t'
        towns = []
        found = False
        with open(self.filename) as file:
            for line in file:
                if line.startswith(prefix):
                    found = True
                if found:
                    if len(towns) == count:
                        return towns
                    towns.append(re.sub(r'^\d*\t', '', line))
        if not found:
            return "No such town"
        # Wrap around: continue from the beginning of the file.
        with open(self.filename) as file:
            for line in file:
                if len(towns) == count:
                    return towns
                towns.append(re.sub(r'^\d*\t', '', line))
        # Fixed: the original fell off the end and returned None when the
        # file held fewer than *count* lines in total.
        return towns

    def get_n_towns(self, count):
        """Return the first *count* lines of the file (id column stripped)."""
        with open(self.filename) as file:
            return [re.sub(r'^\d*\t', '', file.readline()) for _ in range(count)]

    def get_norther_town(self, first, second):
        """Compare the towns named *first* and *second*: which lies further
        north, and whether their timezones differ.

        Names are matched as ",<name>" at the end of the alternate-names
        column; ambiguous names resolve to the most populous candidate.
        Returns a dict {"north", "south", "difference"} or an error string.
        """
        first_pretenders = []
        second_pretenders = []
        with open(self.filename) as file:
            for line in file:
                info = line.split('\t')
                # re.escape: match town names literally, not as regex syntax.
                if re.search("," + re.escape(first) + r"$", info[3]):
                    first_pretenders.append(info)
                if re.search("," + re.escape(second) + r"$", info[3]):
                    second_pretenders.append(info)
        if not first_pretenders and not second_pretenders:
            return "No such towns"
        if not first_pretenders:
            return "No such first town"
        if not second_pretenders:
            return "No such second town"
        # Ambiguity: keep the most populous candidate.  Fixed: the original
        # compared the population *strings* lexicographically.
        best_first = max(first_pretenders, key=self._population)
        best_second = max(second_pretenders, key=self._population)
        # Fixed: latitude was also compared as a string ("9.5" > "55.7").
        if float(best_first[4]) >= float(best_second[4]):
            north, south = best_first, best_second
        else:
            north, south = best_second, best_first
        difference = ("Нет временной разницы"
                      if best_first[-2] == best_second[-2]
                      else "Есть временная разница")
        return {
            "north": "\t".join(north[1:]),
            "south": "\t".join(south[1:]),
            "difference": difference,
        }
if __name__ == '__main__':
serv = Server()
try:
serv.serve_forever()
except KeyboardInterrupt:
pass
| 11,255 | 214 | 624 |
db7a2a590cbd3ccb3a4e00727109d45f722dcfc1 | 6,794 | py | Python | workloadsecurityconnector_aws.py | GeorgeDavis-TM/WorkloadSecurityConnector-AWS | 16294479307d563dea39c28d4805685dbc5d3abf | [
"MIT"
] | null | null | null | workloadsecurityconnector_aws.py | GeorgeDavis-TM/WorkloadSecurityConnector-AWS | 16294479307d563dea39c28d4805685dbc5d3abf | [
"MIT"
] | null | null | null | workloadsecurityconnector_aws.py | GeorgeDavis-TM/WorkloadSecurityConnector-AWS | 16294479307d563dea39c28d4805685dbc5d3abf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import json
import urllib3
import boto3
from botocore.exceptions import ClientError
from boto3 import Session
f = open("config.json", "r+")
configObj = json.loads(f.read())
f.close()
headers = {
"Content-Type": "application/json",
"api-secret-key": configObj["c1wsApiKey"],
"api-version": "v1"
}
if __name__ == "__main__":
main()
| 39.271676 | 352 | 0.61937 | #!/usr/bin/env python3
import json
import urllib3
import boto3
from botocore.exceptions import ClientError
from boto3 import Session
f = open("config.json", "r+")
configObj = json.loads(f.read())
f.close()
headers = {
"Content-Type": "application/json",
"api-secret-key": configObj["c1wsApiKey"],
"api-version": "v1"
}
def buildRequestBody():
    """Assemble the AWS-connector request payload from config.json values."""
    def _cfg(key, fallback):
        # Use the configured value when present, otherwise the fallback.
        return getConfigValue(key) if checkConfKeyExists(key) else fallback
    return {
        "displayName": _cfg("awsDisplayName", ""),
        "accountId": _cfg("awsAccountId", ""),
        "accountAlias": _cfg("awsDisplayName", ""),
        "useInstanceRole": _cfg("useInstanceRole", False),
        "workspacesEnabled": _cfg("workspacesEnabled", False),
    }
def selectConnectorOptions():
    """Print the connection-method menu and return the operator's raw choice."""
    menu = ("\n\t1. Use an Instance Role"
            "\n\t2. Use a Cross-Account Role"
            "\n\t3. Use Access and Secret Keys")
    print(menu)
    return input("\nChoose an option to connect your AWS Account - ")
def checkConfKeyExists(configKey):
    """Return True when *configKey* is present in the loaded config.json."""
    # Idiom: membership test on the dict itself, not on .keys().
    return configKey in configObj
def getConfigValue(configKey):
    """Return the config.json value for *configKey* (KeyError when absent)."""
    return configObj[configKey]
def createIAMUser():
    """Create the connector IAM user plus a read-only policy and attach it.

    Returns:
        The new user's name on success, False on any AWS ClientError.
    """
    try:
        iamClient = boto3.client('iam')
        # Dedicated user that Cloud One Workload Security authenticates as.
        iamResponse = iamClient.create_user(
            Path='/',
            UserName='CloudOneWorkloadSecurityConnectorUser',
            Tags=[
                {
                    'Key': 'Owner',
                    'Value': 'TrendMicro'
                },
                {
                    'Key': 'Product',
                    'Value': 'CloudOneWorkloadSecurity'
                },
                {
                    "Key": "Name",
                    "Value": "CloudOneWorkloadSecurityConnectorUser"
                }
            ]
        )
        # Inline JSON policy: EC2/WorkSpaces Describe* calls only (read-only).
        iamPolicyResponse = iamClient.create_policy(
            PolicyName='CloudOneWorkloadSecurityConnectorPolicy',
            Path='/',
            PolicyDocument='{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["ec2:DescribeInstances","ec2:DescribeImages","ec2:DescribeRegions","ec2:DescribeVpcs","ec2:DescribeSubnets","ec2:DescribeTags","workspaces:DescribeWorkspaces","workspaces:DescribeWorkspaceDirectories","workspaces:DescribeWorkspaceBundles"],"Resource":"*"}]}',
            Description='Policy for the AWS Connector for Trend Micro Cloud One Workload Security'
        )
        iamClient.attach_user_policy(
            UserName=iamResponse["User"]["UserName"],
            PolicyArn=iamPolicyResponse["Policy"]["Arn"]
        )
        return iamResponse["User"]["UserName"]
    except ClientError as err:
        print("\n\nError: " + str(err))
        print("\n\nExiting..\n\n")
        return False
def createAccessKeyForIAMUser(username):
    """Mint a fresh access/secret key pair for *username*.

    Returns:
        Tuple of (access_key_id, secret_access_key).
    """
    iamClient = boto3.client('iam')
    iamResponse = iamClient.create_access_key(
        UserName=username
    )
    return iamResponse["AccessKey"]["AccessKeyId"], iamResponse["AccessKey"]["SecretAccessKey"]
def getAwsAccessSecretKeys(data):
    """Interactively obtain AWS credentials and merge them into *data*.

    Offers three paths: create a fresh IAM user + key pair, reuse the local
    boto3 session credentials, or manual entry.  Returns the updated dict,
    or "" when no usable credentials were obtained.
    """
    accessKey = ""
    secretKey = ""
    print("\n\t1. Create a new AWS User Access Key and Secret credentials\n\t2. Use an existing credentials from the local workspace\n\t3. Manually enter an Access and Secret Key")
    option = input("\nChoose an option to get credentials for your AWS Account - ")
    if option == "1":
        # Provision a dedicated IAM user; False signals the creation failed.
        username = createIAMUser()
        if username:
            accessKey, secretKey = createAccessKeyForIAMUser(username)
    elif option == "2":
        print("\n\tChecking for aws credentials/config file in the current user directory, if it exists...")
        session = Session()
        credentials = session.get_credentials()
        # Credentials are refreshable, so accessing your access key / secret key
        # separately can lead to a race condition. Use this to get an actual matched
        # set.
        current_credentials = credentials.get_frozen_credentials()
        # I would not recommend actually printing these. Generally unsafe.
        accessKey = current_credentials.access_key
        secretKey = current_credentials.secret_key
        if accessKey and secretKey:
            print("\nLocal credentials accepted.")
    elif option == "3":
        accessKey = str(input("\n\tAWS Access Key : "))
        secretKey = str(input("\n\tAWS Secret Key : "))
    else:
        print("\n\nError: Invalid choice input")
    if accessKey and secretKey:
        data.update({"accessKey": accessKey})
        data.update({"secretKey": secretKey})
        return data
    else:
        return ""
def postAwsConnector(data):
    """POST the connector payload to the Workload Security API and report.

    Uses the module-level `headers` (API key) and `configObj["dsmHost"]`.
    """
    http = urllib3.PoolManager()
    r = http.request("POST", configObj["dsmHost"] + "/api/awsconnectors", headers=headers, body=json.dumps(data))
    if r.status == 200:
        print("\n\nSuccess: AWS Connector created.")
        print("\n\nExiting..\n\n")
    else:
        # Surface the raw API error payload for troubleshooting.
        print(str(r.data))
def main():
    """Interactive entry point: choose a connection method, build the
    payload, confirm Workspaces coverage, then create the connector."""
    print("\n\nCloud One Workload Security - AWS Connector Configurator tool\n==================================================================")
    data = buildRequestBody()
    option = selectConnectorOptions()
    if option == "1":
        # Instance-role path: the flag must be enabled in config.json
        # (or confirmed interactively here).
        if checkConfKeyExists("useInstanceRole"):
            if not getConfigValue("useInstanceRole"):
                confirmation = input("\nuseInstanceRole flag is set to false in config.json. Do you want to enable 'useInstanceRole'? [Y/n] - ")
                if confirmation.lower() == "y":
                    data.update({"useInstanceRole": True})
                else:
                    data = None
        else:
            print("\nNo 'useInstanceRole' flag mentioned in config.json")
            data = None
    elif option == "2":
        # Cross-account role path: requires the role ARN in config.json.
        if checkConfKeyExists("crossAccountRoleArn"):
            data.update({"crossAccountRoleArn": getConfigValue("crossAccountRoleArn")})
        else:
            print("\nNo Cross-Account Access Role ARN mentioned in config.json")
            data = None
    elif option == "3":
        # Access/secret key path; returns "" when no credentials obtained.
        data = getAwsAccessSecretKeys(data)
    else:
        print("\n\nInvalid choice. Try again.")
        print("\n\nExiting..\n\n")
    if data:
        if not data["workspacesEnabled"]:
            confirmation = input("\nAre you sure to proceed without connecting your AWS Workspaces to this connector? [Y/n] - ")
            if confirmation.lower() == "n":
                data["workspacesEnabled"] = True
            else:
                print("\nSkipping AWS Workspaces...")
        postAwsConnector(data)
    else:
        print("\n\nError: Missing or incorrect data parameters used for the tool.")
        print("\n\nExiting..\n\n")
print("\n\nExiting..\n\n")
if __name__ == "__main__":
main()
| 6,214 | 0 | 207 |
e5d1d3f73d7dc19875fc6ba4e0d15a2d3d40974b | 805 | py | Python | utils/audio/pc_text_to_voice.py | westoun/moneypenny | a6b4904e369a14b71a6fddab0bf2d5180229291b | [
"MIT"
] | 1 | 2020-09-14T18:15:32.000Z | 2020-09-14T18:15:32.000Z | utils/audio/pc_text_to_voice.py | westoun/moneypenny | a6b4904e369a14b71a6fddab0bf2d5180229291b | [
"MIT"
] | null | null | null | utils/audio/pc_text_to_voice.py | westoun/moneypenny | a6b4904e369a14b71a6fddab0bf2d5180229291b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Note: This file became necessary as a workaround
# since pyttsx3 is not thread-safe.
# See https://github.com/nateshmbhat/pyttsx3/issues/8
# for further details.
import pyttsx3
# id: 3 is french, 4 is german, 10 & 28 is english
import sys
text = str(sys.argv[1])
try:
language = str(sys.argv[2])
except:
language = "en"
if language == "de":
language_code = 4
elif language == "fr":
language_code = 3
else:
language_code = 10
engine = init_engine(language_code)
say(text)
| 19.166667 | 57 | 0.684472 | #!/usr/bin/env python3
# Note: This file became necessary as a workaround
# since pyttsx3 is not thread-safe.
# See https://github.com/nateshmbhat/pyttsx3/issues/8
# for further details.
import pyttsx3
# id: 3 is french, 4 is german, 10 & 28 is english
import sys
def init_engine(language_code):
    """Create and configure a pyttsx3 TTS engine.

    Args:
        language_code: index into the installed voice list
            (per the header comment: 3 is French, 4 is German, 10/28 English).

    Returns:
        The configured pyttsx3 engine instance.
    """
    engine = pyttsx3.init()
    # 180 words per minute speaking rate.
    engine.setProperty('rate', 180)
    voices = engine.getProperty('voices')
    engine.setProperty('voice', voices[language_code].id)
    return engine
def say(s):
    """Speak string *s* using the module-level ``engine`` (created below).

    NOTE: relies on the global ``engine`` assigned at module scope after
    this definition; calling ``say`` before that assignment raises NameError.
    """
    engine.say(s)
    engine.runAndWait() # blocks
text = str(sys.argv[1])
try:
language = str(sys.argv[2])
except:
language = "en"
if language == "de":
language_code = 4
elif language == "fr":
language_code = 3
else:
language_code = 10
engine = init_engine(language_code)
say(text)
| 234 | 0 | 46 |
bbfab1f3930c8cbd9b09e2c4f5e3ad08d28fbeb1 | 1,018 | py | Python | sso/user/migrations/0006_emailaddress.py | uktrade/staff-sso | c23da74415befdaed60649a9a940b1ba8331581e | [
"MIT"
] | 7 | 2018-07-30T16:18:52.000Z | 2022-03-21T12:58:20.000Z | sso/user/migrations/0006_emailaddress.py | uktrade/staff-sso | c23da74415befdaed60649a9a940b1ba8331581e | [
"MIT"
] | 55 | 2017-06-26T12:49:01.000Z | 2022-03-09T15:48:49.000Z | sso/user/migrations/0006_emailaddress.py | uktrade/staff-sso | c23da74415befdaed60649a9a940b1ba8331581e | [
"MIT"
] | 1 | 2020-05-28T07:17:26.000Z | 2020-05-28T07:17:26.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-11-22 17:13
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
| 27.513514 | 95 | 0.465619 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-11-22 17:13
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: create the EmailAddress model.

    Adds an ``EmailAddress`` table with a unique email column and a
    cascading FK to ``user.User`` (reverse accessor ``user.emails``).
    """

    dependencies = [
        ("user", "0005_auto_20171106_1515"),
    ]
    operations = [
        migrations.CreateModel(
            name="EmailAddress",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
                    ),
                ),
                # Unique: one row per email address across all users.
                ("email", models.EmailField(max_length=254, unique=True)),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="emails",
                        to="user.User",
                    ),
                ),
            ],
        ),
    ]
| 0 | 806 | 23 |
3e63de3cca4cc234c42f209a7094758193f8b21c | 4,128 | py | Python | tweets/api/views.py | Hassanzadeh-sd/tweetme | a25db991694ed1c44c76fbb1880fdc4837ea320b | [
"MIT"
] | 12 | 2019-05-24T11:06:17.000Z | 2021-05-11T15:57:52.000Z | tweets/api/views.py | Hassanzadeh-sd/tweetme | a25db991694ed1c44c76fbb1880fdc4837ea320b | [
"MIT"
] | 10 | 2020-02-12T00:20:48.000Z | 2022-03-11T23:48:26.000Z | tweets/api/views.py | Hassanzadeh-sd/tweetme | a25db991694ed1c44c76fbb1880fdc4837ea320b | [
"MIT"
] | null | null | null | from rest_framework import generics
from .serializers import TweetModelSerializer
from ..models import Tweet
from django.db.models import Q
from rest_framework import permissions
from .pagination import TweetsSetPagination
from rest_framework.views import APIView
from rest_framework.response import Response | 38.579439 | 95 | 0.659641 | from rest_framework import generics
from .serializers import TweetModelSerializer
from ..models import Tweet
from django.db.models import Q
from rest_framework import permissions
from .pagination import TweetsSetPagination
from rest_framework.views import APIView
from rest_framework.response import Response
class SearchTweetAPIView(generics.ListAPIView):
    """List tweets, newest first, optionally filtered by ``?q=`` substring
    match against tweet content or author username. Auth required."""
    queryset = Tweet.objects.all().order_by("-timestamp")
    serializer_class = TweetModelSerializer
    pagination_class = TweetsSetPagination
    permission_classes = [permissions.IsAuthenticated]
    def get_queryset(self, *args, **kwargs):
        # Start from the class-level queryset; narrow only when ?q= is present.
        qs = self.queryset
        query = self.request.GET.get("q",None)
        if query is not None:
            qs = qs.filter(
                Q(content__contains=query) |
                Q(user__username__contains=query)
            )
        return qs
    def get_serializer_context(self , *args, **kwargs):
        # Expose the request to the serializer (e.g. for request-aware fields).
        context = super(SearchTweetAPIView, self).get_serializer_context()
        context['request'] = self.request
        return context
class TweetListAPIView(generics.ListAPIView):
    """Timeline endpoint.

    With a ``username`` URL kwarg: that user's tweets. Without: tweets of
    accounts the requester follows plus their own. ``?q=`` further filters
    by content/username substring. Auth required.
    """
    serializer_class = TweetModelSerializer
    pagination_class = TweetsSetPagination
    permission_classes = [permissions.IsAuthenticated]
    def get_queryset(self, *args, **kwargs):
        requested_user = self.kwargs.get('username')
        if (requested_user):
            qs = Tweet.objects.filter(user__username=requested_user).order_by('-timestamp')
        else:
            # Home feed: followed users' tweets + own tweets.
            im_following = self.request.user.profile.get_following()
            qs1 = Tweet.objects.filter(user__in=im_following).order_by('-timestamp')
            qs2 = Tweet.objects.filter(user=self.request.user)
            qs = (qs1 | qs2)
        query = self.request.GET.get("q",None)
        if query is not None:
            qs = qs.filter(
                Q(content__contains=query) |
                Q(user__username__contains=query)
            )
        return qs
    def get_serializer_context(self , *args, **kwargs):
        # Expose the request to the serializer (e.g. for request-aware fields).
        context = super(TweetListAPIView, self).get_serializer_context()
        context['request'] = self.request
        return context
class TweetCreateAPIView(generics.CreateAPIView):
    """Create a tweet; the author is forced to the authenticated requester."""
    serializer_class = TweetModelSerializer
    permission_classes = [permissions.IsAuthenticated]
    def perform_create(self ,serializer):
        # Ignore any client-supplied user; stamp the request user as author.
        serializer.save(user=self.request.user)
class TweetDetailAPIView(generics.ListAPIView):
    """Return a tweet (by ``pk``) together with its children (replies/retweets),
    parent first. Open to anonymous users."""
    serializer_class = TweetModelSerializer
    permission_classes = [permissions.AllowAny]
    pagination_class = TweetsSetPagination
    def get_queryset(self, *args, **kwargs):
        tweet_id = self.kwargs.get('pk')
        qs = Tweet.objects.filter(pk=tweet_id)
        if qs.exists() and qs.count() ==1:
            parent_obj = qs.first()
            qs1 = parent_obj.get_children()
            # parent_id_null sorts the parent (NULL parent_id) ahead of children.
            qs = (qs | qs1).distinct().extra(select={"parent_id_null":"parent_id IS NOT NULL"})
        return qs.order_by("parent_id_null",'-timestamp')
class RetweetAPIView(APIView):
    """GET /<pk>/retweet: retweet the given tweet as the requester.

    Returns the new tweet's data, or 400 when the tweet is missing or the
    manager refuses the retweet (same tweet on the same day).
    """
    permission_classes = (permissions.IsAuthenticated,)
    def get(self, request, pk, format=None):
        tweet_qs = Tweet.objects.filter(pk=pk)
        message = "Not allowed"
        if (tweet_qs.exists() and tweet_qs.count() == 1):
            new_tweet = Tweet.objects.retweet(request.user, tweet_qs.first())
            print(new_tweet)
            # Manager returns the original tweet when the retweet is rejected.
            if (new_tweet != tweet_qs.first()):
                data = TweetModelSerializer(new_tweet).data
                return Response(data)
            message = "Cannot Retweet the same on day"
        return Response({"message": message}, status=400)
class LikeAPIView(APIView):
    """GET /<pk>/like: toggle the requester's like on the given tweet.

    Returns ``{"liked": bool}`` with the new state, or 400 if not found.
    """
    permission_classes = (permissions.IsAuthenticated,)
    def get(self, request, pk, format=None):
        tweet_qs = Tweet.objects.filter(pk=pk)
        message = "Not allowed"
        if (tweet_qs.exists() and tweet_qs.count() == 1):
            is_like = Tweet.objects.liketoggle(request.user,tweet_qs.first())
            return Response({'liked':is_like})
        return Response({"message": message}, status=400)
8f29d2f8083c474fd5766686a4e02cdca211bd6f | 6,689 | py | Python | Automatic Transfer Scripts/LexicalChange.py | lvyiwei1/StylePTB | 42c80a07b999501d741f0c5e71481e627b758e3c | [
"CC-BY-4.0"
] | 36 | 2021-04-13T06:56:44.000Z | 2022-03-23T16:35:09.000Z | Automatic Transfer Scripts/LexicalChange.py | lvyiwei1/StylePTB | 42c80a07b999501d741f0c5e71481e627b758e3c | [
"CC-BY-4.0"
] | 3 | 2021-04-17T08:54:07.000Z | 2022-03-12T21:35:50.000Z | Automatic Transfer Scripts/LexicalChange.py | lvyiwei1/StylePTB | 42c80a07b999501d741f0c5e71481e627b758e3c | [
"CC-BY-4.0"
] | 3 | 2021-05-17T11:24:25.000Z | 2022-03-10T07:52:40.000Z | from nltk.corpus import treebank
from nltk.tree import Tree
from nltk.stem import WordNetLemmatizer
wnl = WordNetLemmatizer()
from nltk.corpus import wordnet as wn
import VerbMorph
import PTBdata
import pickle
#selects first synonym
import create2koriginal
import copy
if __name__ == "__main__":
f = open('../../dictionaries/synonym.dict','rb')
dict = pickle.load(f)
"""
for file in treebank.fileids():
for i in treebank.parsed_sents(file):
print(i)
ADJReplacement(i,dict)
print(i)
count += 1
if count == 5:
break
if count == 5:
break
"""
trees = PTBdata.getalltrees('ptb-train.txt')
trees.extend(PTBdata.getalltrees('ptb-test.txt'))
trees.extend(PTBdata.getalltrees('ptb-valid.txt'))
freqdict=makeFrequency(trees)
print(freqdictselector(dict['angry'],freqdict,1))
count=0
for tree in trees:
if len(tree.leaves()) < 5 or len(tree.leaves()) > 12 or not tree.label()[0] == 'S':
continue
j = copy.deepcopy(tree)
FrequencySynonymReplacement(j,dict,freqdict,1)
if tree.leaves()==j.leaves():
continue
count += 1
if count < 30:
pass
#print(tree.leaves())
#print(j.leaves())
print(count)
| 33.782828 | 115 | 0.517417 | from nltk.corpus import treebank
from nltk.tree import Tree
from nltk.stem import WordNetLemmatizer
wnl = WordNetLemmatizer()
from nltk.corpus import wordnet as wn
import VerbMorph
import PTBdata
import pickle
#selects first synonym
def defaultselectfunc(l):
    """Default synonym picker.

    Returns the second candidate when the list has more than one element
    (skipping the head entry), otherwise the sole/first element.
    """
    return l[1] if len(l) > 1 else l[0]
def checkpos(word,pos):
    """Return True if *word*'s primary WordNet synset has part-of-speech *pos*.

    *pos* is a WordNet POS tag ('n', 'v', 'a', ...). Words with no synsets
    return False. Only the first (most common) synset is consulted.
    """
    syn = wn.synsets(word)
    if len(syn) == 0:
        return False
    tmp = syn[0].pos()
    return tmp == pos
def ADJReplacement(tree,dict,selectfunc=defaultselectfunc, limit = 3):
    """Replace up to *limit* adjectives (JJ leaves) in *tree*, in place,
    with a synonym chosen by *selectfunc* from the synonym dictionary *dict*.

    Recurses depth-first; returns the remaining replacement budget.
    NOTE: the parameter name ``dict`` shadows the builtin (kept for API compat).
    """
    if limit == 0:
        return 0
    if isinstance(tree,Tree):
        for i in tree:
            if isinstance(i,Tree) and i.label() in ['JJ']:
                lemma = wnl.lemmatize(i[0])
                replace = lemma
                if lemma in dict and len(dict[lemma])>0:
                    replace = selectfunc(dict[lemma])
                # Only substitute when the candidate is really an adjective
                # and actually differs from the original token.
                if checkpos(replace,'a') and not i[0]==replace:
                    limit -= 1
                    i[0]=replace
            else:
                limit = ADJReplacement(i,dict,selectfunc,limit)
        return limit
    else:
        return limit
def VerbReplacement(tree,dict,selectfunc=defaultselectfunc, limit = 3):
    """Replace up to *limit* verbs (VB/VBZ/VBD/VBN leaves) in *tree*, in place,
    with a synonym chosen by *selectfunc*, re-inflected to the original tense.

    Auxiliaries ('have'/'has'/'had') and the lemma 'be' are never replaced.
    Recurses depth-first; returns the remaining replacement budget.
    NOTE: the parameter name ``dict`` shadows the builtin (kept for API compat).
    """
    if limit == 0:
        return 0
    if isinstance(tree,Tree):
        for i in tree:
            if isinstance(i,Tree) and i.label() in ['VB','VBZ','VBD','VBN'] and i[0] not in ['have','has','had']:
                lemma = wnl.lemmatize(i[0])
                replace = lemma
                if lemma in dict and not lemma == 'be' and len(dict[lemma])>0:
                    replace = selectfunc(dict[lemma])
                if checkpos(replace,'v') and not i[0]==replace:
                    # Re-apply the POS tag's morphology to the chosen lemma.
                    if i.label() == 'VBZ':
                        replace = VerbMorph.pluralverb(replace)
                    elif i.label() == 'VBD':
                        replace = VerbMorph.find_past(replace)
                    elif i.label() == 'VBN':
                        replace = VerbMorph.find_past_participle(replace)
                    limit -= 1
                    i[0]=replace
            else:
                limit = VerbReplacement(i,dict,selectfunc,limit)
        return limit
    else:
        return limit
def NounReplacement(tree,dict,selectfunc=defaultselectfunc, limit = 3):
    """Replace up to *limit* nouns (NN/NNS leaves) in *tree*, in place,
    with a synonym chosen by *selectfunc*, pluralising when the original was.

    Recurses depth-first; returns the remaining replacement budget.
    NOTE: the parameter name ``dict`` shadows the builtin (kept for API compat).
    """
    if limit == 0:
        return 0
    if isinstance(tree,Tree):
        for i in tree:
            if isinstance(i,Tree) and i.label() in ['NN','NNS']:
                lemma = wnl.lemmatize(i[0])
                replace = lemma
                if lemma in dict and len(dict[lemma])>0:
                    replace = selectfunc(dict[lemma])
                if checkpos(replace,'n') and not i[0]==replace:
                    # lemma != surface form means the original was plural;
                    # re-pluralise via the verb 's'-form helper.
                    if not lemma == i[0]:
                        replace = VerbMorph.pluralverb(replace)
                    limit -= 1
                    i[0]=replace
            else:
                limit = NounReplacement(i,dict,selectfunc,limit)
        return limit
    else:
        return limit
def makeFrequency(trees):
    """Build a token-frequency dictionary over the leaves of every tree.

    Tokens are lower-cased before counting; returns {token: count}.
    """
    counts = {}
    for tree in trees:
        for token in tree.leaves():
            key = token.lower()
            counts[key] = counts.get(key, 0) + 1
    return counts
def freqdictselector(words, freqdict, freqlvl):
    """Pick a synonym from *words* by corpus frequency quantile.

    *freqlvl* in 1..5 maps linearly over the frequency-sorted candidates:
    1 selects the rarest word, 5 the most frequent, 3 the median.

    Fix vs. original: no longer sorts the caller's *words* list in place
    (the synonym dictionary's stored lists were being silently reordered)
    and no longer inserts zero entries into *freqdict* for unseen words.
    Selection result is identical: missing words count as frequency 0 and
    Python's sort is stable.

    Args:
        words: non-empty list of candidate words.
        freqdict: {word: corpus frequency}; unseen words treated as 0.
        freqlvl: integer frequency level, 1 (rarest) .. 5 (most frequent).

    Returns:
        The selected word.
    """
    ranked = sorted(words, key=lambda w: freqdict.get(w, 0))
    # Same quantile formula as before: position scales with (freqlvl-1)/4.
    place = round(float(len(ranked) - 1) / 4.0 * float(freqlvl - 1), 0)
    return ranked[int(place)]
def FrequencySynonymReplacement(tree,dict,freqdict,freqlvl):
    """Replace nouns, verbs and adjectives in *tree*, in place, with the
    synonym whose corpus frequency matches *freqlvl* (via freqdictselector).

    Unlike the per-POS functions above there is no replacement limit; every
    eligible leaf is rewritten. Verbs are re-inflected to the original tag,
    plural nouns re-pluralised. Recurses depth-first; returns None.
    NOTE: the parameter name ``dict`` shadows the builtin (kept for API compat).
    """
    if isinstance(tree,Tree):
        for i in tree:
            if isinstance(i,Tree) and i.label() in ['NN','NNS']:
                lemma = wnl.lemmatize(i[0].lower())
                replace = lemma
                if lemma in dict and len(dict[lemma])>0:
                    replaces = dict[lemma]
                    replace = freqdictselector(replaces,freqdict,freqlvl)
                if checkpos(replace,'n') and not i[0]==replace:
                    # lemma != surface form means the original was plural.
                    if not lemma == i[0]:
                        replace = VerbMorph.pluralverb(replace)
                    i[0]=replace
            elif isinstance(i,Tree) and i.label() in ['VB','VBZ','VBD','VBN'] and i[0] not in ['have','has','had']:
                lemma = wnl.lemmatize(i[0].lower())
                replace = lemma
                if lemma in dict and not lemma == 'be' and len(dict[lemma])>0:
                    replaces = dict[lemma]
                    replace = freqdictselector(replaces, freqdict, freqlvl)
                if checkpos(replace,'v') and not i[0]==replace:
                    # Re-apply the POS tag's morphology to the chosen lemma.
                    if i.label() == 'VBZ':
                        replace = VerbMorph.pluralverb(replace)
                    elif i.label() == 'VBD':
                        replace = VerbMorph.find_past(replace)
                    elif i.label() == 'VBN':
                        replace = VerbMorph.find_past_participle(replace)
                    i[0]=replace
            elif isinstance(i,Tree) and i.label() in ['JJ']:
                lemma = wnl.lemmatize(i[0].lower())
                replace = lemma
                if lemma in dict and len(dict[lemma])>0:
                    replaces = dict[lemma]
                    replace = freqdictselector(replaces, freqdict, freqlvl)
                if checkpos(replace,'a') and not i[0]==replace:
                    i[0]=replace
            else:
                FrequencySynonymReplacement(i,dict,freqdict,freqlvl)
import create2koriginal
import copy
if __name__ == "__main__":
f = open('../../dictionaries/synonym.dict','rb')
dict = pickle.load(f)
"""
for file in treebank.fileids():
for i in treebank.parsed_sents(file):
print(i)
ADJReplacement(i,dict)
print(i)
count += 1
if count == 5:
break
if count == 5:
break
"""
trees = PTBdata.getalltrees('ptb-train.txt')
trees.extend(PTBdata.getalltrees('ptb-test.txt'))
trees.extend(PTBdata.getalltrees('ptb-valid.txt'))
freqdict=makeFrequency(trees)
print(freqdictselector(dict['angry'],freqdict,1))
count=0
for tree in trees:
if len(tree.leaves()) < 5 or len(tree.leaves()) > 12 or not tree.label()[0] == 'S':
continue
j = copy.deepcopy(tree)
FrequencySynonymReplacement(j,dict,freqdict,1)
if tree.leaves()==j.leaves():
continue
count += 1
if count < 30:
pass
#print(tree.leaves())
#print(j.leaves())
print(count)
| 5,142 | 0 | 183 |
ba107ddc61295d645875a7f271eedaa4181c5057 | 1,164 | py | Python | MCapp/filewatcher.py | magnarch/dissertation | 68b56554dd5a14e97ff1c0338e2d0ae7309fde1c | [
"MIT"
] | null | null | null | MCapp/filewatcher.py | magnarch/dissertation | 68b56554dd5a14e97ff1c0338e2d0ae7309fde1c | [
"MIT"
] | 2 | 2020-02-12T00:20:40.000Z | 2020-06-05T20:57:42.000Z | MCapp/filewatcher.py | magnarch/minicooper | 68b56554dd5a14e97ff1c0338e2d0ae7309fde1c | [
"MIT"
] | null | null | null | import sys
import os
import json
import time
import post_to_server
import argparse
if __name__ == "__main__":
main(sys.argv[1:])
| 35.272727 | 177 | 0.668385 | import sys
import os
import json
import time
import post_to_server
import argparse
def main(argv):
    """Poll a folder for PDF files and upload each to the server.

    Every second, list the PDFs in ``--folder`` and POST each one to the
    server at ``--IP`` via post_to_server.post(); a file is deleted locally
    only when the server found a matching template (return != -1).

    Fix vs. original: the loop removed items from ``filename_list`` while
    iterating over it, which made Python skip the file immediately after
    every successful upload; the removal was also redundant because the
    list is rebuilt on each polling cycle. Path joining now uses
    os.path.join instead of '/'-string concatenation.

    Args:
        argv: command-line arguments (excluding the program name).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--folder', default='./')
    parser.add_argument('--IP', default='127.0.0.1:8000')
    args = parser.parse_args(argv)
    watched_folder = args.folder
    ip_server = args.IP
    while True:
        time.sleep(1)
        # Snapshot of the PDF files currently present in the watched folder.
        filename_list = [f for f in os.listdir(watched_folder) if f.endswith('.pdf')]
        for filename in filename_list:
            # Remove the PDF only if the server found a template for it.
            if post_to_server.post(watched_folder, filename, ip_server) != -1:
                os.remove(os.path.join(watched_folder, filename))
            else:
                print("program could not deliver")
if __name__ == "__main__":
main(sys.argv[1:])
| 1,007 | 0 | 23 |
347f4d56881a85774c72116ebe61ca227fd39bd8 | 1,848 | py | Python | Trigger_azure/fonctions/fonctions.py | Nico34000/cloud_library_nicolas | 7b762a705baeb19208e1448f11df88f03e4d265e | [
"MIT"
] | null | null | null | Trigger_azure/fonctions/fonctions.py | Nico34000/cloud_library_nicolas | 7b762a705baeb19208e1448f11df88f03e4d265e | [
"MIT"
] | null | null | null | Trigger_azure/fonctions/fonctions.py | Nico34000/cloud_library_nicolas | 7b762a705baeb19208e1448f11df88f03e4d265e | [
"MIT"
] | null | null | null | import os
import mysql.connector
import json
from jinja2 import Template
config_sql = {
'host':os.environ['HOST_SQL_AZURE'],
'user':os.environ['USER_SQL_AZURE'],
'password':os.environ['PASSWORD_SQL_AZURE'],
'database':os.environ['DATABASE_SQL_AZURE'],
'client_flags': [mysql.connector.ClientFlag.SSL],
'ssl_ca': os.environ["SSL_CA_SQL_AZURE"]}
conn = mysql.connector.connect(**config_sql)
cursor = conn.cursor(dictionary=True)
| 22 | 57 | 0.637987 | import os
import mysql.connector
import json
from jinja2 import Template
# MySQL connection settings for the Azure-hosted database; all credentials
# come from environment variables, and the connection is forced over SSL.
config_sql = {
    'host':os.environ['HOST_SQL_AZURE'],
    'user':os.environ['USER_SQL_AZURE'],
    'password':os.environ['PASSWORD_SQL_AZURE'],
    'database':os.environ['DATABASE_SQL_AZURE'],
    'client_flags': [mysql.connector.ClientFlag.SSL],
    'ssl_ca': os.environ["SSL_CA_SQL_AZURE"]}
# Module-level connection and dictionary cursor shared by every helper below.
# NOTE(review): a single long-lived connection with no reconnect handling —
# confirm the hosting runtime tolerates idle-connection drops.
conn = mysql.connector.connect(**config_sql)
cursor = conn.cursor(dictionary=True)
def jinja_list_book():
    """Render list_book.html with the library's book titles.

    Returns the rendered HTML as a string.
    """
    books= book()
    with open("list_book.html") as file_:
        template = Template(file_.read())
    result = template.render(books=books)
    return result
def book():
    """Return the list of all book titles in the library table.

    Uses the module-level dictionary cursor; each fetched row is a dict
    keyed by column name.
    """
    # cursor.execute() returns None with mysql-connector; the name `request`
    # is therefore always None and only kept for historical reasons.
    request = cursor.execute("""SELECT titre
                From library""")
    result = cursor.fetchall()
    res =[]
    for row in result:
        res.append(row["titre"])
    # return json.dumps(result)
    return res
def list_book():
    """Return every row of the library's title query as a JSON string.

    Note: unlike ``book()``, which returns a plain list of title strings,
    this returns the raw row dicts serialized with json.dumps.

    Fix vs. original: removed dead code — a ``res`` list of titles was
    built and then discarded, and ``request`` always bound None
    (mysql-connector's ``cursor.execute`` has no return value).
    """
    cursor.execute("""SELECT titre
                From library""")
    result = cursor.fetchall()
    return json.dumps(result)
def index():
    """Return the contents of index.html (the landing-page markup).

    Fix vs. original: the file handle from ``open(...).read()`` was never
    closed; a ``with`` block now guarantees it is released.
    """
    with open("index.html", "r") as fh:
        return fh.read()
def info_book(titre):
    """Return the comma-split ``info`` field of the first row matching *titre*.

    Returns a list of info fragments, or None when no row matches (the
    final loop never runs). Uses a parameterized query against the
    module-level cursor.
    """
    cursor.execute("""SELECT info,urlblob
                From library where titre = %s """,(titre,))
    result = cursor.fetchall()
    res= []
    for row in result:
        res.append(row['info'])
    # The return inside the loop means only the FIRST matching row's info
    # is ever split and returned.
    for info in res:
        return info.split(',')
def jinja_info(name):
    """Render info.html for the book *name*, with its info fragments and
    blob URL. Returns the rendered HTML as a string."""
    info = info_book(name)
    name=name  # no-op self-assignment kept for byte-compatibility
    url = url_book(name)
    with open("info.html") as file_:
        template = Template(file_.read())
    result = template.render(info=info,name=name,url=url)
    return result
def url_book(titre):
    """Return the blob-storage URL of the book *titre* as a string.

    NOTE(review): ``fetchone()`` returns None when no row matches, and the
    subscript would then raise TypeError — callers appear to assume the
    title exists.
    """
    cursor.execute("""SELECT urlblob
                From library where titre = %s """,(titre,))
    result = cursor.fetchone()
    return str(result['urlblob'])
| 1,229 | 0 | 161 |
b09e2ec0375a04f7082446388ed3ad8f60874111 | 1,134 | py | Python | detect_fraud_email_enron/tools/utils.py | gotamist/other_machine_learning | 70c7f5367ed5cf9b6fd4818cda16add24a2b468d | [
"MIT"
] | null | null | null | detect_fraud_email_enron/tools/utils.py | gotamist/other_machine_learning | 70c7f5367ed5cf9b6fd4818cda16add24a2b468d | [
"MIT"
] | null | null | null | detect_fraud_email_enron/tools/utils.py | gotamist/other_machine_learning | 70c7f5367ed5cf9b6fd4818cda16add24a2b468d | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 25 08:55:11 2018
@author: gotamist
"""
def clean_and_move_up_poi(df):
''' Make 'poi' the first feature, remove lines having just nans
'''
import pandas as pd
import numpy as np
f_list = list(df)
x_list = f_list #I need the x_list later. Not just to move up the poi
poi_series = df[ 'poi' ]
poi_series = poi_series.astype('int')
poi_df = poi_series.to_frame()
x_list.remove('poi')
x_list.remove('email_address')
f_list = [ 'poi' ]+x_list
df = df.loc[:, x_list]
df=df.replace('NaN', np.nan)
df=df.dropna( how ='all')
df = poi_df.join( df, how = 'right') #if not dropping NaN here, right or left join does not matter
return df
| 28.35 | 105 | 0.584656 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 25 08:55:11 2018
@author: gotamist
"""
def clean_and_move_up_poi(df):
    """Put 'poi' (cast to int) first, drop 'email_address', clean NaNs.

    The string 'NaN' is converted to a real NaN and rows whose feature
    values are all NaN are removed; the returned frame keeps only the
    surviving rows (right join on the cleaned features).
    """
    import pandas as pd
    import numpy as np
    # 'poi' as a one-column integer frame, taken before any row dropping.
    poi_frame = df['poi'].astype('int').to_frame()
    # Every other column except the label and the email address, original order.
    feature_cols = [c for c in df.columns if c not in ('poi', 'email_address')]
    features = df.loc[:, feature_cols]
    features = features.replace('NaN', np.nan)
    features = features.dropna(how='all')
    # how='right' keeps exactly the rows that survived the dropna above.
    return poi_frame.join(features, how='right')
def scale_features(df, col_list):
    """Min-max scale the columns in *col_list* to [0, 1], in place.

    If a column has no variation (max == min) a warning is printed and the
    frame is returned as-is for that and all remaining columns, matching
    the original control flow.

    Fix vs. original: the warning was a Python-2 leftover
    ``print(...), ". Drop it!"`` that built a throwaway tuple — the
    ". Drop it!" part was never printed. It is now a single print call.
    NOTE(review): the early ``return`` leaves any columns after the
    constant one unscaled; preserved deliberately, but confirm intent.

    Args:
        df: DataFrame to scale (mutated in place).
        col_list: names of the numeric columns to scale.

    Returns:
        The (mutated) DataFrame.
    """
    import numpy as np
    for col in col_list:
        maxim = np.max(df[col])
        minim = np.min(df[col])
        if maxim == minim:
            print("no variation in feature", col, ". Drop it!")
            return df
        else:
            df[col] = (df[col] - minim) / (maxim - minim)
    return df
| 338 | 0 | 23 |
3001afde61940e88f304032ce4e921465dc94f47 | 1,004 | py | Python | tests/test_activities.py | Agilicus/copper-sdk | dfdecd4aa76bdd47661fdd4bfada7781f8eae835 | [
"MIT"
] | 4 | 2021-01-03T07:40:01.000Z | 2021-09-03T09:21:02.000Z | tests/test_activities.py | Agilicus/copper-sdk | dfdecd4aa76bdd47661fdd4bfada7781f8eae835 | [
"MIT"
] | 5 | 2020-09-03T17:28:13.000Z | 2021-10-04T22:47:23.000Z | tests/test_activities.py | Agilicus/copper-sdk | dfdecd4aa76bdd47661fdd4bfada7781f8eae835 | [
"MIT"
] | 4 | 2021-01-07T05:30:49.000Z | 2021-09-13T08:08:54.000Z | import vcr
from copper_sdk.activities import Activities
@vcr.use_cassette('tests/vcr_cassettes/activities-list.yml', filter_headers=['X-PW-AccessToken', 'X-PW-UserEmail'])
def test_activities_list(copper):
'''Test list activities'''
response = copper.activities().list({
'page_size': 10,
})
assert isinstance(response, list)
assert isinstance(response[0], dict)
assert len(response) == 10
# @vcr.use_cassette('tests/vcr_cassettes/lead-activities.yml', filter_headers=['X-PW-AccessToken', 'X-PW-UserEmail'])
# def test_leads_activities(copper):
# '''Test getting activities from a lead'''
#
# # get a lead id
# response = copper.leads().list({
# 'page_size': 1,
# })
# lead_id = response[0]['id']
#
# # get activity for the lead
# response = copper.leads().activities(lead_id)
#
# assert isinstance(response, list)
#
# # Cannot guarentee a result
# # assert isinstance(response[0], dict)
# # assert len(response) == 1
| 30.424242 | 117 | 0.663347 | import vcr
from copper_sdk.activities import Activities
@vcr.use_cassette('tests/vcr_cassettes/activities-list.yml', filter_headers=['X-PW-AccessToken', 'X-PW-UserEmail'])
def test_activities_list(copper):
'''Test list activities'''
response = copper.activities().list({
'page_size': 10,
})
assert isinstance(response, list)
assert isinstance(response[0], dict)
assert len(response) == 10
# @vcr.use_cassette('tests/vcr_cassettes/lead-activities.yml', filter_headers=['X-PW-AccessToken', 'X-PW-UserEmail'])
# def test_leads_activities(copper):
# '''Test getting activities from a lead'''
#
# # get a lead id
# response = copper.leads().list({
# 'page_size': 1,
# })
# lead_id = response[0]['id']
#
# # get activity for the lead
# response = copper.leads().activities(lead_id)
#
# assert isinstance(response, list)
#
# # Cannot guarentee a result
# # assert isinstance(response[0], dict)
# # assert len(response) == 1
| 0 | 0 | 0 |
de4f8531f4d2faed22bd859f32d4905d82fa2e15 | 8,734 | py | Python | bidaf/data_loader.py | qianyingw/bioqa | 0511597e3b1cd6eaafe893677ed601c7303befef | [
"MIT"
] | null | null | null | bidaf/data_loader.py | qianyingw/bioqa | 0511597e3b1cd6eaafe893677ed601c7303befef | [
"MIT"
] | null | null | null | bidaf/data_loader.py | qianyingw/bioqa | 0511597e3b1cd6eaafe893677ed601c7303befef | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 3 18:47:55 2020
@author: qwang
"""
import os
import json
import torch
from torchtext import data
import torchtext.vocab as vocab
#%% Process for qanet only
#%%
# %% Instance
# args = {
# 'batch_size': 32,
# 'max_vocab_size': 30000,
# 'min_occur_freq': 0,
# 'embed_path': '/media/mynewdrive/rob/wordvec/wikipedia-pubmed-and-PMC-w2v.txt',
# # 'data_path': "/media/mynewdrive/bioqa/mnd/intervention/MND-Intervention-1983-06Aug20.json"
# # 'data_path': "/media/mynewdrive/bioqa/PsyCIPN-II-796-factoid-20s-02112020.json"
# 'data_path': "/media/mynewdrive/bioqa/PsyCIPN-II-1984-30s-20012021.json"
# }
# # BaseIter = BaselineIterators(args)
# import helper.helper_psci as helper_psci
# BaseIter.process_data(process_fn = helper_psci.process_for_baseline, model='bidaf') # 8mins
# train_data, valid_data, test_data = BaseIter.create_data()
# train_iter, valid_iter, test_iter = BaseIter.create_iterators(train_data, valid_data, test_data)
# BaseIter.load_embedding().stoi['set'] # 347
# BaseIter.load_embedding().stoi['Set'] # 11912
# BaseIter.load_embedding().stoi['SET'] # 32073
# BaseIter.TEXT.vocab.itos[:12] # ['<unk>', '<pad>', ',', 'the', 'of', 'in', '.', 'and', ')', '(', 'to', 'a']
# BaseIter.TEXT.vocab.itos[-4:] # ['~30o', '~Ctrl', '~nd', '~uced']
# BaseIter.TEXT.pad_token # '<pad>'
# BaseIter.TEXT.unk_token # '<unk>'
# BaseIter.TEXT.vocab.stoi[BaseIter.TEXT.pad_token] # 1
# BaseIter.TEXT.vocab.stoi[BaseIter.TEXT.unk_token] # 0
# BaseIter.TEXT.vocab.vectors.shape # [26940, 200] / [20851, 200]
# count = 0
# for batch in valid_iter:
# if count < 20:
# print(batch.context.shape) # [batch_size, context_len]
# count += 1
# count = 0
# for batch in valid_iter:
# if count < 8:
# print("=======================")
# print(batch.context.shape) # [batch_size, context_len]
# print(batch.question.shape) # [batch_size, question_len]
# # print(batch.y1s)
# # print(batch.y2s)
# print(len(batch.y1s))
# # print(batch.y1s.shape)
# # print(batch.context[0,:].shape)
# # print(batch.context[1,:].shape)
# # print(batch.context[-1,:].shape)
# count += 1
# b = next(iter(train_iter))
# vars(b).keys() # dict_keys(['batch_size', 'dataset', 'fields', 'input_fields', 'target_fields', 'id', 'question', 'context', 'y1s', 'y2s'])
| 38.817778 | 142 | 0.549347 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 3 18:47:55 2020
@author: qwang
"""
import os
import json
import torch
from torchtext import data
import torchtext.vocab as vocab
#%% Process for qanet only
def pad_tokens(tokens, max_len):
    """Return a token list of exactly *max_len* items: right-padded with
    '<pad>' when too short, truncated when too long."""
    if len(tokens) > max_len:
        return tokens[:max_len]
    return tokens + ['<pad>'] * (max_len - len(tokens))
def correct_ys(y, max_len):
    """Return *y* if it still indexes inside a context of *max_len* tokens,
    otherwise the sentinel -999 (answer lost to truncation)."""
    return -999 if y >= max_len else y
def qanet_process(dat_processed, max_clen, max_qlen):
    """Normalize a list of QANet records in place.

    Context/question token lists are padded or truncated to max_clen /
    max_qlen, and answer span indices (y1s/y2s) falling beyond the kept
    context are replaced with the -999 sentinel. Returns the same list.
    """
    for record in dat_processed:
        record['context_tokens'] = pad_tokens(record['context_tokens'], max_len=max_clen)
        record['ques_tokens'] = pad_tokens(record['ques_tokens'], max_len=max_qlen)
        record['y1s'] = correct_ys(record['y1s'], max_len=max_clen)
        record['y2s'] = correct_ys(record['y2s'], max_len=max_clen)
    return dat_processed
#%%
class BaselineIterators(object):
    """Build torchtext datasets/iterators for the BiDAF/QANet baselines.

    ``args`` is a dict with keys: 'data_path', 'embed_path', 'batch_size',
    'max_vocab_size', 'min_occur_freq' (see the commented example below).
    """
    def __init__(self, args):
        # Field definitions: raw (untokenized) id/pid/span fields and a
        # batch-first text field shared by question and context.
        self.args = args
        self.ID = data.RawField()
        self.PID = data.RawField()
        self.TEXT = data.Field(batch_first=True)
        self.POSITION = data.RawField()
    def process_data(self, process_fn, model='bidaf', max_clen=None, max_qlen=None):
        """Tokenize the raw json with *process_fn* and write train/valid/test
        json-lines files next to data_path. For model='qanet' also pad and
        sentinel-correct via qanet_process (max_clen/max_qlen required)."""
        with open(self.args['data_path']) as fin:
            dat = json.load(fin)
        data_dir = os.path.dirname(self.args['data_path'])
        # PsyCIPN data: one flat list with a per-record 'group' split marker.
        if os.path.basename(self.args['data_path']).split('-')[0] == 'PsyCIPN':
            dat_train, dat_valid, dat_test = [], [], []
            for ls in dat:
                if ls['group'] == 'train':
                    dat_train.append(ls)
                elif ls['group'] == 'valid':
                    dat_valid.append(ls)
                else:
                    dat_test.append(ls)
            train_processed = process_fn(dat_train)
            valid_processed = process_fn(dat_valid)
            test_processed = process_fn(dat_test)
        # MND data: already split into 'train'/'valid'/'test' keys.
        if os.path.basename(self.args['data_path']).split('-')[0] == 'MND':
            train_processed = process_fn(dat['train'])
            valid_processed = process_fn(dat['valid'])
            test_processed = process_fn(dat['test'])
        # Pading over batches and correct y1s/y2s to -999 if answers are in the truncated text(qanet only)
        if model == 'qanet':
            train_processed = qanet_process(train_processed, max_clen, max_qlen)
            valid_processed = qanet_process(valid_processed, max_clen, max_qlen)
            test_processed = qanet_process(test_processed, max_clen, max_qlen)
        # Write to train/valid/test json (one record per line)
        with open(os.path.join(data_dir, 'train.json'), 'w') as fout:
            for ls in train_processed:
                fout.write(json.dumps(ls) + '\n')
        with open(os.path.join(data_dir, 'valid.json'), 'w') as fout:
            for ls in valid_processed:
                fout.write(json.dumps(ls) + '\n')
        with open(os.path.join(data_dir, 'test.json'), 'w') as fout:
            for ls in test_processed:
                fout.write(json.dumps(ls) + '\n')
    def create_data(self):
        """Load the json-lines files written by process_data into
        torchtext TabularDatasets; returns (train, valid, test)."""
        # If a Field is shared between two columns in a dataset (e.g., question/answer in a QA dataset),
        # then they will have a shared vocabulary.
        fields = {'id': ('id', self.ID),
                  'ques_tokens': ('question', self.TEXT),
                  'context_tokens': ('context', self.TEXT),
                  'y1s': ('y1s', self.POSITION),
                  'y2s': ('y2s', self.POSITION)}
        # PsyCIPN data additionally carries a publication id column.
        if os.path.basename(self.args['data_path']).split('-')[0] == 'PsyCIPN':
            fields['pubId'] = ('pid', self.PID)
        dir_path = os.path.dirname(self.args['data_path'])
        assert os.path.exists(dir_path), "Path not exist!"
        train_data, valid_data, test_data = data.TabularDataset.splits(path = dir_path,
                                                                       train = 'train.json',
                                                                       validation = 'valid.json',
                                                                       test = 'test.json',
                                                                       format = 'json',
                                                                       fields = fields)
        return train_data, valid_data, test_data
    def load_embedding(self):
        """Load pretrained word vectors from args['embed_path']."""
        embed_path = self.args['embed_path']
        custom_embedding = vocab.Vectors(name = os.path.basename(embed_path),
                                         cache = os.path.dirname(embed_path))
        return custom_embedding
    def build_vocabulary(self, train_data, valid_data, test_data):
        """Build TEXT's vocabulary from train+valid, attaching the
        pretrained vectors; unknown words get normal-initialized vectors.
        Note: test_data is accepted but deliberately excluded."""
        # self.ID.build_vocab(train_data) # can't build vocab for RawField
        # self.POSITION.build_vocab(train_data)
        self.TEXT.build_vocab(train_data, valid_data,
                              max_size = self.args['max_vocab_size'],
                              min_freq = self.args['min_occur_freq'],
                              vectors = self.load_embedding(),
                              unk_init = torch.Tensor.normal_)
    def create_iterators(self, train_data, valid_data, test_data):
        """Build the vocabulary, then return (train, valid, test) iterators.
        Train batches are shuffled and bucketed by context length; valid
        and test preserve order."""
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        self.build_vocabulary(train_data, valid_data, test_data)
        train_iterator = data.BucketIterator(
            train_data,
            sort = True,
            sort_key = lambda x: len(x.context),
            shuffle = True,
            batch_size = self.args['batch_size'],
            device = device
        )
        valid_iterator, test_iterator = data.BucketIterator.splits(
            (valid_data, test_data),
            sort = False,
            shuffle = False,
            batch_size = self.args['batch_size'],
            device = device
        )
        return train_iterator, valid_iterator, test_iterator
# %% Instance
# args = {
# 'batch_size': 32,
# 'max_vocab_size': 30000,
# 'min_occur_freq': 0,
# 'embed_path': '/media/mynewdrive/rob/wordvec/wikipedia-pubmed-and-PMC-w2v.txt',
# # 'data_path': "/media/mynewdrive/bioqa/mnd/intervention/MND-Intervention-1983-06Aug20.json"
# # 'data_path': "/media/mynewdrive/bioqa/PsyCIPN-II-796-factoid-20s-02112020.json"
# 'data_path': "/media/mynewdrive/bioqa/PsyCIPN-II-1984-30s-20012021.json"
# }
# # BaseIter = BaselineIterators(args)
# import helper.helper_psci as helper_psci
# BaseIter.process_data(process_fn = helper_psci.process_for_baseline, model='bidaf') # 8mins
# train_data, valid_data, test_data = BaseIter.create_data()
# train_iter, valid_iter, test_iter = BaseIter.create_iterators(train_data, valid_data, test_data)
# BaseIter.load_embedding().stoi['set'] # 347
# BaseIter.load_embedding().stoi['Set'] # 11912
# BaseIter.load_embedding().stoi['SET'] # 32073
# BaseIter.TEXT.vocab.itos[:12] # ['<unk>', '<pad>', ',', 'the', 'of', 'in', '.', 'and', ')', '(', 'to', 'a']
# BaseIter.TEXT.vocab.itos[-4:] # ['~30o', '~Ctrl', '~nd', '~uced']
# BaseIter.TEXT.pad_token # '<pad>'
# BaseIter.TEXT.unk_token # '<unk>'
# BaseIter.TEXT.vocab.stoi[BaseIter.TEXT.pad_token] # 1
# BaseIter.TEXT.vocab.stoi[BaseIter.TEXT.unk_token] # 0
# BaseIter.TEXT.vocab.vectors.shape # [26940, 200] / [20851, 200]
# count = 0
# for batch in valid_iter:
# if count < 20:
# print(batch.context.shape) # [batch_size, context_len]
# count += 1
# count = 0
# for batch in valid_iter:
# if count < 8:
# print("=======================")
# print(batch.context.shape) # [batch_size, context_len]
# print(batch.question.shape) # [batch_size, question_len]
# # print(batch.y1s)
# # print(batch.y2s)
# print(len(batch.y1s))
# # print(batch.y1s.shape)
# # print(batch.context[0,:].shape)
# # print(batch.context[1,:].shape)
# # print(batch.context[-1,:].shape)
# count += 1
# b = next(iter(train_iter))
# vars(b).keys() # dict_keys(['batch_size', 'dataset', 'fields', 'input_fields', 'target_fields', 'id', 'question', 'context', 'y1s', 'y2s'])
| 5,888 | 11 | 284 |
efb76aa94d086ff0f04ad3596a8456c94adf0807 | 158 | py | Python | myapp/main.py | roedebaron/python-for-android | e57aff57aa538160178517716959f817e16b7da1 | [
"MIT"
] | null | null | null | myapp/main.py | roedebaron/python-for-android | e57aff57aa538160178517716959f817e16b7da1 | [
"MIT"
] | null | null | null | myapp/main.py | roedebaron/python-for-android | e57aff57aa538160178517716959f817e16b7da1 | [
"MIT"
] | null | null | null | from kivy.app import App
from kivy.uix.button import Button
MainApp().run() | 19.75 | 37 | 0.708861 | from kivy.app import App
from kivy.uix.button import Button
class MainApp(App):
    """Minimal Kivy application whose whole UI is one 'Hello World' button."""
    def build(self):
        # The returned widget becomes the app's root widget.
        return Button(text="Hello World")
MainApp().run() | 33 | -2 | 47 |
7558f040dfe8e456d2fde328a7b5069c490f2fd0 | 1,622 | py | Python | preprocessing/datetime.py | uberkinder/Robusta-AutoML | 9faee4c17ad9f37b09760f9fffea715cdbf2d1fb | [
"MIT"
] | 2 | 2019-04-26T19:40:31.000Z | 2019-10-12T15:18:29.000Z | preprocessing/datetime.py | uberkinder/Robusta-AutoML | 9faee4c17ad9f37b09760f9fffea715cdbf2d1fb | [
"MIT"
] | null | null | null | preprocessing/datetime.py | uberkinder/Robusta-AutoML | 9faee4c17ad9f37b09760f9fffea715cdbf2d1fb | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
__all__ = [
'DatetimeConverter1D',
'DatetimeConverter',
'CyclicEncoder',
]
class CyclicEncoder(BaseEstimator, TransformerMixin):
"""Cyclic Encoder
Convert x to the [cos(2*pi*t), sin(2*pi*t)] pair, where t is
pre-normalized x: t = (x - min[x])/(max[x] - min[x] + delta)
Parameters
----------
delta : float
Distance between maximum and minimum "angle"
"""
| 22.527778 | 72 | 0.586313 | import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
__all__ = [
'DatetimeConverter1D',
'DatetimeConverter',
'CyclicEncoder',
]
class DatetimeConverter1D(BaseEstimator, TransformerMixin):
    """Stateless sklearn transformer: parse a 1-D sequence into datetimes.

    All keyword arguments given at construction are forwarded verbatim
    to ``pd.to_datetime``.
    """

    def __init__(self, **params):
        self.params = params

    def fit(self, x, y=None):
        # Nothing to learn; present only to satisfy the sklearn API.
        return self

    def transform(self, x):
        return pd.to_datetime(x, **self.params)
class DatetimeConverter(BaseEstimator, TransformerMixin):
    """Parse every column of a DataFrame into datetimes.

    Keyword arguments given at construction are forwarded verbatim to
    ``pd.to_datetime``; pass ``copy=False`` to convert the frame in place.
    """

    def __init__(self, copy=True, **params):
        self.params = params
        self.copy = copy

    def fit(self, X, y=None):
        # Stateless: nothing is learned from the data.
        return self

    def transform(self, X):
        frame = X.copy() if self.copy else X
        for name in frame:
            frame[name] = pd.to_datetime(frame[name], **self.params)
        return frame
class CyclicEncoder(BaseEstimator, TransformerMixin):
    """Cyclic Encoder

    Convert x to the [cos(2*pi*t), sin(2*pi*t)] pair, where t is
    pre-normalized x: t = (x - min[x])/(max[x] - min[x] + delta)

    NOTE(review): the implementation below actually computes cos(t) and
    sin(t) *without* the 2*pi factor promised above, so the encoding does
    not wrap around a full circle — confirm which behavior is intended
    before relying on the docstring.

    Parameters
    ----------
    delta : float
        Distance between maximum and minimum "angle"
    """

    def __init__(self, delta=1):
        self.delta = delta  # max..min distance

    def fit(self, X, y=None):
        # Remember per-column extrema used for min-max normalization.
        self.min_ = X.min()
        self.max_ = X.max()
        return self

    def transform(self, X):
        # t in [0, 1): a positive delta keeps the maximum strictly below 1.
        X = (X - self.min_)/(self.max_ - self.min_ + self.delta)
        # Emit <col>_cos and <col>_sin columns, sorted by column name.
        return pd.concat([np.cos(X).rename(lambda x: x+'_cos', axis=1),
                          np.sin(X).rename(lambda x: x+'_sin', axis=1)],
                         axis=1).sort_index(axis=1)
| 751 | 74 | 286 |
9a273354a19a34cfa2a33481c207eb60e028b4f5 | 3,999 | py | Python | source/world.py | kchevali/Localization | f9abf470bef37016518406c8b65a8f5edf7c62e8 | [
"MIT"
] | null | null | null | source/world.py | kchevali/Localization | f9abf470bef37016518406c8b65a8f5edf7c62e8 | [
"MIT"
] | null | null | null | source/world.py | kchevali/Localization | f9abf470bef37016518406c8b65a8f5edf7c62e8 | [
"MIT"
] | null | null | null | from node import Node
from random import randint
from math import sqrt, pi, e
import pygame as pg
if __name__ == '__main__':
pass
| 31.242188 | 121 | 0.493373 | from node import Node
from random import randint
from math import sqrt, pi, e
import pygame as pg
class World:
    """Pygame visualization of probabilistic node localization on a grid.

    Holds a set of anchor (known-position) and agent nodes, and renders a
    per-cell probability grid for one agent per frame.
    """

    def __init__(self, width, height, blockSize, transmitRange, agentCount, anchorCount, errDist):
        """Open the window and populate the grid with random nodes.

        Args:
            width, height: grid dimensions, in cells.
            blockSize: pixel size of one grid cell.
            transmitRange: radio range; presumably used by Node adjacency
                tests (isClose) — defined in node.py, not visible here.
            agentCount, anchorCount: how many nodes of each kind to spawn.
            errDist: variance-like parameter of the Gaussian error model
                used by err().
        """
        pg.init()
        pg.display.set_caption("Localization Sim")
        self.width = width
        self.height = height
        self.blockSize = blockSize
        self.screen = pg.display.set_mode(
            (self.width * self.blockSize, self.height * self.blockSize))
        self.nodes = []
        self.transmitRange = transmitRange
        self.errDist = errDist
        # Agents first, then anchors; addNode wires adjacency as it goes.
        for _ in range(agentCount):
            self.addNode(False)
        for _ in range(anchorCount):
            self.addNode(True)
        self.updateFixedStatus()
        self.frame = 0

    def addNode(self, isAnchor):
        """Spawn one node at a random spot in the central 80% of the grid
        and link it to every close node (anchor-anchor links excluded)."""
        ratio = 0.4
        dx = int(self.width * ratio)
        dy = int(self.height * ratio)
        x = self.width // 2 + randint(-dx, dx)
        y = self.height // 2 + randint(-dy, dy)
        a = Node(x, y, isAnchor, self)
        for b in self.nodes:
            # Anchors already know their positions, so an anchor-anchor
            # edge carries no information and is skipped.
            if a.isClose(b) and (not a.isAnchor or not b.isAnchor):
                a.addAdj(b)
                b.addAdj(a)
        self.nodes.append(a)

    def err(self, x):
        """Gaussian-shaped density with errDist as the variance:
        exp(-x^2 / (2*errDist)) / sqrt(2*pi*errDist)."""
        return (e**(-x * x / (2 * self.errDist))) / sqrt(2 * pi * self.errDist)

    def updateFixedStatus(self):
        """Let nodes update their 'fixed' state until one full pass over
        all nodes changes nothing (fixed-point iteration)."""
        isDone = False
        while not isDone:
            isDone = True
            for node in self.nodes:
                if node.updateFixed():
                    isDone = False

    def setProbGrid(self):
        """Rebuild self.prob for the current frame's agent.

        Picks the next non-anchor node with at least one fixed neighbor,
        multiplies in the probability grids of its fixed neighbors, and
        folds the result into the world grid. Terminates the process if a
        full cycle finds no eligible node.
        """
        self.prob = [[0.0 for _ in range(self.width - 1)]
                     for _ in range(self.height - 1)]
        # for a in self.nodes:
        a = self.nodes[self.frame % len(self.nodes)]
        fail = 0
        # Skip anchors and nodes with no fixed neighbors; give up after a
        # full cycle through the node list.
        while a.isAnchor or a.fixedCount == 0:
            fail += 1
            if fail >= len(self.nodes):
                print("Fail")
                exit()
            self.frame += 1
            a = self.nodes[self.frame % len(self.nodes)]
        a.prob = [[1.0 for _ in range(self.width - 1)]
                  for _ in range(self.height - 1)]
        for b in a.adj:
            if(b.isFixed()):
                a.multProbGrid(b)
        self.maxProbGrid(a.prob)
        self.normalize(self.prob)

    def maxProbGrid(self, grid):
        """Element-wise maximum of `grid` folded into self.prob."""
        # print("BEGIN")
        for y in range(self.height - 1):
            for x in range(self.width - 1):
                self.prob[y][x] = max(self.prob[y][x], grid[y][x])
                # print(x, y, self.prob[y][x])

    def normalize(self, grid):
        """Scale `grid` in place so its maximum entry becomes 1.
        No-op when the grid is all zeros (avoids division by zero)."""
        maxValue = 0
        for y in range(self.height - 1):
            for x in range(self.width - 1):
                maxValue = max(grid[y][x], maxValue)
        if maxValue > 0:
            for y in range(self.height - 1):
                for x in range(self.width - 1):
                    grid[y][x] /= maxValue

    def display(self):
        """Draw the probability dots, adjacency lines, then the nodes."""
        for y in range(self.height - 1):
            for x in range(self.width - 1):
                # Green intensity encodes probability; after normalize()
                # entries are at most 1, so the channel stays within 0-255
                # (assumes entries are non-negative — TODO confirm in Node).
                pg.draw.circle(self.screen, (0, 255 * self.prob[y][x], 0), ((x + 1) *
                               self.blockSize, (y + 1) * self.blockSize), 2)
        for a in self.nodes:
            for b in a.adj:
                pg.draw.line(self.screen, (255, 255, 255),
                             (a.x * self.blockSize, a.y * self.blockSize), (b.x * self.blockSize, b.y * self.blockSize))
        for node in self.nodes:
            node.display()

    def run(self):
        """Main loop: one localization update and redraw per second, for
        at most 20 frames or until the window is closed."""
        clock = pg.time.Clock()
        done = False
        while not done:
            for event in pg.event.get():
                if event.type == pg.QUIT:
                    done = True
            self.setProbGrid()
            self.display()
            pg.display.update()
            clock.tick(1)
            self.frame += 1
            if(self.frame >= 20):
                done = True
        pg.quit()
if __name__ == '__main__':
pass
| 3,606 | -9 | 265 |
924bde2d147f9329b460942e869afbae2d63f4f7 | 7,748 | py | Python | google_compute_engine/metadata_scripts/script_retriever.py | jrw972/compute-image-packages | f5b2ae581c4bb2d02d4d86918a27baa81dd30861 | [
"Apache-2.0"
] | null | null | null | google_compute_engine/metadata_scripts/script_retriever.py | jrw972/compute-image-packages | f5b2ae581c4bb2d02d4d86918a27baa81dd30861 | [
"Apache-2.0"
] | 2 | 2018-06-10T18:10:31.000Z | 2018-06-29T13:10:15.000Z | google_compute_engine/metadata_scripts/script_retriever.py | jrw972/compute-image-packages | f5b2ae581c4bb2d02d4d86918a27baa81dd30861 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Retrieve and store user provided metadata scripts."""
import re
import socket
import subprocess
import tempfile
from google_compute_engine import metadata_watcher
from google_compute_engine.compat import httpclient
from google_compute_engine.compat import urlerror
from google_compute_engine.compat import urlretrieve
class ScriptRetriever(object):
"""A class for retrieving and storing user provided metadata scripts."""
def __init__(self, logger, script_type):
"""Constructor.
Args:
logger: logger object, used to write to SysLog and serial port.
script_type: string, the metadata script type to run.
"""
self.logger = logger
self.script_type = script_type
self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
def _DownloadGsUrl(self, url, dest_dir):
"""Download a Google Storage URL using gsutil.
Args:
url: string, the URL to download.
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
string, the path to the file storing the metadata script.
"""
try:
subprocess.check_call(
['which', 'gsutil'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except subprocess.CalledProcessError:
self.logger.warning(
'gsutil is not installed, cannot download items from Google Storage.')
return None
dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False)
dest_file.close()
dest = dest_file.name
self.logger.info('Downloading url from %s to %s using gsutil.', url, dest)
try:
subprocess.check_call(['gsutil', 'cp', url, dest])
return dest
except subprocess.CalledProcessError as e:
self.logger.warning(
'Could not download %s using gsutil. %s.', url, str(e))
except Exception as e:
self.logger.warning(
'Exception downloading %s using gsutil. %s.', url, str(e))
return None
def _DownloadUrl(self, url, dest_dir):
"""Download a script from a given URL.
Args:
url: string, the URL to download.
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
string, the path to the file storing the metadata script.
"""
dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False)
dest_file.close()
dest = dest_file.name
self.logger.info('Downloading url from %s to %s.', url, dest)
try:
urlretrieve.urlretrieve(url, dest)
return dest
except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:
self.logger.warning('Could not download %s. %s.', url, str(e))
except Exception as e:
self.logger.warning('Exception downloading %s. %s.', url, str(e))
return None
def _DownloadScript(self, url, dest_dir):
"""Download the contents of the URL to the destination.
Args:
url: string, the URL to download.
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
string, the path to the file storing the metadata script.
"""
# Check for the preferred Google Storage URL format:
# gs://<bucket>/<object>
if url.startswith(r'gs://'):
return self._DownloadGsUrl(url, dest_dir)
header = r'http[s]?://'
domain = r'storage\.googleapis\.com'
# Many of the Google Storage URLs are supported below.
# It is prefered that customers specify their object using
# its gs://<bucket>/<object> url.
bucket = r'(?P<bucket>[a-z0-9][-_.a-z0-9]*[a-z0-9])'
# Accept any non-empty string that doesn't contain a wildcard character
# gsutil interprets some characters as wildcards.
# These characters in object names make it difficult or impossible
# to perform various wildcard operations using gsutil
# For a complete list use "gsutil help naming".
obj = r'(?P<obj>[^\*\?]+)'
# Check for the Google Storage URLs:
# http://<bucket>.storage.googleapis.com/<object>
# https://<bucket>.storage.googleapis.com/<object>
gs_regex = re.compile(r'\A%s%s\.%s/%s\Z' % (header, bucket, domain, obj))
match = gs_regex.match(url)
if match:
gs_url = r'gs://%s/%s' % (match.group('bucket'), match.group('obj'))
# In case gsutil is not installed, continue as a normal URL.
return (self._DownloadGsUrl(gs_url, dest_dir) or
self._DownloadUrl(url, dest_dir))
# Check for the other possible Google Storage URLs:
# http://storage.googleapis.com/<bucket>/<object>
# https://storage.googleapis.com/<bucket>/<object>
#
# The following are deprecated but checked:
# http://commondatastorage.googleapis.com/<bucket>/<object>
# https://commondatastorage.googleapis.com/<bucket>/<object>
gs_regex = re.compile(
r'\A%s(commondata)?%s/%s/%s\Z' % (header, domain, bucket, obj))
match = gs_regex.match(url)
if match:
gs_url = r'gs://%s/%s' % (match.group('bucket'), match.group('obj'))
# In case gsutil is not installed, continue as a normal URL.
return (self._DownloadGsUrl(gs_url, dest_dir) or
self._DownloadUrl(url, dest_dir))
# Unauthenticated download of the object.
return self._DownloadUrl(url, dest_dir)
def _GetAttributeScripts(self, attribute_data, dest_dir):
"""Retrieve the scripts from attribute metadata.
Args:
attribute_data: dict, the contents of the attributes metadata.
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
dict, a dictionary mapping metadata keys to files storing scripts.
"""
script_dict = {}
attribute_data = attribute_data or {}
metadata_key = '%s-script' % self.script_type
metadata_value = attribute_data.get(metadata_key)
if metadata_value:
self.logger.info('Found %s in metadata.' % metadata_key)
with tempfile.NamedTemporaryFile(
mode='w', dir=dest_dir, delete=False) as dest:
dest.write(metadata_value.lstrip())
script_dict[metadata_key] = dest.name
metadata_key = '%s-script-url' % self.script_type
metadata_value = attribute_data.get(metadata_key)
if metadata_value:
self.logger.info('Found %s in metadata.' % metadata_key)
script_dict[metadata_key] = self._DownloadScript(metadata_value, dest_dir)
return script_dict
def GetScripts(self, dest_dir):
"""Retrieve the scripts to execute.
Args:
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
dict, a dictionary mapping set metadata keys with associated scripts.
"""
metadata_dict = self.watcher.GetMetadata() or {}
try:
instance_data = metadata_dict['instance']['attributes']
except KeyError:
instance_data = None
self.logger.warning('Instance attributes were not found.')
try:
project_data = metadata_dict['project']['attributes']
except KeyError:
project_data = None
self.logger.warning('Project attributes were not found.')
return (self._GetAttributeScripts(instance_data, dest_dir) or
self._GetAttributeScripts(project_data, dest_dir))
| 36.037209 | 80 | 0.688694 | #!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Retrieve and store user provided metadata scripts."""
import re
import socket
import subprocess
import tempfile
from google_compute_engine import metadata_watcher
from google_compute_engine.compat import httpclient
from google_compute_engine.compat import urlerror
from google_compute_engine.compat import urlretrieve
class ScriptRetriever(object):
  """A class for retrieving and storing user provided metadata scripts."""

  def __init__(self, logger, script_type):
    """Constructor.

    Args:
      logger: logger object, used to write to SysLog and serial port.
      script_type: string, the metadata script type to run.
    """
    self.logger = logger
    self.script_type = script_type
    self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)

  def _DownloadGsUrl(self, url, dest_dir):
    """Download a Google Storage URL using gsutil.

    Args:
      url: string, the URL to download.
      dest_dir: string, the path to a directory for storing metadata scripts.

    Returns:
      string, the path to the file storing the metadata script.
    """
    # Probe for gsutil on PATH before attempting the copy.
    try:
      subprocess.check_call(
          ['which', 'gsutil'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except subprocess.CalledProcessError:
      self.logger.warning(
          'gsutil is not installed, cannot download items from Google Storage.')
      return None

    # delete=False: the temp file must survive close() so gsutil can
    # overwrite it and the caller can read/execute it afterwards.
    dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False)
    dest_file.close()
    dest = dest_file.name
    self.logger.info('Downloading url from %s to %s using gsutil.', url, dest)
    try:
      subprocess.check_call(['gsutil', 'cp', url, dest])
      return dest
    except subprocess.CalledProcessError as e:
      self.logger.warning(
          'Could not download %s using gsutil. %s.', url, str(e))
    except Exception as e:
      self.logger.warning(
          'Exception downloading %s using gsutil. %s.', url, str(e))
    # Failures are logged, not raised; callers treat None as "no script".
    return None

  def _DownloadUrl(self, url, dest_dir):
    """Download a script from a given URL.

    Args:
      url: string, the URL to download.
      dest_dir: string, the path to a directory for storing metadata scripts.

    Returns:
      string, the path to the file storing the metadata script.
    """
    dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False)
    dest_file.close()
    dest = dest_file.name
    self.logger.info('Downloading url from %s to %s.', url, dest)
    try:
      urlretrieve.urlretrieve(url, dest)
      return dest
    except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:
      self.logger.warning('Could not download %s. %s.', url, str(e))
    except Exception as e:
      self.logger.warning('Exception downloading %s. %s.', url, str(e))
    return None

  def _DownloadScript(self, url, dest_dir):
    """Download the contents of the URL to the destination.

    Args:
      url: string, the URL to download.
      dest_dir: string, the path to a directory for storing metadata scripts.

    Returns:
      string, the path to the file storing the metadata script.
    """
    # Check for the preferred Google Storage URL format:
    # gs://<bucket>/<object>
    if url.startswith(r'gs://'):
      return self._DownloadGsUrl(url, dest_dir)

    header = r'http[s]?://'
    domain = r'storage\.googleapis\.com'
    # Many of the Google Storage URLs are supported below.
    # It is prefered that customers specify their object using
    # its gs://<bucket>/<object> url.
    bucket = r'(?P<bucket>[a-z0-9][-_.a-z0-9]*[a-z0-9])'
    # Accept any non-empty string that doesn't contain a wildcard character
    # gsutil interprets some characters as wildcards.
    # These characters in object names make it difficult or impossible
    # to perform various wildcard operations using gsutil
    # For a complete list use "gsutil help naming".
    obj = r'(?P<obj>[^\*\?]+)'

    # Check for the Google Storage URLs:
    # http://<bucket>.storage.googleapis.com/<object>
    # https://<bucket>.storage.googleapis.com/<object>
    gs_regex = re.compile(r'\A%s%s\.%s/%s\Z' % (header, bucket, domain, obj))
    match = gs_regex.match(url)
    if match:
      gs_url = r'gs://%s/%s' % (match.group('bucket'), match.group('obj'))
      # In case gsutil is not installed, continue as a normal URL.
      return (self._DownloadGsUrl(gs_url, dest_dir) or
              self._DownloadUrl(url, dest_dir))

    # Check for the other possible Google Storage URLs:
    # http://storage.googleapis.com/<bucket>/<object>
    # https://storage.googleapis.com/<bucket>/<object>
    #
    # The following are deprecated but checked:
    # http://commondatastorage.googleapis.com/<bucket>/<object>
    # https://commondatastorage.googleapis.com/<bucket>/<object>
    gs_regex = re.compile(
        r'\A%s(commondata)?%s/%s/%s\Z' % (header, domain, bucket, obj))
    match = gs_regex.match(url)
    if match:
      gs_url = r'gs://%s/%s' % (match.group('bucket'), match.group('obj'))
      # In case gsutil is not installed, continue as a normal URL.
      return (self._DownloadGsUrl(gs_url, dest_dir) or
              self._DownloadUrl(url, dest_dir))

    # Unauthenticated download of the object.
    return self._DownloadUrl(url, dest_dir)

  def _GetAttributeScripts(self, attribute_data, dest_dir):
    """Retrieve the scripts from attribute metadata.

    Args:
      attribute_data: dict, the contents of the attributes metadata.
      dest_dir: string, the path to a directory for storing metadata scripts.

    Returns:
      dict, a dictionary mapping metadata keys to files storing scripts.
    """
    script_dict = {}
    attribute_data = attribute_data or {}

    # Inline script: write the literal metadata value to a temp file.
    metadata_key = '%s-script' % self.script_type
    metadata_value = attribute_data.get(metadata_key)
    if metadata_value:
      self.logger.info('Found %s in metadata.' % metadata_key)
      with tempfile.NamedTemporaryFile(
          mode='w', dir=dest_dir, delete=False) as dest:
        dest.write(metadata_value.lstrip())
        script_dict[metadata_key] = dest.name

    # URL script: download it. Both keys may be present at once; each
    # gets its own entry in the returned dict.
    metadata_key = '%s-script-url' % self.script_type
    metadata_value = attribute_data.get(metadata_key)
    if metadata_value:
      self.logger.info('Found %s in metadata.' % metadata_key)
      script_dict[metadata_key] = self._DownloadScript(metadata_value, dest_dir)

    return script_dict

  def GetScripts(self, dest_dir):
    """Retrieve the scripts to execute.

    Args:
      dest_dir: string, the path to a directory for storing metadata scripts.

    Returns:
      dict, a dictionary mapping set metadata keys with associated scripts.
    """
    metadata_dict = self.watcher.GetMetadata() or {}

    try:
      instance_data = metadata_dict['instance']['attributes']
    except KeyError:
      instance_data = None
      self.logger.warning('Instance attributes were not found.')

    try:
      project_data = metadata_dict['project']['attributes']
    except KeyError:
      project_data = None
      self.logger.warning('Project attributes were not found.')

    # Instance-level scripts take precedence over project-level ones.
    return (self._GetAttributeScripts(instance_data, dest_dir) or
            self._GetAttributeScripts(project_data, dest_dir))
| 0 | 0 | 0 |
fdb0de39b17ce6faa979c47e5b91b69793112187 | 1,915 | py | Python | src/preprocess/buildData_notuse.py | zhouqilin1993/CodeSumy | ea824e6a45f42b73aba85187eb056e75e0b16a36 | [
"MIT"
] | 1 | 2018-08-30T11:37:35.000Z | 2018-08-30T11:37:35.000Z | src/preprocess/buildData_notuse.py | zhouqilin1993/CodeSumy | ea824e6a45f42b73aba85187eb056e75e0b16a36 | [
"MIT"
] | null | null | null | src/preprocess/buildData_notuse.py | zhouqilin1993/CodeSumy | ea824e6a45f42b73aba85187eb056e75e0b16a36 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
sys.path.append("..")
from src.seq2seq import setting
import re
import collections
import json
# 将下载得到的数据进行预处理,处理结果放置到到workdir目录中
# 获取GitHub和StackOverflow的数据,并将处理后的数据放到workdir目录下
if __name__ == '__main__':
buildVocab("so","java")
buildVocab("so","csharp")
| 29.921875 | 77 | 0.591645 | # -*- coding: utf-8 -*-
import sys
sys.path.append("..")
from src.seq2seq import setting
import re
import collections
import json
# 将下载得到的数据进行预处理,处理结果放置到到workdir目录中
def tokenizeNL(nl):
    """Split a natural-language string into word and punctuation tokens.

    Accepts either ``str`` or UTF-8 ``bytes``. The previous implementation
    assumed Python 2 byte strings and crashed on Python 3, where ``str``
    has no ``decode`` method. Non-ASCII characters are replaced with '?'
    to match the historical ``encode('ascii', 'replace')`` behavior.

    Args:
        nl: the raw natural-language text (str or UTF-8 bytes).

    Returns:
        list of str tokens: runs of word characters, or single
        non-space/non-word characters.
    """
    nl = nl.strip()
    if isinstance(nl, bytes):
        nl = nl.decode('utf-8')
    # Squash non-ASCII to '?' exactly like the original Py2 code path.
    nl = nl.encode('ascii', 'replace').decode('ascii')
    return re.findall(r"[\w]+|[^\s\w]", nl)
def tokenizeCode(code, lang):
    """Split a source-code snippet into identifier and symbol tokens.

    Accepts either ``str`` or UTF-8 ``bytes`` (the previous implementation
    was Python-2-only: it called ``decode`` on ``str``). Non-ASCII
    characters are replaced with '?', matching the historical behavior.

    Args:
        code: the raw source snippet (str or UTF-8 bytes).
        lang: language name (e.g. 'java' or 'csharp'); currently unused,
            reserved for a future language-aware lexer (e.g. ANTLR4-built)
            replacing the regex approximation below.

    Returns:
        list of str tokens.
    """
    code = code.strip()
    if isinstance(code, bytes):
        code = code.decode('utf-8')
    code = code.encode('ascii', 'replace').decode('ascii')
    # Token extraction could use an ANTLR4-generated lexer per language;
    # a regex approximation is used for now.
    return re.findall(r"[\w]+|[^\s\w]", code)
def buildVocab(plat, lang):
    """Build word/token vocabularies for one platform+language corpus.

    Reads ``<HOME_DIR>/data/<plat>/<lang>/data.txt`` (tab-separated lines
    of id, natural-language text, code), counts NL word and code token
    frequencies, and writes four files under ``setting.WORKDIR``:

    - ``vocab.<lang>.text`` / ``vocab.<lang>.code``: ``token\\tcount``
      lines for entries above the TEXT/CODE_UNK_THRESHOLD settings.
    - ``<plat>.<lang>.vocab.text`` / ``...vocab.code``: full frequency
      Counters serialized as JSON.

    Fix over the original: all files are now opened via ``with`` so
    handles are closed even when parsing/IO raises mid-way (the original
    leaked the input handle and, on error, the output handles too).

    Args:
        plat: platform name, e.g. "so" (StackOverflow).
        lang: language name, e.g. "java" or "csharp".
    """
    filename = setting.HOME_DIR + "/data/" + plat + "/" + lang + "/data.txt"
    words = collections.Counter()
    tokens = collections.Counter()
    with open(filename, "r") as fin:
        for line in fin:
            Lid, Lnl, Lcode = line.strip().split('\t')
            tokens.update(tokenizeCode(Lcode, lang))
            words.update(tokenizeNL(Lnl))

    with open(setting.WORKDIR + '/vocab.' + lang + '.text', 'w') as fa, \
            open(setting.WORKDIR + '/vocab.' + lang + '.code', 'w') as fb:
        # Only emit tokens frequent enough to escape the <unk> bucket.
        for tok, cnt in tokens.items():
            if cnt > setting.CODE_UNK_THRESHOLD:
                fb.write(tok + '\t' + str(cnt) + '\n')
        for wd, cnt in words.items():
            if cnt > setting.TEXT_UNK_THRESHOLD:
                fa.write(wd + '\t' + str(cnt) + '\n')

    # Full (unthresholded) frequency tables, JSON-serialized.
    with open(setting.WORKDIR + '/' + plat + '.' + lang + '.vocab.text', 'w') as f1:
        f1.write(json.dumps(words))
    with open(setting.WORKDIR + '/' + plat + '.' + lang + '.vocab.code', 'w') as f2:
        f2.write(json.dumps(tokens))
    return
# 获取GitHub和StackOverflow的数据,并将处理后的数据放到workdir目录下
if __name__ == '__main__':
buildVocab("so","java")
buildVocab("so","csharp")
| 1,600 | 0 | 69 |
f8bc9b48dadf247d6218a64c8d04306d447dd74d | 2,039 | py | Python | src/config/api-server/vnc_cfg_api_server/resources/port_profile.py | atsgen/tf-controller | 9321889cdd3d7108980cc88937b2e82956502cc5 | [
"Apache-2.0"
] | 37 | 2020-09-21T10:42:26.000Z | 2022-01-09T10:16:40.000Z | src/config/api-server/vnc_cfg_api_server/resources/port_profile.py | atsgen/tf-controller | 9321889cdd3d7108980cc88937b2e82956502cc5 | [
"Apache-2.0"
] | null | null | null | src/config/api-server/vnc_cfg_api_server/resources/port_profile.py | atsgen/tf-controller | 9321889cdd3d7108980cc88937b2e82956502cc5 | [
"Apache-2.0"
] | 21 | 2020-08-25T12:48:42.000Z | 2022-03-22T04:32:18.000Z | #
# Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.
#
from vnc_api.gen.resource_common import PortProfile
from vnc_cfg_api_server.resources._resource_base import ResourceMixin
| 32.365079 | 71 | 0.62874 | #
# Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.
#
from vnc_api.gen.resource_common import PortProfile
from vnc_cfg_api_server.resources._resource_base import ResourceMixin
class PortProfileServer(ResourceMixin, PortProfile):
    """API-server hooks validating PortProfile objects before DB writes."""

    @staticmethod
    def validate_storm_control_back_refs(obj_dict):
        # A port profile may reference at most one storm-control profile.
        refs = obj_dict.get('storm_control_profile_refs') or []
        if len(refs) > 1:
            targets = [ref.get('to') for ref in refs]
            msg = ("Port profile %s has more than one "
                   "storm profile refs %s" % (obj_dict.get('fq_name'),
                                              targets))
            return False, (400, msg)
        return True, ''
    # end validate_storm_control_back_refs

    @staticmethod
    def validate_port_profile_params(obj_dict):
        # Reject an MTU outside the supported 256..9216 window. A falsy
        # MTU (absent or zero) is deliberately left unvalidated.
        profile_params = obj_dict.get('port_profile_params') or {}
        port_params = profile_params.get('port_params') or {}
        port_mtu = port_params.get('port_mtu')
        if port_mtu and not (256 <= port_mtu <= 9216):
            return False, (400, "Port mtu can be only within 256"
                                " - 9216")
        return True, ''
    # end validate_port_profile_params

    @classmethod
    def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
        # Run every validator; stop at the first failure.
        for validate in (cls.validate_storm_control_back_refs,
                         cls.validate_port_profile_params):
            ok, result = validate(obj_dict)
            if not ok:
                return ok, result
        return True, ''
    # end pre_dbe_create

    @classmethod
    def pre_dbe_update(cls, id, fq_name, obj_dict, db_conn, **kwargs):
        # Same validation chain as create.
        for validate in (cls.validate_storm_control_back_refs,
                         cls.validate_port_profile_params):
            ok, result = validate(obj_dict)
            if not ok:
                return ok, result
        return True, ''
    # end pre_dbe_update
| 1,482 | 340 | 23 |
093a0c672f3627f1f5e237a45fb6d67c6e9a5291 | 5,061 | py | Python | sdk/python/pulumi_aws_native/amplifyuibuilder/get_theme.py | pulumi/pulumi-aws-native | 1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3 | [
"Apache-2.0"
] | 29 | 2021-09-30T19:32:07.000Z | 2022-03-22T21:06:08.000Z | sdk/python/pulumi_aws_native/amplifyuibuilder/get_theme.py | pulumi/pulumi-aws-native | 1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3 | [
"Apache-2.0"
] | 232 | 2021-09-30T19:26:26.000Z | 2022-03-31T23:22:06.000Z | sdk/python/pulumi_aws_native/amplifyuibuilder/get_theme.py | pulumi/pulumi-aws-native | 1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3 | [
"Apache-2.0"
] | 4 | 2021-11-10T19:42:01.000Z | 2022-02-05T10:15:49.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetThemeResult',
'AwaitableGetThemeResult',
'get_theme',
'get_theme_output',
]
@pulumi.output_type
# pylint: disable=using-constant-test
def get_theme(app_id: Optional[str] = None,
environment_name: Optional[str] = None,
id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetThemeResult:
"""
Definition of AWS::AmplifyUIBuilder::Theme Resource Type
"""
__args__ = dict()
__args__['appId'] = app_id
__args__['environmentName'] = environment_name
__args__['id'] = id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:amplifyuibuilder:getTheme', __args__, opts=opts, typ=GetThemeResult).value
return AwaitableGetThemeResult(
app_id=__ret__.app_id,
created_at=__ret__.created_at,
environment_name=__ret__.environment_name,
id=__ret__.id,
modified_at=__ret__.modified_at,
name=__ret__.name,
overrides=__ret__.overrides,
values=__ret__.values)
@_utilities.lift_output_func(get_theme)
def get_theme_output(app_id: Optional[pulumi.Input[str]] = None,
environment_name: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetThemeResult]:
"""
Definition of AWS::AmplifyUIBuilder::Theme Resource Type
"""
...
| 35.893617 | 147 | 0.652638 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetThemeResult',
'AwaitableGetThemeResult',
'get_theme',
'get_theme_output',
]
@pulumi.output_type
class GetThemeResult:
    """Typed result of the AWS::AmplifyUIBuilder::Theme getter.

    Generated by the Pulumi SDK Generator — prefer regenerating over
    hand edits.
    """

    def __init__(__self__, app_id=None, created_at=None, environment_name=None, id=None, modified_at=None, name=None, overrides=None, values=None):
        if app_id and not isinstance(app_id, str):
            raise TypeError("Expected argument 'app_id' to be a str")
        pulumi.set(__self__, "app_id", app_id)
        if created_at and not isinstance(created_at, str):
            raise TypeError("Expected argument 'created_at' to be a str")
        pulumi.set(__self__, "created_at", created_at)
        if environment_name and not isinstance(environment_name, str):
            raise TypeError("Expected argument 'environment_name' to be a str")
        pulumi.set(__self__, "environment_name", environment_name)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if modified_at and not isinstance(modified_at, str):
            raise TypeError("Expected argument 'modified_at' to be a str")
        pulumi.set(__self__, "modified_at", modified_at)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if overrides and not isinstance(overrides, list):
            raise TypeError("Expected argument 'overrides' to be a list")
        pulumi.set(__self__, "overrides", overrides)
        if values and not isinstance(values, list):
            raise TypeError("Expected argument 'values' to be a list")
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter(name="appId")
    def app_id(self) -> Optional[str]:
        return pulumi.get(self, "app_id")

    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> Optional[str]:
        return pulumi.get(self, "created_at")

    @property
    @pulumi.getter(name="environmentName")
    def environment_name(self) -> Optional[str]:
        return pulumi.get(self, "environment_name")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="modifiedAt")
    def modified_at(self) -> Optional[str]:
        return pulumi.get(self, "modified_at")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def overrides(self) -> Optional[Sequence['outputs.ThemeValues']]:
        return pulumi.get(self, "overrides")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence['outputs.ThemeValues']]:
        return pulumi.get(self, "values")
class AwaitableGetThemeResult(GetThemeResult):
    """GetThemeResult variant usable from both sync and async code.

    Generated by the Pulumi SDK Generator.
    """

    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns __await__ into a generator, which
        # is what the await protocol requires; awaiting resolves
        # immediately to a plain GetThemeResult copy.
        if False:
            yield self
        return GetThemeResult(
            app_id=self.app_id,
            created_at=self.created_at,
            environment_name=self.environment_name,
            id=self.id,
            modified_at=self.modified_at,
            name=self.name,
            overrides=self.overrides,
            values=self.values)
def get_theme(app_id: Optional[str] = None,
              environment_name: Optional[str] = None,
              id: Optional[str] = None,
              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetThemeResult:
    """
    Definition of AWS::AmplifyUIBuilder::Theme Resource Type

    :param app_id: Amplify app identifier (sent to the invoke as 'appId').
    :param environment_name: backend environment name (sent as 'environmentName').
    :param id: identifier of the theme to look up.
    :param opts: invoke options; when omitted, a default InvokeOptions with
        the SDK version is used.
    """
    __args__ = dict()
    __args__['appId'] = app_id
    __args__['environmentName'] = environment_name
    __args__['id'] = id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('aws-native:amplifyuibuilder:getTheme', __args__, opts=opts, typ=GetThemeResult).value

    return AwaitableGetThemeResult(
        app_id=__ret__.app_id,
        created_at=__ret__.created_at,
        environment_name=__ret__.environment_name,
        id=__ret__.id,
        modified_at=__ret__.modified_at,
        name=__ret__.name,
        overrides=__ret__.overrides,
        values=__ret__.values)
@_utilities.lift_output_func(get_theme)
def get_theme_output(app_id: Optional[pulumi.Input[str]] = None,
                     environment_name: Optional[pulumi.Input[str]] = None,
                     id: Optional[pulumi.Input[str]] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetThemeResult]:
    """
    Definition of AWS::AmplifyUIBuilder::Theme Resource Type

    Output-lifted variant of ``get_theme``: the decorator supplies the
    implementation, accepting pulumi Inputs and wrapping the result in an
    Output; hence the intentionally empty body.
    """
    ...
| 2,437 | 606 | 71 |
e7573205ad57285fdc719344e937849e5fb2b370 | 7,325 | py | Python | batch_nas_algorithms.py | ntienvu/TW_NAS | 72a6d3c933978663c583661eee765bc316f66572 | [
"Apache-2.0"
] | 4 | 2021-11-01T14:01:39.000Z | 2022-02-28T03:04:27.000Z | batch_nas_algorithms.py | ntienvu/TW_NAS | 72a6d3c933978663c583661eee765bc316f66572 | [
"Apache-2.0"
] | null | null | null | batch_nas_algorithms.py | ntienvu/TW_NAS | 72a6d3c933978663c583661eee765bc316f66572 | [
"Apache-2.0"
] | 2 | 2021-06-08T09:13:03.000Z | 2021-11-01T14:01:45.000Z | import sys
sys.path.insert(0,'..')
import pickle
import sys
import copy
import numpy as np
from argparse import Namespace
from data import Data
#from acquisition_functions import acq_fn
#from bo.bo.probo import ProBO
#from bo.dom.list import ListDomain
from bo.pp.pp_gp_my_distmat import MyGpDistmatPP
#from argparse import Namespace
from tqdm import tqdm
from cyDPP.decompose_kernel import decompose_kernel
from cyDPP.sample_dpp import sample_dpp
def compute_best_test_losses(data, k, total_queries):
"""
Given full data from a completed nas algorithm,
output the test error of the arch with the best val error
after every multiple of k
"""
results = []
for query in range(k, total_queries + k, k):
best_arch = sorted(data[:query], key=lambda i:i[3])[0]
test_error = best_arch[3]
results.append((query, test_error))
return results
def compute_best_val_losses(data, k, total_queries):
"""
Given full data from a completed nas algorithm,
output the test error of the arch with the best val error
after every multiple of k
"""
results = []
for query in range(k, total_queries + k, k):
best_arch = sorted(data[:query], key=lambda i:i[2])[0]
test_error = best_arch[2]
results.append((query, test_error))
return results
def random_search(search_space,
total_queries=100,
k=10,
allow_isomorphisms=False,
deterministic=True,
verbose=1):
"""
random search
"""
data = search_space.generate_random_dataset(num=total_queries,
allow_isomorphisms=allow_isomorphisms,
deterministic_loss=deterministic)
val_losses = [d[2] for d in data]
#top 10
val_losses = [np.asscalar(d[2]) for d in data]
top_arches_idx = np.argsort(np.asarray(val_losses))[:10] # descending
top_arches=[data[ii][0] for ii in top_arches_idx]
pickle.dump([top_arches,val_losses], open( "10_best_architectures.p", "wb" ) )
print(val_losses[top_arches_idx[0]])
if verbose:
top_5_loss = sorted([d[2] for d in data])[:min(5, len(data))]
print('Query {}, top 5 val losses {}'.format(total_queries, top_5_loss))
return data
# def GP_KDPP(myGP,xtrain,ytrain,xtest,newls,batch_size=5) :
# # KDPP for sampling diverse + quality items
# localGP=copy.deepcopy(myGP)
# mu_test,sig_test=localGP.gp_post(xtrain,ytrain,xtest,ls=newls,alpha=1,sigma=1e-3)
# #qualityK=np.zeros((N,N))+np.eye(N)*mu_test.reshape((-1,1))
# L=sig_test
#
# # decompose it into eigenvalues and eigenvectors
# vals, vecs = decompose_kernel(L)
# dpp_sample = sample_dpp(vals, vecs, k=batch_size)
# x_t_all=[ xtest[ii] for ii in dpp_sample]
# return x_t_all,dpp_sample
def gp_batch_bayesopt_search(search_space,
num_init=10,
batch_size=5,
total_queries=100,
distance='edit_distance',
algo_name='gp_bucb',
deterministic=True,
nppred=1000):
"""
Bayesian optimization with a GP prior
"""
num_iterations = total_queries - num_init
# black-box function that bayesopt will optimize
# this is GP
modelp = Namespace(kernp=Namespace(ls=0.11, alpha=1, sigma=1e-5), #ls=0.11 for tw
infp=Namespace(niter=num_iterations, nwarmup=5),#500
distance=distance, search_space=search_space.get_type())
modelp.distance=distance
# Set up initial data
init_data = search_space.generate_random_dataset(num=num_init,
deterministic_loss=deterministic)
xtrain = [d[0] for d in init_data]
ytrain = np.array([[d[2]] for d in init_data])
# init
data = Namespace()
data.X = xtrain
data.y = ytrain
myGP=MyGpDistmatPP(data,modelp,printFlag=False)
for ii in tqdm(range(num_iterations)):##
ytrain_scale=(ytrain-np.mean(ytrain))/np.std(ytrain)
data = Namespace()
data.X = xtrain
data.y = ytrain_scale
myGP.set_data(data) #update new data
xtest=search_space.get_candidate_xtest(xtrain,ytrain)
xtest=xtest[:100]
# this is to enforce to reupdate the K22 between test points
myGP.K22_d=None
myGP.K22_d1=None
# generate xtest # check here, could be wrong
#xtest = mylist.unif_rand_sample(500)
if ii%5==0:
newls=optimize_GP_hyper(myGP,xtrain,ytrain_scale,distance)
# select a batch of candidate
x_batch,idx_batch=GP_KDPP_Quality(myGP,xtrain,ytrain_scale,xtest,newls,batch_size)
# evaluate the black-box function
for xt in x_batch:
yt=fn(xt)
xtrain=np.append(xtrain,xt)
ytrain=np.append(ytrain,yt)
print(np.min(ytrain))
# get the validation and test loss for all architectures chosen by BayesOpt
results = []
for arch in xtrain:
archtuple = search_space.query_arch(arch,deterministic=deterministic)
results.append(archtuple)
return results
| 29.776423 | 91 | 0.629625 | import sys
sys.path.insert(0,'..')
import pickle
import sys
import copy
import numpy as np
from argparse import Namespace
from data import Data
#from acquisition_functions import acq_fn
#from bo.bo.probo import ProBO
#from bo.dom.list import ListDomain
from bo.pp.pp_gp_my_distmat import MyGpDistmatPP
#from argparse import Namespace
from tqdm import tqdm
from cyDPP.decompose_kernel import decompose_kernel
from cyDPP.sample_dpp import sample_dpp
def run_batch_nas_algorithm(search_space,algo_params):
# run nas algorithm
ps = copy.deepcopy(algo_params)
algo_name = ps['algo_name']
#algo_name = ps.pop('algo_name')
if algo_name == 'random':
ps.pop('algo_name')
ps.pop('batch_size')
data = random_search(search_space, **ps)
elif "gp" in algo_name:
data = gp_batch_bayesopt_search(search_space, **ps)
else:
print('invalid algorithm name')
sys.exit()
k = 1
if 'k' in ps:
k = ps['k']
result_val=compute_best_val_losses(data, k, len(data))
result_test=compute_best_test_losses(data, k, len(data))
return result_val,result_test
def compute_best_test_losses(data, k, total_queries):
"""
Given full data from a completed nas algorithm,
output the test error of the arch with the best val error
after every multiple of k
"""
results = []
for query in range(k, total_queries + k, k):
best_arch = sorted(data[:query], key=lambda i:i[3])[0]
test_error = best_arch[3]
results.append((query, test_error))
return results
def compute_best_val_losses(data, k, total_queries):
"""
Given full data from a completed nas algorithm,
output the test error of the arch with the best val error
after every multiple of k
"""
results = []
for query in range(k, total_queries + k, k):
best_arch = sorted(data[:query], key=lambda i:i[2])[0]
test_error = best_arch[2]
results.append((query, test_error))
return results
def random_search(search_space,
total_queries=100,
k=10,
allow_isomorphisms=False,
deterministic=True,
verbose=1):
"""
random search
"""
data = search_space.generate_random_dataset(num=total_queries,
allow_isomorphisms=allow_isomorphisms,
deterministic_loss=deterministic)
val_losses = [d[2] for d in data]
#top 10
val_losses = [np.asscalar(d[2]) for d in data]
top_arches_idx = np.argsort(np.asarray(val_losses))[:10] # descending
top_arches=[data[ii][0] for ii in top_arches_idx]
pickle.dump([top_arches,val_losses], open( "10_best_architectures.p", "wb" ) )
print(val_losses[top_arches_idx[0]])
if verbose:
top_5_loss = sorted([d[2] for d in data])[:min(5, len(data))]
print('Query {}, top 5 val losses {}'.format(total_queries, top_5_loss))
return data
def GP_KDPP_Quality(myGP,xtrain,ytrain,xtest,newls,batch_size=5) :
# KDPP for sampling diverse + quality items
localGP=copy.deepcopy(myGP)
#data = Namespace()
#data.X = xtrain
#data.y = ytrain
#localGP.set_data(data)
N=len(xtest)
mu_test,sig_test=localGP.gp_post(xtrain,ytrain,xtest,ls=newls,alpha=1,sigma=1e-3)
score=np.exp(-mu_test)
qualityK=np.zeros((N,N))+np.eye(N)*score.reshape((-1,1))
L=qualityK*sig_test*qualityK
# decompose it into eigenvalues and eigenvectors
vals, vecs = decompose_kernel(L)
dpp_sample = sample_dpp(vals, vecs, k=batch_size)
x_t_all=[ xtest[ii] for ii in dpp_sample]
return x_t_all,dpp_sample
# def GP_KDPP(myGP,xtrain,ytrain,xtest,newls,batch_size=5) :
# # KDPP for sampling diverse + quality items
# localGP=copy.deepcopy(myGP)
# mu_test,sig_test=localGP.gp_post(xtrain,ytrain,xtest,ls=newls,alpha=1,sigma=1e-3)
# #qualityK=np.zeros((N,N))+np.eye(N)*mu_test.reshape((-1,1))
# L=sig_test
#
# # decompose it into eigenvalues and eigenvectors
# vals, vecs = decompose_kernel(L)
# dpp_sample = sample_dpp(vals, vecs, k=batch_size)
# x_t_all=[ xtest[ii] for ii in dpp_sample]
# return x_t_all,dpp_sample
def optimize_GP_hyper(myGP,xtrain,ytrain,distance):
# optimizing the GP hyperparameters
if distance =="tw_distance" or distance=="tw_2_distance" or distance=="tw_2g_distance":
newls=myGP.optimise_gp_hyperparameter_v3(xtrain,ytrain,alpha=1,sigma=1e-4)
else:
newls=myGP.optimise_gp_hyperparameter(xtrain,ytrain,alpha=1,sigma=1e-3)
return newls
def gp_batch_bayesopt_search(search_space,
num_init=10,
batch_size=5,
total_queries=100,
distance='edit_distance',
algo_name='gp_bucb',
deterministic=True,
nppred=1000):
"""
Bayesian optimization with a GP prior
"""
num_iterations = total_queries - num_init
# black-box function that bayesopt will optimize
def fn(arch):
return search_space.query_arch(arch, deterministic=deterministic)[2]
# this is GP
modelp = Namespace(kernp=Namespace(ls=0.11, alpha=1, sigma=1e-5), #ls=0.11 for tw
infp=Namespace(niter=num_iterations, nwarmup=5),#500
distance=distance, search_space=search_space.get_type())
modelp.distance=distance
# Set up initial data
init_data = search_space.generate_random_dataset(num=num_init,
deterministic_loss=deterministic)
xtrain = [d[0] for d in init_data]
ytrain = np.array([[d[2]] for d in init_data])
# init
data = Namespace()
data.X = xtrain
data.y = ytrain
myGP=MyGpDistmatPP(data,modelp,printFlag=False)
for ii in tqdm(range(num_iterations)):##
ytrain_scale=(ytrain-np.mean(ytrain))/np.std(ytrain)
data = Namespace()
data.X = xtrain
data.y = ytrain_scale
myGP.set_data(data) #update new data
xtest=search_space.get_candidate_xtest(xtrain,ytrain)
xtest=xtest[:100]
# this is to enforce to reupdate the K22 between test points
myGP.K22_d=None
myGP.K22_d1=None
# generate xtest # check here, could be wrong
#xtest = mylist.unif_rand_sample(500)
if ii%5==0:
newls=optimize_GP_hyper(myGP,xtrain,ytrain_scale,distance)
# select a batch of candidate
x_batch,idx_batch=GP_KDPP_Quality(myGP,xtrain,ytrain_scale,xtest,newls,batch_size)
# evaluate the black-box function
for xt in x_batch:
yt=fn(xt)
xtrain=np.append(xtrain,xt)
ytrain=np.append(ytrain,yt)
print(np.min(ytrain))
# get the validation and test loss for all architectures chosen by BayesOpt
results = []
for arch in xtrain:
archtuple = search_space.query_arch(arch,deterministic=deterministic)
results.append(archtuple)
return results
| 1,757 | 0 | 95 |
dca940391dee39844187a43021eaf8685becdc7f | 716 | py | Python | tests/test_cockpit/settings.py | wx-b/cockpit | af91391ddab2a8aef85905b081ccf67d94c1a0e5 | [
"MIT"
] | 367 | 2021-02-12T17:22:55.000Z | 2022-03-29T20:47:35.000Z | tests/test_cockpit/settings.py | wx-b/cockpit | af91391ddab2a8aef85905b081ccf67d94c1a0e5 | [
"MIT"
] | 11 | 2021-04-30T07:58:51.000Z | 2022-02-22T15:54:42.000Z | tests/test_cockpit/settings.py | wx-b/cockpit | af91391ddab2a8aef85905b081ccf67d94c1a0e5 | [
"MIT"
] | 19 | 2021-07-14T12:16:13.000Z | 2022-02-17T16:48:00.000Z | """Settings used by the tests in this submodule."""
import torch
from tests.settings import SETTINGS as GLOBAL_SETTINGS
from tests.utils.data import load_toy_data
from tests.utils.models import load_toy_model
from tests.utils.problem import make_problems_with_ids
LOCAL_SETTINGS = [
{
"data_fn": lambda: load_toy_data(batch_size=5),
"model_fn": load_toy_model,
"individual_loss_function_fn": lambda: torch.nn.CrossEntropyLoss(
reduction="none"
),
"loss_function_fn": lambda: torch.nn.CrossEntropyLoss(reduction="mean"),
"iterations": 1,
},
]
SETTINGS = GLOBAL_SETTINGS + LOCAL_SETTINGS
PROBLEMS, PROBLEMS_IDS = make_problems_with_ids(SETTINGS)
| 29.833333 | 80 | 0.726257 | """Settings used by the tests in this submodule."""
import torch
from tests.settings import SETTINGS as GLOBAL_SETTINGS
from tests.utils.data import load_toy_data
from tests.utils.models import load_toy_model
from tests.utils.problem import make_problems_with_ids
LOCAL_SETTINGS = [
{
"data_fn": lambda: load_toy_data(batch_size=5),
"model_fn": load_toy_model,
"individual_loss_function_fn": lambda: torch.nn.CrossEntropyLoss(
reduction="none"
),
"loss_function_fn": lambda: torch.nn.CrossEntropyLoss(reduction="mean"),
"iterations": 1,
},
]
SETTINGS = GLOBAL_SETTINGS + LOCAL_SETTINGS
PROBLEMS, PROBLEMS_IDS = make_problems_with_ids(SETTINGS)
| 0 | 0 | 0 |
a5706e7d656e4ed8fd9824d5e9e5fd69a7fda25b | 238 | py | Python | cryptofeed_werks/exchanges/binance/constants.py | globophobe/crypto-tick-data | 7ec5d1e136b9bc27ae936f55cf6ab7fe5e37bda4 | [
"MIT"
] | 7 | 2021-12-30T02:38:17.000Z | 2022-03-08T16:14:35.000Z | cryptofeed_werks/exchanges/binance/constants.py | globophobe/crypto-tick-data | 7ec5d1e136b9bc27ae936f55cf6ab7fe5e37bda4 | [
"MIT"
] | null | null | null | cryptofeed_werks/exchanges/binance/constants.py | globophobe/crypto-tick-data | 7ec5d1e136b9bc27ae936f55cf6ab7fe5e37bda4 | [
"MIT"
] | 1 | 2022-01-28T00:18:45.000Z | 2022-01-28T00:18:45.000Z | BINANCE_API_KEY = "BINANCE_API_KEY"
BINANCE_MAX_WEIGHT = "BINANCE_MAX_WEIGHT"
API_URL = "https://api.binance.com/api/v3"
MAX_RESULTS = 1000
# Response 429, when x-mbx-used-weight-1m is 1200
MAX_WEIGHT = 1200
MIN_ELAPSED_PER_REQUEST = 0
| 23.8 | 49 | 0.781513 | BINANCE_API_KEY = "BINANCE_API_KEY"
BINANCE_MAX_WEIGHT = "BINANCE_MAX_WEIGHT"
API_URL = "https://api.binance.com/api/v3"
MAX_RESULTS = 1000
# Response 429, when x-mbx-used-weight-1m is 1200
MAX_WEIGHT = 1200
MIN_ELAPSED_PER_REQUEST = 0
| 0 | 0 | 0 |
f237bfbc0208b499c03817fb7c941603954defec | 1,558 | py | Python | PrepareAudio.py | CarpCodeTech/kaldi-docker | e141a930e16965b93aa60793209a6fa4a012a02b | [
"MIT"
] | null | null | null | PrepareAudio.py | CarpCodeTech/kaldi-docker | e141a930e16965b93aa60793209a6fa4a012a02b | [
"MIT"
] | null | null | null | PrepareAudio.py | CarpCodeTech/kaldi-docker | e141a930e16965b93aa60793209a6fa4a012a02b | [
"MIT"
] | null | null | null | ##This code is to prepare audio for pure kaldi prototype, it assumes audios are in wav format
""" Command-line usage:
python PrepareAudio.py Audio_folder wav_rspecifier spk2utt_rspecifier
"""
import os
import re
import shutil
from sys import exit
import sys
import getopt
import subprocess
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:], "", [""])
scp_dict = {}
if len(args) != 3 :
raise ValueError("Please varify your input")
Audio_folder=args[0]
wav_rspecifier=args[1]
spk2utt_rspecifier=args[2]
wav_scp=open(wav_rspecifier,"w")
spk2utt_file=open(spk2utt_rspecifier,"w")
index=0
for dirpath, dirnames, filenames in os.walk(Audio_folder):
for file in filenames:
if "wav" in file:
index+=1
utt_name=file.replace(".wav","").strip()
transfer_line="sox %s --bits 16 -e signed -r 16k -c 1 -t wav - |"%os.path.join(dirpath,file)
scp_dict[utt_name] = transfer_line
utt_name_list = list(scp_dict.keys())
utt_name_list.sort()
for utt_name in utt_name_list:
wav_scp.write("%s %s\n"%(utt_name, scp_dict[utt_name]))
spk2utt_file.write("%s %s\n"%(utt_name, utt_name))
wav_scp.close()
spk2utt_file.close()
except :
print(__doc__)
(type, value, traceback) = sys.exc_info()
print(sys.exc_info())
sys.exit(0) | 34.622222 | 112 | 0.584724 | ##This code is to prepare audio for pure kaldi prototype, it assumes audios are in wav format
""" Command-line usage:
python PrepareAudio.py Audio_folder wav_rspecifier spk2utt_rspecifier
"""
import os
import re
import shutil
from sys import exit
import sys
import getopt
import subprocess
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:], "", [""])
scp_dict = {}
if len(args) != 3 :
raise ValueError("Please varify your input")
Audio_folder=args[0]
wav_rspecifier=args[1]
spk2utt_rspecifier=args[2]
wav_scp=open(wav_rspecifier,"w")
spk2utt_file=open(spk2utt_rspecifier,"w")
index=0
for dirpath, dirnames, filenames in os.walk(Audio_folder):
for file in filenames:
if "wav" in file:
index+=1
utt_name=file.replace(".wav","").strip()
transfer_line="sox %s --bits 16 -e signed -r 16k -c 1 -t wav - |"%os.path.join(dirpath,file)
scp_dict[utt_name] = transfer_line
utt_name_list = list(scp_dict.keys())
utt_name_list.sort()
for utt_name in utt_name_list:
wav_scp.write("%s %s\n"%(utt_name, scp_dict[utt_name]))
spk2utt_file.write("%s %s\n"%(utt_name, utt_name))
wav_scp.close()
spk2utt_file.close()
except :
print(__doc__)
(type, value, traceback) = sys.exc_info()
print(sys.exc_info())
sys.exit(0) | 0 | 0 | 0 |
f3caa8be6c03e7eb061e1afef642f57f865b0d09 | 8,391 | py | Python | release.py | sosey/testbot | 578a3b74e921fd32711f9d50ef32d35e01ae63e0 | [
"BSD-3-Clause"
] | null | null | null | release.py | sosey/testbot | 578a3b74e921fd32711f9d50ef32d35e01ae63e0 | [
"BSD-3-Clause"
] | null | null | null | release.py | sosey/testbot | 578a3b74e921fd32711f9d50ef32d35e01ae63e0 | [
"BSD-3-Clause"
] | null | null | null | """
Get the release information from a specific repository
curl 'https://api.github.com/repos/sosey/testbot/releases'
testbot.rel is example response for the sosey/testbot repo
# used to render the markdown to HTML which can be walked
# or used in the html page as-is
pip install mistune
# Use bs4 to walk the html tree for parsing
# from bs4 import BeautifulSoup as bs
# .stripped_strings on the bs4 object will remove the html tags
# bs(m, "html.parser") # will return a bs object for parsing
"""
# SSLError: Can't connect to HTTPS URL because the SSL module is not available.
# using pycurl for now as an example
# import requests
import json
import mistune
import os
import pycurl
from io import BytesIO
def MakeSummaryPage(data=None, outpage=""):
"""Make a summary HTML page from a list of repos with release information.
Data should be a list of dictionaries
"""
if not isinstance(data, list):
raise TypeError("Expected data to be a list of dictionaries")
if not outpage:
raise TypeError("Expected outpage name")
# print them to a web page we can display for ourselves,
print("Checking for older html file")
if os.access(outpage, os.F_OK):
os.remove(outpage)
html = open(outpage, 'w')
# this section includes the javascript code and google calls for the
# interactive features (table sorting)
b = '''
<html>
<head> <title>Software Status Page </title>
<meta charset="utf-8">
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<script type="text/javascript">
google.load("visualization", "1", {packages:["table"]});
google.setOnLoadCallback(drawTable);
function drawTable() {
var data = new google.visualization.DataTable();
data.addColumn("string", "Software");
data.addColumn("string", "Version");
data.addColumn("string", "Repository Link");
data.addColumn("string", "Reprocessing Information");
data.addColumn("string", "Released");
data.addColumn("string", "Author")
data.addRows([
'''
html.write(b)
for repo in data:
# below is the google table code
software = repo['name']
version = repo['version']
descrip = RenderHTML(repo['release_notes'])
website = repo['website']
date = repo['published']
author = repo['author']
avatar = repo['avatar']
html.write("[\"{}\",\"{}\",\'<a href=\"{}\">{}</a>\',{}{}{},\"{}\",\'<a href=\"{}\">{}</a>\'],\n".format(software, version, website, "Code Repository", chr(96), descrip, chr(96), date, avatar, author))
ee = ''' ]);
var table = new google.visualization.Table(document.getElementById("table_div"));
table.draw(data, {showRowNumber: true, allowHtml: true});
}
</script>
</head>
<body>
<br>Click on the column fields to sort
<div id="table_div"></div>
</body>
</html>
'''
html.write(ee)
html.close()
def RenderHTML(md=""):
"""Turn markdown string into beautiful soup structure."""
if not md:
return ValueError("Supply a string with markdown")
m = mistune.markdown(md)
return m
def GetReleaseSpecs(data=None):
"""parse out the release information from the json object.
This assumes data release specified in data as a dictionary
"""
if not isinstance(data, dict):
raise TypeError("Wrong input data type, expected list")
specs = {}
try:
specs['release_notes'] = data['body']
except KeyError:
specs['release_notes'] = "None available"
try:
specs['name'] = data['repo_name']
except KeyError:
try:
specs['name'] = data['name']
except KeyError:
specs['name'] = "No Name Set"
try:
specs['version'] = data['tag_name']
except KeyError:
try:
specs['version'] = data['name']
except KeyError:
specs['version'] = "No versions"
try:
specs['published'] = data['published_at']
except KeyError:
specs['published'] = "No Data"
try:
specs['website'] = data['html_url']
except KeyError:
specs['website'] = 'No website provided'
try:
specs['author'] = data['author']['login']
except KeyError:
specs['author'] = "STScI"
try:
specs['avatar'] = data['author']['avatar_url']
except KeyError:
specs['avatar'] = "None Provided"
return specs
def ReadResponseFile(response=""):
"""Read a json response file."""
if not response:
raise ValueError("Please specify json file to read")
with open(response, 'r') as f:
data = json.load(f)
return data
def GetAllReleases(org="", outpage=""):
"""Get the release information for all repositories in an organization.
Returns a list of dictionaries with information on each repository
The github api only returns the first 30 repos by default.
At most it can return 100 repos at a time. Multiple calls
need to be made for more.
"""
if not org:
raise ValueError("Please supply github organization")
orgrepo_url = "https://api.github.com/orgs/{0:s}/repos?per_page=10".format(org)
repos_url = "https://api.github.com/repos/{0:s}/".format(org)
print("Examinging {0:s}....".format(orgrepo_url))
# Get a list of the repositories
buffer = BytesIO()
c = pycurl.Curl()
c.setopt(c.URL, orgrepo_url)
c.setopt(c.WRITEDATA, buffer)
c.perform()
c.close()
res = buffer.getvalue().decode('iso-8859-1')
results = json.loads(res) # list of dicts
repo_names = []
print(repo_names)
# no account for orgs without repos
for i in range(0, len(results), 1):
repo_names.append(results[i]['name'])
# Loop through all the repositories to get release information
# Repositories may have multiple releases
repo_releases = []
for name in repo_names:
data = CheckForRelease(repos_url, name) # returns a list of results
# expand the release information into separate dicts
for d in data:
relspecs = GetReleaseSpecs(d)
relspecs['repo_name'] = name
repo_releases.append(relspecs)
MakeSummaryPage(repo_releases, outpage=outpage)
def CheckForRelease(repos="", name=""):
"""Check for release information, not all repos may have releases.
Repositories without release information may have tag information
"""
rel_url = repos + ("{0:s}/releases".format(name))
tags_url = repos + ("{0:s}/tags".format(name))
buffer = BytesIO()
c = pycurl.Curl()
c.setopt(c.URL, rel_url)
c.setopt(c.WRITEDATA, buffer)
c.perform()
c.close()
results = buffer.getvalue().decode('iso-8859-1')
jdata = json.loads(results)
if len(jdata) == 0:
c = pycurl.Curl()
buffer = BytesIO()
c.setopt(c.WRITEDATA, buffer)
c.setopt(c.URL, tags_url) # get info from tags
c.perform()
c.close()
results = buffer.getvalue().decode('iso-8859-1')
jdata = json.loads(results)
for j in jdata:
j['html_url'] = j['commit']['url']
j['tag_name'] = j['name']
j['name'] = name
return jdata
# def GetAllReleases(user="", repo=""):
# """Get all the release information for a specific repository.
#
# This currently isn't working on my mac because
# SSLError: Can't connect to HTTPS URL because the SSL
# module is not available.
# """
#
# if not user:
# raise ValueError("Please supply github user")
# if not repo:
# raise ValueError("Please supply a github repo name")
#
# repo_url = "https://api.github.com/repos"
# req = "/".join([repo_url, user, repo, "releases"])
# return requests.get(req, verify=False).json()
# make a pycurl call for now becuase of the https issue
if __name__ == "__main__":
"""Create and example output from just the test repository."""
url = "https://api.github.com/repos/sosey/testbot/releases"
page = ReadResponseFile('testbot.rel') # reads into a list of dicts
specs = GetReleaseSpecs(page.pop()) # just send in the one dict
specs['name'] = 'testbot'
MakeSummaryPage([specs], 'testbot_release.html')
| 32.523256 | 209 | 0.622929 | """
Get the release information from a specific repository
curl 'https://api.github.com/repos/sosey/testbot/releases'
testbot.rel is example response for the sosey/testbot repo
# used to render the markdown to HTML which can be walked
# or used in the html page as-is
pip install mistune
# Use bs4 to walk the html tree for parsing
# from bs4 import BeautifulSoup as bs
# .stripped_strings on the bs4 object will remove the html tags
# bs(m, "html.parser") # will return a bs object for parsing
"""
# SSLError: Can't connect to HTTPS URL because the SSL module is not available.
# using pycurl for now as an example
# import requests
import json
import mistune
import os
import pycurl
from io import BytesIO
def MakeSummaryPage(data=None, outpage=""):
"""Make a summary HTML page from a list of repos with release information.
Data should be a list of dictionaries
"""
if not isinstance(data, list):
raise TypeError("Expected data to be a list of dictionaries")
if not outpage:
raise TypeError("Expected outpage name")
# print them to a web page we can display for ourselves,
print("Checking for older html file")
if os.access(outpage, os.F_OK):
os.remove(outpage)
html = open(outpage, 'w')
# this section includes the javascript code and google calls for the
# interactive features (table sorting)
b = '''
<html>
<head> <title>Software Status Page </title>
<meta charset="utf-8">
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<script type="text/javascript">
google.load("visualization", "1", {packages:["table"]});
google.setOnLoadCallback(drawTable);
function drawTable() {
var data = new google.visualization.DataTable();
data.addColumn("string", "Software");
data.addColumn("string", "Version");
data.addColumn("string", "Repository Link");
data.addColumn("string", "Reprocessing Information");
data.addColumn("string", "Released");
data.addColumn("string", "Author")
data.addRows([
'''
html.write(b)
for repo in data:
# below is the google table code
software = repo['name']
version = repo['version']
descrip = RenderHTML(repo['release_notes'])
website = repo['website']
date = repo['published']
author = repo['author']
avatar = repo['avatar']
html.write("[\"{}\",\"{}\",\'<a href=\"{}\">{}</a>\',{}{}{},\"{}\",\'<a href=\"{}\">{}</a>\'],\n".format(software, version, website, "Code Repository", chr(96), descrip, chr(96), date, avatar, author))
ee = ''' ]);
var table = new google.visualization.Table(document.getElementById("table_div"));
table.draw(data, {showRowNumber: true, allowHtml: true});
}
</script>
</head>
<body>
<br>Click on the column fields to sort
<div id="table_div"></div>
</body>
</html>
'''
html.write(ee)
html.close()
def RenderHTML(md=""):
"""Turn markdown string into beautiful soup structure."""
if not md:
return ValueError("Supply a string with markdown")
m = mistune.markdown(md)
return m
def GetReleaseSpecs(data=None):
"""parse out the release information from the json object.
This assumes data release specified in data as a dictionary
"""
if not isinstance(data, dict):
raise TypeError("Wrong input data type, expected list")
specs = {}
try:
specs['release_notes'] = data['body']
except KeyError:
specs['release_notes'] = "None available"
try:
specs['name'] = data['repo_name']
except KeyError:
try:
specs['name'] = data['name']
except KeyError:
specs['name'] = "No Name Set"
try:
specs['version'] = data['tag_name']
except KeyError:
try:
specs['version'] = data['name']
except KeyError:
specs['version'] = "No versions"
try:
specs['published'] = data['published_at']
except KeyError:
specs['published'] = "No Data"
try:
specs['website'] = data['html_url']
except KeyError:
specs['website'] = 'No website provided'
try:
specs['author'] = data['author']['login']
except KeyError:
specs['author'] = "STScI"
try:
specs['avatar'] = data['author']['avatar_url']
except KeyError:
specs['avatar'] = "None Provided"
return specs
def ReadResponseFile(response=""):
"""Read a json response file."""
if not response:
raise ValueError("Please specify json file to read")
with open(response, 'r') as f:
data = json.load(f)
return data
def GetAllReleases(org="", outpage=""):
"""Get the release information for all repositories in an organization.
Returns a list of dictionaries with information on each repository
The github api only returns the first 30 repos by default.
At most it can return 100 repos at a time. Multiple calls
need to be made for more.
"""
if not org:
raise ValueError("Please supply github organization")
orgrepo_url = "https://api.github.com/orgs/{0:s}/repos?per_page=10".format(org)
repos_url = "https://api.github.com/repos/{0:s}/".format(org)
print("Examinging {0:s}....".format(orgrepo_url))
# Get a list of the repositories
buffer = BytesIO()
c = pycurl.Curl()
c.setopt(c.URL, orgrepo_url)
c.setopt(c.WRITEDATA, buffer)
c.perform()
c.close()
res = buffer.getvalue().decode('iso-8859-1')
results = json.loads(res) # list of dicts
repo_names = []
print(repo_names)
# no account for orgs without repos
for i in range(0, len(results), 1):
repo_names.append(results[i]['name'])
# Loop through all the repositories to get release information
# Repositories may have multiple releases
repo_releases = []
for name in repo_names:
data = CheckForRelease(repos_url, name) # returns a list of results
# expand the release information into separate dicts
for d in data:
relspecs = GetReleaseSpecs(d)
relspecs['repo_name'] = name
repo_releases.append(relspecs)
MakeSummaryPage(repo_releases, outpage=outpage)
def CheckForRelease(repos="", name=""):
"""Check for release information, not all repos may have releases.
Repositories without release information may have tag information
"""
rel_url = repos + ("{0:s}/releases".format(name))
tags_url = repos + ("{0:s}/tags".format(name))
buffer = BytesIO()
c = pycurl.Curl()
c.setopt(c.URL, rel_url)
c.setopt(c.WRITEDATA, buffer)
c.perform()
c.close()
results = buffer.getvalue().decode('iso-8859-1')
jdata = json.loads(results)
if len(jdata) == 0:
c = pycurl.Curl()
buffer = BytesIO()
c.setopt(c.WRITEDATA, buffer)
c.setopt(c.URL, tags_url) # get info from tags
c.perform()
c.close()
results = buffer.getvalue().decode('iso-8859-1')
jdata = json.loads(results)
for j in jdata:
j['html_url'] = j['commit']['url']
j['tag_name'] = j['name']
j['name'] = name
return jdata
# def GetAllReleases(user="", repo=""):
# """Get all the release information for a specific repository.
#
# This currently isn't working on my mac because
# SSLError: Can't connect to HTTPS URL because the SSL
# module is not available.
# """
#
# if not user:
# raise ValueError("Please supply github user")
# if not repo:
# raise ValueError("Please supply a github repo name")
#
# repo_url = "https://api.github.com/repos"
# req = "/".join([repo_url, user, repo, "releases"])
# return requests.get(req, verify=False).json()
# make a pycurl call for now becuase of the https issue
if __name__ == "__main__":
"""Create and example output from just the test repository."""
url = "https://api.github.com/repos/sosey/testbot/releases"
page = ReadResponseFile('testbot.rel') # reads into a list of dicts
specs = GetReleaseSpecs(page.pop()) # just send in the one dict
specs['name'] = 'testbot'
MakeSummaryPage([specs], 'testbot_release.html')
| 0 | 0 | 0 |
35616cd00cf44f937d04f42847145b2a72318ea0 | 2,553 | py | Python | src/oop_ext/_type_checker_fixture.py | nicoddemus/oop-ext | 279c80eae56783c02d99ba7b94a8d2df7eb1aec3 | [
"MIT"
] | 12 | 2019-03-08T12:56:42.000Z | 2021-12-01T18:15:01.000Z | src/oop_ext/_type_checker_fixture.py | nicoddemus/oop-ext | 279c80eae56783c02d99ba7b94a8d2df7eb1aec3 | [
"MIT"
] | 30 | 2019-03-08T19:33:00.000Z | 2022-01-25T20:32:41.000Z | src/oop_ext/_type_checker_fixture.py | nicoddemus/oop-ext | 279c80eae56783c02d99ba7b94a8d2df7eb1aec3 | [
"MIT"
] | 1 | 2019-08-08T16:55:41.000Z | 2019-08-08T16:55:41.000Z | # mypy: disallow-untyped-defs
# mypy: disallow-any-decorated
import os
import re
from textwrap import dedent
import mypy.api
from pathlib import Path
from typing import List, Tuple
import attr
import pytest
@attr.s(auto_attribs=True)
class _Result:
"""
Encapsulates the result of a call to ``mypy.api``, providing helpful functions to check
that output.
"""
output: Tuple[str, str, int]
@property
@property
@property
@property
@attr.s(auto_attribs=True)
class TypeCheckerFixture:
"""
Fixture to help running mypy in source code and checking for success/specific errors.
This fixture is useful for libraries which provide type checking, allowing them
to ensure the type support is working as intended.
"""
path: Path
request: pytest.FixtureRequest
| 28.054945 | 91 | 0.599295 | # mypy: disallow-untyped-defs
# mypy: disallow-any-decorated
import os
import re
from textwrap import dedent
import mypy.api
from pathlib import Path
from typing import List, Tuple
import attr
import pytest
@attr.s(auto_attribs=True)
class _Result:
"""
Encapsulates the result of a call to ``mypy.api``, providing helpful functions to check
that output.
"""
output: Tuple[str, str, int]
def assert_errors(self, messages: List[str]) -> None:
assert self.error_report == ""
lines = self.report_lines
assert len(lines) == len(
messages
), f"Expected {len(messages)} failures, got {len(lines)}:\n" + "\n".join(lines)
for index, (obtained, expected) in enumerate(zip(lines, messages)):
m = re.search(expected, obtained)
assert m is not None, (
f"Expected regex at index {index}:\n"
f" {expected}\n"
f"did not match:\n"
f" {obtained}\n"
f"(note: use re.escape() to escape regex special characters)"
)
def assert_ok(self) -> None:
assert len(self.report_lines) == 0, "Expected no errors, got:\n " + "\n".join(
self.report_lines
)
assert self.exit_status == 0
@property
def normal_report(self) -> str:
return self.output[0]
@property
def error_report(self) -> str:
return self.output[1]
@property
def exit_status(self) -> int:
return self.output[2]
@property
def report_lines(self) -> List[str]:
lines = [x.strip() for x in self.normal_report.split("\n") if x.strip()]
# Drop last line (summary).
return lines[:-1]
@attr.s(auto_attribs=True)
class TypeCheckerFixture:
"""
Fixture to help running mypy in source code and checking for success/specific errors.
This fixture is useful for libraries which provide type checking, allowing them
to ensure the type support is working as intended.
"""
path: Path
request: pytest.FixtureRequest
def make_file(self, source: str) -> None:
name = self.request.node.name + ".py"
self.path.joinpath(name).write_text(dedent(source))
def run(self) -> _Result:
# Change current directory so error messages show only the relative
# path to the files.
cwd = os.getcwd()
try:
os.chdir(self.path)
x = mypy.api.run(["."])
return _Result(x)
finally:
os.chdir(cwd)
| 1,517 | 0 | 212 |
7bc19e6d0bd67432e13cc1400c9b8954ed0cc3b6 | 6,202 | py | Python | clubsandwich/ui/layout_options.py | eldarbogdanov/clubsandwich | dc1fb1f96eb8544547c4c25efef16a50bd1a79c5 | [
"MIT"
] | 66 | 2017-04-17T01:08:46.000Z | 2022-02-14T21:40:39.000Z | clubsandwich/ui/layout_options.py | eldarbogdanov/clubsandwich | dc1fb1f96eb8544547c4c25efef16a50bd1a79c5 | [
"MIT"
] | 19 | 2017-04-17T23:57:22.000Z | 2021-02-16T18:32:08.000Z | clubsandwich/ui/layout_options.py | eldarbogdanov/clubsandwich | dc1fb1f96eb8544547c4c25efef16a50bd1a79c5 | [
"MIT"
] | 17 | 2017-05-19T21:49:57.000Z | 2022-02-16T12:53:28.000Z | from collections import namedtuple
from numbers import Real
_LayoutOptions = namedtuple(
'_LayoutOptions',
['width', 'height', 'top', 'right', 'bottom', 'left'])
class LayoutOptions(_LayoutOptions):
"""
:param LayoutOptionValue width: width spec
:param LayoutOptionValue height: height spec
:param LayoutOptionValue top: top spec
:param LayoutOptionValue right: right spec
:param LayoutOptionValue bottom: bottom spec
:param LayoutOptionValue left: left spec
It is possible to define values that conflict. The behavior in these cases
is undefined.
.. py:attribute:: width
A :py:class:`LayoutOptionValue` constraining this view's width (or not).
.. py:attribute:: height
A :py:class:`LayoutOptionValue` constraining this view's height (or not).
.. py:attribute:: top
A :py:class:`LayoutOptionValue` constraining this view's distance from the
top of its superview (or not).
.. py:attribute:: right
A :py:class:`LayoutOptionValue` constraining this view's distance from the
right of its superview (or not).
.. py:attribute:: bottom
A :py:class:`LayoutOptionValue` constraining this view's distance from the
bottom of its superview (or not).
.. py:attribute:: left
A :py:class:`LayoutOptionValue` constraining this view's distance from the
left of its superview (or not).
"""
### Convenience initializers ###
@classmethod
def centered(self, width, height):
"""
Create a :py:class:`LayoutOptions` object that positions the view in the
center of the superview with a constant width and height.
"""
return LayoutOptions(
top=None, bottom=None, left=None, right=None,
width=width, height=height)
@classmethod
def column_left(self, width):
"""
Create a :py:class:`LayoutOptions` object that positions the view as a
full-height left column with a constant width.
"""
return LayoutOptions(
top=0, bottom=0, left=0, right=None,
width=width, height=None)
@classmethod
def column_right(self, width):
"""
Create a :py:class:`LayoutOptions` object that positions the view as a
full-height right column with a constant width.
"""
return LayoutOptions(
top=0, bottom=0, left=None, right=0,
width=width, height=None)
@classmethod
def row_top(self, height):
"""
Create a :py:class:`LayoutOptions` object that positions the view as a
full-height top row with a constant height.
"""
return LayoutOptions(
top=0, bottom=None, left=0, right=0,
width=None, height=height)
@classmethod
def row_bottom(self, height):
"""
Create a :py:class:`LayoutOptions` object that positions the view as a
full-height bottom row with a constant height.
"""
return LayoutOptions(
top=None, bottom=0, left=0, right=0,
width=None, height=height)
### Convenience modifiers ###
def with_updates(self, **kwargs):
"""
Returns a new :py:class:`LayoutOptions` object with the given changes to
its attributes. For example, here's a view with a constant width, on the
right side of its superview, with half the height of its superview::
# "right column, but only half height"
LayoutOptions.column_right(10).with_updates(bottom=0.5)
"""
opts = self._asdict()
opts.update(kwargs)
return LayoutOptions(**opts)
### Semi-internal layout API ###
| 33.524324 | 80 | 0.582876 | from collections import namedtuple
from numbers import Real
_LayoutOptions = namedtuple(
'_LayoutOptions',
['width', 'height', 'top', 'right', 'bottom', 'left'])
class LayoutOptions(_LayoutOptions):
"""
:param LayoutOptionValue width: width spec
:param LayoutOptionValue height: height spec
:param LayoutOptionValue top: top spec
:param LayoutOptionValue right: right spec
:param LayoutOptionValue bottom: bottom spec
:param LayoutOptionValue left: left spec
It is possible to define values that conflict. The behavior in these cases
is undefined.
.. py:attribute:: width
A :py:class:`LayoutOptionValue` constraining this view's width (or not).
.. py:attribute:: height
A :py:class:`LayoutOptionValue` constraining this view's height (or not).
.. py:attribute:: top
A :py:class:`LayoutOptionValue` constraining this view's distance from the
top of its superview (or not).
.. py:attribute:: right
A :py:class:`LayoutOptionValue` constraining this view's distance from the
right of its superview (or not).
.. py:attribute:: bottom
A :py:class:`LayoutOptionValue` constraining this view's distance from the
bottom of its superview (or not).
.. py:attribute:: left
A :py:class:`LayoutOptionValue` constraining this view's distance from the
left of its superview (or not).
"""
def __new__(cls, width=None, height=None, top=0, right=0, bottom=0, left=0):
self = super(LayoutOptions, cls).__new__(
cls, width, height, top, right, bottom, left)
return self
### Convenience initializers ###
@classmethod
def centered(self, width, height):
"""
Create a :py:class:`LayoutOptions` object that positions the view in the
center of the superview with a constant width and height.
"""
return LayoutOptions(
top=None, bottom=None, left=None, right=None,
width=width, height=height)
@classmethod
def column_left(self, width):
"""
Create a :py:class:`LayoutOptions` object that positions the view as a
full-height left column with a constant width.
"""
return LayoutOptions(
top=0, bottom=0, left=0, right=None,
width=width, height=None)
@classmethod
def column_right(self, width):
"""
Create a :py:class:`LayoutOptions` object that positions the view as a
full-height right column with a constant width.
"""
return LayoutOptions(
top=0, bottom=0, left=None, right=0,
width=width, height=None)
@classmethod
def row_top(self, height):
"""
Create a :py:class:`LayoutOptions` object that positions the view as a
full-height top row with a constant height.
"""
return LayoutOptions(
top=0, bottom=None, left=0, right=0,
width=None, height=height)
@classmethod
def row_bottom(self, height):
"""
Create a :py:class:`LayoutOptions` object that positions the view as a
full-height bottom row with a constant height.
"""
return LayoutOptions(
top=None, bottom=0, left=0, right=0,
width=None, height=height)
### Convenience modifiers ###
def with_updates(self, **kwargs):
"""
Returns a new :py:class:`LayoutOptions` object with the given changes to
its attributes. For example, here's a view with a constant width, on the
right side of its superview, with half the height of its superview::
# "right column, but only half height"
LayoutOptions.column_right(10).with_updates(bottom=0.5)
"""
opts = self._asdict()
opts.update(kwargs)
return LayoutOptions(**opts)
### Semi-internal layout API ###
def get_type(self, k):
# Return one of ``{'none', 'frame', 'constant', 'fraction'}``
val = getattr(self, k)
if val is None:
return 'none'
elif val == 'frame':
return 'frame'
elif val == 'intrinsic':
return 'intrinsic'
elif isinstance(val, Real):
if val >= 1:
return 'constant'
else:
return 'fraction'
else:
raise ValueError(
"Unknown type for option {}: {}".format(k, type(k)))
def get_is_defined(self, k):
return getattr(self, k) is not None
def get_debug_string_for_keys(self, keys):
return ','.join(["{}={}".format(k, self.get_type(k)) for k in keys])
def get_value(self, k, view):
if getattr(self, k) is None:
raise ValueError("Superview isn't relevant to this value")
elif self.get_type(k) == 'constant':
return getattr(self, k)
elif self.get_type(k) == 'intrinsic':
if k == 'width':
return view.intrinsic_size.width
elif k == 'height':
return view.intrinsic_size.height
else:
raise KeyError(
"'intrinsic' can only be used with width or height.")
elif self.get_type(k) == 'frame':
if k == 'left':
return view.layout_spec.x
elif k == 'top':
return view.layout_spec.y
elif k == 'right':
return superview.bounds.width - view.layout_spec.x2
elif k == 'bottom':
return superview.bounds.height - view.layout_spec.y2
elif k == 'width':
return view.layout_spec.width
elif k == 'height':
return view.layout_spec.height
else:
raise KeyError("Unknown key:", k)
elif self.get_type(k) == 'fraction':
val = getattr(self, k)
if k in ('left', 'width', 'right'):
return view.superview.bounds.width * val
elif k in ('top', 'height', 'bottom'):
return view.superview.bounds.height * val
else:
raise KeyError("Unknown key:", k)
| 2,360 | 0 | 135 |
693ddc5ec535bf5dcd05a8d5aee9489b99a4cfe3 | 2,635 | py | Python | pseudo/tree_transformer.py | mifieldxu/pseudo-lang | 889477c094236dc36526984be6f6537a4875e5a9 | [
"MIT"
] | 661 | 2016-03-12T07:32:36.000Z | 2018-11-12T14:31:30.000Z | pseudo/tree_transformer.py | mifieldxu/pseudo-lang | 889477c094236dc36526984be6f6537a4875e5a9 | [
"MIT"
] | 21 | 2016-03-07T03:49:17.000Z | 2018-11-05T08:30:42.000Z | pseudo/tree_transformer.py | mifieldxu/pseudo-lang | 889477c094236dc36526984be6f6537a4875e5a9 | [
"MIT"
] | 45 | 2016-03-07T03:48:09.000Z | 2018-04-16T20:55:47.000Z | from pseudo.pseudo_tree import Node
class TreeTransformer:
'''
visits recursively nodes of the tree
with defined transform_<node_type> methods and transforms in place
'''
before = None
after = None
whitelist = None # if a set, transform only those nodes, optimization
current_class = None
current_function = None
# def transform_custom_exception(self, s, *w):
# # input(s)
# return s | 38.188406 | 122 | 0.57685 | from pseudo.pseudo_tree import Node
class TreeTransformer:
'''
visits recursively nodes of the tree
with defined transform_<node_type> methods and transforms in place
'''
before = None
after = None
whitelist = None # if a set, transform only those nodes, optimization
current_class = None
current_function = None
def transform(self, tree, in_block=False, assignment=None):
old_class, old_function = None, None
if isinstance(tree, Node):
if self.before:
tree = self.before(tree, in_block, assignment)
if self.whitelist and tree.type not in self.whitelist:
return tree
elif tree.type == 'class_definition' or tree.type == 'module':
old_class = self.current_class
self.current_class = tree
elif tree.type in {'function_definition', 'anonymous_function', 'constructor', 'method_definition', 'module'}:
old_function = self.current_function
self.current_function = tree
handler = getattr(self, 'transform_%s' % tree.type, None)
if handler:
tree = handler(tree, in_block, assignment)
else:
tree = self.transform_default(tree)
if self.after:
tree = self.after(tree, in_block, assignment)
self.current_function = old_function
self.current_class = old_class
return tree
elif isinstance(tree, list):
return [self.transform(child) for child in tree]
else:
return tree
def transform_default(self, tree):
for field, child in tree.__dict__.items():
if not field.endswith('type'):
# print(field)
if isinstance(child, Node):
setattr(tree, field, self.transform(child, False, tree if tree.type[-10:] == 'assignment' else None))
elif isinstance(child, list) and field == 'block' or field == 'main':
setattr(tree, field, self.transform_block(child))
elif isinstance(child, list):
setattr(tree, field, self.transform(child))
return tree
def transform_block(self, tree):
results = []
for child in tree:
result = self.transform(child, True)
if not isinstance(result, list):
results.append(result)
else:
results += result
return results
# def transform_custom_exception(self, s, *w):
# # input(s)
# return s | 2,110 | 0 | 81 |
473c69aacc4d0d3808b8a3ae3f94e32fd4039856 | 861 | py | Python | LeetCode/Problems/39_combination_sum.py | hooyao/LeetCode-Py3 | f462b66ae849f4332a4b150f206dd49c7519e83b | [
"MIT"
] | null | null | null | LeetCode/Problems/39_combination_sum.py | hooyao/LeetCode-Py3 | f462b66ae849f4332a4b150f206dd49c7519e83b | [
"MIT"
] | null | null | null | LeetCode/Problems/39_combination_sum.py | hooyao/LeetCode-Py3 | f462b66ae849f4332a4b150f206dd49c7519e83b | [
"MIT"
] | null | null | null | import sys
if __name__ == '__main__':
main(*sys.argv[1:])
| 24.6 | 58 | 0.529617 | import sys
class Solution:
def combinationSum(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
return self.dfs(candidates, target)
def dfs(self, candidates, target):
result = []
for i in range(len(candidates)):
new_target = target - candidates[i]
if new_target == 0:
result.append([candidates[i]])
elif new_target > 0:
tmp = self.dfs(candidates[i:], new_target)
for ele in tmp:
ele.insert(0, candidates[i])
result += tmp
return result
def main(*args):
solution = Solution()
result = solution.combinationSum([3, 2, 5], 8)
print(result)
if __name__ == '__main__':
main(*sys.argv[1:])
| 516 | 233 | 46 |
21eb0142345755c518434887ad53e3986ae41455 | 4,923 | py | Python | exatrkx/src/tfgraphs/utils.py | sbconlon/exatrkx-iml2020 | 5101a3bae5a15e8e0557837a8d0b9fd9e122a026 | [
"Apache-2.0"
] | 6 | 2020-10-27T21:42:27.000Z | 2021-04-18T02:06:30.000Z | exatrkx/src/tfgraphs/utils.py | sbconlon/exatrkx-iml2020 | 5101a3bae5a15e8e0557837a8d0b9fd9e122a026 | [
"Apache-2.0"
] | null | null | null | exatrkx/src/tfgraphs/utils.py | sbconlon/exatrkx-iml2020 | 5101a3bae5a15e8e0557837a8d0b9fd9e122a026 | [
"Apache-2.0"
] | 6 | 2020-11-04T23:45:10.000Z | 2021-03-26T09:06:00.000Z | import matplotlib.pyplot as plt
import sklearn.metrics
import networkx as nx
import numpy as np
import pandas as pd
fontsize=16
minor_size=14 | 36.466667 | 110 | 0.617713 | import matplotlib.pyplot as plt
import sklearn.metrics
import networkx as nx
import numpy as np
import pandas as pd
fontsize=16
minor_size=14
def get_pos(Gp):
pos = {}
for node in Gp.nodes():
r, phi, z = Gp.nodes[node]['pos'][:3]
x = r * np.cos(phi)
y = r * np.sin(phi)
pos[node] = np.array([x, y])
return pos
def plot_nx_with_edge_cmaps(G, weight_name='predict', weight_range=(0, 1),
alpha=1.0, ax=None,
cmaps=plt.get_cmap('Greys'), threshold=0.):
if ax is None:
_, ax = plt.subplots(figsize=(8, 8), constrained_layout=True)
pos = get_pos(G)
res = [(edge, G.edges[edge][weight_name]) for edge in G.edges() if G.edges[edge][weight_name] > threshold]
edges, weights = zip(*dict(res).items())
vmin, vmax = weight_range
nx.draw(G, pos, node_color='#A0CBE2', edge_color=weights, edge_cmap=cmaps,
edgelist=edges, width=0.5, with_labels=False,
node_size=1, edge_vmin=vmin, edge_vmax=vmax,
ax=ax, arrows=False, alpha=alpha
)
def plot_metrics(odd, tdd, odd_th=0.5, tdd_th=0.5, outname='roc_graph_nets.eps',
off_interactive=False, alternative=True):
if off_interactive:
plt.ioff()
y_pred, y_true = (odd > odd_th), (tdd > tdd_th)
fpr, tpr, _ = sklearn.metrics.roc_curve(y_true, odd)
if alternative:
results = []
labels = ['Accuracy: ', 'Precision (purity): ', 'Recall (efficiency):']
thresholds = [0.1, 0.5, 0.8]
for threshold in thresholds:
y_p, y_t = (odd > threshold), (tdd > threshold)
accuracy = sklearn.metrics.accuracy_score(y_t, y_p)
precision = sklearn.metrics.precision_score(y_t, y_p)
recall = sklearn.metrics.recall_score(y_t, y_p)
results.append((accuracy, precision, recall))
print("GNN threshold:{:11.2f} {:7.2f} {:7.2f}".format(*thresholds))
for idx,lab in enumerate(labels):
print("{} {:6.4f} {:6.4f} {:6.4f}".format(lab, *[x[idx] for x in results]))
else:
accuracy = sklearn.metrics.accuracy_score(y_true, y_pred)
precision = sklearn.metrics.precision_score(y_true, y_pred)
recall = sklearn.metrics.recall_score(y_true, y_pred)
print('Accuracy: %.6f' % accuracy)
print('Precision (purity): %.6f' % precision)
print('Recall (efficiency): %.6f' % recall)
auc = sklearn.metrics.auc(fpr, tpr)
print("AUC: %.4f" % auc)
y_p_5 = odd > 0.5
print("Fake rejection at 0.5: {:.6f}".format(1-y_true[y_p_5 & ~y_true].shape[0]/y_true[~y_true].shape[0]))
fig, axs = plt.subplots(2, 2, figsize=(12, 10), constrained_layout=True)
axs = axs.flatten()
ax0, ax1, ax2, ax3 = axs
# Plot the model outputs
# binning=dict(bins=50, range=(0,1), histtype='step', log=True)
binning=dict(bins=50, histtype='step', log=True)
ax0.hist(odd[y_true==False], lw=2, label='fake', **binning)
ax0.hist(odd[y_true], lw=2, label='true', **binning)
ax0.set_xlabel('Model output', fontsize=fontsize)
ax0.tick_params(width=2, grid_alpha=0.5, labelsize=minor_size)
ax0.legend(loc=0, fontsize=fontsize)
ax0.set_title('ROC curve, AUC = %.4f' % auc, fontsize=fontsize)
# Plot the ROC curve
ax1.plot(fpr, tpr, lw=2)
ax1.plot([0, 1], [0, 1], '--', lw=2)
ax1.set_xlabel('False positive rate', fontsize=fontsize)
ax1.set_ylabel('True positive rate', fontsize=fontsize)
ax1.set_title('ROC curve, AUC = %.4f' % auc, fontsize=fontsize)
ax1.tick_params(width=2, grid_alpha=0.5, labelsize=minor_size)
p, r, t = sklearn.metrics.precision_recall_curve(y_true, odd)
ax2.plot(t, p[:-1], label='purity', lw=2)
ax2.plot(t, r[:-1], label='efficiency', lw=2)
ax2.set_xlabel('Cut on model score', fontsize=fontsize)
ax2.tick_params(width=2, grid_alpha=0.5, labelsize=minor_size)
ax2.legend(fontsize=fontsize, loc='upper right')
ax3.plot(p, r, lw=2)
ax3.set_xlabel('Purity', fontsize=fontsize)
ax3.set_ylabel('Efficiency', fontsize=fontsize)
ax3.tick_params(width=2, grid_alpha=0.5, labelsize=minor_size)
plt.savefig(outname)
if off_interactive:
plt.close(fig)
def np_to_nx(array):
G = nx.Graph()
node_features = ['r', 'phi', 'z']
feature_scales = [1000, np.pi, 1000]
df = pd.DataFrame(array['x']*feature_scales, columns=node_features)
node_info = [
(i, dict(pos=np.array(row), hit_id=array['I'][i])) for i,row in df.iterrows()
]
G.add_nodes_from(node_info)
receivers = array['receivers']
senders = array['senders']
score = array['score']
truth = array['truth']
edge_info = [
(i, j, dict(weight=k, solution=l)) for i,j,k,l in zip(senders, receivers, score, truth)
]
G.add_edges_from(edge_info)
return G | 4,688 | 0 | 92 |
d9fd69c9fecbe6a7d97ea88a7268e134dac6ae42 | 2,469 | py | Python | scobra/analysis/Pareto.py | nihalzp/scobra | de1faa73fb4d186d9567bfa8e174b3fd6f1833ef | [
"MIT"
] | 7 | 2016-03-16T09:03:41.000Z | 2019-09-20T05:55:02.000Z | scobra/analysis/Pareto.py | nihalzp/scobra | de1faa73fb4d186d9567bfa8e174b3fd6f1833ef | [
"MIT"
] | 11 | 2019-10-03T15:04:58.000Z | 2020-05-11T17:27:10.000Z | scobra/analysis/Pareto.py | nihalzp/scobra | de1faa73fb4d186d9567bfa8e174b3fd6f1833ef | [
"MIT"
] | 6 | 2016-03-16T09:04:54.000Z | 2021-07-24T15:03:41.000Z | from ..classes.pareto import pareto
import random
def Pareto(model, objectives, objdirec, runs, GetPoints=True, tol=1e-10):
""" pre: objectives = [["reac"],{"reac2":x}]
post: turning points of Pareto front """
state = model.GetState()
rv = pareto()
model.SetObjDirec(objdirec)
anchor = []
for obj in objectives:
model.ZeroObjective()
model.SetObjective(obj)
model.Solve(PrintStatus=False)
anchor.append(model.GetObjVal())
print(anchor)
if len(anchor) == len(objectives):
for n in range(runs):
model.ZeroObjective()
coef = []
for b in range(len(objectives)):
coef.append(random.random())
sumcoef = sum(coef)
for b in range(len(objectives)):
try:
coef[b] = coef[b]/anchor[b]/sumcoef
except ZeroDivisionError:
print("Zero Division error at %s" % objectives[b])
continue
objdic = {}
for b in range(len(objectives)):
thisobjdic = {}
if isinstance(objectives[b],list):
for reac in objectives[b]:
thisobjdic[reac] = coef[b]
elif isinstance(objectives[b],dict):
for reac in objectives[b]:
thisobjdic[reac] = objectives[b][reac]*coef[b]
for r in thisobjdic:
if r in objdic.keys():
objdic[r] += thisobjdic[r]
else:
objdic[r] = thisobjdic[r]
print(objdic)
model.SetObjective(objdic)
model.Solve(PrintStatus=False)
sol = model.GetSol(IncZeroes=True)
for b in range(len(objectives)):
sol["coef"+str(b+1)] = coef[b]
if isinstance(objectives[b],list):
sol["Obj"+str(b+1)] = sol[objectives[b][0]]
elif isinstance(objectives[b],dict):
objsol = 0
for reac in objectives[b]:
objsol += sol[reac]*objectives[b][reac]
sol["Obj"+str(b+1)] = objsol
print(sol)
rv = pareto(rv.UpdateFromDic(sol))
model.SetState(state)
if len(anchor) == len(objectives) and GetPoints:
return rv.GetParetoPoints(tol)
else:
return rv
| 38.578125 | 73 | 0.498987 | from ..classes.pareto import pareto
import random
def Pareto(model, objectives, objdirec, runs, GetPoints=True, tol=1e-10):
""" pre: objectives = [["reac"],{"reac2":x}]
post: turning points of Pareto front """
state = model.GetState()
rv = pareto()
model.SetObjDirec(objdirec)
anchor = []
for obj in objectives:
model.ZeroObjective()
model.SetObjective(obj)
model.Solve(PrintStatus=False)
anchor.append(model.GetObjVal())
print(anchor)
if len(anchor) == len(objectives):
for n in range(runs):
model.ZeroObjective()
coef = []
for b in range(len(objectives)):
coef.append(random.random())
sumcoef = sum(coef)
for b in range(len(objectives)):
try:
coef[b] = coef[b]/anchor[b]/sumcoef
except ZeroDivisionError:
print("Zero Division error at %s" % objectives[b])
continue
objdic = {}
for b in range(len(objectives)):
thisobjdic = {}
if isinstance(objectives[b],list):
for reac in objectives[b]:
thisobjdic[reac] = coef[b]
elif isinstance(objectives[b],dict):
for reac in objectives[b]:
thisobjdic[reac] = objectives[b][reac]*coef[b]
for r in thisobjdic:
if r in objdic.keys():
objdic[r] += thisobjdic[r]
else:
objdic[r] = thisobjdic[r]
print(objdic)
model.SetObjective(objdic)
model.Solve(PrintStatus=False)
sol = model.GetSol(IncZeroes=True)
for b in range(len(objectives)):
sol["coef"+str(b+1)] = coef[b]
if isinstance(objectives[b],list):
sol["Obj"+str(b+1)] = sol[objectives[b][0]]
elif isinstance(objectives[b],dict):
objsol = 0
for reac in objectives[b]:
objsol += sol[reac]*objectives[b][reac]
sol["Obj"+str(b+1)] = objsol
print(sol)
rv = pareto(rv.UpdateFromDic(sol))
model.SetState(state)
if len(anchor) == len(objectives) and GetPoints:
return rv.GetParetoPoints(tol)
else:
return rv
| 0 | 0 | 0 |
ba51e543b2f51c2d862edf4058083c74dbe60178 | 1,477 | py | Python | src/print_and_read.py | Shumpei-Kikuta/RolX | d17609180f0d1da40b1ae93de4ee0e8c0366b364 | [
"MIT"
] | null | null | null | src/print_and_read.py | Shumpei-Kikuta/RolX | d17609180f0d1da40b1ae93de4ee0e8c0366b364 | [
"MIT"
] | null | null | null | src/print_and_read.py | Shumpei-Kikuta/RolX | d17609180f0d1da40b1ae93de4ee0e8c0366b364 | [
"MIT"
] | null | null | null | import json
import numpy as np
import pandas as pd
import networkx as nx
from texttable import Texttable
def data_reader(input_path):
"""
Function to read a csv edge list and transform it to a networkx graph object.
"""
data = np.array(pd.read_csv(input_path))
return data
def log_setup(args_in):
"""
Function to setup the logging hash table.
"""
log = dict()
log["times"] = []
log["losses"] = []
log["new_features_added"] = []
log["params"] = vars(args_in)
return log
def tab_printer(log):
"""
Function to print the logs in a nice tabular format.
"""
t = Texttable()
t.add_rows([["Epoch", log["losses"][-1][0]]])
print t.draw()
t = Texttable()
t.add_rows([["Loss", round(log["losses"][-1][1],3)]])
print t.draw()
def epoch_printer(repetition):
"""
Function to print the epoch number.
"""
print("")
print("Epoch " + str(repetition+1) + ". initiated.")
print("")
def log_updater(log, repetition, average_loss, optimization_time):
"""
Function to update the log object.
"""
index = repetition + 1
log["losses"] = log["losses"] + [[index, average_loss]]
log["times"] = log["times"] + [[index, optimization_time]]
return log
| 25.912281 | 105 | 0.608666 | import json
import numpy as np
import pandas as pd
import networkx as nx
from texttable import Texttable
def data_reader(input_path):
"""
Function to read a csv edge list and transform it to a networkx graph object.
"""
data = np.array(pd.read_csv(input_path))
return data
def log_setup(args_in):
"""
Function to setup the logging hash table.
"""
log = dict()
log["times"] = []
log["losses"] = []
log["new_features_added"] = []
log["params"] = vars(args_in)
return log
def tab_printer(log):
"""
Function to print the logs in a nice tabular format.
"""
t = Texttable()
t.add_rows([["Epoch", log["losses"][-1][0]]])
print t.draw()
t = Texttable()
t.add_rows([["Loss", round(log["losses"][-1][1],3)]])
print t.draw()
def epoch_printer(repetition):
"""
Function to print the epoch number.
"""
print("")
print("Epoch " + str(repetition+1) + ". initiated.")
print("")
def log_updater(log, repetition, average_loss, optimization_time):
"""
Function to update the log object.
"""
index = repetition + 1
log["losses"] = log["losses"] + [[index, average_loss]]
log["times"] = log["times"] + [[index, optimization_time]]
return log
def data_saver(features, place):
features = pd.DataFrame(features, columns = map(lambda x: "x_" + str(x), range(0,features.shape[1])))
features.to_csv(place, index = None)
| 158 | 0 | 23 |
4c50d5f807206aed7e471417c6ca93b284a799e4 | 6,774 | py | Python | addons/website_sale_coupon/tests/test_sale_coupon_multiwebsite.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/website_sale_coupon/tests/test_sale_coupon_multiwebsite.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/website_sale_coupon/tests/test_sale_coupon_multiwebsite.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | # Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.sale_coupon.tests.test_program_numbers import TestSaleCouponProgramNumbers
from odoo.addons.website.tools import MockRequest
from odoo.exceptions import UserError
from odoo.tests import tagged
@tagged('-at_install', 'post_install')
| 45.16 | 149 | 0.618837 | # Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.sale_coupon.tests.test_program_numbers import TestSaleCouponProgramNumbers
from odoo.addons.website.tools import MockRequest
from odoo.exceptions import UserError
from odoo.tests import tagged
@tagged('-at_install', 'post_install')
class TestSaleCouponMultiwebsite(TestSaleCouponProgramNumbers):
def setUp(self):
super(TestSaleCouponMultiwebsite, self).setUp()
self.website = self.env['website'].browse(1)
self.website2 = self.env['website'].create({'name': 'website 2'})
def test_01_multiwebsite_checks(self):
""" Ensure the multi website compliance of programs and coupons, both in
backend and frontend.
"""
order = self.empty_order
self.env['sale.order.line'].create({
'product_id': self.largeCabinet.id,
'name': 'Large Cabinet',
'product_uom_qty': 2.0,
'order_id': order.id,
})
def _remove_reward():
order.order_line.filtered('is_reward_line').unlink()
self.assertEqual(len(order.order_line.ids), 1, "Program should have been removed")
def _apply_code(code, backend=True):
if backend:
self.env['sale.coupon.apply.code'].with_context(active_id=order.id).create({
'coupon_code': code
}).process_coupon()
else:
self.env['sale.coupon.apply.code'].sudo().apply_coupon(order, code)
# ==========================================
# ========== Programs (with code) ==========
# ==========================================
# 1. Backend - Generic
_apply_code(self.p1.promo_code)
self.assertEqual(len(order.order_line.ids), 2, "Should get the discount line as it is a generic promo program")
_remove_reward()
# 2. Frontend - Generic
with MockRequest(self.env, website=self.website):
_apply_code(self.p1.promo_code, False)
self.assertEqual(len(order.order_line.ids), 2, "Should get the discount line as it is a generic promo program (2)")
_remove_reward()
# make program specific
self.p1.website_id = self.website.id
# 3. Backend - Specific
with self.assertRaises(UserError):
_apply_code(self.p1.promo_code) # the order has no website_id so not possible to use a website specific code
# 4. Frontend - Specific - Correct website
order.website_id = self.website.id
with MockRequest(self.env, website=self.website):
_apply_code(self.p1.promo_code, False)
self.assertEqual(len(order.order_line.ids), 2, "Should get the discount line as it is a specific promo program for the correct website")
_remove_reward()
# 5. Frontend - Specific - Wrong website
self.p1.website_id = self.website2.id
with MockRequest(self.env, website=self.website):
_apply_code(self.p1.promo_code, False)
self.assertEqual(len(order.order_line.ids), 1, "Should not get the reward as wrong website")
# ==============================
# =========== Coupons ==========
# ==============================
order.website_id = False
self.env['coupon.generate.wizard'].with_context(active_id=self.discount_coupon_program.id).create({
'nbr_coupons': 4,
}).generate_coupon()
coupons = self.discount_coupon_program.coupon_ids
# 1. Backend - Generic
_apply_code(coupons[0].code)
self.assertEqual(len(order.order_line.ids), 2, "Should get the discount line as it is a generic coupon program")
_remove_reward()
# 2. Frontend - Generic
with MockRequest(self.env, website=self.website):
_apply_code(coupons[1].code, False)
self.assertEqual(len(order.order_line.ids), 2, "Should get the discount line as it is a generic coupon program (2)")
_remove_reward()
# make program specific
self.discount_coupon_program.website_id = self.website.id
# 3. Backend - Specific
with self.assertRaises(UserError):
_apply_code(coupons[2].code) # the order has no website_id so not possible to use a website specific code
# 4. Frontend - Specific - Correct website
order.website_id = self.website.id
with MockRequest(self.env, website=self.website):
_apply_code(coupons[2].code, False)
self.assertEqual(len(order.order_line.ids), 2, "Should get the discount line as it is a specific coupon program for the correct website")
_remove_reward()
# 5. Frontend - Specific - Wrong website
self.discount_coupon_program.website_id = self.website2.id
with MockRequest(self.env, website=self.website):
_apply_code(coupons[3].code, False)
self.assertEqual(len(order.order_line.ids), 1, "Should not get the reward as wrong website")
# ========================================
# ========== Programs (no code) ==========
# ========================================
order.website_id = False
self.p1.website_id = False
self.p1.promo_code = False
self.p1.promo_code_usage = 'no_code_needed'
# 1. Backend - Generic
order.recompute_coupon_lines()
self.assertEqual(len(order.order_line.ids), 2, "Should get the discount line as it is a generic promo program")
# 2. Frontend - Generic
with MockRequest(self.env, website=self.website):
order.recompute_coupon_lines()
self.assertEqual(len(order.order_line.ids), 2, "Should get the discount line as it is a generic promo program (2)")
# make program specific
self.p1.website_id = self.website.id
# 3. Backend - Specific
order.recompute_coupon_lines()
self.assertEqual(len(order.order_line.ids), 1, "The order has no website_id so not possible to use a website specific code")
# 4. Frontend - Specific - Correct website
order.website_id = self.website.id
with MockRequest(self.env, website=self.website):
order.recompute_coupon_lines()
self.assertEqual(len(order.order_line.ids), 2, "Should get the discount line as it is a specific promo program for the correct website")
# 5. Frontend - Specific - Wrong website
self.p1.website_id = self.website2.id
with MockRequest(self.env, website=self.website):
order.recompute_coupon_lines()
self.assertEqual(len(order.order_line.ids), 1, "Should not get the reward as wrong website")
| 648 | 5,777 | 22 |
14cc919df0578ea35a942e7d8f60ec3bd6e5bf7d | 5,600 | py | Python | WisRecCloud/apps/recall/task.py | DooBeDooBa/WisRecCloud | 1801fc05c8aaabc5e668ce30eb83e26c65855ef1 | [
"CC0-1.0"
] | 1 | 2019-12-13T08:48:35.000Z | 2019-12-13T08:48:35.000Z | WisRecCloud/apps/recall/task.py | DooBeDooBa/WisRecCloud | 1801fc05c8aaabc5e668ce30eb83e26c65855ef1 | [
"CC0-1.0"
] | 20 | 2020-01-28T23:12:37.000Z | 2022-03-12T00:08:52.000Z | WisRecCloud/apps/recall/task.py | DooBeDooBa/WisRecCloud | 1801fc05c8aaabc5e668ce30eb83e26c65855ef1 | [
"CC0-1.0"
] | null | null | null | import json
import redis
from celery import Celery
import pandas as pd
import math
import pymysql
app = Celery(
'get_key_words',
backend='redis://localhost:6379/8'
)
@app.task
def get_key(user_api_key, api_key):
"""
:param user_api_key:
:param api_key:
:return:
"""
conn = pymysql.connect( # 链接MYSQL
host='localhost',
user='root',
passwd='963369',
db='wisreccloud',
port=3306,
charset='utf8'
)
# 到数据库查询所需要的数据
_temp = pd.read_sql("select data_id, key_digest from jie_card_data where client_id_id = %s" % user_api_key, conn)
_id = _temp["data_id"].to_list()
_digest = _temp["key_digest"].to_list()
_result = {temp_id: temp_digest for temp_id, temp_digest in zip(_id, _digest)}
news_cor_list = list()
# 计算相似度
for new_id1 in _result.keys():
id1_tags = set(_result[new_id1].split(","))
for new_id2 in _result.keys():
id2_tags = set(_result[new_id2].split(","))
if new_id1 != new_id2:
cor = (len(id1_tags & id2_tags)) / len(id1_tags | id2_tags)
if cor > 0.1:
news_cor_list.append([new_id1, new_id2, format(cor, ".2f")])
# 替换redis中推荐数据
pool = redis.ConnectionPool(host='localhost', port=6379, decode_responses=True, db=8)
conn = redis.StrictRedis(connection_pool=pool)
try: # 如何未查询到数据,则是因为第一次创建则新建数据
for redis_name in conn.keys():
if api_key == json.loads(conn.get(redis_name))["result"]["api_key"]:
conn.delete(redis_name)
except KeyError:
pass
result = {
"api_key": api_key,
"result": news_cor_list
}
return result
def load_data(user_api_key):
"""
加载数据库中的数据集
:param user_api_key:
:return:
"""
conn = pymysql.connect( # 链接MYSQL
host='localhost',
user='root',
passwd='963369',
db='wisreccloud',
port=3306,
charset='utf8'
)
_temp = pd.read_sql("select user_id, movie_id, ratting from movie_cf_data where client_id_id = %s" % user_api_key,
conn)
data, new_data = list(), dict()
for user_id, item_id, record in zip(_temp["user_id"], _temp["movie_id"], _temp["ratting"]):
data.append((user_id, item_id, record))
for user, item, record in data:
new_data.setdefault(user, {})
new_data[user][item] = record
return new_data
@app.task
def UserSimilarityBest(user_api_key, api_key):
"""
计算用户之间的相似度
:return:
"""
data = load_data(user_api_key)
item_users = dict() # 存储哪些item被用户评价过
for u, items in data.items(): # user_id {item_id: rating}
for i in items.keys(): # 得到每个item被哪些user评价过
item_users.setdefault(i, set())
if data[u][i] > 0:
item_users[i].add(u) # {'1193': {'1', '15', '2', '28', '18', '19', '24', '12', '33', '17'}}
count, user_item_count = dict(), dict()
for i, users in item_users.items(): # item_id, set(user_id1, user_id2)
for u in users: # user_id
user_item_count.setdefault(u, 0) # user_id: 0
user_item_count[u] += 1
count.setdefault(u, {}) # user_id: {}
for v in users: # user_id
count[u].setdefault(v, 0)
if u == v:
continue
count[u][v] += 1 / math.log(1 + len(users)) # {'33': 391, '19': 255, '28': 107, '12': 23}
userSim = dict()
for u, related_users in count.items():
userSim.setdefault(u, {})
for v, cuv in related_users.items():
if u == v:
continue
userSim[u].setdefault(v, 0.0)
userSim[u][v] = cuv / math.sqrt(user_item_count[u] * user_item_count[v])
# 替换redis中推荐数据
pool = redis.ConnectionPool(host='localhost', port=6379, decode_responses=True, db=8)
conn = redis.StrictRedis(connection_pool=pool)
try: # 如何未查询到数据,则是因为第一次创建则新建数据
for redis_name in conn.keys():
if api_key == json.loads(conn.get(redis_name))["result"]["api_key"]:
conn.delete(redis_name)
except KeyError:
pass
result = {
"api_key": api_key,
"result": userSim
}
return result
@app.task
| 32.748538 | 118 | 0.575179 | import json
import redis
from celery import Celery
import pandas as pd
import math
import pymysql
app = Celery(
'get_key_words',
backend='redis://localhost:6379/8'
)
@app.task
def get_key(user_api_key, api_key):
    """Compute pairwise Jaccard similarity between items' keyword digests.

    Reads ``jie_card_data`` rows for one client from MySQL, splits each
    comma-separated ``key_digest`` into a tag set, and keeps every ordered
    pair of distinct items whose Jaccard similarity exceeds 0.1 (note: both
    orderings of a pair are emitted).  Any previously cached payload for the
    same ``api_key`` is deleted from redis db 8 before returning.

    :param user_api_key: client id used to filter ``jie_card_data``.
    :param api_key: public API key stored alongside the result payload.
    :return: dict with ``api_key`` and a list of ``[id1, id2, "0.xx"]`` triples.
    """
    conn = pymysql.connect(  # connect to MySQL
        host='localhost',
        user='root',
        passwd='963369',
        db='wisreccloud',
        port=3306,
        charset='utf8'
    )
    # Fetch the required rows from the database.
    _temp = pd.read_sql("select data_id, key_digest from jie_card_data where client_id_id = %s" % user_api_key, conn)
    _id = _temp["data_id"].to_list()
    _digest = _temp["key_digest"].to_list()
    _result = {temp_id: temp_digest for temp_id, temp_digest in zip(_id, _digest)}
    news_cor_list = list()
    # Compute pairwise similarity (Jaccard: |A & B| / |A | B|).
    for new_id1 in _result.keys():
        id1_tags = set(_result[new_id1].split(","))
        for new_id2 in _result.keys():
            id2_tags = set(_result[new_id2].split(","))
            if new_id1 != new_id2:
                cor = (len(id1_tags & id2_tags)) / len(id1_tags | id2_tags)
                if cor > 0.1:
                    news_cor_list.append([new_id1, new_id2, format(cor, ".2f")])
    # Replace the cached recommendation payload in redis.
    pool = redis.ConnectionPool(host='localhost', port=6379, decode_responses=True, db=8)
    conn = redis.StrictRedis(connection_pool=pool)
    try:  # On the first run no cached payload exists yet, so the lookup may fail.
        for redis_name in conn.keys():
            if api_key == json.loads(conn.get(redis_name))["result"]["api_key"]:
                conn.delete(redis_name)
    except KeyError:
        pass
    result = {
        "api_key": api_key,
        "result": news_cor_list
    }
    return result
def load_data(user_api_key):
    """Load one client's movie-rating dataset from MySQL.

    Queries the ``movie_cf_data`` table and builds a nested mapping of the
    form ``{user_id: {movie_id: rating}}``.

    :param user_api_key: client id used to filter ``movie_cf_data``.
    :return: dict of per-user rating dicts.
    """
    connection = pymysql.connect(  # connect to MySQL
        host='localhost',
        user='root',
        passwd='963369',
        db='wisreccloud',
        port=3306,
        charset='utf8'
    )
    frame = pd.read_sql(
        "select user_id, movie_id, ratting from movie_cf_data where client_id_id = %s" % user_api_key,
        connection)
    # Fold the three parallel columns straight into the nested dict.
    ratings = dict()
    for uid, mid, score in zip(frame["user_id"], frame["movie_id"], frame["ratting"]):
        ratings.setdefault(uid, {})[mid] = score
    return ratings
@app.task
def UserSimilarityBest(user_api_key, api_key):
    """Compute user-user similarity for collaborative filtering.

    Builds an item -> users inverted index from the ratings loaded by
    :func:`load_data`, accumulates co-rating counts damped by item popularity
    (``1 / log(1 + len(users))``), and normalises by the geometric mean of the
    two users' rated-item counts.  The cached payload for ``api_key`` in
    redis db 8 is deleted before returning.

    :param user_api_key: client id passed through to :func:`load_data`.
    :param api_key: public API key stored alongside the result payload.
    :return: dict with ``api_key`` and the ``{u: {v: similarity}}`` matrix.
    """
    data = load_data(user_api_key)
    item_users = dict()  # inverted index: which users rated each item
    for u, items in data.items():  # user_id {item_id: rating}
        for i in items.keys():  # collect, per item, the set of users that rated it
            item_users.setdefault(i, set())
            if data[u][i] > 0:
                item_users[i].add(u)  # e.g. {'1193': {'1', '15', '2', '28', ...}}
    count, user_item_count = dict(), dict()
    for i, users in item_users.items():  # item_id, set(user_id1, user_id2)
        for u in users:  # user_id
            user_item_count.setdefault(u, 0)  # user_id: 0
            user_item_count[u] += 1
            count.setdefault(u, {})  # user_id: {}
            for v in users:  # user_id
                count[u].setdefault(v, 0)
                if u == v:
                    continue
                # Popular items contribute less to the co-rating count.
                count[u][v] += 1 / math.log(1 + len(users))  # {'33': 391, '19': 255, ...}
    userSim = dict()
    for u, related_users in count.items():
        userSim.setdefault(u, {})
        for v, cuv in related_users.items():
            if u == v:
                continue
            userSim[u].setdefault(v, 0.0)
            # Cosine-style normalisation by both users' activity levels.
            userSim[u][v] = cuv / math.sqrt(user_item_count[u] * user_item_count[v])
    # Replace the cached recommendation payload in redis.
    pool = redis.ConnectionPool(host='localhost', port=6379, decode_responses=True, db=8)
    conn = redis.StrictRedis(connection_pool=pool)
    try:  # On the first run no cached payload exists yet, so the lookup may fail.
        for redis_name in conn.keys():
            if api_key == json.loads(conn.get(redis_name))["result"]["api_key"]:
                conn.delete(redis_name)
    except KeyError:
        pass
    result = {
        "api_key": api_key,
        "result": userSim
    }
    return result
@app.task
def ItemSimilarityBest(user_api_key, api_key):
    """Compute item-item similarity for collaborative filtering.

    Counts, per item, how many users rated it and how often each item pair
    was rated by the same user, then normalises the co-occurrence count by
    the geometric mean of both items' popularity.  The cached payload for
    ``api_key`` in redis db 8 is deleted before returning.

    :param user_api_key: client id passed through to :func:`load_data`.
    :param api_key: public API key stored alongside the result payload.
    :return: dict with ``api_key`` and the ``{i: {j: similarity}}`` matrix.
    """
    data = load_data(user_api_key)
    itemSim, item_user_count, count = dict(), dict(), dict()  # build co-occurrence matrix
    for user, _item in data.items():
        # NOTE(review): `data[int(user)]` assumes the keys of `data` are ints
        # (i.e. `int(user) == user`) -- confirm against load_data's source table.
        for i in _item.keys():
            item_user_count.setdefault(i, 0)
            if data[int(user)][i] > 0.0:
                item_user_count[i] += 1  # item popularity: number of raters
            for j in _item.keys():
                count.setdefault(i, {}).setdefault(j, 0)
                if data[int(user)][i] > 0.0 and data[int(user)][j] > 0.0 and i != j:
                    count[i][j] += 1  # both items rated by this user
    for i, related_items in count.items():
        itemSim.setdefault(i, dict())
        for j, cuv in related_items.items():
            itemSim[i].setdefault(j, 0)
            # Cosine-style normalisation by both items' popularity.
            itemSim[i][j] = cuv / math.sqrt(item_user_count[i] * item_user_count[j])
    # Replace the cached recommendation payload in redis.
    pool = redis.ConnectionPool(host='localhost', port=6379, decode_responses=True, db=8)
    conn = redis.StrictRedis(connection_pool=pool)
    try:  # On the first run no cached payload exists yet, so the lookup may fail.
        for redis_name in conn.keys():
            if api_key == json.loads(conn.get(redis_name))["result"]["api_key"]:
                conn.delete(redis_name)
    except KeyError:
        pass
    result = {
        "api_key": api_key,
        "result": itemSim
    }
    return result
| 1,344 | 0 | 22 |
1eba706beb117ef3b2905afc1fd42a03afc20fc4 | 4,102 | py | Python | sandbox/larva_brain.py | neurodata/bgm | b04162f84820f81cf719e8a5ddd4dae34d8f5f41 | [
"MIT"
] | 1 | 2022-03-29T14:53:11.000Z | 2022-03-29T14:53:11.000Z | sandbox/larva_brain.py | neurodata/bgm | b04162f84820f81cf719e8a5ddd4dae34d8f5f41 | [
"MIT"
] | null | null | null | sandbox/larva_brain.py | neurodata/bgm | b04162f84820f81cf719e8a5ddd4dae34d8f5f41 | [
"MIT"
] | null | null | null | #%% [markdown]
# # Matching when including the contralateral connections
#%% [markdown]
# ## Preliminaries
#%%
import datetime
import os
import time
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from giskard.plot import adjplot, matched_stripplot, matrixplot
from numba import jit
from pkg.data import load_maggot_graph, load_matched
from pkg.io import OUT_PATH
from pkg.io import glue as default_glue
from pkg.io import savefig
from pkg.match import BisectedGraphMatchSolver, GraphMatchSolver
from pkg.plot import method_palette, set_theme
from pkg.utils import get_paired_inds, get_paired_subgraphs, get_seeds
from scipy.optimize import linear_sum_assignment
from scipy.stats import wilcoxon
FILENAME = "larva_brain"
DISPLAY_FIGS = True
OUT_PATH = OUT_PATH / FILENAME
t0 = time.time()
set_theme()
rng = np.random.default_rng(8888)
#%% [markdown]
# ### Load the data
#%%
left_adj, left_nodes = load_matched("left")
right_adj, right_nodes = load_matched("right")
left_nodes["inds"] = range(len(left_nodes))
right_nodes["inds"] = range(len(right_nodes))
seeds = get_seeds(left_nodes, right_nodes)
all_nodes = pd.concat((left_nodes, right_nodes))
all_nodes["inds"] = range(len(all_nodes))
left_nodes.iloc[seeds[0]]["pair_id"]
assert len(left_nodes) == len(right_nodes)
#%%
mg = load_maggot_graph()
mg = mg.node_subgraph(all_nodes.index)
adj = mg.sum.adj
n = len(left_nodes)
left_inds = np.arange(n)
right_inds = np.arange(n) + n
glue("n_nodes", n)
#%% [markdown]
# ### Run the graph matching experiment
n_sims = 25
glue("n_initializations", n_sims)
RERUN_SIMS = False
if RERUN_SIMS:
seeds = rng.integers(np.iinfo(np.int32).max, size=n_sims)
rows = []
for sim, seed in enumerate(seeds):
for Solver, method in zip(
[BisectedGraphMatchSolver, GraphMatchSolver], ["BGM", "GM"]
):
run_start = time.time()
solver = Solver(adj, left_inds, right_inds, rng=seed)
solver.solve()
match_ratio = (solver.permutation_ == np.arange(n)).mean()
elapsed = time.time() - run_start
print(f"{elapsed:.3f} seconds elapsed.")
rows.append(
{
"match_ratio": match_ratio,
"sim": sim,
"method": method,
"seed": seed,
"elapsed": elapsed,
"converged": solver.converged,
"n_iter": solver.n_iter,
"score": solver.score_,
}
)
results = pd.DataFrame(rows)
results.to_csv(OUT_PATH / "larva_comparison.csv")
else:
results = pd.read_csv(OUT_PATH / "larva_comparison.csv", index_col=0)
results.head()
#%%
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
matched_stripplot(
data=results,
x="method",
y="match_ratio",
match="sim",
order=["GM", "BGM"],
hue="method",
palette=method_palette,
ax=ax,
jitter=0.25,
)
sns.move_legend(ax, "upper left", title="Method")
mean1 = results[results["method"] == "GM"]["match_ratio"].mean()
mean2 = results[results["method"] == "BGM"]["match_ratio"].mean()
ax.set_yticks([mean1, mean2])
ax.set_yticklabels([f"{mean1:.2f}", f"{mean2:.2f}"])
ax.tick_params(which="both", length=7)
ax.set_ylabel("Match ratio")
ax.set_xlabel("Method")
gluefig("match_ratio_larva", fig)
# %%
bgm_results = results[results["method"] == "BGM"]
gm_results = results[results["method"] == "GM"]
stat, pvalue = wilcoxon(
bgm_results["match_ratio"].values, gm_results["match_ratio"].values
)
glue("match_ratio_pvalue", pvalue, form="pvalue")
mean_bgm = bgm_results["match_ratio"].mean()
glue("mean_match_ratio_bgm", mean_bgm)
mean_gm = gm_results["match_ratio"].mean()
glue("mean_match_ratio_gm", mean_gm)
| 24.562874 | 73 | 0.661872 | #%% [markdown]
# # Matching when including the contralateral connections
#%% [markdown]
# ## Preliminaries
#%%
import datetime
import os
import time
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from giskard.plot import adjplot, matched_stripplot, matrixplot
from numba import jit
from pkg.data import load_maggot_graph, load_matched
from pkg.io import OUT_PATH
from pkg.io import glue as default_glue
from pkg.io import savefig
from pkg.match import BisectedGraphMatchSolver, GraphMatchSolver
from pkg.plot import method_palette, set_theme
from pkg.utils import get_paired_inds, get_paired_subgraphs, get_seeds
from scipy.optimize import linear_sum_assignment
from scipy.stats import wilcoxon
FILENAME = "larva_brain"
DISPLAY_FIGS = True
OUT_PATH = OUT_PATH / FILENAME
def glue(name, var, **kwargs):
    """Glue `var` under `name`, namespaced to this notebook's FILENAME."""
    default_glue(name, var, FILENAME, **kwargs)
def gluefig(name, fig, **kwargs):
    """Save `fig` to this notebook's output folder, glue it, and optionally close it."""
    savefig(name, foldername=FILENAME, **kwargs)
    glue(name, fig, figure=True)
    # Keep figures open only when interactive display is requested.
    if not DISPLAY_FIGS:
        plt.close()
set_theme()
rng = np.random.default_rng(8888)
#%% [markdown]
# ### Load the data
#%%
left_adj, left_nodes = load_matched("left")
right_adj, right_nodes = load_matched("right")
left_nodes["inds"] = range(len(left_nodes))
right_nodes["inds"] = range(len(right_nodes))
seeds = get_seeds(left_nodes, right_nodes)
all_nodes = pd.concat((left_nodes, right_nodes))
all_nodes["inds"] = range(len(all_nodes))
left_nodes.iloc[seeds[0]]["pair_id"]
assert len(left_nodes) == len(right_nodes)
#%%
mg = load_maggot_graph()
mg = mg.node_subgraph(all_nodes.index)
adj = mg.sum.adj
n = len(left_nodes)
left_inds = np.arange(n)
right_inds = np.arange(n) + n
glue("n_nodes", n)
#%% [markdown]
# ### Run the graph matching experiment
n_sims = 25
glue("n_initializations", n_sims)
RERUN_SIMS = False
if RERUN_SIMS:
seeds = rng.integers(np.iinfo(np.int32).max, size=n_sims)
rows = []
for sim, seed in enumerate(seeds):
for Solver, method in zip(
[BisectedGraphMatchSolver, GraphMatchSolver], ["BGM", "GM"]
):
run_start = time.time()
solver = Solver(adj, left_inds, right_inds, rng=seed)
solver.solve()
match_ratio = (solver.permutation_ == np.arange(n)).mean()
elapsed = time.time() - run_start
print(f"{elapsed:.3f} seconds elapsed.")
rows.append(
{
"match_ratio": match_ratio,
"sim": sim,
"method": method,
"seed": seed,
"elapsed": elapsed,
"converged": solver.converged,
"n_iter": solver.n_iter,
"score": solver.score_,
}
)
results = pd.DataFrame(rows)
results.to_csv(OUT_PATH / "larva_comparison.csv")
else:
results = pd.read_csv(OUT_PATH / "larva_comparison.csv", index_col=0)
results.head()
#%%
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
matched_stripplot(
data=results,
x="method",
y="match_ratio",
match="sim",
order=["GM", "BGM"],
hue="method",
palette=method_palette,
ax=ax,
jitter=0.25,
)
sns.move_legend(ax, "upper left", title="Method")
mean1 = results[results["method"] == "GM"]["match_ratio"].mean()
mean2 = results[results["method"] == "BGM"]["match_ratio"].mean()
ax.set_yticks([mean1, mean2])
ax.set_yticklabels([f"{mean1:.2f}", f"{mean2:.2f}"])
ax.tick_params(which="both", length=7)
ax.set_ylabel("Match ratio")
ax.set_xlabel("Method")
gluefig("match_ratio_larva", fig)
# %%
bgm_results = results[results["method"] == "BGM"]
gm_results = results[results["method"] == "GM"]
stat, pvalue = wilcoxon(
bgm_results["match_ratio"].values, gm_results["match_ratio"].values
)
glue("match_ratio_pvalue", pvalue, form="pvalue")
mean_bgm = bgm_results["match_ratio"].mean()
glue("mean_match_ratio_bgm", mean_bgm)
mean_gm = gm_results["match_ratio"].mean()
glue("mean_match_ratio_gm", mean_gm)
| 198 | 0 | 46 |
7d58e6bfbca6ddc1fd109a5af6ed2758d6c322d1 | 5,671 | py | Python | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/hr_holidays/report/holidays_summary_report.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | 1 | 2019-12-19T01:53:13.000Z | 2019-12-19T01:53:13.000Z | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/hr_holidays/report/holidays_summary_report.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/hr_holidays/report/holidays_summary_report.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models
| 47.655462 | 137 | 0.587903 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models
class HrHolidaySummaryReport(models.AbstractModel):
    """QWeb report model for the 60-day employee holidays summary."""
    _name = 'report.hr_holidays.report_holidayssummary'
    def _get_header_info(self, start_date, holiday_type):
        """Return header values: the 60-day window bounds and the leave-state label."""
        st_date = fields.Date.from_string(start_date)
        return {
            'start_date': fields.Date.to_string(st_date),
            'end_date': fields.Date.to_string(st_date + relativedelta(days=59)),
            'holiday_type': 'Confirmed and Approved' if holiday_type == 'both' else holiday_type
        }
    def _get_day(self, start_date):
        """Return one cell per day of the 60-day window; weekends are greyed out."""
        res = []
        start_date = fields.Date.from_string(start_date)
        for x in range(0, 60):
            color = '#ababab' if start_date.strftime('%a') == 'Sat' or start_date.strftime('%a') == 'Sun' else ''
            res.append({'day_str': start_date.strftime('%a'), 'day': start_date.day , 'color': color})
            start_date = start_date + relativedelta(days=1)
        return res
    def _get_months(self, start_date):
        """Return the month names spanned by the window with their day counts."""
        # it works for geting month name between two dates.
        res = []
        start_date = fields.Date.from_string(start_date)
        end_date = start_date + relativedelta(days=59)
        while start_date <= end_date:
            # Last day of the current month, clipped to the window end.
            last_date = start_date + relativedelta(day=1, months=+1, days=-1)
            if last_date > end_date:
                last_date = end_date
            month_days = (last_date - start_date).days + 1
            res.append({'month_name': start_date.strftime('%B'), 'days': month_days})
            start_date += relativedelta(day=1, months=+1)
        return res
    def _get_leaves_summary(self, start_date, empid, holiday_type):
        """Return per-day leave cells for one employee over the 60-day window.

        Side effect: stores the number of leave days found in ``self.sum`` so
        the caller (_get_data_from_report) can read it right after this call.
        """
        res = []
        count = 0
        start_date = fields.Date.from_string(start_date)
        end_date = start_date + relativedelta(days=59)
        for index in range(0, 60):
            current = start_date + timedelta(index)
            res.append({'day': current.day, 'color': ''})
            if current.strftime('%a') == 'Sat' or current.strftime('%a') == 'Sun':
                res[index]['color'] = '#ababab'
        # count and get leave summary details.
        holiday_type = ['confirm','validate'] if holiday_type == 'both' else ['confirm'] if holiday_type == 'Confirmed' else ['validate']
        holidays = self.env['hr.holidays'].search([
            ('employee_id', '=', empid), ('state', 'in', holiday_type),
            ('type', '=', 'remove'), ('date_from', '<=', str(end_date)),
            ('date_to', '>=', str(start_date))
        ])
        for holiday in holidays:
            # Convert date to user timezone, otherwise the report will not be consistent with the
            # value displayed in the interface.
            date_from = fields.Datetime.from_string(holiday.date_from)
            date_from = fields.Datetime.context_timestamp(holiday, date_from).date()
            date_to = fields.Datetime.from_string(holiday.date_to)
            date_to = fields.Datetime.context_timestamp(holiday, date_to).date()
            # Colour each day of the leave that falls inside the window.
            for index in range(0, ((date_to - date_from).days + 1)):
                if date_from >= start_date and date_from <= end_date:
                    res[(date_from-start_date).days]['color'] = holiday.holiday_status_id.color_name
                    count+=1
                date_from += timedelta(1)
        self.sum = count
        return res
    def _get_data_from_report(self, data):
        """Build the report rows, either grouped by department or for a flat employee list."""
        res = []
        Employee = self.env['hr.employee']
        if 'depts' in data:
            for department in self.env['hr.department'].browse(data['depts']):
                res.append({'dept' : department.name, 'data': [], 'color': self._get_day(data['date_from'])})
                for emp in Employee.search([('department_id', '=', department.id)]):
                    res[len(res)-1]['data'].append({
                        'emp': emp.name,
                        'display': self._get_leaves_summary(data['date_from'], emp.id, data['holiday_type']),
                        # self.sum was just set by _get_leaves_summary above.
                        'sum': self.sum
                    })
        elif 'emp' in data:
            res.append({'data':[]})
            for emp in Employee.browse(data['emp']):
                res[0]['data'].append({
                    'emp': emp.name,
                    'display': self._get_leaves_summary(data['date_from'], emp.id, data['holiday_type']),
                    'sum': self.sum
                })
        return res
    def _get_holidays_status(self):
        """Return the colour legend: one entry per leave type."""
        res = []
        for holiday in self.env['hr.holidays.status'].search([]):
            res.append({'color': holiday.color_name, 'name': holiday.name})
        return res
    @api.model
    def render_html(self, docids, data=None):
        """Render the QWeb template with the computed header, calendar and legend."""
        Report = self.env['report']
        holidays_report = Report._get_report_from_name('hr_holidays.report_holidayssummary')
        holidays = self.env['hr.holidays'].browse(self.ids)
        docargs = {
            'doc_ids': self.ids,
            'doc_model': holidays_report.model,
            'docs': holidays,
            'get_header_info': self._get_header_info(data['form']['date_from'], data['form']['holiday_type']),
            'get_day': self._get_day(data['form']['date_from']),
            'get_months': self._get_months(data['form']['date_from']),
            'get_data_from_report': self._get_data_from_report(data['form']),
            'get_holidays_status': self._get_holidays_status(),
        }
        return Report.render('hr_holidays.report_holidayssummary', docargs)
| 5,140 | 290 | 23 |
5581c1ebee357f3dfeb30ae89b2502f626c5ed33 | 521 | py | Python | ex075.py | pepev123/PythonEx | 8f39751bf87a9099d7b733aa829988595dab2344 | [
"MIT"
] | null | null | null | ex075.py | pepev123/PythonEx | 8f39751bf87a9099d7b733aa829988595dab2344 | [
"MIT"
] | null | null | null | ex075.py | pepev123/PythonEx | 8f39751bf87a9099d7b733aa829988595dab2344 | [
"MIT"
] | null | null | null | v1 = int(input('Digite o primeiro valor: '))
v2 = int(input('Digite o segundo valor: '))
v3 = int(input('Digite o terceiro valor: '))
v4 = int(input('Digite o quarto valor: '))
v5 = int(input('Digite o quinto valor: '))
lista = (v1, v2, v3, v4, v5)
print(f'O 9 apareceu {lista.count(9)} vezes.')
if lista.count(3) > 0:
print(f'O primeiro 3 esta na {lista.index(3) + 1} posição.')
else:
print('Não tem nenhum 3')
print('Os números pares são: ', end='')
for n in lista:
if n % 2 == 0:
print(n, end=' ')
| 32.5625 | 64 | 0.612284 | v1 = int(input('Digite o primeiro valor: '))
# Read the remaining four values, then report on the 5-tuple:
# count of 9s, position of the first 3 (if any), and the even numbers.
v2 = int(input('Digite o segundo valor: '))
v3 = int(input('Digite o terceiro valor: '))
v4 = int(input('Digite o quarto valor: '))
v5 = int(input('Digite o quinto valor: '))
lista = (v1, v2, v3, v4, v5)
# How many times 9 appears.
print(f'O 9 apareceu {lista.count(9)} vezes.')
# Position (1-based) of the first 3, or a message when absent.
if lista.count(3) > 0:
    print(f'O primeiro 3 esta na {lista.index(3) + 1} posição.')
else:
    print('Não tem nenhum 3')
# List the even numbers on one line.
print('Os números pares são: ', end='')
for n in lista:
    if n % 2 == 0:
        print(n, end=' ')
| 0 | 0 | 0 |
f4656671d5efe09db1beb6b7b73d157a0ca91436 | 3,616 | py | Python | ppcd/core/infer.py | geoyee/PdRSCD | 4a1a7256320f006c15e3e5b5b238fdfba8198853 | [
"Apache-2.0"
] | 44 | 2021-04-21T02:41:55.000Z | 2022-03-09T03:01:16.000Z | ppcd/core/infer.py | MinZHANG-WHU/PdRSCD | 612976225201d78adc7ff99529ada17b41fedc5d | [
"Apache-2.0"
] | 2 | 2021-09-30T07:52:47.000Z | 2022-02-12T09:05:35.000Z | ppcd/core/infer.py | MinZHANG-WHU/PdRSCD | 612976225201d78adc7ff99529ada17b41fedc5d | [
"Apache-2.0"
] | 6 | 2021-07-23T02:18:39.000Z | 2022-01-14T01:15:50.000Z | import os
import cv2
import paddle
from tqdm import tqdm
# from paddle.io import DataLoader
from ppcd.datasets import DataLoader
from ppcd.tools import splicing_list, save_tif
# 进行滑框预测 | 37.278351 | 87 | 0.575774 | import os
import cv2
import paddle
from tqdm import tqdm
# from paddle.io import DataLoader
from ppcd.datasets import DataLoader
from ppcd.tools import splicing_list, save_tif
def Infer(model,
          infer_data,
          params_path=None,
          save_img_path=None,
          threshold=0.5):
    """Run whole-image inference and write one PNG prediction per sample.

    Loads `params_path` into `model`, iterates `infer_data` one sample at a
    time and saves the predicted mask: binary masks are scaled to 0/255,
    multi-class masks keep their raw class indices.

    :param model: paddle model; called as ``model(img)`` returning a list of logits.
    :param infer_data: dataset yielding ``(img, name)`` pairs.
    :param params_path: path of the trained parameter file to load.
    :param save_img_path: output folder (created if missing).
    :param threshold: foreground cut-off used when the model has 1 output channel.
    """
    # Data loader.
    infer_loader = DataLoader(infer_data, batch_size=1)
    # Start inference.
    if save_img_path is not None:
        if os.path.exists(save_img_path) == False:
            os.mkdir(save_img_path)
    model.eval()
    para_state_dict = paddle.load(params_path)
    model.set_dict(para_state_dict)
    lens = len(infer_data)
    for idx, infer_load_data in enumerate(infer_loader):
        if infer_load_data is None:
            break
        img, name = infer_load_data
        pred_list = model(img)
        # img = paddle.concat([A_img, B_img], axis=1)
        # pred_list = model(img)
        num_class, H, W = pred_list[0].shape[1:]
        if num_class == 2:
            # Two-channel output: argmax then scale {0,1} -> {0,255}.
            save_img = (paddle.argmax(pred_list[0], axis=1). \
                            squeeze().numpy() * 255).astype('uint8')
        elif num_class == 1:
            # Single-channel output: threshold the raw score map.
            save_img = ((pred_list[0] > threshold).numpy(). \
                            astype('uint8') * 255).reshape([H, W])
        else:
            # Multi-class output: keep raw class indices.
            save_img = (paddle.argmax(pred_list[0], axis=1). \
                            squeeze().numpy()).astype('uint8')
        save_path = os.path.join(save_img_path, (name[0] + '.png'))
        print('[Infer] ' + str(idx + 1) + '/' + str(lens) + ' file_path: ' + save_path)
        cv2.imwrite(save_path, save_img)
# Sliding-window inference.
def Slide_Infer(model,
                infer_data,
                params_path=None,
                save_img_path=None,
                threshold=0.5,
                name='result'):
    """Run tiled (sliding-window) inference and stitch the tiles back together.

    Switches `infer_data` to slide mode, predicts each tile, stitches tiles to
    the original image size with `splicing_list`, then writes one GeoTIFF
    (keeping geo-referencing) or one PNG named `name`.

    :param model: paddle model; called as ``model(img)`` returning a list of logits.
    :param infer_data: dataset with ``out_mode``/``raw_size``/``is_tif`` attributes.
    :param params_path: path of the trained parameter file to load.
    :param save_img_path: output folder for the stitched result.
    :param threshold: foreground cut-off used when the model has 1 output channel.
    :param name: base file name of the stitched result.
    """
    # Configure the dataset and read its metadata.
    infer_data.out_mode = 'slide'  # sliding-window mode
    raw_size = infer_data.raw_size  # size of the original image
    is_tif = infer_data.is_tif
    if infer_data.is_tif == True:
        geoinfo = infer_data.geoinfo
    # Data loader.
    # infer_loader = paddle.io.DataLoader(infer_data, batch_size=1)
    infer_loader = DataLoader(infer_data, batch_size=1)
    # Start inference.
    if save_img_path is not None:
        if os.path.exists(save_img_path) == False:
            os.mkdir(save_img_path)
    model.eval()
    para_state_dict = paddle.load(params_path)
    model.set_dict(para_state_dict)
    # lens = len(infer_data)
    inf_imgs = []  # predicted tiles collected for stitching
    # for idx, infer_load_data in qenumerate(infer_loader):
    for infer_load_data in tqdm(infer_loader):
        if infer_load_data is None:
            break
        img = infer_load_data
        pred_list = model(img)
        # img = paddle.concat([A_img, B_img], axis=1)
        # pred_list = model(img)
        num_class, H, W = pred_list[0].shape[1:]
        if num_class == 2:
            # Two-channel output: argmax then scale {0,1} -> {0,255}.
            inf_imgs.append((paddle.argmax(pred_list[0], axis=1). \
                                squeeze().numpy() * 255).astype('uint8'))
        elif num_class == 1:
            # Single-channel output: threshold the raw score map.
            inf_imgs.append(((pred_list[0] > threshold).numpy(). \
                                astype('uint8') * 255).reshape([H, W]))
        else:
            # Multi-class output: keep raw class indices.
            inf_imgs.append((paddle.argmax(pred_list[0], axis=1). \
                                squeeze().numpy()).astype('uint8'))
        # print('[Infer] ' + str(idx + 1) + '/' + str(lens))
    fix_img = splicing_list(inf_imgs, raw_size)  # stitch the tiles
    if is_tif == True:
        save_path = os.path.join(save_img_path, (name + '.tif'))
        save_tif(fix_img, geoinfo, save_path)
    else:
        save_path = os.path.join(save_img_path, (name + '.png'))
cv2.imwrite(save_path, fix_img) | 3,460 | 0 | 45 |
6bda4b7629b500829e21a60127c096aba2859b72 | 5,651 | py | Python | insurancecompany/insurancecompany/controllers.py | karthikpalavalli/csci5448 | 4d2c84f5ee9080e032e7d73c33c7378f8a813938 | [
"MIT"
] | null | null | null | insurancecompany/insurancecompany/controllers.py | karthikpalavalli/csci5448 | 4d2c84f5ee9080e032e7d73c33c7378f8a813938 | [
"MIT"
] | null | null | null | insurancecompany/insurancecompany/controllers.py | karthikpalavalli/csci5448 | 4d2c84f5ee9080e032e7d73c33c7378f8a813938 | [
"MIT"
] | null | null | null | from models import Admin, UserProxy, Customer
from db_models import db, db_session, UserDB, AppointmentsDB, InsurancePlanDB
from insurance_plan import BasicHealthPlan, CancerCare, CardiacCare, BasicLifePlan, ULIPBenefits, ComboPlan
if __name__ == "__main__":
# Add new user
new_user = add_user()
# Fetch existing user
new_user = get_user('leja@ic.com')
# View current plan for a user
# view_current_plan(new_user)
# Customer buys a new plan
buy_plan(new_user)
# Schedule a call
# schedule_call(new_user)
| 31.747191 | 114 | 0.606972 | from models import Admin, UserProxy, Customer
from db_models import db, db_session, UserDB, AppointmentsDB, InsurancePlanDB
from insurance_plan import BasicHealthPlan, CancerCare, CardiacCare, BasicLifePlan, ULIPBenefits, ComboPlan
def add_user():
    """Interactively create a new user via the built-in admin account.

    Prompts on stdin for the new user's fields, then delegates creation to
    ``Admin.add_user``.
    NOTE(review): admin credentials are hard-coded here -- move to config.

    :return: whatever ``Admin.add_user`` returns for the created user.
    """
    default_admin = Admin(username='karthik',
                          email='karthik@ic.com',
                          password='idontremember',
                          phone_no='7777777777',
                          postal_address='1, Beverly Park Circle, California')
    username = input("username: ")
    email = input("email: ")
    password = input("password: ")
    phone_no = input("phone no: ")
    postal_address = input("postal address: ")
    role = input("role: ")
    res = default_admin.add_user(username=username,
                                 email=email,
                                 password=password,
                                 phone_no=phone_no,
                                 postal_address=postal_address,
                                 role=role,
                                 session_id=None)
    print('User Added')
    return res
def get_user(email):
    """Load a user row by email and rebuild it as a ``Customer`` object.

    :param email: unique email used to look up the ``UserDB`` row.
    :return: ``Customer`` populated from the row, with its parsed
        ``additional_metadata`` dict attached.
    """
    curr_user = db_session.query(UserDB).filter(UserDB.email == email).one()
    username = curr_user.name
    email = curr_user.email
    password = curr_user.password
    # SECURITY(review): eval() on a DB-stored string executes arbitrary code
    # if that column is ever attacker-controlled -- prefer json.loads here.
    mod_additional_metadata = eval(curr_user.additional_metadata.replace('null', 'None'))
    phone_no = mod_additional_metadata.get('phone_no', 'UNK')
    postal_address = mod_additional_metadata.get('postal_address', 'UNK')
    curr_user_obj = Customer(username=username,
                             email=email,
                             password=password,
                             phone_no=phone_no,
                             postal_address=postal_address)
    curr_user_obj.additional_metadata = mod_additional_metadata
    return curr_user_obj
def view_current_plan(curr_user):
    """Print the user's active plan details after re-authenticating via a proxy.

    ``UserProxy.plan_details`` returns a string on auth failure and a dict of
    plan fields on success (empty dict when no plan is active).

    :param curr_user: the ``Customer`` whose plan should be shown.
    """
    user_proxy = UserProxy(curr_user)
    print("In order to access your profile please enter your credentials.")
    email = input('email: ')
    password = input('password: ')
    res = user_proxy.plan_details(email, password)
    if isinstance(res, str):
        # Auth / error message from the proxy.
        print(res)
    elif isinstance(res, dict):
        if not res:
            print("Currently there are no plans active under your name.")
        for key, value in res.items():
            print(key, ' : ', value)
def buy_plan(curr_user):
    """Interactively sell one of the six insurance plans to `curr_user`.

    Builds the two base plans with sample data, wraps them with the add-on
    decorators (CancerCare / CardiacCare / ULIPBenefits) or the ComboPlan
    composite according to the user's menu choice, then calls
    ``curr_user.buy_plan`` with the resulting details.

    NOTE(review): the final ``plan_details[1]`` assumes get_plan_details
    returns an indexable whose element 1 is the details payload; for the
    combo branch plan_details is ``additional_details`` -- confirm both
    shapes support ``[1]``.

    :param curr_user: the ``Customer`` buying the plan.
    """
    plans_available = ['Basic health plan', 'Basic health plan + cancer care', 'Basic health plan + cardiac care',
                       'Basic life plan', 'Basic life plan + ULIP', 'Combo plan']
    print('Currently the following plans are available for purchase: ')
    for index, plan in enumerate(plans_available):
        print(index+1, '. ', plan)
    choice = int(input('Please enter the choice you wish to buy: '))
    # Base health plan with sample coverage data.
    basic_health_plan = BasicHealthPlan()
    add_details = dict()
    add_details['illness_covered'] = ['flu', 'fever', 'headache']
    add_details['co-pay'] = 26.0
    add_details['total-cost'] = 280
    basic_health_plan.add_plan_details(plan_id=choice,
                                       plan_name=plans_available[choice-1],
                                       additional_details=add_details)
    # Base life plan with sample term data.
    basic_life_plan = BasicLifePlan()
    add_details = dict()
    add_details['pay-term'] = 120
    add_details['vesting-period'] = 40
    add_details['total-cost'] = 240
    add_details['premium-amount'] = 90
    basic_life_plan.add_plan_details(plan_id=choice,
                                     plan_name=plans_available[choice-1],
                                     additional_details=add_details)
    if choice == 1:
        plan_details = basic_health_plan.get_plan_details(choice)
    elif choice == 2:
        cancer_care = CancerCare(basic_health_plan)
        # Modifying already existing object
        cancer_care.add_plan_details(plan_id=choice,
                                     plan_name=plans_available[choice-1],
                                     additional_details=dict())
        plan_details = cancer_care.get_plan_details(choice)
    elif choice == 3:
        cardiac_care = CardiacCare(basic_health_plan)
        # Modifying already existing object
        cardiac_care.add_plan_details(plan_id=choice,
                                      plan_name=plans_available[choice - 1],
                                      additional_details=dict())
        plan_details = cardiac_care.get_plan_details(choice)
    elif choice == 4:
        plan_details = basic_life_plan.get_plan_details(choice)
    elif choice == 5:
        ulip_add_on = ULIPBenefits(basic_life_plan)
        # Modifying already existing object
        ulip_add_on.add_plan_details(plan_id=choice,
                                     plan_name=plans_available[choice - 1],
                                     additional_details=dict())
        plan_details = ulip_add_on.get_plan_details(choice)
    else:
        # Combo: composite of both base plans.
        combo_plan = ComboPlan()
        combo_plan.append(basic_health_plan)
        combo_plan.append(basic_life_plan)
        plan_details = combo_plan.additional_details
    print("Plan successfully bought")
    curr_user.buy_plan(plan_details=plan_details[1])
def schedule_call(curr_user):
    """Request a sales-representative call-back for `curr_user`."""
    curr_user.request_sales_rep()
if __name__ == "__main__":
    # Manual smoke-test driver exercising the controller flows end to end.
    # Add new user
    new_user = add_user()
    # Fetch existing user
    new_user = get_user('leja@ic.com')
    # View current plan for a user
    # view_current_plan(new_user)
    # Customer buys a new plan
    buy_plan(new_user)
    # Schedule a call
    # schedule_call(new_user)
| 4,981 | 0 | 115 |
ce5a2416c780442544d0d5e9283fbaff98d9c5b6 | 9,882 | py | Python | hn2pdf.py | KyrillosL/HackerNewsToPDF | 489e8225d14550c874c2eb448005e8313662eac6 | [
"BSD-3-Clause"
] | null | null | null | hn2pdf.py | KyrillosL/HackerNewsToPDF | 489e8225d14550c874c2eb448005e8313662eac6 | [
"BSD-3-Clause"
] | null | null | null | hn2pdf.py | KyrillosL/HackerNewsToPDF | 489e8225d14550c874c2eb448005e8313662eac6 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""Python-Pinboard
Python script for downloading your saved stories and saved comments on Hacker News.
"""
__version__ = "1.1"
__license__ = "BSD"
__copyright__ = "Copyright 2013-2014, Luciano Fiandesio"
__author__ = "Luciano Fiandesio <http://fiandes.io/> & John David Pressman <http://jdpressman.com>"
import argparse
import json
import os
import sys
import time
import urllib
import pdfkit
import requests
import tqdm
from bs4 import BeautifulSoup
from lxml import html
HACKERNEWS = 'https://news.ycombinator.com'
parser = argparse.ArgumentParser()
parser.add_argument("username", help="The Hacker News username to grab the stories from.")
parser.add_argument("password", help="The password to login with using the username.")
parser.add_argument("-f", "--file", help="Filepath to store the JSON document at.")
parser.add_argument("-n", "--number", default=1, type=int, help="Number of pages to grab, default 1. 0 grabs all pages.")
parser.add_argument("-s", "--stories", action="store_true", help="Grab stories only.")
parser.add_argument("-c", "--comments", action="store_true", help="Grab comments only.")
parser.add_argument("-pdf", "--pdf", default=1, type=bool, help="Save to PDF")
parser.add_argument("-o", "--output_folder", default="output/", type=str, help="Output Folder for PDF")
arguments = parser.parse_args()
def getSavedStories(session, hnuser, page_range):
    """Return a list of story IDs representing your saved stories.
    This function does not return the actual metadata associated, just the IDs.
    This list is traversed and each item inside is grabbed using the Hacker News
    API by story ID."""
    story_ids = []
    for page_index in page_range:
        # One GET per result page of the user's 'upvoted' listing.
        saved = session.get(HACKERNEWS + '/upvoted?id=' +
                            hnuser + "&p=" + str(page_index))
        soup = BeautifulSoup(saved.content, features="lxml")
        for tag in soup.findAll('td', attrs={'class': 'subtext'}):
            # NOTE(review): ``tag.a is not type(None)`` compares against the
            # NoneType class itself and is therefore always True; the intended
            # guard was probably ``tag.a is not None``.
            if tag.a is not type(None):
                a_tags = tag.find_all('a')
                for a_tag in a_tags:
                    if a_tag['href'][:5] == 'item?':
                        story_id = a_tag['href'].split('id=')[1]
                        story_ids.append(story_id)
                        # Only the first 'item?' link per row is needed.
                        break
    return story_ids
def getSavedComments(session, hnuser, page_range):
    """Return a list of IDs representing your saved comments.
    This function does not return the actual metadata associated, just the IDs.
    This list is traversed and each item inside is grabbed using the Hacker News
    API by ID."""
    comment_ids = []
    for page_index in page_range:
        # Same listing as stories but with the comments=t query flag.
        saved = session.get(HACKERNEWS + '/upvoted?id=' +
                            hnuser + "&comments=t" + "&p=" + str(page_index))
        soup = BeautifulSoup(saved.content, features="lxml")
        for tag in soup.findAll('td', attrs={'class': 'default'}):
            # NOTE(review): ``tag.a is not type(None)`` is always True (it
            # compares against the NoneType class); likely meant ``is not None``.
            if tag.a is not type(None):
                a_tags = tag.find_all('a')
                for a_tag in a_tags:
                    if a_tag['href'][:5] == 'item?':
                        comment_id = a_tag['href'].split('id=')[1]
                        comment_ids.append(comment_id)
                        # Only the first 'item?' link per row is needed.
                        break
    return comment_ids
def getHackerNewsItem(item_id):
    """Get an 'item' as specified in the HackerNews v0 API."""
    time.sleep(0.2)  # crude rate limiting between successive API calls
    item_json_link = "https://hacker-news.firebaseio.com/v0/item/" + item_id + ".json"
    try:
        # NOTE(review): only ``import urllib`` is visible at the top of this
        # file; ``urllib.request``/``urllib.error`` may be unbound unless a
        # transitive import pulls the submodules in -- confirm.
        with urllib.request.urlopen(item_json_link) as item_json:
            current_story = json.loads(item_json.read().decode('utf-8'))
            if "kids" in current_story:
                # Drop child-comment IDs; they are not used downstream.
                del current_story["kids"]
            # Escape / in name for a later use
            current_story["title"] = current_story["title"].replace("/", "-")
            return current_story
    except urllib.error.URLError:
        # Fall back to a stub record so one bad item does not abort the run.
        return {"title": "Item " + item_id + " could not be retrieved",
                "id": item_id}
if __name__ == "__main__":
    # NOTE(review): no main() is defined in this copy of the script, so
    # executing it directly would raise NameError -- confirm against the
    # complete version of the file.
    main()
| 40.334694 | 121 | 0.589152 | #!/usr/bin/env python
"""Python-Pinboard
Python script for downloading your saved stories and saved comments on Hacker News.
"""
__version__ = "1.1"
__license__ = "BSD"
__copyright__ = "Copyright 2013-2014, Luciano Fiandesio"
__author__ = "Luciano Fiandesio <http://fiandes.io/> & John David Pressman <http://jdpressman.com>"
import argparse
import json
import os
import sys
import time
import urllib
import pdfkit
import requests
import tqdm
from bs4 import BeautifulSoup
from lxml import html
HACKERNEWS = 'https://news.ycombinator.com'
parser = argparse.ArgumentParser()
parser.add_argument("username", help="The Hacker News username to grab the stories from.")
parser.add_argument("password", help="The password to login with using the username.")
parser.add_argument("-f", "--file", help="Filepath to store the JSON document at.")
parser.add_argument("-n", "--number", default=1, type=int, help="Number of pages to grab, default 1. 0 grabs all pages.")
parser.add_argument("-s", "--stories", action="store_true", help="Grab stories only.")
parser.add_argument("-c", "--comments", action="store_true", help="Grab comments only.")
parser.add_argument("-pdf", "--pdf", default=1, type=bool, help="Save to PDF")
parser.add_argument("-o", "--output_folder", default="output/", type=str, help="Output Folder for PDF")
arguments = parser.parse_args()
def save_to_disk(formatted_dict, in_folder):
    """Render each saved story to PDF under *in_folder*.

    For every item two files are produced inside a per-title subfolder:
    the article itself (``<title>.pdf`` -- downloaded directly when the URL
    already points at a PDF, otherwise rendered with pdfkit) and the HN
    comment page (``comments_<title>.pdf``). Files that already exist are
    skipped, so the function is safe to re-run incrementally.

    NOTE(review): items for which the API fetch failed lack a 'url' key and
    would raise KeyError here -- confirm upstream filters them out.
    """
    options = {
        'quiet': ''  # keep wkhtmltopdf from spamming stdout
    }
    pbar = tqdm.tqdm(formatted_dict)
    for e in pbar:
        pbar.set_description("Processing %s" % e["title"])
        folder = in_folder + e["title"] + "/"
        # makedirs(exist_ok=True) also covers a missing *in_folder* parent,
        # where the previous os.mkdir would raise FileNotFoundError.
        os.makedirs(folder, exist_ok=True)
        filename = e["title"] + '.pdf'
        # --- Article ---
        if not os.path.exists(folder + filename):
            if e['url'].endswith("pdf"):
                # The URL is already a PDF: download it as-is.
                response = requests.get(e['url'])
                with open(folder + filename, 'wb') as f:
                    f.write(response.content)
            else:
                try:
                    pdfkit.from_url(e["url"], folder + filename, options=options)
                except Exception as error:
                    print("Could not load url ", e["url"], " error : ", error)
        # --- HN comment page ---
        if not os.path.exists(folder + "comments_" + filename):
            url = "https://news.ycombinator.com/item?id=" + str(e["id"])
            try:
                pdfkit.from_url(url, folder + "comments_" + filename, options=options)
            except Exception as error:
                # Was a bare ``except:``, which also swallowed SystemExit
                # and KeyboardInterrupt and hid the failure reason.
                print("Could not load url ", url, " error : ", error)
        # Sanity-check the article PDF: a missing or suspiciously small
        # file indicates a failed render.
        if not os.path.exists(folder + filename):
            print("\n--Error, empty file for ", e["url"])
        else:
            statinfo = os.stat(folder + filename)
            if statinfo.st_size <= 2048:
                print("\n--Error, empty file for ", e["url"])
def getSavedStories(session, hnuser, page_range):
    """Return the IDs of the user's upvoted ('saved') stories.

    Only the IDs are collected here; each one is hydrated later through the
    Hacker News API. *session* must already be authenticated and
    *page_range* is the 1-based range of result pages to scrape.
    """
    story_ids = []
    for page_index in page_range:
        saved = session.get(HACKERNEWS + '/upvoted?id=' +
                            hnuser + "&p=" + str(page_index))
        soup = BeautifulSoup(saved.content, features="lxml")
        for tag in soup.findAll('td', attrs={'class': 'subtext'}):
            # Was ``tag.a is not type(None)``, which compares against the
            # NoneType class itself and is therefore always True; a plain
            # None check is what was intended.
            if tag.a is not None:
                a_tags = tag.find_all('a')
                for a_tag in a_tags:
                    if a_tag['href'].startswith('item?'):
                        story_id = a_tag['href'].split('id=')[1]
                        story_ids.append(story_id)
                        break  # first 'item?' link per row is enough
    return story_ids
def getSavedComments(session, hnuser, page_range):
    """Return the IDs of the user's upvoted ('saved') comments.

    Only the IDs are collected here; each one is hydrated later through the
    Hacker News API. *session* must already be authenticated and
    *page_range* is the 1-based range of result pages to scrape.
    """
    comment_ids = []
    for page_index in page_range:
        saved = session.get(HACKERNEWS + '/upvoted?id=' +
                            hnuser + "&comments=t" + "&p=" + str(page_index))
        soup = BeautifulSoup(saved.content, features="lxml")
        for tag in soup.findAll('td', attrs={'class': 'default'}):
            # Was ``tag.a is not type(None)`` -- always True; the intended
            # guard is a plain None check.
            if tag.a is not None:
                a_tags = tag.find_all('a')
                for a_tag in a_tags:
                    if a_tag['href'].startswith('item?'):
                        comment_id = a_tag['href'].split('id=')[1]
                        comment_ids.append(comment_id)
                        break  # first 'item?' link per row is enough
    return comment_ids
def loginToHackerNews(username, password):
    """Log in to Hacker News and return the authenticated requests session."""
    s = requests.Session() # init a session (use cookies across requests)
    headers = { # we need to specify an header to get the right cookie
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0) Gecko/20100101 Firefox/25.0',
        'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    }
    # Build the login POST data and make the login request.
    payload = {
        'whence': 'news',
        'acct': username,
        'pw': password
    }
    auth = s.post(HACKERNEWS + '/login', data=payload, headers=headers)
    # HN replies with HTTP 200 even on failure, so detect errors by
    # inspecting the returned page body instead of the status code.
    if 'Bad login' in str(auth.content):
        raise Exception("Hacker News authentication failed!")
    if not username in str(auth.content):
        raise Exception("Hacker News didn't succeed, username not displayed.")
    return s # return the http session
def getHackerNewsItem(item_id):
    """Fetch one HN item (story or comment) from the Firebase v0 API.

    Returns the decoded JSON dict with the bulky "kids" list stripped and
    '/' escaped out of the title (titles are later used as folder names by
    save_to_disk). On a network failure a stub dict is returned instead of
    raising, so one bad item does not abort a long scrape.
    """
    # A bare ``import urllib`` (as done at the top of this file) does not
    # bind the request/error submodules; import them explicitly here.
    import urllib.request
    import urllib.error
    time.sleep(0.2)  # crude rate limiting between successive API calls
    item_json_link = "https://hacker-news.firebaseio.com/v0/item/" + item_id + ".json"
    try:
        with urllib.request.urlopen(item_json_link) as item_json:
            current_story = json.loads(item_json.read().decode('utf-8'))
        if "kids" in current_story:
            # Drop child-comment IDs; they are not used downstream.
            del current_story["kids"]
        # Escape / in name for a later use
        current_story["title"] = current_story["title"].replace("/", "-")
        return current_story
    except urllib.error.URLError:
        return {"title": "Item " + item_id + " could not be retrieved",
                "id": item_id}
def item2stderr(item_id, item_count, item_total):
    """Report progress for one fetched item on stderr."""
    print("Got item {}. ({} of {})".format(item_id, item_count, item_total),
          file=sys.stderr)
def get_links(session, url):
    """Fetch *url* and return the href of its 'More' pagination link.

    Returns '' (falsy) when no ``morelink`` anchor is present, i.e. on the
    last results page -- callers use that to stop paging.
    """
    print("Fetching", url)
    response = session.get(url)
    tree = html.fromstring(response.content)
    morelink = tree.xpath('string(//a[@class="morelink"]/@href)')
    return morelink
def main():
    """Scrape the user's saved stories/comments and optionally export PDFs.

    Flow: log in, work out how many result pages exist, collect item IDs,
    hydrate each ID through the HN API (reusing entries already present in
    the JSON cache file), then write the JSON file and/or render PDFs.
    """
    json_items = {"saved_stories": list(), "saved_comments": list()}
    if arguments.stories and arguments.comments:
        # Assume that if somebody uses both flags they mean to grab both
        arguments.stories = False
        arguments.comments = False
    item_count = 0
    session = loginToHackerNews(arguments.username, arguments.password)
    # if n = 0 -> Get the number of pages and parse them
    nb_pages = arguments.number
    if nb_pages == 0:
        nb_pages = 1
        # Follow the 'More' links until the last page to count all pages.
        morelink = get_links(session, 'https://news.ycombinator.com/upvoted?id=' + arguments.username)
        while morelink:
            morelink = get_links(session, "https://news.ycombinator.com/" + morelink)
            nb_pages += 1
    print('nb_pages ', nb_pages)
    page_range = range(1, nb_pages + 1)
    if arguments.stories or (not arguments.stories and not arguments.comments):
        print("Getting Stories as JSON")
        story_ids = getSavedStories(session,
                                    arguments.username,
                                    page_range)
        pbar = tqdm.tqdm(story_ids)
        for story_id in pbar:
            should_analyse = True
            # Load the previous json file and check if we already analysed it before
            # NOTE(review): if --file is omitted, arguments.file is None and
            # os.path.exists(None) raises TypeError -- confirm.
            if os.path.exists(arguments.file) and os.stat(arguments.file).st_size != 0:
                with open(arguments.file) as outfile:
                    data = json.load(outfile)
                    if "saved_stories" in data:
                        for story in data["saved_stories"]:
                            # print(stories)
                            if story_id == str(story["id"]):
                                # print("same")
                                # pbar.set_description("Processing %s" % e[0])
                                should_analyse = False
                                json_items["saved_stories"].append(story)
            if should_analyse:
                json_items["saved_stories"].append(getHackerNewsItem(story_id))
    if arguments.comments or (not arguments.stories and not arguments.comments):
        item_count = 0
        comment_ids = getSavedComments(session,
                                       arguments.username,
                                       page_range)
        for comment_id in comment_ids:
            json_items["saved_comments"].append(getHackerNewsItem(comment_id))
            item_count += 1
            item2stderr(comment_id, item_count, len(comment_ids))
    if arguments.file:
        with open(arguments.file, 'w') as outfile:
            json.dump(json_items, outfile, indent=4)
    if arguments.pdf:
        print("Exporting to PDF")
        output_folder = arguments.output_folder
        if (not os.path.isdir(output_folder)):
            os.mkdir(output_folder)
        save_to_disk(json_items["saved_stories"], output_folder)
if __name__ == "__main__":
    # Entry point: only run the scrape when executed as a script.
    main()
| 5,727 | 0 | 115 |
82b747acfb6cfbe2be68b0df919e205e2f108117 | 1,679 | py | Python | server/routes/static.py | fpernice-google/website | e2675629b42701f65722471b0d3b552babd2a6c5 | [
"Apache-2.0"
] | null | null | null | server/routes/static.py | fpernice-google/website | e2675629b42701f65722471b0d3b552babd2a6c5 | [
"Apache-2.0"
] | null | null | null | server/routes/static.py | fpernice-google/website | e2675629b42701f65722471b0d3b552babd2a6c5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data Commons static content routes."""
from flask import Blueprint, render_template
from lib.gcs import list_blobs
_SA_FEED_BUCKET = 'datacommons-frog-feed'
_MAX_BLOBS = 1
bp = Blueprint(
'static',
__name__
)
@bp.route('/')
@bp.route('/about')
@bp.route('/faq')
@bp.route('/disclaimers')
@bp.route('/datasets')
@bp.route('/getinvolved')
@bp.route('/special_announcement')
@bp.route('/special_announcement/faq') | 24.691176 | 74 | 0.753425 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data Commons static content routes."""
from flask import Blueprint, render_template
from lib.gcs import list_blobs
_SA_FEED_BUCKET = 'datacommons-frog-feed'
_MAX_BLOBS = 1
bp = Blueprint(
'static',
__name__
)
@bp.route('/')
def homepage():
    """Render the homepage."""
    return render_template('static/homepage.html')
@bp.route('/about')
def about():
    """Render the static About page."""
    return render_template('static/about.html')
@bp.route('/faq')
def faq():
    """Render the static FAQ page."""
    return render_template('static/faq.html')
@bp.route('/disclaimers')
def disclaimers():
    """Render the static Disclaimers page."""
    return render_template('static/disclaimers.html')
@bp.route('/datasets')
def datasets():
    """Render the static Datasets overview page."""
    return render_template('static/datasets.html')
@bp.route('/getinvolved')
def get_involved():
    """Render the static Get Involved page."""
    return render_template('static/get_involved.html')
@bp.route('/special_announcement')
def special_announcement():
    """Render the special announcement page, passing it the most recent
    feed blobs listed from the GCS bucket."""
    recent_blobs = list_blobs(_SA_FEED_BUCKET, _MAX_BLOBS)
    return render_template(
        'static/special_announcement.html', recent_blobs=recent_blobs)
@bp.route('/special_announcement/faq')
def special_announcement_faq():
return render_template('static/special_announcement_faq.html') | 491 | 0 | 176 |
661836a327f7265f3f12be21776da646ba529953 | 1,231 | py | Python | arxiv/decorator.py | hbristow/django-arxiv | 04e499e064de74ef3aebe64e8445dc2c56536a2a | [
"BSD-3-Clause"
] | null | null | null | arxiv/decorator.py | hbristow/django-arxiv | 04e499e064de74ef3aebe64e8445dc2c56536a2a | [
"BSD-3-Clause"
] | null | null | null | arxiv/decorator.py | hbristow/django-arxiv | 04e499e064de74ef3aebe64e8445dc2c56536a2a | [
"BSD-3-Clause"
] | null | null | null | import functools
import collections
# ----------------------------------------------------------------------------
# Memoization/Caching
# ----------------------------------------------------------------------------
class cached(object):
    """Last 100 value memoization for functions of any arguments"""
    def __init__(self, func):
        """Cache the function/method of any arguments"""
        self.func = func
        self.cache = collections.OrderedDict()  # insertion-ordered, enabling oldest-first eviction
    def __repr__(self):
        """Return the original function's docstring"""
        return self.func.__doc__
    def __get__(self, obj, cls):
        """Support instance methods"""
        # NOTE(review): this copy defines no __call__, yet __get__ references
        # self.__call__ -- confirm against the complete implementation of
        # this class.
        return functools.partial(self.__call__, obj)
| 32.394737 | 78 | 0.515028 | import functools
import collections
# ----------------------------------------------------------------------------
# Memoization/Caching
# ----------------------------------------------------------------------------
class cached(object):
    """Memoizing decorator keeping the 100 most recent evaluations.

    Results are stored in an insertion-ordered dict keyed by the positional
    argument tuple; once more than 100 entries exist the oldest is evicted.
    Unhashable arguments bypass the cache entirely.
    """
    def __init__(self, func):
        """Cache the function/method of any arguments"""
        self.func = func
        self.cache = collections.OrderedDict()
    def __call__(self, *args):
        # ``isinstance(args, collections.Hashable)`` was doubly broken:
        # the alias was removed from ``collections`` in Python 3.10, and a
        # tuple is always an instance of Hashable even when its elements
        # are not, so unhashable arguments crashed on the cache lookup.
        # Attempting the hash is the reliable test.
        try:
            hash(args)
        except TypeError:
            # not hashable -> cannot be cached
            return self.func(*args)
        if args in self.cache:
            # cached
            return self.cache[args]
        # new
        value = self.func(*args)
        self.cache[args] = value
        # TODO: Make the number of stored evaluations a variable
        if len(self.cache) > 100:
            self.cache.popitem(last=False)
        return value
    def __repr__(self):
        """Return the original function's docstring"""
        return self.func.__doc__
    def __get__(self, obj, cls):
        """Support instance methods"""
        return functools.partial(self.__call__, obj)
| 498 | 0 | 27 |
15df36598c12c993de9c380506f7d9fd1078269a | 2,766 | py | Python | backend/mypkg/trained_bot.py | DKeen0123/SentiMind | 0ffb702e88879b3e2e02d3d94a703b1f8a785bd3 | [
"MIT"
] | 5 | 2018-04-09T16:47:53.000Z | 2018-07-05T11:03:25.000Z | backend/mypkg/trained_bot.py | DKeen0123/SentiMind | 0ffb702e88879b3e2e02d3d94a703b1f8a785bd3 | [
"MIT"
] | 2 | 2018-04-09T17:40:40.000Z | 2020-07-07T21:12:07.000Z | backend/mypkg/trained_bot.py | marcusfgardiner/SentiMind | d14b366ab36190df0bf3c867a149b7260ed1e2e4 | [
"MIT"
] | 3 | 2018-04-12T22:14:55.000Z | 2018-04-17T10:36:58.000Z | import wheel
import pandas as pd
import nltk
import numpy
import sklearn as skl
import pickle
f = open('./mypkg/bananoulli_20k.pickle', 'rb')
classifier = pickle.load(f)
f.close
df = pd.DataFrame(pd.read_csv('./mypkg/testingdataset.csv'))
sentiment_column = (df.iloc[:, [1]])
sentiment_array = sentiment_column.values
text_column = (df.iloc[:, [6]])
text_array = text_column.values
text = []
for words in text_array:
words_filtered = [e.lower() for e in words[0].split() if len(e) >= 3]
text.append((words_filtered))
testing_tweets = []
count = 0
for words in text:
tweet = (words, sentiment_array[count][0])
count += 1
testing_tweets.append(tweet)
# print (get_words_in_tweets(tweets))
word_features = get_word_features(get_words_in_tweets(testing_tweets))
# print(word_features)
# ----------------------------------------------------------------------
# Final classification methods
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Testing the ML model
# ----------------------------------------------------------------------
# test_tweet = 'lovely happy beautiful joy'
#
# print(classify_tweet(test_tweet))
#
# print(probability_positive(test_tweet))
#
# # ----------------------------------------------------------------------
# # Accuracy of the ML model
# # ----------------------------------------------------------------------
#
# testing_set = nltk.classify.apply_features(extract_features, testing_tweets)
#
# print("MultinomialNB accuracy percent:", nltk.classify.accuracy(classifier, testing_set))
| 28.515464 | 91 | 0.612437 | import wheel
import pandas as pd
import nltk
import numpy
import sklearn as skl
import pickle
f = open('./mypkg/bananoulli_20k.pickle', 'rb')
classifier = pickle.load(f)
f.close
df = pd.DataFrame(pd.read_csv('./mypkg/testingdataset.csv'))
sentiment_column = (df.iloc[:, [1]])
sentiment_array = sentiment_column.values
text_column = (df.iloc[:, [6]])
text_array = text_column.values
text = []
for words in text_array:
words_filtered = [e.lower() for e in words[0].split() if len(e) >= 3]
text.append((words_filtered))
testing_tweets = []
count = 0
for words in text:
tweet = (words, sentiment_array[count][0])
count += 1
testing_tweets.append(tweet)
def get_words_in_tweets(tweets):
    """Flatten (word_list, sentiment) pairs into one list of words,
    preserving order and duplicates; the sentiment labels are ignored."""
    return [word for words, _sentiment in tweets for word in words]
# print (get_words_in_tweets(tweets))
def get_word_features(wordlist):
    """Return the vocabulary of *wordlist*: the keys of an NLTK frequency
    distribution, i.e. a view over the unique words."""
    wordlist = nltk.FreqDist(wordlist)
    word_features = wordlist.keys()
    return word_features
word_features = get_word_features(get_words_in_tweets(testing_tweets))
# print(word_features)
def extract_features(text):
    """Encode the document *text* (a list of words) against the module-level
    ``word_features`` vocabulary as a bag-of-words presence dict, with keys
    of the form ``contains(<word>)`` mapped to True/False."""
    present = set(text)  # unique words of this document, O(1) membership
    return {'contains(%s)' % feature: feature in present
            for feature in word_features}
def process_tweet_for_classification(tweet):
    """Whitespace-tokenise *tweet* and map it onto the feature dict the
    classifier expects (via extract_features)."""
    return extract_features(tweet.split())
# ----------------------------------------------------------------------
# Final classification methods
# ----------------------------------------------------------------------
def classify_tweet(tweet):
    """Return the label the pickled classifier assigns to *tweet*.

    Labels appear to follow the Sentiment140 convention (0 = negative,
    4 = positive; see probability_positive) -- TODO confirm against the
    training data.
    """
    processed_tweet = process_tweet_for_classification(tweet)
    return classifier.classify(processed_tweet)
def probability_positive(tweet):
    """Map the classifier's P(label == 4) for *tweet* onto [-1, 1].

    Returns 2 * P(positive) - 1, i.e. -1 for certainly negative and +1 for
    certainly positive.
    NOTE(review): if label 4 is absent from the distribution's samples the
    loop falls through and the function implicitly returns None -- confirm
    callers handle that.
    """
    processed_tweet = process_tweet_for_classification(tweet)
    dist = classifier.prob_classify(processed_tweet)
    for label in dist.samples():
        if label == 4:
            return (((dist.prob(label))*2)-1)
# ----------------------------------------------------------------------
# Testing the ML model
# ----------------------------------------------------------------------
# test_tweet = 'lovely happy beautiful joy'
#
# print(classify_tweet(test_tweet))
#
# print(probability_positive(test_tweet))
#
# # ----------------------------------------------------------------------
# # Accuracy of the ML model
# # ----------------------------------------------------------------------
#
# testing_set = nltk.classify.apply_features(extract_features, testing_tweets)
#
# print("MultinomialNB accuracy percent:", nltk.classify.accuracy(classifier, testing_set))
| 983 | 0 | 138 |
0ce3a0003147fa52ff085740d54a1eff50de0ab4 | 181 | py | Python | djvision/dashboard/forms.py | carthage-college/django-djvision | 90af7e1da56f9abd35d87444e0cf4a0b46c9d999 | [
"MIT"
] | null | null | null | djvision/dashboard/forms.py | carthage-college/django-djvision | 90af7e1da56f9abd35d87444e0cf4a0b46c9d999 | [
"MIT"
] | 1 | 2020-07-16T20:38:59.000Z | 2020-07-16T20:38:59.000Z | djvision/dashboard/forms.py | carthage-college/django-djvision | 90af7e1da56f9abd35d87444e0cf4a0b46c9d999 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django import forms
| 16.454545 | 61 | 0.662983 | # -*- coding: utf-8 -*-
from django import forms
class DetailCreatedForm(forms.Form):
    """Filter form: select records created on or after a given date."""
    created_at = forms.DateField(label="Created on or after")
    class Meta:
        # NOTE(review): an empty Meta is inert on a plain forms.Form (it is
        # only meaningful for ModelForm) -- confirm it can be removed.
        pass
| 0 | 108 | 23 |
4deb9652fac25a75cd254d2b6953dcca8aee71d4 | 669 | py | Python | boto/dynamodb/exceptions.py | krux/boto | 496adaff8988164e61c2d6f259b7eda671899079 | [
"BSD-3-Clause"
] | null | null | null | boto/dynamodb/exceptions.py | krux/boto | 496adaff8988164e61c2d6f259b7eda671899079 | [
"BSD-3-Clause"
] | null | null | null | boto/dynamodb/exceptions.py | krux/boto | 496adaff8988164e61c2d6f259b7eda671899079 | [
"BSD-3-Clause"
] | null | null | null | """
Exceptions that are specific to the dynamodb module.
"""
from boto.exception import BotoServerError, BotoClientError
class DynamoDBExpiredTokenError(BotoServerError):
"""
Raised when a DynamoDB security token expires. This is generally boto's
(or the user's) notice to renew their DynamoDB security tokens.
"""
pass
class DynamoDBKeyNotFoundError(BotoClientError):
"""
Raised when attempting to retrieve or interact with an item whose key
can't be found.
"""
pass
class DynamoDBItemError(BotoClientError):
"""
Raised when invalid parameters are passed when creating a
new Item in DynamoDB.
"""
pass
| 23.068966 | 75 | 0.715994 | """
Exceptions that are specific to the dynamodb module.
"""
from boto.exception import BotoServerError, BotoClientError
class DynamoDBExpiredTokenError(BotoServerError):
    """
    Raised when a DynamoDB security token expires. This is generally boto's
    (or the user's) notice to renew their DynamoDB security tokens.
    """
    # Server-reported condition, hence the BotoServerError base class.
    pass
class DynamoDBKeyNotFoundError(BotoClientError):
    """
    Raised when attempting to retrieve or interact with an item whose key
    can't be found.
    """
    # Caller-side misuse, hence the BotoClientError base class.
    pass
class DynamoDBItemError(BotoClientError):
    """
    Raised when invalid parameters are passed when creating a
    new Item in DynamoDB.
    """
    # Caller-side misuse, hence the BotoClientError base class.
    pass
| 0 | 0 | 0 |
ca7d4dfb6b9cf6148be6352040539f797a96377a | 1,288 | py | Python | src/count_min_sketch.py | doksketch/effective-bassoon | 2b8a6bbe6a82b96d1443f521061cfbffafe4d9c8 | [
"MIT"
] | null | null | null | src/count_min_sketch.py | doksketch/effective-bassoon | 2b8a6bbe6a82b96d1443f521061cfbffafe4d9c8 | [
"MIT"
] | null | null | null | src/count_min_sketch.py | doksketch/effective-bassoon | 2b8a6bbe6a82b96d1443f521061cfbffafe4d9c8 | [
"MIT"
] | null | null | null | # Count-Min Sketch - вероятностная структура данных для быстрого примерного подсчёта частоты встречаемости элементов
import random
from collections import Counter
if __name__ == '__main__':
data = [random.randint(0, 5) for i in range(100)]
print(Counter(data))
cms = CountMinSketch(top_k=3)
for i in range(len(data)):
# key = data[random.randint(0, random.randint(0, len(data) - 1))]
cms.increment_value(key=data[i])
print(cms.get_minimum(0))
print(cms.get_minimum(1))
print(cms.get_minimum(2))
print(cms.get_minimum(3))
print(cms.get_minimum(4))
print(cms.get_minimum(5)) | 32.2 | 116 | 0.64441 | # Count-Min Sketch - вероятностная структура данных для быстрого примерного подсчёта частоты встречаемости элементов
import random
from collections import Counter
class CountMinSketch:
    """Count-Min Sketch: probabilistic approximate frequency counter.

    The table has ``top_k`` rows of ``top_k ** 2`` counters. Every key maps
    to one counter per row and its estimated count is the minimum over its
    rows -- an upper bound on the true count (never an undercount).
    """
    def __init__(self, top_k):
        self.total_hashes = top_k
        self.width = top_k ** 2  # counters per row
        # Build *independent* rows with a comprehension. The previous
        # ``[[0] * w] * n`` aliased one row n times, so every increment was
        # counted n-fold, and the unbounded ``hash(key)`` bucket indices
        # could raise IndexError for keys with large hashes.
        self.min_sketch = [[0] * self.width
                           for _ in range(self.total_hashes)]
    def get_hash(self, key):
        """Return one bucket index per row. Salting the hash with the row
        number makes the rows behave like different hash functions, and
        the modulo keeps every index inside the table."""
        return [hash((i, key)) % self.width
                for i in range(self.total_hashes)]
    def increment_value(self, key):
        """Record one occurrence of *key*; returns self for chaining."""
        for i, hash_value in enumerate(self.get_hash(key)):
            self.min_sketch[i][hash_value] += 1
        return self
    def get_minimum(self, key):
        """Return ``(key, estimated_count)``: the minimum of the key's
        counters across all rows."""
        minimum = min(self.min_sketch[i][hash_value]
                      for i, hash_value in enumerate(self.get_hash(key)))
        return key, minimum
if __name__ == '__main__':
data = [random.randint(0, 5) for i in range(100)]
print(Counter(data))
cms = CountMinSketch(top_k=3)
for i in range(len(data)):
# key = data[random.randint(0, random.randint(0, len(data) - 1))]
cms.increment_value(key=data[i])
print(cms.get_minimum(0))
print(cms.get_minimum(1))
print(cms.get_minimum(2))
print(cms.get_minimum(3))
print(cms.get_minimum(4))
print(cms.get_minimum(5)) | 501 | 0 | 131 |
faf7aed00416d5bc183b1f1e0f0c7bccf21b314b | 2,671 | py | Python | stimulus_presentation/generate_spatial_gratings.py | gzoumpourlis/muse-lsl | 309d339b475e2b8914f2a96616ea0fb9d014b84e | [
"BSD-3-Clause"
] | 5 | 2019-01-22T11:24:11.000Z | 2022-03-29T04:59:59.000Z | stimulus_presentation/generate_spatial_gratings.py | gzoumpourlis/muse-lsl | 309d339b475e2b8914f2a96616ea0fb9d014b84e | [
"BSD-3-Clause"
] | null | null | null | stimulus_presentation/generate_spatial_gratings.py | gzoumpourlis/muse-lsl | 309d339b475e2b8914f2a96616ea0fb9d014b84e | [
"BSD-3-Clause"
] | 4 | 2018-03-12T06:56:20.000Z | 2020-12-24T07:53:21.000Z | """
Generate spatial gratings
=========================
Stimulus presentation based on gratings of different spatial frequencies
for generating ERPs, high frequency oscillations, and alpha reset.
Inspired from:
> Hermes, Dora, K. J. Miller, B. A. Wandell, and Jonathan Winawer. "Stimulus
dependence of gamma oscillations in human visual cortex." Cerebral Cortex 25,
no. 9 (2015): 2951-2959.
"""
from time import time
from optparse import OptionParser
import numpy as np
import pandas as pd
from psychopy import visual, core, event
from pylsl import StreamInfo, StreamOutlet, local_clock
parser = OptionParser()
parser.add_option("-d", "--duration",
dest="duration", type='int', default=400,
help="duration of the recording in seconds.")
(options, args) = parser.parse_args()
# Create markers stream outlet
info = StreamInfo('Markers', 'Markers', 3, 0, 'float32', 'myuidw43536')
channels = info.desc().append_child("channels")
for c in ['Frequency', 'Contrast', 'Orientation']:
channels.append_child("channel") \
.append_child_value("label", c)
outlet = StreamOutlet(info)
start = time()
# Set up trial parameters
n_trials = 2010
iti = 1.0
soa = 1.5
jitter = 0.5
record_duration = np.float32(options.duration)
# Setup trial list
frequency = np.random.binomial(1, 0.5, n_trials)
contrast = np.ones(n_trials, dtype=int)
orientation = np.random.randint(0, 4, n_trials) * 45
trials = pd.DataFrame(dict(frequency=frequency,
contrast=contrast,
orientation=orientation))
# graphics
mywin = visual.Window([1920, 1080], monitor="testMonitor", units="deg",
fullscr=True)
grating = visual.GratingStim(win=mywin, mask='circle', size=40, sf=4)
fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0, 0], sf=0,
rgb=[1, 0, 0])
rs = np.random.RandomState(42)
core.wait(2)
for ii, trial in trials.iterrows():
# onset
fre = trials['frequency'].iloc[ii]
contrast = trials['contrast'].iloc[ii]
ori = trials['orientation'].iloc[ii]
grating.sf = 4 * fre + 0.1
grating.ori = ori
grating.contrast = contrast
grating.draw()
fixation.draw()
# Send marker
outlet.push_sample([fre + 1, contrast, ori], local_clock())
mywin.flip()
# offset
core.wait(soa)
fixation.draw()
outlet.push_sample([fre + 3, contrast, ori], local_clock())
mywin.flip()
if len(event.getKeys()) > 0 or (time() - start) > record_duration:
break
event.clearEvents()
# Intertrial interval
core.wait(iti + np.random.rand() * jitter)
# Cleanup
mywin.close()
| 26.186275 | 77 | 0.655185 | """
Generate spatial gratings
=========================
Stimulus presentation based on gratings of different spatial frequencies
for generating ERPs, high frequency oscillations, and alpha reset.
Inspired from:
> Hermes, Dora, K. J. Miller, B. A. Wandell, and Jonathan Winawer. "Stimulus
dependence of gamma oscillations in human visual cortex." Cerebral Cortex 25,
no. 9 (2015): 2951-2959.
"""
from time import time
from optparse import OptionParser
import numpy as np
import pandas as pd
from psychopy import visual, core, event
from pylsl import StreamInfo, StreamOutlet, local_clock
parser = OptionParser()
parser.add_option("-d", "--duration",
dest="duration", type='int', default=400,
help="duration of the recording in seconds.")
(options, args) = parser.parse_args()
# Create markers stream outlet
info = StreamInfo('Markers', 'Markers', 3, 0, 'float32', 'myuidw43536')
channels = info.desc().append_child("channels")
for c in ['Frequency', 'Contrast', 'Orientation']:
channels.append_child("channel") \
.append_child_value("label", c)
outlet = StreamOutlet(info)
start = time()
# Set up trial parameters
n_trials = 2010
iti = 1.0
soa = 1.5
jitter = 0.5
record_duration = np.float32(options.duration)
# Setup trial list
frequency = np.random.binomial(1, 0.5, n_trials)
contrast = np.ones(n_trials, dtype=int)
orientation = np.random.randint(0, 4, n_trials) * 45
trials = pd.DataFrame(dict(frequency=frequency,
contrast=contrast,
orientation=orientation))
# graphics
mywin = visual.Window([1920, 1080], monitor="testMonitor", units="deg",
fullscr=True)
grating = visual.GratingStim(win=mywin, mask='circle', size=40, sf=4)
fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0, 0], sf=0,
rgb=[1, 0, 0])
rs = np.random.RandomState(42)
core.wait(2)
for ii, trial in trials.iterrows():
    # onset: configure the grating for this trial and draw it
    fre = trials['frequency'].iloc[ii]
    contrast = trials['contrast'].iloc[ii]
    ori = trials['orientation'].iloc[ii]
    grating.sf = 4 * fre + 0.1  # fre is 0/1, so sf is 0.1 or 4.1
    grating.ori = ori
    grating.contrast = contrast
    grating.draw()
    fixation.draw()
    # Send marker: fre + 1 (i.e. 1 or 2) encodes stimulus onset
    outlet.push_sample([fre + 1, contrast, ori], local_clock())
    mywin.flip()
    # offset: after `soa` seconds show fixation only; fre + 3 marks offset
    core.wait(soa)
    fixation.draw()
    outlet.push_sample([fre + 3, contrast, ori], local_clock())
    mywin.flip()
    # Stop on any keypress, or once the recording duration is exceeded.
    if len(event.getKeys()) > 0 or (time() - start) > record_duration:
        break
    event.clearEvents()
    # Intertrial interval, jittered by up to `jitter` seconds
    core.wait(iti + np.random.rand() * jitter)
# Cleanup
mywin.close()
| 0 | 0 | 0 |
47440badfacebb3df762d7bd5080ce06af9cc492 | 4,523 | py | Python | executables/rfoutlets_coffee.py | mjvandermeulen/rpi-automation | 0b328cab8876929e46235482d217dc4771dfdc6a | [
"MIT"
] | null | null | null | executables/rfoutlets_coffee.py | mjvandermeulen/rpi-automation | 0b328cab8876929e46235482d217dc4771dfdc6a | [
"MIT"
] | null | null | null | executables/rfoutlets_coffee.py | mjvandermeulen/rpi-automation | 0b328cab8876929e46235482d217dc4771dfdc6a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# EQUAL PARTS VINEGAR AND WATER
#
# https://www.goodhousekeeping.com/home/cleaning/tips/a26565/cleaning-coffee-maker/
#
# Fill the reservoir with equal parts vinegar and water, and place a paper filter
# into the machine's empty basket. Position the pot in place, and "brew" the solution
# halfway. Turn off the machine, and let it sit for 30 minutes. Then, turn the
# coffee maker back on, finish the brewing, and dump the full pot of vinegar and water.
# Rinse everything out by putting in a new paper filter and brewing a full pot
# of clean water. Repeat once.
import time
import argparse
import collections
import math
# from settings.automation_settings import AUTOMATION_EXECUTABLES_PATH
from remote_frequency_outlets import rfoutlets as rfo
from settings import automation_settings
# schedule_brew(args.outlet_group, schedule_time, settings.brew_time,)
settings = automation_settings.coffee_settings["default"]
cleaning_instructions = "Add vinegar and water 1 : 1 in coffeemaker. Fill MrCoffee to 12 cups when using default settings."
try:
parser = argparse.ArgumentParser(
description="Mr Coffee 12 cup coffeemaker programmer using a remote frequency outlet.")
parser.add_argument("outlet_group")
parser.add_argument('--delay', '-d',
help='delay start of brewing in minutes',
type=float, default=automation_settings.coffee_default_delay,
metavar='min')
maintenance_group = parser.add_mutually_exclusive_group()
maintenance_group.add_argument('--clean', '-c',
action='store_true',
help='cleaning cycle for full 12 cup MrCoffee 1/2 vinegar 1/2 water')
maintenance_group.add_argument('--rinse', '-r',
action='store_true',
help='rinse the coffeepot after the cleaning cycle')
maintenance_group.add_argument('--test',
action="store_true",
help='used by pytest, to run a quicker test'
)
args = parser.parse_args()
if args.test:
settings = automation_settings.coffee_settings["test"]
elif args.clean:
settings = automation_settings.coffee_settings["clean"]
elif args.rinse:
settings = automation_settings.coffee_settings["rinse"]
args_dict = vars(args)
for key in args_dict:
print(key + ' -> ' + str(args_dict[key]))
total_hours = (
args.delay * 60 +
(settings.pause * (settings.cycles - 1) +
settings.brew_time * settings.cycles) / (60.0 * 60.0)
)
print
print(cleaning_instructions)
print
print("The brewing process will start in {:3d} minutes, and will be finished {:.2f} hours from now...".format(
args.delay, total_hours))
rv = ''
schedule_time = args.delay * 60
for i in range(settings.cycles):
# PAUSE
if i > 0:
schedule_time += settings.pause
# BREW:
minutes_from_now = int(math.ceil(schedule_time / 60))
if settings.brew_time < 3 * 60:
# schedule once and use 1 blink for length of brew
schedule_brew(args.outlet_group, minutes_from_now,
settings.brew_time)
else:
# schedule twice: turn on and turn off
rfo.rfo_schedule_in_minutes(
args.outlet_group, 'on', minutes_from_now, 3, 1)
minutes_from_now = int(math.ceil(
(schedule_time + settings.brew_time) / 60))
rfo.rfo_schedule_in_minutes(
args.outlet_group, 'off', minutes_from_now, 3, 1)
schedule_time += settings.brew_time
except KeyboardInterrupt:
rfo.switch_outlet_group(args.outlet_group, 'off')
print
print("KeyboardInterrupt")
print
except Exception as error:
rfo.switch_outlet_group(args.outlet_group, 'off')
print
print("An error occured. I'm super sorry: ")
print("error: ")
print(error)
print
else:
print
print("DONE, no exceptions")
| 35.614173 | 123 | 0.636746 | #!/usr/bin/env python
# EQUAL PARTS VINEGAR AND WATER
#
# https://www.goodhousekeeping.com/home/cleaning/tips/a26565/cleaning-coffee-maker/
#
# Fill the reservoir with equal parts vinegar and water, and place a paper filter
# into the machine's empty basket. Position the pot in place, and "brew" the solution
# halfway. Turn off the machine, and let it sit for 30 minutes. Then, turn the
# coffee maker back on, finish the brewing, and dump the full pot of vinegar and water.
# Rinse everything out by putting in a new paper filter and brewing a full pot
# of clean water. Repeat once.
import time
import argparse
import collections
import math
# from settings.automation_settings import AUTOMATION_EXECUTABLES_PATH
from remote_frequency_outlets import rfoutlets as rfo
from settings import automation_settings
# schedule_brew(args.outlet_group, schedule_time, settings.brew_time,)
def schedule_brew(group, minutes_from_now, brew_time):
    """Schedule one brew cycle on the given outlet group.

    The outlet is driven via a single rfo schedule entry whose blink pattern
    (1 blink lasting ``brew_time`` seconds) performs the brew; the final
    resting state of the outlet is 'off'.
    """
    whole_minutes = int(math.ceil(minutes_from_now))
    at_spec = 'now + {} minute'.format(whole_minutes)
    # args: time spec, group, final mode, minutes, attempts, delay, blink
    rfo.rfo_schedule(at_spec, group, 'off', minutes_from_now,
                     3, 1, (1, brew_time, 0))
# CLI entry: parse arguments, pick a settings profile, then schedule the
# brew/clean/rinse cycles through the rf-outlet scheduler.  On any failure
# (or Ctrl-C) the outlet is forced off as a safety measure.
settings = automation_settings.coffee_settings["default"]
cleaning_instructions = "Add vinegar and water 1 : 1 in coffeemaker. Fill MrCoffee to 12 cups when using default settings."
try:
    parser = argparse.ArgumentParser(
        description="Mr Coffee 12 cup coffeemaker programmer using a remote frequency outlet.")
    parser.add_argument("outlet_group")
    parser.add_argument('--delay', '-d',
                        help='delay start of brewing in minutes',
                        type=float, default=automation_settings.coffee_default_delay,
                        metavar='min')
    # --clean / --rinse / --test are mutually exclusive maintenance modes.
    maintenance_group = parser.add_mutually_exclusive_group()
    maintenance_group.add_argument('--clean', '-c',
                                   action='store_true',
                                   help='cleaning cycle for full 12 cup MrCoffee 1/2 vinegar 1/2 water')
    maintenance_group.add_argument('--rinse', '-r',
                                   action='store_true',
                                   help='rinse the coffeepot after the cleaning cycle')
    maintenance_group.add_argument('--test',
                                   action="store_true",
                                   help='used by pytest, to run a quicker test'
                                   )
    args = parser.parse_args()
    # Select the settings profile matching the chosen mode.
    if args.test:
        settings = automation_settings.coffee_settings["test"]
    elif args.clean:
        settings = automation_settings.coffee_settings["clean"]
    elif args.rinse:
        settings = automation_settings.coffee_settings["rinse"]
    args_dict = vars(args)
    for key in args_dict:
        print(key + ' -> ' + str(args_dict[key]))
    # Total wall-clock time: delay + all pauses + all brew cycles, in hours.
    total_hours = (
        args.delay * 60 +
        (settings.pause * (settings.cycles - 1) +
         settings.brew_time * settings.cycles) / (60.0 * 60.0)
    )
    # NOTE(review): the bare `print` lines below are Python-2 leftovers; in
    # Python 3 they evaluate the function object without printing a newline —
    # presumably `print()` was intended.
    print
    print(cleaning_instructions)
    print
    print("The brewing process will start in {:3d} minutes, and will be finished {:.2f} hours from now...".format(
        args.delay, total_hours))
    rv = ''
    schedule_time = args.delay * 60
    for i in range(settings.cycles):
        # PAUSE
        if i > 0:
            schedule_time += settings.pause
        # BREW:
        minutes_from_now = int(math.ceil(schedule_time / 60))
        if settings.brew_time < 3 * 60:
            # schedule once and use 1 blink for length of brew
            schedule_brew(args.outlet_group, minutes_from_now,
                          settings.brew_time)
        else:
            # schedule twice: turn on and turn off
            rfo.rfo_schedule_in_minutes(
                args.outlet_group, 'on', minutes_from_now, 3, 1)
            minutes_from_now = int(math.ceil(
                (schedule_time + settings.brew_time) / 60))
            rfo.rfo_schedule_in_minutes(
                args.outlet_group, 'off', minutes_from_now, 3, 1)
        schedule_time += settings.brew_time
except KeyboardInterrupt:
    # Safety: always force the outlet off when interrupted.
    rfo.switch_outlet_group(args.outlet_group, 'off')
    print
    print("KeyboardInterrupt")
    print
except Exception as error:
    rfo.switch_outlet_group(args.outlet_group, 'off')
    print
    print("An error occured. I'm super sorry: ")
    print("error: ")
    print(error)
    print
else:
    print
    print("DONE, no exceptions")
| 313 | 0 | 23 |
f6c593febed2e63f1abe9c1ff092a8e95e3f2f01 | 538 | py | Python | hasher-matcher-actioner/hmalib/common/tests/test_actioner_models.py | king40or1/ThreatExchange | 95680d1568241bf63249f91480bbf1c7bbe9b699 | [
"BSD-3-Clause"
] | null | null | null | hasher-matcher-actioner/hmalib/common/tests/test_actioner_models.py | king40or1/ThreatExchange | 95680d1568241bf63249f91480bbf1c7bbe9b699 | [
"BSD-3-Clause"
] | null | null | null | hasher-matcher-actioner/hmalib/common/tests/test_actioner_models.py | king40or1/ThreatExchange | 95680d1568241bf63249f91480bbf1c7bbe9b699 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from hmalib.common.actioner_models import Label
| 31.647059 | 70 | 0.702602 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from hmalib.common.actioner_models import Label
class LabelsTestCase(unittest.TestCase):
    """Unit tests for the ``Label`` value object."""

    def test_label_validation(self):
        """Constructing a Label with a key and value must not raise."""
        Label("some key", "some value")

    def test_label_serde(self):
        """Round-trip a Label through its DynamoDB dict representation."""
        original = Label("some key", "some value")
        restored = Label.from_dynamodb_dict(original.to_dynamodb_dict())
        self.assertEqual(original, restored)
| 306 | 19 | 76 |
a20559159ffc09aa188a9b5b13110229a0e2c738 | 3,350 | py | Python | src/models/model_trainer.py | arturgontijo/NN_compression | 4ba46b244eadf0ff492276d4df8777943e79f48a | [
"MIT"
] | 1 | 2021-03-29T17:06:11.000Z | 2021-03-29T17:06:11.000Z | src/models/model_trainer.py | arturgontijo/NN_compression | 4ba46b244eadf0ff492276d4df8777943e79f48a | [
"MIT"
] | null | null | null | src/models/model_trainer.py | arturgontijo/NN_compression | 4ba46b244eadf0ff492276d4df8777943e79f48a | [
"MIT"
] | null | null | null | from __future__ import print_function
import tensorflow as tf
import sys
| 37.640449 | 133 | 0.575821 | from __future__ import print_function
import tensorflow as tf
import sys
class ModelTrainer:
    """Trains a character-level model (TF1 graph/session API).

    Wraps a ``model`` object exposing ``train_on_batch``/``loss_on_batch``
    and a ``config`` carrying vocab, paths and hyper-parameters.
    """
    def vocab_encode(self, text):
        """Map characters to vocab indices, silently dropping unknown chars."""
        return [self.config.vocab.index(x) for x in text if x in self.config.vocab]
    def vocab_decode(self, array):
        """Inverse of ``vocab_encode``: indices back to a string."""
        return ''.join([self.config.vocab[x] for x in array])
    def read_data(self, data_path):
        """Yield overlapping encoded chunks of length max_length+1 per line.

        Short trailing chunks are zero-padded so every yield has equal length.
        """
        window = self.config.max_length
        overlap = 1
        for text in open(data_path):
            text = self.vocab_encode(text)
            for start in range(0, len(text) - window - 1, overlap):
                chunk = text[start: start + window + 1]
                chunk += [0] * (window+1 - len(chunk))
                yield chunk
    def get_batch(self,stream):
        """Group chunks into (inputs, labels) batches; labels are inputs
        shifted one step right.  A trailing partial batch is discarded."""
        input_batch = []
        label_batch = []
        for element in stream:
            input_batch.append(element[:-1])
            label_batch.append(element[1:])
            if len(label_batch) == self.config.batch_size:
                data_tuple = (input_batch, label_batch)
                yield data_tuple
                input_batch = []
                label_batch = []
        # yield batch
    def run_validation(self, sess):
        """Return the mean per-batch loss over the validation file.

        RNN state is threaded across batches within the validation pass.
        """
        state = None
        epoch_loss = 0
        num_batches = 0
        for batch in self.get_batch(self.read_data(self.config.validate_path)):
            _input = batch[0]
            _labels = batch[1]
            batch_loss, state = self.model.loss_on_batch(sess, _input, _labels, state)
            epoch_loss += batch_loss
            num_batches += 1
        epoch_loss = epoch_loss/num_batches
        return epoch_loss
    def run_epoch(self, sess, epoch, writer=None):
        """Run one training epoch, logging summaries and (optionally)
        validating every ``config.validate_every`` steps.

        Exits the whole process once validation loss drops close to the
        configured entropy floor.
        """
        state = None
        for batch in self.get_batch(self.read_data(self.config.data_path)):
            _input = batch[0]
            _labels = batch[1]
            batch_loss, state, global_step, summary = self.model.train_on_batch(sess, _input , _labels, state, self.config.dropout)
            writer.add_summary(summary, global_step)
            if (global_step + 1) % self.config.print_every == 0:
                print('Epoch: {} Global Iter {}: Loss {}'.format(epoch, global_step, batch_loss) )
            # if we want to validate
            if self.config.validate_every > 0:
                if (global_step + 1) % self.config.validate_every == 0:
                    val_loss = self.run_validation(sess)
                    print('Epoch: {} Global Iter {}: Validation Loss {}'.format(epoch, global_step, val_loss) )
                    summary = tf.Summary()
                    summary.value.add(tag="Validation_Loss", simple_value=val_loss)
                    writer.add_summary(summary, global_step)
                    if val_loss < self.config.entropy + 0.1:
                        sys.exit("stopping as learning is complete")
    def do_training(self):
        """Open a session, initialise variables and train for num_epochs."""
        saver = tf.train.Saver()
        merged_summaries = tf.summary.merge_all()
        with tf.Session() as sess:
            writer = tf.summary.FileWriter(self.config.summary_path, sess.graph)
            sess.run(tf.global_variables_initializer())
            for epoch in range(self.config.num_epochs):
                self.run_epoch(sess, epoch, writer)
            writer.close()
    def __init__(self, config, model):
        """Store the configuration object and the model to train."""
        self.config = config
        self.model = model
| 3,039 | -2 | 238 |
2736277a53237f46e0acbb4ec1ae3029afa37982 | 2,623 | py | Python | oosc/oosc/absence/views.py | C4DLabOrg/da_api | 3d876576a189ce35c6b4b2f1c728f4b91e4b2ed0 | [
"MIT"
] | null | null | null | oosc/oosc/absence/views.py | C4DLabOrg/da_api | 3d876576a189ce35c6b4b2f1c728f4b91e4b2ed0 | [
"MIT"
] | null | null | null | oosc/oosc/absence/views.py | C4DLabOrg/da_api | 3d876576a189ce35c6b4b2f1c728f4b91e4b2ed0 | [
"MIT"
] | null | null | null | from django.shortcuts import render
# Create your views here.
from oosc.absence.models import Absence
from oosc.absence.serializers import AbsenceSerializer
from rest_framework import generics
from oosc.attendance.models import Attendance
from django.db.models import Count,Case,When,IntegerField,Q,Value,CharField,TextField
from datetime import datetime,timedelta
from oosc.absence.models import Absence
from oosc.students.models import Students
from oosc.config.settings import DROPOUT_MIN_COUNT
from oosc.schools.models import Schools
from rest_framework import generics
#
# class GetTheDropOuts()
#from oosc.absence.views import GenerateReport as d
#Calculating the droupouts weekly
| 43.716667 | 230 | 0.767061 | from django.shortcuts import render
# Create your views here.
from oosc.absence.models import Absence
from oosc.absence.serializers import AbsenceSerializer
from rest_framework import generics
from oosc.attendance.models import Attendance
from django.db.models import Count,Case,When,IntegerField,Q,Value,CharField,TextField
from datetime import datetime,timedelta
from oosc.absence.models import Absence
from oosc.students.models import Students
from oosc.config.settings import DROPOUT_MIN_COUNT
from oosc.schools.models import Schools
from rest_framework import generics
class GetEditAbsence(generics.RetrieveUpdateAPIView):
    """DRF view: GET retrieves, PUT/PATCH updates a single Absence record."""
    queryset = Absence.objects.all()
    serializer_class = AbsenceSerializer
#
# class GetTheDropOuts()
#from oosc.absence.views import GenerateReport as d
#Calculating the droupouts weekly
def d(school):
    """Open dropout (``Absence``) records for one school.

    Scans the last 14 days of attendance for ``school`` and bulk-creates an
    open ``Absence`` record for every student who was never marked present
    in that window, was marked absent at least ``DROPOUT_MIN_COUNT`` times,
    and does not already have an open absence record.

    NOTE(review): the one-letter public name is kept because
    ``GenerateReport`` (and possibly external callers) invoke it as ``d``.
    """
    now = datetime.now().date()
    then = now - timedelta(days=14)
    attend = Attendance.objects.filter(
        date__range=[then, now], student__class_id__school_id=school)
    # Per-student present/absent counts over the window.
    attendances = attend.order_by('student_id').values("student_id").annotate(
        present_count=Count(Case(When(status=1, then=1), output_field=IntegerField())),
        absent_count=Count(Case(When(status=0, then=1), output_field=IntegerField())))
    # Students with not a single "present" in the last two weeks.
    drops = attendances.filter(present_count=0)
    # Keep only those absent often enough to count as a dropout.
    # (Replaces the original `None`-placeholder list + O(n^2) remove loop.)
    student_ids = [row["student_id"] for row in drops
                   if row["absent_count"] >= DROPOUT_MIN_COUNT]
    # Exclude students that already have an open absence record.
    already_open = set(Absence.objects.filter(
        status=True, student_id__in=student_ids).values_list("student_id", flat=True))
    student_ids = [sid for sid in student_ids if sid not in already_open]
    students = Students.objects.filter(id__in=student_ids)
    # Create open absence records dating from the start of the window.
    absences = [Absence(student_id=s.id, _class=s.class_id, status=True, date_from=then)
                for s in students]
    Absence.objects.bulk_create(absences)
def GenerateReport():
    """Run the fortnightly dropout scan (``d``) for every school.

    NOTE: the CamelCase name is kept as-is for external callers/schedulers.
    """
    # Materialise the id list first, then process each school in turn,
    # echoing the id as a progress indicator.
    school_ids = []
    for school in Schools.objects.all():
        school_ids.append(school.id)
    for school_id in school_ids:
        d(school_id)
        print(school_id)
| 1,751 | 110 | 68 |
c4b0826e953f6f1ab95cbc97226284183a18fa5a | 11,750 | py | Python | python/taichi/examples/graph/stable_fluid_graph.py | DongqiShen/taichi | 974aab98f3a039f64335554286f447f64c2ea393 | [
"MIT"
] | null | null | null | python/taichi/examples/graph/stable_fluid_graph.py | DongqiShen/taichi | 974aab98f3a039f64335554286f447f64c2ea393 | [
"MIT"
] | null | null | null | python/taichi/examples/graph/stable_fluid_graph.py | DongqiShen/taichi | 974aab98f3a039f64335554286f447f64c2ea393 | [
"MIT"
] | null | null | null | # References:
# http://developer.download.nvidia.com/books/HTML/gpugems/gpugems_ch38.html
# https://github.com/PavelDoGreat/WebGL-Fluid-Simulation
# https://www.bilibili.com/video/BV1ZK411H7Hc?p=4
# https://github.com/ShaneFX/GAMES201/tree/master/HW01
import argparse
import numpy as np
import taichi as ti
ti.init(arch=ti.vulkan)
res = 512
dt = 0.03
p_jacobi_iters = 500 # 40 for a quicker but less accurate result
f_strength = 10000.0
curl_strength = 0
time_c = 2
maxfps = 60
dye_decay = 1 - 1 / (maxfps * time_c)
force_radius = res / 2.0
gravity = True
paused = False
@ti.func
@ti.func
@ti.func
# 3rd order Runge-Kutta
@ti.func
@ti.kernel
@ti.kernel
@ti.kernel
@ti.kernel
@ti.kernel
mouse_data_ti = ti.ndarray(ti.f32, shape=(8, ))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--baseline',
action='store_true')
args, unknown = parser.parse_known_args()
gui = ti.GUI('Stable Fluid', (res, res))
md_gen = MouseDataGen()
_velocities = ti.Vector.ndarray(2, float, shape=(res, res))
_new_velocities = ti.Vector.ndarray(2, float, shape=(res, res))
_velocity_divs = ti.ndarray(float, shape=(res, res))
velocity_curls = ti.ndarray(float, shape=(res, res))
_pressures = ti.ndarray(float, shape=(res, res))
_new_pressures = ti.ndarray(float, shape=(res, res))
_dye_buffer = ti.Vector.ndarray(3, float, shape=(res, res))
_new_dye_buffer = ti.Vector.ndarray(3, float, shape=(res, res))
if args.baseline:
velocities_pair = TexPair(_velocities, _new_velocities)
pressures_pair = TexPair(_pressures, _new_pressures)
dyes_pair = TexPair(_dye_buffer, _new_dye_buffer)
else:
print('running in graph mode')
velocities_pair_cur = ti.graph.Arg(ti.graph.ArgKind.NDARRAY,
'velocities_pair_cur',
ti.f32,
element_shape=(2, ))
velocities_pair_nxt = ti.graph.Arg(ti.graph.ArgKind.NDARRAY,
'velocities_pair_nxt',
ti.f32,
element_shape=(2, ))
dyes_pair_cur = ti.graph.Arg(ti.graph.ArgKind.NDARRAY,
'dyes_pair_cur',
ti.f32,
element_shape=(3, ))
dyes_pair_nxt = ti.graph.Arg(ti.graph.ArgKind.NDARRAY,
'dyes_pair_nxt',
ti.f32,
element_shape=(3, ))
pressures_pair_cur = ti.graph.Arg(ti.graph.ArgKind.NDARRAY,
'pressures_pair_cur', ti.f32)
pressures_pair_nxt = ti.graph.Arg(ti.graph.ArgKind.NDARRAY,
'pressures_pair_nxt', ti.f32)
velocity_divs = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, 'velocity_divs',
ti.f32)
mouse_data = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, 'mouse_data', ti.f32)
g1_builder = ti.graph.GraphBuilder()
g1_builder.dispatch(advect, velocities_pair_cur, velocities_pair_cur,
velocities_pair_nxt)
g1_builder.dispatch(advect, velocities_pair_cur, dyes_pair_cur, dyes_pair_nxt)
g1_builder.dispatch(apply_impulse, velocities_pair_nxt, dyes_pair_nxt, mouse_data)
g1_builder.dispatch(divergence, velocities_pair_nxt, velocity_divs)
# swap is unrolled in the loop so we only need p_jacobi_iters // 2 iterations.
for _ in range(p_jacobi_iters // 2):
g1_builder.dispatch(pressure_jacobi, pressures_pair_cur, pressures_pair_nxt,
velocity_divs)
g1_builder.dispatch(pressure_jacobi, pressures_pair_nxt, pressures_pair_cur,
velocity_divs)
g1_builder.dispatch(subtract_gradient, velocities_pair_nxt, pressures_pair_cur)
g1 = g1_builder.compile()
g2_builder = ti.graph.GraphBuilder()
g2_builder.dispatch(advect, velocities_pair_nxt, velocities_pair_nxt,
velocities_pair_cur)
g2_builder.dispatch(advect, velocities_pair_nxt, dyes_pair_nxt, dyes_pair_cur)
g2_builder.dispatch(apply_impulse, velocities_pair_cur, dyes_pair_cur, mouse_data)
g2_builder.dispatch(divergence, velocities_pair_cur, velocity_divs)
for _ in range(p_jacobi_iters // 2):
g2_builder.dispatch(pressure_jacobi, pressures_pair_cur, pressures_pair_nxt,
velocity_divs)
g2_builder.dispatch(pressure_jacobi, pressures_pair_nxt, pressures_pair_cur,
velocity_divs)
g2_builder.dispatch(subtract_gradient, velocities_pair_cur, pressures_pair_cur)
g2 = g2_builder.compile()
swap = True
while gui.running:
if gui.get_event(ti.GUI.PRESS):
e = gui.event
if e.key == ti.GUI.ESCAPE:
break
elif e.key == 'r':
paused = False
reset()
elif e.key == 's':
if curl_strength:
curl_strength = 0
else:
curl_strength = 7
elif e.key == 'g':
gravity = not gravity
elif e.key == 'p':
paused = not paused
if not paused:
_mouse_data = md_gen(gui)
if args.baseline:
step_orig(_mouse_data)
gui.set_image(dyes_pair.cur.to_numpy())
else:
invoke_args = {
'mouse_data': _mouse_data,
'velocities_pair_cur': _velocities,
'velocities_pair_nxt': _new_velocities,
'dyes_pair_cur': _dye_buffer,
'dyes_pair_nxt': _new_dye_buffer,
'pressures_pair_cur': _pressures,
'pressures_pair_nxt': _new_pressures,
'velocity_divs': _velocity_divs
}
if swap:
g1.run(invoke_args)
gui.set_image(_dye_buffer.to_numpy())
swap = False
else:
g2.run(invoke_args)
gui.set_image(_new_dye_buffer.to_numpy())
swap = True
gui.show()
| 34.457478 | 90 | 0.549362 | # References:
# http://developer.download.nvidia.com/books/HTML/gpugems/gpugems_ch38.html
# https://github.com/PavelDoGreat/WebGL-Fluid-Simulation
# https://www.bilibili.com/video/BV1ZK411H7Hc?p=4
# https://github.com/ShaneFX/GAMES201/tree/master/HW01
import argparse
import numpy as np
import taichi as ti
ti.init(arch=ti.vulkan)
# --- simulation parameters ---
res = 512            # grid resolution (res x res cells)
dt = 0.03            # time step per frame
p_jacobi_iters = 500  # 40 for a quicker but less accurate result
f_strength = 10000.0  # mouse-drag force scale (see apply_impulse)
curl_strength = 0     # toggled to 7 with the 's' key in the main loop
time_c = 2
maxfps = 60
dye_decay = 1 - 1 / (maxfps * time_c)  # per-step dye fade factor
force_radius = res / 2.0  # Gaussian falloff radius of the mouse force
gravity = True        # toggled with 'g' in the main loop
paused = False        # toggled with 'p' in the main loop
class TexPair:
    """Double buffer: ``cur`` is read this step, ``nxt`` is written, then
    the two are exchanged with :meth:`swap` (ping-pong pattern)."""

    def __init__(self, cur, nxt):
        self.cur = cur
        self.nxt = nxt

    def swap(self):
        """Exchange the current and next buffers."""
        previous_cur = self.cur
        self.cur = self.nxt
        self.nxt = previous_cur
@ti.func
def sample(qf: ti.template(), u, v):
    # Nearest-texel lookup with border clamping: indices outside
    # [0, res) are pinned to the edge so neighbours never read OOB.
    I = ti.Vector([int(u), int(v)])
    I = max(0, min(res - 1, I))
    return qf[I]
@ti.func
def lerp(vl, vr, frac):
    # Linear interpolation between vl and vr.
    # frac: [0.0, 1.0]
    return vl + frac * (vr - vl)
@ti.func
def bilerp(vf: ti.template(), p):
    # Bilinear interpolation of field vf at continuous position p,
    # with a half-texel offset so samples sit at cell centres.
    u, v = p
    s, t = u - 0.5, v - 0.5
    # floor
    iu, iv = ti.floor(s), ti.floor(t)
    # fract
    fu, fv = s - iu, t - iv
    a = sample(vf, iu, iv)
    b = sample(vf, iu + 1, iv)
    c = sample(vf, iu, iv + 1)
    d = sample(vf, iu + 1, iv + 1)
    return lerp(lerp(a, b, fu), lerp(c, d, fu), fv)
# 3rd order Runge-Kutta
@ti.func
def backtrace(vf: ti.template(), p, dt: ti.template()):
    # Trace position p backwards through velocity field vf by one step dt
    # (semi-Lagrangian advection origin), using RK3 velocity samples.
    v1 = bilerp(vf, p)
    p1 = p - 0.5 * dt * v1
    v2 = bilerp(vf, p1)
    p2 = p - 0.75 * dt * v2
    v3 = bilerp(vf, p2)
    p -= dt * ((2 / 9) * v1 + (1 / 3) * v2 + (4 / 9) * v3)
    return p
@ti.kernel
def advect(vf: ti.types.ndarray(field_dim=2),
           qf: ti.types.ndarray(field_dim=2),
           new_qf: ti.types.ndarray(field_dim=2)):
    # Semi-Lagrangian advection of quantity qf through velocity vf;
    # the result also fades by dye_decay each step.
    for i, j in vf:
        p = ti.Vector([i, j]) + 0.5
        p = backtrace(vf, p, dt)
        new_qf[i, j] = bilerp(qf, p) * dye_decay
@ti.kernel
def apply_impulse(vf: ti.types.ndarray(field_dim=2),
                  dyef: ti.types.ndarray(field_dim=2),
                  imp_data: ti.types.ndarray(field_dim=1)):
    # Add mouse-drag momentum plus buoyancy/gravity (scaled by local dye
    # brightness) to vf, and inject dye around the cursor while dragging.
    # imp_data layout (see MouseDataGen): [0:2] drag dir, [2:4] cursor xy,
    # [4:7] dye colour.
    g_dir = -ti.Vector([0, 9.8]) * 300
    for i, j in vf:
        omx, omy = imp_data[2], imp_data[3]
        mdir = ti.Vector([imp_data[0], imp_data[1]])
        dx, dy = (i + 0.5 - omx), (j + 0.5 - omy)
        d2 = dx * dx + dy * dy
        # dv = F * dt
        factor = ti.exp(-d2 / force_radius)
        dc = dyef[i, j]
        a = dc.norm()
        momentum = (mdir * f_strength * factor + g_dir * a / (1 + a)) * dt
        v = vf[i, j]
        vf[i, j] = v + momentum
        # add dye
        if mdir.norm() > 0.5:
            dc += ti.exp(-d2 * (4 / (res / 15)**2)) * ti.Vector(
                [imp_data[4], imp_data[5], imp_data[6]])
        dyef[i, j] = dc
@ti.kernel
def divergence(vf: ti.types.ndarray(field_dim=2),
               velocity_divs: ti.types.ndarray(field_dim=2)):
    # Central-difference divergence of vf; at domain borders the missing
    # neighbour is mirrored with flipped sign (no-through-flow walls).
    for i, j in vf:
        vl = sample(vf, i - 1, j)
        vr = sample(vf, i + 1, j)
        vb = sample(vf, i, j - 1)
        vt = sample(vf, i, j + 1)
        vc = sample(vf, i, j)
        if i == 0:
            vl.x = -vc.x
        if i == res - 1:
            vr.x = -vc.x
        if j == 0:
            vb.y = -vc.y
        if j == res - 1:
            vt.y = -vc.y
        velocity_divs[i, j] = (vr.x - vl.x + vt.y - vb.y) * 0.5
@ti.kernel
def pressure_jacobi(pf: ti.types.ndarray(field_dim=2),
                    new_pf: ti.types.ndarray(field_dim=2),
                    velocity_divs: ti.types.ndarray(field_dim=2)):
    # One Jacobi relaxation step of the pressure Poisson equation:
    # new p = (sum of 4 neighbours - divergence) / 4.
    for i, j in pf:
        pl = sample(pf, i - 1, j)
        pr = sample(pf, i + 1, j)
        pb = sample(pf, i, j - 1)
        pt = sample(pf, i, j + 1)
        div = velocity_divs[i, j]
        new_pf[i, j] = (pl + pr + pb + pt - div) * 0.25
@ti.kernel
def subtract_gradient(vf: ti.types.ndarray(field_dim=2),
                      pf: ti.types.ndarray(field_dim=2)):
    # Projection step: subtract the pressure gradient from the velocity
    # field, making it (approximately) divergence-free.
    for i, j in vf:
        pl = sample(pf, i - 1, j)
        pr = sample(pf, i + 1, j)
        pb = sample(pf, i, j - 1)
        pt = sample(pf, i, j + 1)
        vf[i, j] -= 0.5 * ti.Vector([pr - pl, pt - pb])
def solve_pressure_jacobi():
    """Run p_jacobi_iters Jacobi sweeps, ping-ponging the pressure buffers.

    Uses the module-level ``pressures_pair`` / ``_velocity_divs`` created in
    the ``__main__`` (baseline) path.
    """
    for _ in range(p_jacobi_iters):
        pressure_jacobi(pressures_pair.cur, pressures_pair.nxt, _velocity_divs)
        pressures_pair.swap()
def step_orig(mouse_data):
    """One full simulation step in baseline (eager, non-graph) mode:
    advect velocity and dye, apply the mouse impulse, then pressure-project.

    Relies on the ``*_pair`` globals created in the ``__main__`` baseline path.
    """
    advect(velocities_pair.cur, velocities_pair.cur, velocities_pair.nxt)
    advect(velocities_pair.cur, dyes_pair.cur, dyes_pair.nxt)
    velocities_pair.swap()
    dyes_pair.swap()
    apply_impulse(velocities_pair.cur, dyes_pair.cur, mouse_data)
    divergence(velocities_pair.cur, _velocity_divs)
    solve_pressure_jacobi()
    subtract_gradient(velocities_pair.cur, pressures_pair.cur)
mouse_data_ti = ti.ndarray(ti.f32, shape=(8, ))
class MouseDataGen(object):
    """Turns GUI mouse state into the 8-float impulse buffer consumed by
    ``apply_impulse`` (drag direction, cursor position, dye colour)."""
    def __init__(self):
        # Previous cursor position / dye colour; None while not dragging.
        self.prev_mouse = None
        self.prev_color = None
    def __call__(self, gui):
        # [0:2]: normalized delta direction
        # [2:4]: current mouse xy
        # [4:7]: color
        mouse_data = np.zeros(8, dtype=np.float32)
        if gui.is_pressed(ti.GUI.LMB):
            mxy = np.array(gui.get_cursor_pos(), dtype=np.float32) * res
            if self.prev_mouse is None:
                # First frame of a drag: pick a random colour, no force yet.
                self.prev_mouse = mxy
                # Set lower bound to 0.3 to prevent too dark colors
                self.prev_color = (np.random.rand(3) * 0.7) + 0.3
            else:
                mdir = mxy - self.prev_mouse
                mdir = mdir / (np.linalg.norm(mdir) + 1e-5)
                mouse_data[0], mouse_data[1] = mdir[0], mdir[1]
                mouse_data[2], mouse_data[3] = mxy[0], mxy[1]
                mouse_data[4:7] = self.prev_color
                self.prev_mouse = mxy
        else:
            # Button released: reset so the next drag re-rolls the colour.
            self.prev_mouse = None
            self.prev_color = None
        mouse_data_ti.from_numpy(mouse_data)
        return mouse_data_ti
def reset():
    """Zero the velocity, pressure and dye fields (bound to the 'r' key).

    Uses the ``*_pair`` globals created in the ``__main__`` baseline path.
    """
    velocities_pair.cur.fill(0)
    pressures_pair.cur.fill(0)
    dyes_pair.cur.fill(0)
if __name__ == "__main__":
    # --baseline runs the eager kernels per frame; otherwise two compiled
    # compute graphs (g1/g2) are built that ping-pong the buffers.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--baseline',
        action='store_true')
    args, unknown = parser.parse_known_args()
    gui = ti.GUI('Stable Fluid', (res, res))
    md_gen = MouseDataGen()
    # Simulation buffers (current/next pairs for ping-ponging).
    _velocities = ti.Vector.ndarray(2, float, shape=(res, res))
    _new_velocities = ti.Vector.ndarray(2, float, shape=(res, res))
    _velocity_divs = ti.ndarray(float, shape=(res, res))
    velocity_curls = ti.ndarray(float, shape=(res, res))
    _pressures = ti.ndarray(float, shape=(res, res))
    _new_pressures = ti.ndarray(float, shape=(res, res))
    _dye_buffer = ti.Vector.ndarray(3, float, shape=(res, res))
    _new_dye_buffer = ti.Vector.ndarray(3, float, shape=(res, res))
    if args.baseline:
        velocities_pair = TexPair(_velocities, _new_velocities)
        pressures_pair = TexPair(_pressures, _new_pressures)
        dyes_pair = TexPair(_dye_buffer, _new_dye_buffer)
    else:
        print('running in graph mode')
        # Symbolic graph arguments, bound to concrete ndarrays at run time.
        velocities_pair_cur = ti.graph.Arg(ti.graph.ArgKind.NDARRAY,
                                           'velocities_pair_cur',
                                           ti.f32,
                                           element_shape=(2, ))
        velocities_pair_nxt = ti.graph.Arg(ti.graph.ArgKind.NDARRAY,
                                           'velocities_pair_nxt',
                                           ti.f32,
                                           element_shape=(2, ))
        dyes_pair_cur = ti.graph.Arg(ti.graph.ArgKind.NDARRAY,
                                     'dyes_pair_cur',
                                     ti.f32,
                                     element_shape=(3, ))
        dyes_pair_nxt = ti.graph.Arg(ti.graph.ArgKind.NDARRAY,
                                     'dyes_pair_nxt',
                                     ti.f32,
                                     element_shape=(3, ))
        pressures_pair_cur = ti.graph.Arg(ti.graph.ArgKind.NDARRAY,
                                          'pressures_pair_cur', ti.f32)
        pressures_pair_nxt = ti.graph.Arg(ti.graph.ArgKind.NDARRAY,
                                          'pressures_pair_nxt', ti.f32)
        velocity_divs = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, 'velocity_divs',
                                     ti.f32)
        mouse_data = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, 'mouse_data', ti.f32)
        # g1: cur -> nxt step; g2 is the mirror image (nxt -> cur), so the
        # per-frame buffer swap is baked into which graph runs.
        g1_builder = ti.graph.GraphBuilder()
        g1_builder.dispatch(advect, velocities_pair_cur, velocities_pair_cur,
                            velocities_pair_nxt)
        g1_builder.dispatch(advect, velocities_pair_cur, dyes_pair_cur, dyes_pair_nxt)
        g1_builder.dispatch(apply_impulse, velocities_pair_nxt, dyes_pair_nxt, mouse_data)
        g1_builder.dispatch(divergence, velocities_pair_nxt, velocity_divs)
        # swap is unrolled in the loop so we only need p_jacobi_iters // 2 iterations.
        for _ in range(p_jacobi_iters // 2):
            g1_builder.dispatch(pressure_jacobi, pressures_pair_cur, pressures_pair_nxt,
                                velocity_divs)
            g1_builder.dispatch(pressure_jacobi, pressures_pair_nxt, pressures_pair_cur,
                                velocity_divs)
        g1_builder.dispatch(subtract_gradient, velocities_pair_nxt, pressures_pair_cur)
        g1 = g1_builder.compile()
        g2_builder = ti.graph.GraphBuilder()
        g2_builder.dispatch(advect, velocities_pair_nxt, velocities_pair_nxt,
                            velocities_pair_cur)
        g2_builder.dispatch(advect, velocities_pair_nxt, dyes_pair_nxt, dyes_pair_cur)
        g2_builder.dispatch(apply_impulse, velocities_pair_cur, dyes_pair_cur, mouse_data)
        g2_builder.dispatch(divergence, velocities_pair_cur, velocity_divs)
        for _ in range(p_jacobi_iters // 2):
            g2_builder.dispatch(pressure_jacobi, pressures_pair_cur, pressures_pair_nxt,
                                velocity_divs)
            g2_builder.dispatch(pressure_jacobi, pressures_pair_nxt, pressures_pair_cur,
                                velocity_divs)
        g2_builder.dispatch(subtract_gradient, velocities_pair_cur, pressures_pair_cur)
        g2 = g2_builder.compile()
    swap = True
    # Main loop.  Keys: ESC quit, r reset, s toggle curl strength,
    # g toggle gravity flag, p pause.
    while gui.running:
        if gui.get_event(ti.GUI.PRESS):
            e = gui.event
            if e.key == ti.GUI.ESCAPE:
                break
            elif e.key == 'r':
                paused = False
                reset()
            elif e.key == 's':
                if curl_strength:
                    curl_strength = 0
                else:
                    curl_strength = 7
            elif e.key == 'g':
                gravity = not gravity
            elif e.key == 'p':
                paused = not paused
        if not paused:
            _mouse_data = md_gen(gui)
            if args.baseline:
                step_orig(_mouse_data)
                gui.set_image(dyes_pair.cur.to_numpy())
            else:
                invoke_args = {
                    'mouse_data': _mouse_data,
                    'velocities_pair_cur': _velocities,
                    'velocities_pair_nxt': _new_velocities,
                    'dyes_pair_cur': _dye_buffer,
                    'dyes_pair_nxt': _new_dye_buffer,
                    'pressures_pair_cur': _pressures,
                    'pressures_pair_nxt': _new_pressures,
                    'velocity_divs': _velocity_divs
                }
                # Alternate g1/g2 so the dye written this frame is displayed.
                if swap:
                    g1.run(invoke_args)
                    gui.set_image(_dye_buffer.to_numpy())
                    swap = False
                else:
                    g2.run(invoke_args)
                    gui.set_image(_new_dye_buffer.to_numpy())
                    swap = True
        gui.show()
| 4,792 | -1 | 419 |
3e568985abf2f3cfda71ff8ca8fe57a1de05b7f3 | 7,658 | py | Python | data_functions.py | Cadarn/agn_spectra_app | 11041ea2b4607d6d0ed98856f0ddada2bdb738fb | [
"MIT"
] | 1 | 2021-03-03T12:02:17.000Z | 2021-03-03T12:02:17.000Z | data_functions.py | Cadarn/agn_spectra_app | 11041ea2b4607d6d0ed98856f0ddada2bdb738fb | [
"MIT"
] | 2 | 2020-10-31T02:39:53.000Z | 2020-10-31T02:41:31.000Z | data_functions.py | Cadarn/agn_spectra_app | 11041ea2b4607d6d0ed98856f0ddada2bdb738fb | [
"MIT"
] | null | null | null | """
AGN spectral model generation functions
Copyright: Adam Hill (2020)
"""
import numpy as np
import pandas as pd
from tqdm import tqdm
from scipy.interpolate import RegularGridInterpolator
def merge_dict_dfs(d, common_column):
    """
    Left-merge every DataFrame in ``d`` onto ``d[0]`` using ``common_column``
    as the join key, then report and drop rows duplicated on that key
    (keeping the last occurrence).

    NOTE: Each table must contain ``common_column`` to match on.
    BUG FIX: the original merged and de-duplicated on a hard-coded
    "E_keV" column, ignoring the ``common_column`` argument.
    """
    d_copy = d.copy()
    merged = d_copy[0]  # raises KeyError if the base frame is missing
    if 0 in d_copy:
        del d_copy[0]
    else:
        # Unreachable in practice: d_copy[0] above would already have raised.
        print("No 0 dataframe found... This shouldn't have happened.")
    with tqdm(total=len(d_copy), position=0, desc="Merging tables") as pbar:
        for name, df in d_copy.items():
            merged = pd.merge(merged, df, how="left", on=common_column)
            pbar.update(1)
    print(merged.shape)
    dupe_mask = merged.duplicated(subset=[common_column], keep="last")
    dupes = merged[dupe_mask]
    print(dupes.columns)
    print(str(len(dupes)) + " duplicates")
    print("Now removing duplicates...")
    merged = merged[~dupe_mask]
    for c in merged.columns:
        print(c)
    return merged
def sed(PhoIndex, Ecut, logNHtor, CFtor, thInc, A_Fe, z, factor):
    """Stitch the per-parameter spectrum columns of ``df_master`` into grids.

    Each argument is one component of a sparse
    ``np.meshgrid(..., indexing='ij', sparse=True)`` over the model
    parameter ranges.

    Returns:
        tuple: ``(trans, repro, scatt, E_keV)`` -- three 9-D arrays of
        transmitted / reprocessed / scattered flux whose axes are ordered
        (PhoIndex, Ecut, A_Fe, factor, z, logNHtor, CFtor, thInc, energy),
        matching the axis order later handed to RegularGridInterpolator,
        plus the 40500-point energy grid from ``df_master``.
    """
    PhoIndex_str = np.array(["p3=%.5f" % par_val for par_val in PhoIndex.ravel()])
    Ecut_str = np.array(["p4=%.5f" % par_val for par_val in Ecut.ravel()])
    logNHtor_str = np.array(["p5=%.5f" % par_val for par_val in logNHtor.ravel()])
    CFtor_str = np.array(["p6=%.5f" % par_val for par_val in CFtor.ravel()])
    thInc_str = np.array(["p7=%.5f" % par_val for par_val in thInc.ravel()])
    A_Fe_str = np.array(["p8=%.5f" % par_val for par_val in A_Fe.ravel()])
    z_str = np.array(["p9=%.5f" % par_val for par_val in z.ravel()])
    factor_str = np.array(["p17=%.5f" % par_val for par_val in factor.ravel()])
    # Grid shape follows the nesting order of the loops below (the axis
    # order the interpolators expect), with the energy grid trailing.
    # The original called len() with eight arguments due to misplaced
    # parentheses (TypeError) and listed the axes in signature order,
    # which disagreed with the loop indices.
    grid_shape = (len(PhoIndex_str), len(Ecut_str), len(A_Fe_str), len(factor_str),
                  len(z_str), len(logNHtor_str), len(CFtor_str), len(thInc_str), 40500)
    trans = np.empty(grid_shape)
    repro = np.empty(grid_shape)
    scatt = np.empty(grid_shape)
    ## note must stitch together in increasing order of length of parameter arrays
    for i, PhoIndex_val in enumerate(PhoIndex_str):
        for j, Ecut_val in enumerate(Ecut_str):
            for k, A_Fe_val in enumerate(A_Fe_str):
                for l, factor_val in enumerate(factor_str):
                    for m, z_val in enumerate(z_str):
                        for n, logNHtor_val in enumerate(logNHtor_str):
                            for o, CFtor_val in enumerate(CFtor_str):
                                for p, thInc_val in enumerate(thInc_str):
                                    df_column = "%(PhoIndex_val)s_%(Ecut_val)s_%(logNHtor_val)s_%(CFtor_val)s_%(thInc_val)s_%(A_Fe_val)s_%(z_val)s_%(factor_val)s" %locals()
                                    trans[i, j, k, l, m, n, o, p, :] = df_master["TRANS_" + df_column].values
                                    repro[i, j, k, l, m, n, o, p, :] = df_master["REPR_" + df_column].values
                                    scatt[i, j, k, l, m, n, o, p, :] = df_master["SCATT_" + df_column].values
    # `temp` was undefined in the original; the energy grid lives in df_master.
    return trans, repro, scatt, df_master["E_keV"].values
## load in the hefty dataset
## note -- need to figure out a more efficient way of storing this data
## Xspec uses a fits table, but unsure how we can generate the Python RegularGridInterpolator from that
import glob  # was missing from the file's imports: glob.glob below raised NameError

df_dict = {}
for a, csvfile in enumerate(glob.glob("./borus_stepped/borus_step*.csv")):
    df_dict[a] = pd.read_csv(csvfile)
df_master = merge_dict_dfs(df_dict, "E_keV")
# Parameter grids for the model; axis lengths must match the columns
# present in df_master.
parLen = 5
PhoIndex = np.linspace(1.45, 2.55, 3)
Ecut = np.logspace(2., 3., 3)
logNHtor = np.linspace(22., 25.5, parLen)
CFtor = np.linspace(0.15, 0.95, parLen)
thInc = np.linspace(20., 85, parLen)
A_Fe = np.logspace(-1., 1., 3)
z = np.logspace(-3., 0., 4)
factor = np.logspace(-5., -1., 3)
params = np.meshgrid(PhoIndex, Ecut, logNHtor, CFtor, thInc, A_Fe, z, factor, indexing='ij', sparse=True)
trans_sed, repro_sed, scatt_sed, E_keV = sed(*params)
# `SEDs` was never defined; report the shape of a grid actually built.
print(np.shape(trans_sed))
# Interpolator axis order must match the grid axis order produced by sed().
trans_interp = RegularGridInterpolator((PhoIndex, Ecut, A_Fe, factor, z, logNHtor, CFtor, thInc), trans_sed)
repro_interp = RegularGridInterpolator((PhoIndex, Ecut, A_Fe, factor, z, logNHtor, CFtor, thInc), repro_sed)
scatt_interp = RegularGridInterpolator((PhoIndex, Ecut, A_Fe, factor, z, logNHtor, CFtor, thInc), scatt_sed)
def generate_spectra(PhoIndex, Ecut, A_Fe, factor, z, logNHtor, CFtor, thInc):
    """Interpolate the model grids at one parameter point and return the spectra.

    Args:
        PhoIndex: float, powerlaw slope of the intrinsic spectrum (1.45--2.55)
        Ecut: float, high-energy exponentional cut-off of the intrinsic powerlaw (100.--1000.)
        A_Fe: float, abundance of iron in the obscurer (0.1--10.)
        factor: float, percentage of scattered emission in the warm mirror (1.e-5--1.e-1)
        z: float, redshift of the source (1.e-3--1.)
        logNHtor: float, logarithm of the column density of the obscurer (22.--25.5)
        CFtor: float, covering factor of the obscurer (0.15--0.95)
        thInc: float, inclination angle of the obscurer (20.--85.), note: edge-on = 90.

    Returns:
        dataframe: a dataframe with columns for the energy in keV, the transmitted X-ray flux,
        the reprocessed X-ray flux, the Thomson-scattered X-ray flux, and the total X-ray flux
    """
    point = [PhoIndex, Ecut, A_Fe, factor, z, logNHtor, CFtor, thInc]
    # The interpolators return shape (1, n_energies) for a single query
    # point; [0] drops the query axis so each column matches E_keV.
    spectral_df = pd.DataFrame(
        {
            "Energy": E_keV,
            "Transmitted": trans_interp(point)[0],
            # Was trans_interp: copy-paste bug that left repro_interp unused.
            "Reprocessed": repro_interp(point)[0],
            "Scattered": scatt_interp(point)[0],
        }
    )
    # Row-wise total; without axis=1 the per-column sums misalign with the
    # row index and the "Total" column fills with NaN.
    spectral_df.loc[:, "Total"] = spectral_df[["Transmitted", "Reprocessed", "Scattered"]].sum(axis=1)
    return spectral_df
# def generate_spectra(angle1, angle2, logNH):
# """
# This is a place holder for a proper function
# Args:
# angle1: float, inclination angle in degrees (0-90) of the AGN view
# angle2: float, torus opening angle in degrees (0-90) of the AGN
# logNH: float, logarithm of the obscuring column density within the AGN environment
# Returns:
# dataframe: a dataframe of with columns for the energy in keV, the transmitted X-ray flux,
# the reprocessed X-ray flux, and the total X-ray flux
# """
# _degs_to_rads = lambda x: np.pi * x / 180.0
# degrees = np.arange(1, 1001, 1)
# radians = np.array(list(map(_degs_to_rads, degrees)))
# linear_component = radians * (logNH / 9.657) + 2
# transmitted_flux = (angle1 / 5) * np.cos(
# _degs_to_rads(angle1) + radians * (logNH / 1.5)
# ) + linear_component
# reprocessed_flux = (angle2 / 10) * np.sin(
# _degs_to_rads(angle2) + radians * (logNH / 5.0)
# ) + 5.0
# total_flux = transmitted_flux + reprocessed_flux
# spectral_df = pd.DataFrame(
# {
# "Energy": degrees,
# "Transmitted": transmitted_flux,
# "Reprocessed": reprocessed_flux,
# "Summed": total_flux,
# }
# )
# return spectral_df
| 44.523256 | 172 | 0.629799 | """
AGN spectral model generation functions
Copyright: Adam Hill (2020)
"""
import numpy as np
import pandas as pd
from tqdm import tqdm
from scipy.interpolate import RegularGridInterpolator
def merge_dict_dfs(d, common_column):
"""
Main purpose:
- merges all the dataframes collected in the
d dictionary
- Prints the duplicates in the merged dataframe and removes them
NOTE: Each table must have the common_column to match on
"""
d_copy = d.copy()
merged = d_copy[0]
if 0 in d_copy:
del d_copy[0]
else:
print("No 0 dataframe found... This shouldn't have happened.")
with tqdm(total = len(d_copy), position = 0, desc = "Merging tables") as pbar:
for name, df in d_copy.items():
# print(name)
# print(merged.shape)
merged = pd.merge(merged, df, how = "left", on = "E_keV")
pbar.update(1)
print(merged.shape)
dupe_mask = merged.duplicated(subset = ["E_keV"], keep = "last")
dupes = merged[dupe_mask]
print(dupes.columns)
print(str(len(dupes)) + " duplicates")
print("Now removing duplicates...")
merged = merged[~dupe_mask]
for c in merged.columns:
print(c)
return merged
def sed(PhoIndex, Ecut, logNHtor, CFtor, thInc, A_Fe, z, factor):
"""
Need to manually stitch together the spectra
"""
PhoIndex_str = np.array(["p3=%.5f" %par_val for par_val in PhoIndex.ravel()])
Ecut_str = np.array(["p4=%.5f" %par_val for par_val in Ecut.ravel()])
logNHtor_str = np.array(["p5=%.5f" %par_val for par_val in logNHtor.ravel()])
CFtor_str = np.array(["p6=%.5f" %par_val for par_val in CFtor.ravel()])
thInc_str = np.array(["p7=%.5f" %par_val for par_val in thInc.ravel()])
A_Fe_str = np.array(["p8=%.5f" %par_val for par_val in A_Fe.ravel()])
z_str = np.array(["p9=%.5f" %par_val for par_val in z.ravel()])
factor_str = np.array(["p17=%.5f" %par_val for par_val in factor.ravel()])
trans = np.empty(shape = [len(PhoIndex_str, len(Ecut_str), len(logNHtor_str), len(CFtor_str), len(thInc_str), len(A_Fe_str), len(z_str), len(factor_str)), 40500])
repro = np.empty(shape = [len(PhoIndex_str, len(Ecut_str), len(logNHtor_str), len(CFtor_str), len(thInc_str), len(A_Fe_str), len(z_str), len(factor_str)), 40500])
scatt = np.empty(shape = [len(PhoIndex_str, len(Ecut_str), len(logNHtor_str), len(CFtor_str), len(thInc_str), len(A_Fe_str), len(z_str), len(factor_str)), 40500])
## note must stitch together in increasing order of length of parameter arrays
for i, PhoIndex_val in enumerate(PhoIndex_str):
for j, Ecut_val in enumerate(Ecut_str):
for k, A_Fe_val in enumerate(A_Fe_str):
for l, factor_val in enumerate(factor_str):
for m, z_val in enumerate(z_str):
for n, logNHtor_val in enumerate(logNHtor_str):
for o, CFtor_val in enumerate(CFtor_str):
for p, thInc_val in enumerate(thInc_str):
df_column = "%(PhoIndex_val)s_%(Ecut_val)s_%(logNHtor_val)s_%(CFtor_val)s_%(thInc_val)s_%(A_Fe_val)s_%(z_val)s_%(factor_val)s" %locals()
trans[i, j, k, l, m, n, o, p, :] = df_master["TRANS_" + df_column].values
repro[i, j, k, l, m, n, o, p, :] = df_master["REPR_" + df_column].values
scatt[i, j, k, l, m, n, o, p, :] = df_master["SCATT_" + df_column].values
return trans, repro, scatt, temp["E_keV"].values
## load in the hefty dataset
## note -- need to figure out a more efficient way of storing this data
## Xspec uses a fits table, but unsure how we can generate the Python RegularGridInterpolator from that
df_dict = {}
for a, csvfile in enumerate(glob.glob("./borus_stepped/borus_step*.csv")):
df_dict[a] = pd.read_csv(csvfile)
df_master = merge_dict_dfs(df_dict, "E_keV")
parLen = 5
PhoIndex = np.linspace(1.45, 2.55, 3)
Ecut = np.logspace(2., 3., 3)
logNHtor = np.linspace(22., 25.5, parLen)
CFtor = np.linspace(0.15, 0.95, parLen)
thInc = np.linspace(20., 85, parLen)
A_Fe = np.logspace(-1., 1., 3)
z = np.logspace(-3., 0., 4)
factor = np.logspace(-5., -1., 3)
params = np.meshgrid(PhoIndex, Ecut, logNHtor, CFtor, thInc, A_Fe, z, factor, indexing='ij', sparse=True)
trans_sed, repro_sed, scatt_sed, E_keV = sed(*params)
print(np.shape(SEDs))
trans_interp = RegularGridInterpolator((PhoIndex, Ecut, A_Fe, factor, z, logNHtor, CFtor, thInc), trans_sed)
repro_interp = RegularGridInterpolator((PhoIndex, Ecut, A_Fe, factor, z, logNHtor, CFtor, thInc), repro_sed)
scatt_interp = RegularGridInterpolator((PhoIndex, Ecut, A_Fe, factor, z, logNHtor, CFtor, thInc), scatt_sed)
def generate_spectra(PhoIndex, Ecut, A_Fe, factor, z, logNHtor, CFtor, thInc):
"""
This is a place holder for a proper function
Args:
PhoIndex: float, powerlaw slope of the intrinsic spectrum (1.45--2.55)
Ecut: float, high-energy exponentional cut-off of the intrinsic powerlaw (100.--1000.)
A_Fe: float, abundance of iron in the obscurer (0.1--10.)
factor: float, percentage of scattered emission in the warm mirror (1.e-5--1.e-1)
z: float, redshift of the source (1.e-3--1.)
logNHtor: float, logarithm of the column density of the obscurer (22.--22.5)
CFtor: float, covering factor of the obscurer (0.15--0.95)
thInc: float, inclination angle of the obscurer (20.--85.), note: edge-on = 90.
Returns:
dataframe: a dataframe with columns for the energy in keV, the transmitted X-ray flux,
the reprocessed X-ray flux, the Thomson-scattered X-ray flux, and the total X-ray flux
"""
spectral_df = pd.DataFrame(
{
"Energy": E_keV,
"Transmitted": trans_interp([PhoIndex, Ecut, A_Fe, factor, z, logNHtor, CFtor, thInc]),
"Reprocessed": trans_interp([PhoIndex, Ecut, A_Fe, factor, z, logNHtor, CFtor, thInc]),
"Scattered": scatt_interp([PhoIndex, Ecut, A_Fe, factor, z, logNHtor, CFtor, thInc]),
}
)
spectral_df.loc[:, "Total"] = spectral_df[["Transmitted", "Reprocessed", "Scattered"]].sum()
return spectral_df
# def generate_spectra(angle1, angle2, logNH):
# """
# This is a place holder for a proper function
# Args:
# angle1: float, inclination angle in degrees (0-90) of the AGN view
# angle2: float, torus opening angle in degrees (0-90) of the AGN
# logNH: float, logarithm of the obscuring column density within the AGN environment
# Returns:
# dataframe: a dataframe of with columns for the energy in keV, the transmitted X-ray flux,
# the reprocessed X-ray flux, and the total X-ray flux
# """
# _degs_to_rads = lambda x: np.pi * x / 180.0
# degrees = np.arange(1, 1001, 1)
# radians = np.array(list(map(_degs_to_rads, degrees)))
# linear_component = radians * (logNH / 9.657) + 2
# transmitted_flux = (angle1 / 5) * np.cos(
# _degs_to_rads(angle1) + radians * (logNH / 1.5)
# ) + linear_component
# reprocessed_flux = (angle2 / 10) * np.sin(
# _degs_to_rads(angle2) + radians * (logNH / 5.0)
# ) + 5.0
# total_flux = transmitted_flux + reprocessed_flux
# spectral_df = pd.DataFrame(
# {
# "Energy": degrees,
# "Transmitted": transmitted_flux,
# "Reprocessed": reprocessed_flux,
# "Summed": total_flux,
# }
# )
# return spectral_df
| 0 | 0 | 0 |
c4c400f5b32fdbe5ce4be7931734db96f1825fdb | 1,709 | py | Python | data/train/python/c4c400f5b32fdbe5ce4be7931734db96f1825fdburls.py | harshp8l/deep-learning-lang-detection | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | [
"MIT"
] | 84 | 2017-10-25T15:49:21.000Z | 2021-11-28T21:25:54.000Z | data/train/python/c4c400f5b32fdbe5ce4be7931734db96f1825fdburls.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 5 | 2018-03-29T11:50:46.000Z | 2021-04-26T13:33:18.000Z | data/train/python/c4c400f5b32fdbe5ce4be7931734db96f1825fdburls.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 24 | 2017-11-22T08:31:00.000Z | 2022-03-27T01:22:31.000Z | from django.conf.urls import patterns, include, url
from django.contrib import admin
from tastypie.api import Api
from centros.api import *
from clientes.api import *
from compras.api import *
from contactos.api import *
from cotizaciones.api import *
from equipos.api import *
from familias.api import *
from historiales.api import *
from lugares.api import *
from productos.api import *
from proveedores.api import *
from solicitudes.api import *
from tiposequipos.api import *
from usuarios.api import *
from usuarios.views import *
# Tastypie API namespace: every resource is registered under /api/v1/ in
# declaration order.
v1_api = Api(api_name="v1")

_V1_RESOURCES = (
    CentroResource,
    ClienteResource,
    CompraResource,
    ContactoResource,
    CotizacionResource,
    EquipoResource,
    FamiliaResource,
    HistorialResource,
    LugarResource,
    ProductoResource,
    NombreProductoResource,
    FotoProductoResource,
    UnidadProductoResource,
    PrecioMesProductoResource,
    ProveedorResource,
    SolicitudResource,
    ProductoSolicitudResource,
    TipoEquipoResource,
    UsuarioResource,
    ConsolidadorSolicitanteResource,
    SolicitanteCodificadorResource,
    AprobadorSolicitudesSolicitanteResource,
    AprobadorSolicitudesCompradorResource,
    CompradorAprobadorComprasResource,
    AprobadorComprasAlmacenistaResource,
)

for _resource_cls in _V1_RESOURCES:
    v1_api.register(_resource_cls())
urlpatterns = patterns("",
(r"^api/", include(v1_api.urls)),
(r"^admin/", include(admin.site.urls))
) | 32.865385 | 58 | 0.818607 | from django.conf.urls import patterns, include, url
from django.contrib import admin
from tastypie.api import Api
from centros.api import *
from clientes.api import *
from compras.api import *
from contactos.api import *
from cotizaciones.api import *
from equipos.api import *
from familias.api import *
from historiales.api import *
from lugares.api import *
from productos.api import *
from proveedores.api import *
from solicitudes.api import *
from tiposequipos.api import *
from usuarios.api import *
from usuarios.views import *
v1_api = Api(api_name="v1")
v1_api.register(CentroResource())
v1_api.register(ClienteResource())
v1_api.register(CompraResource())
v1_api.register(ContactoResource())
v1_api.register(CotizacionResource())
v1_api.register(EquipoResource())
v1_api.register(FamiliaResource())
v1_api.register(HistorialResource())
v1_api.register(LugarResource())
v1_api.register(ProductoResource())
v1_api.register(NombreProductoResource())
v1_api.register(FotoProductoResource())
v1_api.register(UnidadProductoResource())
v1_api.register(PrecioMesProductoResource())
v1_api.register(ProveedorResource())
v1_api.register(SolicitudResource())
v1_api.register(ProductoSolicitudResource())
v1_api.register(TipoEquipoResource())
v1_api.register(UsuarioResource())
v1_api.register(ConsolidadorSolicitanteResource())
v1_api.register(SolicitanteCodificadorResource())
v1_api.register(AprobadorSolicitudesSolicitanteResource())
v1_api.register(AprobadorSolicitudesCompradorResource())
v1_api.register(CompradorAprobadorComprasResource())
v1_api.register(AprobadorComprasAlmacenistaResource())
urlpatterns = patterns("",
(r"^api/", include(v1_api.urls)),
(r"^admin/", include(admin.site.urls))
) | 0 | 0 | 0 |
64ab3544724934a991a0858b5b7718856273fb9b | 1,488 | py | Python | tests/unit/samplers/test_mesh_samplers.py | bernssolg/pyntcloud-master | 84cf000b7a7f69a2c1b36f9624f05f65160bf992 | [
"MIT"
] | 1,142 | 2016-10-10T08:55:30.000Z | 2022-03-30T04:46:16.000Z | tests/unit/samplers/test_mesh_samplers.py | bernssolg/pyntcloud-master | 84cf000b7a7f69a2c1b36f9624f05f65160bf992 | [
"MIT"
] | 195 | 2016-10-10T08:30:37.000Z | 2022-02-17T12:51:17.000Z | tests/unit/samplers/test_mesh_samplers.py | bernssolg/pyntcloud-master | 84cf000b7a7f69a2c1b36f9624f05f65160bf992 | [
"MIT"
] | 215 | 2017-02-28T00:50:29.000Z | 2022-03-22T17:01:31.000Z | import pytest
from pyntcloud.samplers import RandomMeshSampler
@pytest.mark.parametrize("n", [
1,
5,
10,
50,
100
])
@pytest.mark.usefixtures("diamond")
@pytest.mark.parametrize("rgb,normals", [
(False, False),
(True, False),
(True, True),
(False, True)
])
@pytest.mark.usefixtures("diamond")
@pytest.mark.parametrize("n", [
1,
5,
10,
50,
100
])
@pytest.mark.usefixtures("diamond")
| 21.565217 | 81 | 0.612231 | import pytest
from pyntcloud.samplers import RandomMeshSampler
@pytest.mark.parametrize("n", [
    1,
    5,
    10,
    50,
    100
])
@pytest.mark.usefixtures("diamond")
def test_RandomMeshSampler_n_argument(diamond, n):
    """The sampler must return exactly ``n`` sampled points."""
    mesh_sampler = RandomMeshSampler(pyntcloud=diamond, n=n, rgb=True, normals=True)
    mesh_sampler.extract_info()
    assert len(mesh_sampler.compute()) == n
@pytest.mark.parametrize("rgb,normals", [
    (False, False),
    (True, False),
    (True, True),
    (False, True)
])
@pytest.mark.usefixtures("diamond")
def test_RandomMeshSampler_rgb_normals_optional_arguments(diamond, rgb, normals):
    """Colour / normal columns appear in the sample iff they were requested."""
    mesh_sampler = RandomMeshSampler(pyntcloud=diamond, n=10, rgb=rgb, normals=normals)
    mesh_sampler.extract_info()
    sample = mesh_sampler.compute()
    assert all((channel in sample) == rgb for channel in ("red", "green", "blue"))
    assert all((component in sample) == normals for component in ("nx", "ny", "nz"))
@pytest.mark.parametrize("n", [
    1,
    5,
    10,
    50,
    100
])
@pytest.mark.usefixtures("diamond")
def test_RandomMeshSampler_sampled_points_bounds(diamond, n):
    """Sampled xyz coordinates must stay inside the mesh's bounding box."""
    mesh_sampler = RandomMeshSampler(pyntcloud=diamond, n=n, rgb=True, normals=True)
    mesh_sampler.extract_info()
    xyz = mesh_sampler.compute()[["x", "y", "z"]].values
    assert all(xyz.max(0) <= diamond.xyz.max(0))
    assert all(xyz.min(0) >= diamond.xyz.min(0))
| 977 | 0 | 66 |
1518c7ea6a64cfa5331babb8a8dc61e8ec29325d | 304 | py | Python | notifications/tests/helpers.py | konradko/directory-api | e9cd05b1deaf575e94352c46ddbd1857d8119fda | [
"MIT"
] | 1 | 2021-11-06T12:08:26.000Z | 2021-11-06T12:08:26.000Z | notifications/tests/helpers.py | konradko/directory-api | e9cd05b1deaf575e94352c46ddbd1857d8119fda | [
"MIT"
] | null | null | null | notifications/tests/helpers.py | konradko/directory-api | e9cd05b1deaf575e94352c46ddbd1857d8119fda | [
"MIT"
] | null | null | null | from notifications.tests.factories import SupplierEmailNotificationFactory
| 33.777778 | 74 | 0.819079 | from notifications.tests.factories import SupplierEmailNotificationFactory
def build_suppier_email_notification_factory(SupplierEmailNotification):
    """Build a SupplierEmailNotificationFactory subclass bound to the given model.

    Args:
        SupplierEmailNotification: the model class the returned factory should
            create instances of (presumably a historical model state from a
            migration, given the ``HistoricFactory`` name -- TODO confirm
            against callers).

    Returns:
        A factory class whose ``Meta.model`` is ``SupplierEmailNotification``.

    NOTE(review): the function name looks like a typo for
    "build_supplier_..." -- renaming would break callers, so it is kept.
    """
    class HistoricFactory(SupplierEmailNotificationFactory):
        class Meta:
            model = SupplierEmailNotification
    return HistoricFactory
| 205 | 0 | 23 |
d9c383cc9cc941244a8de219cabe478afc811a9b | 7,733 | py | Python | vault/datadog_checks/vault/vault.py | brentm5/integrations-core | 5cac8788c95d8820435ef9c5d32d6a5463cf491d | [
"BSD-3-Clause"
] | 4 | 2021-06-21T19:21:49.000Z | 2021-06-23T21:21:55.000Z | vault/datadog_checks/vault/vault.py | brentm5/integrations-core | 5cac8788c95d8820435ef9c5d32d6a5463cf491d | [
"BSD-3-Clause"
] | null | null | null | vault/datadog_checks/vault/vault.py | brentm5/integrations-core | 5cac8788c95d8820435ef9c5d32d6a5463cf491d | [
"BSD-3-Clause"
] | 1 | 2021-06-21T19:21:51.000Z | 2021-06-21T19:21:51.000Z | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import warnings
from time import time as timestamp
import requests
from six import string_types
from urllib3.exceptions import InsecureRequestWarning
from datadog_checks.checks import AgentCheck
from datadog_checks.config import is_affirmative
from datadog_checks.utils.containers import hash_mutable
from .errors import ApiUnreachable
| 39.055556 | 108 | 0.585025 | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import warnings
from time import time as timestamp
import requests
from six import string_types
from urllib3.exceptions import InsecureRequestWarning
from datadog_checks.checks import AgentCheck
from datadog_checks.config import is_affirmative
from datadog_checks.utils.containers import hash_mutable
from .errors import ApiUnreachable
class Vault(AgentCheck):
    """Agent check that reports HashiCorp Vault leader, seal and health status."""
    CHECK_NAME = 'vault'
    # API version used when the version digit in `api_url` is unrecognised.
    DEFAULT_API_VERSION = '1'
    # Event type emitted when the cluster leader address changes.
    EVENT_LEADER_CHANGE = 'vault.leader_change'
    # Service check names reported by this check.
    SERVICE_CHECK_CONNECT = 'vault.can_connect'
    SERVICE_CHECK_UNSEALED = 'vault.unsealed'
    SERVICE_CHECK_INITIALIZED = 'vault.initialized'
    def __init__(self, name, init_config, agentConfig, instances=None):
        """Set up the per-API-version dispatch table and the config cache."""
        super(Vault, self).__init__(name, init_config, agentConfig, instances)
        # Maps API version -> named check functions, so `check` can dispatch
        # on the version encoded in each instance's `api_url`.
        self.api_versions = {
            '1': {
                'functions': {
                    'check_leader': self.check_leader_v1,
                    'check_health': self.check_health_v1,
                }
            },
        }
        # Cache of parsed instance configs, keyed by instance hash.
        self.config = {}
    def check(self, instance):
        """Run the leader and health checks for one configured instance."""
        config = self.get_config(instance)
        if config is None:
            return
        api = config['api']
        # Copy so per-run tag additions don't accumulate in the cached config.
        tags = list(config['tags'])
        # We access the version of the Vault API corresponding to each instance's `api_url`.
        try:
            api['check_leader'](config, tags)
            api['check_health'](config, tags)
        except ApiUnreachable:
            # access_api already reported the CRITICAL service check.
            return
        self.service_check(self.SERVICE_CHECK_CONNECT, AgentCheck.OK, tags=tags)
    def check_leader_v1(self, config, tags):
        """Report leader status and emit an event when the leader changes.

        Appends an `is_leader:*` tag to `tags` (visible to later checks in
        the same run) and updates `config['leader']` for change detection.
        """
        url = config['api_url'] + '/sys/leader'
        leader_data = self.access_api(url, config, tags).json()
        is_leader = is_affirmative(leader_data.get('is_self'))
        tags.append('is_leader:{}'.format('true' if is_leader else 'false'))
        self.gauge('vault.is_leader', int(is_leader), tags=tags)
        current_leader = leader_data.get('leader_address')
        previous_leader = config['leader']
        if config['detect_leader'] and current_leader:
            # Only emit once a previous leader is known and it differs.
            if previous_leader is not None and current_leader != previous_leader:
                self.event({
                    'timestamp': timestamp(),
                    'event_type': self.EVENT_LEADER_CHANGE,
                    'msg_title': 'Leader change',
                    'msg_text': 'Leader changed from `{}` to `{}`.'.format(previous_leader, current_leader),
                    'alert_type': 'info',
                    'source_type_name': self.CHECK_NAME,
                    'host': self.hostname,
                    'tags': tags,
                })
            config['leader'] = current_leader
    def check_health_v1(self, config, tags):
        """Report seal and initialization status from /sys/health.

        Also appends cluster_name / vault_version tags when present.
        """
        url = config['api_url'] + '/sys/health'
        health_data = self.access_api(url, config, tags).json()
        cluster_name = health_data.get('cluster_name')
        if cluster_name:
            tags.append('cluster_name:{}'.format(cluster_name))
        vault_version = health_data.get('version')
        if vault_version:
            tags.append('vault_version:{}'.format(vault_version))
        unsealed = not is_affirmative(health_data.get('sealed'))
        if unsealed:
            self.service_check(self.SERVICE_CHECK_UNSEALED, AgentCheck.OK, tags=tags)
        else:
            self.service_check(self.SERVICE_CHECK_UNSEALED, AgentCheck.CRITICAL, tags=tags)
        initialized = is_affirmative(health_data.get('initialized'))
        if initialized:
            self.service_check(self.SERVICE_CHECK_INITIALIZED, AgentCheck.OK, tags=tags)
        else:
            self.service_check(self.SERVICE_CHECK_INITIALIZED, AgentCheck.CRITICAL, tags=tags)
    def get_config(self, instance):
        """Parse and cache one instance's configuration.

        Returns the cached dict for this instance, or None when the required
        `api_url` setting is missing (the error is logged).
        """
        instance_id = hash_mutable(instance)
        config = self.config.get(instance_id)
        if config is None:
            config = {}
            try:
                api_url = instance['api_url']
                # Version is encoded as the trailing digit of api_url.
                api_version = api_url[-1]
                if api_version not in self.api_versions:
                    self.log.warning(
                        'Unknown Vault API version `{}`, using version '
                        '`{}`'.format(api_version, self.DEFAULT_API_VERSION)
                    )
                    api_url = api_url[:-1] + self.DEFAULT_API_VERSION
                    api_version = self.DEFAULT_API_VERSION
                config['api_url'] = api_url
                config['api'] = self.api_versions[api_version]['functions']
            except KeyError:
                self.log.error('Vault configuration setting `api_url` is required')
                return
            client_token = instance.get('client_token')
            config['headers'] = {'X-Vault-Token': client_token} if client_token else None
            username = instance.get('username')
            password = instance.get('password')
            # Basic auth is only used when both credentials are supplied.
            config['auth'] = (username, password) if username and password else None
            ssl_cert = instance.get('ssl_cert')
            ssl_private_key = instance.get('ssl_private_key')
            # requests accepts either a cert path or a (cert, key) tuple.
            if isinstance(ssl_cert, string_types):
                if isinstance(ssl_private_key, string_types):
                    config['ssl_cert'] = (ssl_cert, ssl_private_key)
                else:
                    config['ssl_cert'] = ssl_cert
            else:
                config['ssl_cert'] = None
            # A CA bundle path takes precedence over the boolean verify flag.
            if isinstance(instance.get('ssl_ca_cert'), string_types):
                config['ssl_verify'] = instance['ssl_ca_cert']
            else:
                config['ssl_verify'] = is_affirmative(instance.get('ssl_verify', True))
            config['ssl_ignore_warning'] = is_affirmative(instance.get('ssl_ignore_warning', False))
            config['proxies'] = self.get_instance_proxy(instance, config['api_url'])
            config['timeout'] = int(instance.get('timeout', 20))
            config['tags'] = instance.get('tags', [])
            # Keep track of the previous cluster leader to detect changes.
            config['leader'] = None
            config['detect_leader'] = is_affirmative(instance.get('detect_leader'))
            self.config[instance_id] = config
        return config
    def access_api(self, url, config, tags):
        """GET a Vault endpoint using the instance's TLS/auth/proxy settings.

        Raises:
            ApiUnreachable: on timeout or any request error; a CRITICAL
                `vault.can_connect` service check is reported first.
        """
        try:
            with warnings.catch_warnings():
                # Optionally silence urllib3's warning for verify=False setups.
                if config['ssl_ignore_warning']:
                    warnings.simplefilter('ignore', InsecureRequestWarning)
                response = requests.get(
                    url,
                    auth=config['auth'],
                    cert=config['ssl_cert'],
                    verify=config['ssl_verify'],
                    proxies=config['proxies'],
                    timeout=config['timeout'],
                    headers=config['headers']
                )
        except requests.exceptions.Timeout:
            msg = 'Vault endpoint `{}` timed out after {} seconds'.format(url, config['timeout'])
            self.service_check(
                self.SERVICE_CHECK_CONNECT,
                AgentCheck.CRITICAL,
                message=msg,
                tags=tags
            )
            self.log.exception(msg)
            raise ApiUnreachable
        except (requests.exceptions.RequestException, requests.exceptions.ConnectionError):
            msg = 'Error accessing Vault endpoint `{}`'.format(url)
            self.service_check(
                self.SERVICE_CHECK_CONNECT,
                AgentCheck.CRITICAL,
                message=msg,
                tags=tags
            )
            self.log.exception(msg)
            raise ApiUnreachable
        return response
| 6,850 | 414 | 23 |
c96d5b62c7c62750a78dc609fb8b00de2a672e4a | 424 | py | Python | problem_3.py | vineeths96/Pattern-Recognition-1 | b7cee4f59bf037fad76e66dd24ff66c1d3fe9049 | [
"MIT"
] | null | null | null | problem_3.py | vineeths96/Pattern-Recognition-1 | b7cee4f59bf037fad76e66dd24ff66c1d3fe9049 | [
"MIT"
] | null | null | null | problem_3.py | vineeths96/Pattern-Recognition-1 | b7cee4f59bf037fad76e66dd24ff66c1d3fe9049 | [
"MIT"
] | 1 | 2021-08-15T17:21:16.000Z | 2021-08-15T17:21:16.000Z | import os
from problem_3.load_data import load_data
from problem_3.problem_3a import problem_3a
from problem_3.problem_3b import problem_3b
# Create results directory
os.makedirs('results', exist_ok=True)
# Problem 3a
X_train, Y_train, X_test, Y_test = load_data('a')
problem_3a(X_train, Y_train, X_test, Y_test)
# Problem 3b
X_train, Y_train, X_test, Y_test = load_data('b')
problem_3b(X_train, Y_train, X_test, Y_test) | 26.5 | 49 | 0.792453 | import os
from problem_3.load_data import load_data
from problem_3.problem_3a import problem_3a
from problem_3.problem_3b import problem_3b
# Create results directory
os.makedirs('results', exist_ok=True)
# Problem 3a
X_train, Y_train, X_test, Y_test = load_data('a')
problem_3a(X_train, Y_train, X_test, Y_test)
# Problem 3b
X_train, Y_train, X_test, Y_test = load_data('b')
problem_3b(X_train, Y_train, X_test, Y_test) | 0 | 0 | 0 |
0a3a1d806499dced14fcb67cd4b59b22e9d08d77 | 849 | py | Python | setup.py | satyaog/pybenzinaparse | ff97e5b26555afee7a0ceaf9b0bd1a7e92374be3 | [
"MIT"
] | null | null | null | setup.py | satyaog/pybenzinaparse | ff97e5b26555afee7a0ceaf9b0bd1a7e92374be3 | [
"MIT"
] | null | null | null | setup.py | satyaog/pybenzinaparse | ff97e5b26555afee7a0ceaf9b0bd1a7e92374be3 | [
"MIT"
] | null | null | null | import glob
from setuptools import setup, find_packages
# Build the long description for PyPI: prefer the README converted to reST
# via pypandoc, falling back to the raw Markdown when pypandoc (or the
# conversion) is unavailable.
try:
    import pypandoc
    long_description = pypandoc.convert("README.md", "rst")
except(IOError, ImportError):
    # Context manager ensures the README handle is closed (the original
    # leaked an open file object).
    with open("README.md") as readme:
        long_description = readme.read()
setup(
    name="pybenzinaparse",
    version="0.2.2",
    packages=find_packages(exclude=["test_*"]),
    url="https://github.com/satyaog/pybenzinaparse",
    license="The MIT License",
    author="Satya Ortiz-Gagné",
    author_email="satya.ortiz-gagne@mila.quebec",
    description="MP4 / ISO base media file format (ISO/IEC 14496-12 - MPEG-4 Part 12) file parser",
    requires=["bitstring"],
    install_requires=["bitstring"],
    setup_requires=["pytest-runner"],
    tests_require=["pytest"],
    long_description=long_description,
    data_files=[("", ["README.md", ]),
                ("tests", glob.glob("data/*"))]
)
| 31.444444 | 99 | 0.6702 | import glob
from setuptools import setup, find_packages
try:
import pypandoc
long_description = pypandoc.convert("README.md", "rst")
except(IOError, ImportError):
long_description = open("README.md").read()
setup(
name="pybenzinaparse",
version="0.2.2",
packages=find_packages(exclude=["test_*"]),
url="https://github.com/satyaog/pybenzinaparse",
license="The MIT License",
author="Satya Ortiz-Gagné",
author_email="satya.ortiz-gagne@mila.quebec",
description="MP4 / ISO base media file format (ISO/IEC 14496-12 - MPEG-4 Part 12) file parser",
requires=["bitstring"],
install_requires=["bitstring"],
setup_requires=["pytest-runner"],
tests_require=["pytest"],
long_description=long_description,
data_files=[("", ["README.md", ]),
("tests", glob.glob("data/*"))]
)
| 0 | 0 | 0 |
edf7a7dece23b7613df27359ee9783353e1d4cf4 | 6,202 | py | Python | proj5/pghw05.py | insomniaccat/deepLearning_spring2017 | 2c770809d0bb7896d37db527e4353e899ba49420 | [
"MIT"
] | 1 | 2020-03-04T06:50:38.000Z | 2020-03-04T06:50:38.000Z | proj5/pghw05.py | insomniaccat/deepLearning_spring2017 | 2c770809d0bb7896d37db527e4353e899ba49420 | [
"MIT"
] | null | null | null | proj5/pghw05.py | insomniaccat/deepLearning_spring2017 | 2c770809d0bb7896d37db527e4353e899ba49420 | [
"MIT"
] | null | null | null | #Author: Usama Munir Sheikh
#The following code implements an LSTM recurrent neural network
#for classifying tweets as positive or negative
#in the sentiment140 dataset http://help.sentiment140.com/for-students/
#It was written for my Intro to Deep Learning Course
#taught by Professor Qiang Ji in Spring 2017
import tensorflow as tf
import numpy as np
import json
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import time
if __name__ == "__main__":
main() | 32.471204 | 115 | 0.706224 | #Author: Usama Munir Sheikh
#The following code implements an LSTM recurrent neural network
#for classifying tweets as positive or negative
#in the sentiment140 dataset http://help.sentiment140.com/for-students/
#It was written for my Intro to Deep Learning Course
#taught by Professor Qiang Ji in Spring 2017
import tensorflow as tf
import numpy as np
import json
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import time
def main():
    """Train an LSTM sentiment classifier (TensorFlow 1.x graph API) on the
    sentiment140 tweet dataset, then save the model and visualize results.

    Reads ``train_and_val.npz`` (padded token-id sequences plus masks) and
    ``vocab.json`` from the working directory; writes a TF checkpoint named
    ``my_model`` and shows t-SNE / training-curve plots.
    NOTE(review): the dump lost the original indentation; the nesting below is
    reconstructed from the data-flow and should be confirmed against upstream.
    """
    time_initial = time.time() #To see how much time required for the entire code to run
    #Load Training and Validation Data
    npzfile = np.load("train_and_val.npz")
    train_x = npzfile["train_x"]
    train_y = npzfile["train_y"]
    train_mask = npzfile["train_mask"]
    val_x = npzfile["val_x"]
    val_y = npzfile["val_y"]
    val_mask = npzfile["val_mask"]
    #Parameters
    N = 400000  # number of training examples consumed per epoch
    N_val = 50000;  # validation-set size (unused below; kept for reference)
    B = 1000 #batch_size
    #Network Parameters
    max_sequence_length = 25  # tweets padded/truncated to 25 tokens
    vocab_size = 8745
    word_embedding_size = 300
    cell_size = 128 #rnn cell size
    eta = 0.001 #learning rate
    #Make Tensor Flow Variables and Placeholders
    X = tf.placeholder(tf.int32,[None,max_sequence_length])
    Y = tf.placeholder(tf.float32,[None])
    Mask = tf.placeholder(tf.int32,[None,max_sequence_length])
    w_embed = tf.Variable(tf.random_uniform([vocab_size, word_embedding_size], minval=-0.1, maxval=0.1, seed = 1230))
    W = tf.get_variable("W", shape=[cell_size, 1], initializer=tf.contrib.layers.xavier_initializer())
    b = tf.Variable(tf.zeros([1]))
    #Write Tensorflow equations and models
    rnn_input = tf.nn.embedding_lookup(w_embed, X) #Word Embedding
    cell = tf.nn.rnn_cell.LSTMCell(cell_size) #create LSTM rnn cell
    output, state = tf.nn.dynamic_rnn(cell, rnn_input, dtype=tf.float32, time_major=False) #Propagate through rnn cell
    #Masking: select each sequence's last *valid* timestep output
    length = tf.cast(tf.reduce_sum(Mask,reduction_indices=1), tf.int32)
    batch_size = tf.shape(X)[0]
    max_length = tf.shape(output)[1]
    out_size = int(output.get_shape()[2])
    flat = tf.reshape(output, [-1, out_size])
    # flat index of the last valid step for each batch row
    index = tf.range(0, batch_size)*max_length + (length - 1)
    relevant = tf.gather(flat, index)
    yout = tf.matmul(relevant, W) + b #estimated output (logit)
    Y_reshaped = tf.reshape(Y, [batch_size,1])
    cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=yout, labels=Y_reshaped)
    cost = tf.reduce_mean(cross_entropy) #cross entropy cost
    optimizer_step = tf.train.AdamOptimizer(learning_rate=eta).minimize(cost) #run optimizer
    #Accuracy Calculations: threshold the sigmoid at 0.5 via round()
    Y_int = tf.cast(Y_reshaped, tf.int64)
    yout_sigmoid = tf.nn.sigmoid(yout)
    predict_op = tf.cast(tf.round(yout_sigmoid), tf.int64)
    correct_prediction = tf.equal(predict_op, Y_int)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # Create the collection (used by the course's grading harness).
    tf.get_collection("validation_nodes")
    #Add stuff to the collection.
    tf.add_to_collection("validation_nodes", X)
    tf.add_to_collection("validation_nodes", Mask)
    tf.add_to_collection("validation_nodes", predict_op)
    #Save Model
    saver = tf.train.Saver()
    #Create Empty Matrices to Save results
    loss_plot = []
    accuracy_train_plot = []
    accuracy_test_plot = []
    n = 50  # evaluate/record every n iterations
    num_epochs = 10 #number of epochs
    num_itr = int(np.divide(N,B)) #number of iterations per epoch
    model = tf.global_variables_initializer()
    with tf.Session() as session:
        session.run(model)
        for j in range(num_epochs):
            epoch_number = j+1  # NOTE(review): assigned but never used
            for i in range(num_itr):
                itr_number = i+1
                #Pick Batch for Training (sequential, non-shuffled batches)
                indices = np.arange(B) + (i*B)
                data_X = train_x[indices]
                data_Y = train_y[indices]
                mask = train_mask[indices]
                #Train: loss is evaluated *before* the optimizer step
                loss_np = cost.eval(feed_dict={X: data_X, Y: data_Y, Mask: mask})
                optimizer_step.run(feed_dict={X: data_X, Y: data_Y, Mask: mask})
                #Accuracy #PrintValues # SaveResults #EveryFiftyIterations
                if((itr_number % n == 0) or (itr_number == 1)):
                    print('----------' + repr(i+1) + '----------')
                    print(' ')
                    #print('Learning Rate: ' + repr(eta_np))
                    loss_plot.append(loss_np)
                    print('Loss: ' + repr(loss_np))
                    accuracy_train_np = accuracy.eval(feed_dict={X: data_X, Y: data_Y, Mask: mask}) #training accuracy
                    accuracy_test_np = accuracy.eval(feed_dict={X:val_x,Y:val_y, Mask: val_mask}) #validation accuracy
                    accuracy_train_plot.append(accuracy_train_np*100)
                    accuracy_test_plot.append(accuracy_test_np*100)
                    print('Training Accuracy: ' + repr(accuracy_train_np*100))
                    print('Test Accuracy: '+ repr(accuracy_test_np*100))
                    print(' ')
                    print('------------------------')
                    # Early stop once validation accuracy beats 84.1%
                    if (accuracy_test_np*100) > 84.1:
                        break
            # propagate the early stop out of the epoch loop
            if (accuracy_test_np*100) > 84.1:
                break
        word_embedding_matrix = w_embed.eval() #save word embedding matrix for visualization
        #save session
        save_path = saver.save(session, "my_model")
        session.close()
    #Print Elapsed Time
    print('------------------------')
    print('Optimization Finished')
    elapsed = time.time() - time_initial
    print('Time Elapsed: ' + repr(elapsed))
    #Visualization: t-SNE projection of selected word embeddings
    with open("vocab.json", "r") as f:
        vocab = json.load(f)
    s = ["monday", "tuesday", "wednesday", "thursday", "friday",
         "saturday", "sunday", "orange", "apple", "banana", "mango",
         "pineapple", "cherry", "fruit"]
    # assumes vocab[word] is indexable with the token id at position 0 — TODO confirm
    words = [(i, vocab[i]) for i in s]
    model = TSNE(n_components=2, random_state=0)
    #Note that the following line might use a good chunk of RAM
    tsne_embedding = model.fit_transform(word_embedding_matrix)
    words_vectors = tsne_embedding[np.array([item[1][0] for item in
                                             words])]
    z = words_vectors[:,0] #x-axis
    y = words_vectors[:,1] #y-axis
    fig, ax = plt.subplots()
    ax.scatter(z, y)
    for i, txt in enumerate(s):
        ax.annotate(txt, (z[i],y[i]))
    plt.show()
    #Plots: loss (left axis) vs. train/test accuracy (right axis)
    itr_number = len(loss_plot)
    t = np.arange(itr_number)
    fig, ax1 = plt.subplots()
    ax1.plot(t,np.reshape(loss_plot,(itr_number,1)), 'b-')
    ax1.set_xlabel('Number of Iterations (pghw5)')
    ax1.set_ylabel('Loss', color='b')
    ax1.tick_params('y', colors='b')
    ax2 = ax1.twinx()
    ax2.plot(t,np.reshape(accuracy_train_plot,(itr_number,1)), 'r-')
    ax2.set_ylabel('Percent Accuracy (pghw5)', color='k')
    ax2.tick_params('y', colors='k')
    ax2.plot(t,np.reshape(accuracy_test_plot,(itr_number,1)), 'g-')
    fig.tight_layout()
    plt.show()
if __name__ == "__main__":
main() | 5,694 | 0 | 23 |
ceb6898c641cfb4002ca3b57c862541acb373866 | 2,861 | py | Python | test/models/test_autoreg.py | gpescia/MyNetKet | 958510966a5870d9d491de0628903cf1fc210921 | [
"Apache-2.0"
] | null | null | null | test/models/test_autoreg.py | gpescia/MyNetKet | 958510966a5870d9d491de0628903cf1fc210921 | [
"Apache-2.0"
] | null | null | null | test/models/test_autoreg.py | gpescia/MyNetKet | 958510966a5870d9d491de0628903cf1fc210921 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jax
import netket as nk
import numpy as np
import pytest
from jax import numpy as jnp
@pytest.mark.parametrize("dtype", [jnp.float64, jnp.complex128])
@pytest.mark.parametrize("s", [1 / 2, 1])
@pytest.mark.parametrize(
"partial_model",
[
pytest.param(
lambda hilbert, dtype: nk.models.ARNNDense(
hilbert=hilbert,
layers=3,
features=5,
dtype=dtype,
),
id="dense",
),
pytest.param(
lambda hilbert, dtype: nk.models.ARNNConv1D(
hilbert=hilbert,
layers=3,
features=5,
kernel_size=2,
dtype=dtype,
),
id="conv1d",
),
pytest.param(
lambda hilbert, dtype: nk.models.ARNNConv1D(
hilbert=hilbert,
layers=3,
features=5,
kernel_size=2,
kernel_dilation=2,
dtype=dtype,
),
id="conv1d_dilation",
),
],
)
| 32.146067 | 86 | 0.594547 | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jax
import netket as nk
import numpy as np
import pytest
from jax import numpy as jnp
@pytest.mark.parametrize("dtype", [jnp.float64, jnp.complex128])
@pytest.mark.parametrize("s", [1 / 2, 1])
@pytest.mark.parametrize(
"partial_model",
[
pytest.param(
lambda hilbert, dtype: nk.models.ARNNDense(
hilbert=hilbert,
layers=3,
features=5,
dtype=dtype,
),
id="dense",
),
pytest.param(
lambda hilbert, dtype: nk.models.ARNNConv1D(
hilbert=hilbert,
layers=3,
features=5,
kernel_size=2,
dtype=dtype,
),
id="conv1d",
),
pytest.param(
lambda hilbert, dtype: nk.models.ARNNConv1D(
hilbert=hilbert,
layers=3,
features=5,
kernel_size=2,
kernel_dilation=2,
dtype=dtype,
),
id="conv1d_dilation",
),
],
)
def test_ARNN(partial_model, s, dtype):
    """Check that an ARNN model is normalized and truly autoregressive.

    Normalization: the wavefunction built from the conditionals must have
    unit norm.  Autoregressive property: flipping one input site must leave
    all conditional probabilities at earlier sites of that sample unchanged.
    """
    n_sites = 4
    n_samples = 3
    hilbert = nk.hilbert.Spin(s=s, N=n_sites)
    model = partial_model(hilbert, dtype)

    key_spins, key_model = jax.random.split(jax.random.PRNGKey(0))
    spins = hilbert.random_state(key_spins, size=n_samples)
    (p, _), params = model.init_with_output(
        key_model, spins, None, method=model.conditionals
    )

    # Test if the model is normalized
    # The result may not be very accurate, because it is in exp space
    psi = nk.nn.to_array(hilbert, model.apply, params, normalize=False)
    assert psi.conj() @ psi == pytest.approx(1, rel=1e-5, abs=1e-5)

    # Test if the model is autoregressive
    for i in range(n_samples):
        for j in range(n_sites):
            # Flip a single input element and recompute the conditionals
            flipped = spins.at[i, j].set(-spins[i, j])
            p_flipped, _ = model.apply(
                params, flipped, None, method=model.conditionals
            )
            delta = p_flipped - p
            # Only outputs at site j and later of sample i may change;
            # zero those out and require everything else to be unchanged.
            delta = delta.at[i, j + 1 :].set(0)
            np.testing.assert_allclose(delta, 0, err_msg=f"i={i} j={j}")
| 1,128 | 0 | 22 |
03881bddee50a658bae810ebeae349ced5d95d0f | 16,307 | py | Python | tests/pytests/integration/runners/state/orchestrate/test_orchestrate.py | haodeon/salt | af2964f4ddbf9c5635d1528a495e473996cc7b71 | [
"Apache-2.0"
] | null | null | null | tests/pytests/integration/runners/state/orchestrate/test_orchestrate.py | haodeon/salt | af2964f4ddbf9c5635d1528a495e473996cc7b71 | [
"Apache-2.0"
] | null | null | null | tests/pytests/integration/runners/state/orchestrate/test_orchestrate.py | haodeon/salt | af2964f4ddbf9c5635d1528a495e473996cc7b71 | [
"Apache-2.0"
] | null | null | null | """
Tests for state.orchestrate
"""
import os
import pytest
pytestmark = [
pytest.mark.slow_test,
]
def test_orchestrate_output(salt_run_cli, salt_minion, salt_master):
    """
    The orchestrate runner must emit the full stateful return, not just the
    outputter header.

    In Issue #31330 the output collapsed to ['outputter:', ' highstate'];
    guard against that regression and also require some known-good highstate
    lines to be present.
    """
    # The exact (bad) output produced by the Issue #31330 regression.
    regression_lines = ["outputter:", " highstate"]
    # Lines a healthy orchestrate run is expected to contain.
    expected_lines = [
        " Function: salt.state",
        " Result: True",
        "Succeeded: 1 (changed=1)",
        "Failed: 0",
        "Total states run: 1",
    ]
    orch_sls = """
    call_sleep_state:
      salt.state:
        - tgt: {}
        - sls: simple-ping
    """.format(
        salt_minion.id
    )
    ping_sls = """
    simple-ping:
      module.run:
        - name: test.ping
    """
    with salt_master.state_tree.base.temp_file(
        "orch-test.sls", orch_sls
    ), salt_master.state_tree.base.temp_file("simple-ping.sls", ping_sls):
        ret = salt_run_cli.run("--out=highstate", "state.orchestrate", "orch-test")
        assert ret.returncode == 0
        output_lines = ret.stdout.splitlines()
        # The regression produced exactly the two-line outputter header.
        assert output_lines != regression_lines
        assert len(output_lines) > 2
        # Every known-good sample line must appear in the output.
        for line in expected_lines:
            assert line in output_lines
def test_orchestrate_state_output_with_salt_function(
    salt_run_cli, salt_minion, salt_master
):
    """
    salt.function results must not be rendered as recursive highstate data.

    An execution-module function does not return highstate data, so its
    orchestration output should show the plain function return (state.apply
    is the outlier, handled by salt.state).
    See https://github.com/saltstack/salt/issues/60029 for more detail.
    """
    orch_sls = """
    arg_clean_test:
      salt.function:
        - name: test.arg_clean
        - arg:
          - B flat major
          - has 2 flats
        - tgt: {minion_id}

    ping_test:
      salt.function:
        - name: test.ping
        - tgt: {minion_id}
    """.format(
        minion_id=salt_minion.id
    )
    with salt_master.state_tree.base.temp_file("orch-function-test.sls", orch_sls):
        ret = salt_run_cli.run(
            "--out=highstate", "state.orchestrate", "orch-function-test"
        )
        assert ret.returncode == 0
        stripped_lines = [line.strip() for line in ret.stdout.splitlines()]
        # The function args and the ping result must appear verbatim.
        for expected in ("args:", "- B flat major", "- has 2 flats", "True"):
            assert expected in stripped_lines
def test_orchestrate_nested(salt_run_cli, salt_minion, salt_master, tmp_path):
    """
    failhard in a nested orchestration must abort the outer orchestration.

    The inner run fails a command with failhard set; the outer run would then
    touch a marker file, so that file must never be created.
    """
    marker = tmp_path / "ewu-2016-12-13"
    inner_sls = """
    cmd.run:
      salt.function:
        - tgt: {}
        - arg:
          - {}
        - failhard: True
    """.format(
        salt_minion.id, pytest.helpers.shell_test_false()
    )
    outer_sls = """
    state.orchestrate:
      salt.runner:
        - mods: nested.inner
        - failhard: True

    cmd.run:
      salt.function:
        - tgt: {}
        - arg:
          - touch {}
    """.format(
        salt_minion.id, marker
    )
    with salt_master.state_tree.base.temp_file(
        "nested/inner.sls", inner_sls
    ), salt_master.state_tree.base.temp_file("nested/outer.sls", outer_sls):
        ret = salt_run_cli.run("state.orchestrate", "nested.outer")
        assert ret.returncode != 0
        # The failing inner run must have stopped the outer run before
        # it could create the marker file.
        assert not marker.exists()
def test_orchestrate_with_mine(salt_run_cli, salt_minion, salt_master):
    """
    test salt-run state.orchestrate with mine.get call in sls
    """
    # Jinja sls that only renders the test.ping state when the mine lookup
    # for the minion returns data.  Built by string concatenation so the
    # literal Jinja braces are never mistaken for str.format fields.
    sls_contents = (
        """
    {% set minion = '"""
        + salt_minion.id
        + """' %}
    {% set mine = salt.saltutil.runner('mine.get', tgt=minion, fun='test.ping') %}
    {% if mine %}
    test.ping:
      salt.function:
        - tgt: "{{ minion }}"
    {% endif %}
    """
    )
    # Populate the mine first so the Jinja {% if mine %} branch renders.
    ret = salt_run_cli.run("mine.update", salt_minion.id)
    assert ret.returncode == 0
    with salt_master.state_tree.base.temp_file("orch/mine.sls", sls_contents):
        ret = salt_run_cli.run("state.orchestrate", "orch.mine")
        assert ret.returncode == 0
        assert ret.data
        assert ret.data["data"][salt_master.id]
        # Every orchestrated state must report a successful ping return
        # from the targeted minion.
        for state_data in ret.data["data"][salt_master.id].values():
            assert state_data["changes"]["ret"]
            assert state_data["changes"]["ret"][salt_minion.id] is True
def test_orchestrate_state_and_function_failure(salt_run_cli, salt_master, salt_minion):
    """
    Ensure that returns from failed minions are in the changes dict where
    they belong, so they can be programmatically analyzed.

    See https://github.com/saltstack/salt/issues/43204
    """
    # Step01 applies a deliberately failing state; Step02 runs a helper
    # function that returns False with a nonzero retcode.
    init_sls = """
    Step01:
      salt.state:
        - tgt: {minion_id}
        - sls:
          - orch.issue43204.fail_with_changes

    Step02:
      salt.function:
        - name: runtests_helpers.nonzero_retcode_return_false
        - tgt: {minion_id}
        - fail_function: runtests_helpers.fail_function
    """.format(
        minion_id=salt_minion.id
    )
    fail_sls = """
    test fail with changes:
      test.fail_with_changes
    """
    with salt_master.state_tree.base.temp_file(
        "orch/issue43204/init.sls", init_sls
    ), salt_master.state_tree.base.temp_file(
        "orch/issue43204/fail_with_changes.sls", fail_sls
    ):
        # Sync so runtests_helpers is available on the minion.
        ret = salt_run_cli.run("saltutil.sync_modules")
        assert ret.returncode == 0
        ret = salt_run_cli.run("state.orchestrate", "orch.issue43204")
        assert ret.returncode != 0
        # Drill down to the changes dict
        data = ret.data["data"][salt_master.id]
        state_ret = data["salt_|-Step01_|-Step01_|-state"]["changes"]
        func_ret = data[
            "salt_|-Step02_|-runtests_helpers.nonzero_retcode_return_false_|-function"
        ]["changes"]
        # Remove duration and start time from the results, since they would
        # vary with each run and that would make it impossible to test.
        for item in ("duration", "start_time"):
            state_ret["ret"][salt_minion.id][
                "test_|-test fail with changes_|-test fail with changes_|-fail_with_changes"
            ].pop(item)
        # The minion's full (failed) state return must be present in changes.
        expected = {
            "out": "highstate",
            "ret": {
                salt_minion.id: {
                    "test_|-test fail with changes_|-test fail with changes_|-fail_with_changes": {
                        "__id__": "test fail with changes",
                        "__run_num__": 0,
                        "__sls__": "orch.issue43204.fail_with_changes",
                        "changes": {
                            "testing": {
                                "new": "Something pretended to change",
                                "old": "Unchanged",
                            }
                        },
                        "comment": "Failure!",
                        "name": "test fail with changes",
                        "result": False,
                    }
                }
            },
        }
        assert state_ret == expected
        assert func_ret == {"ret": {salt_minion.id: False}}
def test_orchestrate_salt_function_return_false_failure(
    salt_run_cli, salt_minion, salt_master
):
    """
    A salt.function whose only return is False must be flagged as failed
    when run as an orchestration.

    See https://github.com/saltstack/salt/issues/30367
    """
    orch_sls = """
    deploy_check:
      salt.function:
        - name: test.false
        - tgt: {}
    """.format(
        salt_minion.id
    )
    with salt_master.state_tree.base.temp_file("orch/issue30367.sls", orch_sls):
        ret = salt_run_cli.run("saltutil.sync_modules")
        assert ret.returncode == 0
        ret = salt_run_cli.run("state.orchestrate", "orch.issue30367")
        assert ret.returncode != 0
        # Drill down to the function state's entry in the master return.
        func_state = ret.data["data"][salt_master.id][
            "salt_|-deploy_check_|-test.false_|-function"
        ]
        # The False return must mark the state failed and appear in changes.
        assert func_state["result"] is False
        assert func_state["changes"] == {"ret": {salt_minion.id: False}}
def test_orchestrate_target_exists(salt_run_cli, salt_minion, salt_master):
    """
    Orchestration with multiple states must succeed when every target
    matches an existing minion.
    """
    orch_sls = """
    core:
      salt.state:
        - tgt: '{minion_id}*'
        - sls:
          - core

    test-state:
      salt.state:
        - tgt: '{minion_id}*'
        - sls:
          - orch.target-test

    cmd.run:
      salt.function:
        - tgt: '{minion_id}*'
        - arg:
          - echo test
    """.format(
        minion_id=salt_minion.id
    )
    succeed_sls = """
    always_true:
      test.succeed_without_changes
    """
    with salt_master.state_tree.base.temp_file(
        "orch/target-exists.sls", orch_sls
    ), salt_master.state_tree.base.temp_file(
        "orch/target-test.sls", succeed_sls
    ), salt_master.state_tree.base.temp_file(
        "core.sls", succeed_sls
    ):
        ret = salt_run_cli.run("state.orchestrate", "orch.target-exists")
        assert ret.returncode == 0
        assert ret.data
        remaining = {"core", "test-state", "cmd.run"}
        for state_data in ret.data["data"][salt_master.id].values():
            name = state_data["name"]
            if name == "core":
                assert state_data["result"] is True
                remaining.remove(name)
            elif name == "test-state":
                assert state_data["result"] is True
                remaining.remove(name)
            elif name == "cmd.run":
                # The function return must surface in the changes dict.
                assert state_data["changes"] == {
                    "ret": {salt_minion.id: "test"},
                }
                remaining.remove(name)
        # All three orchestrated states must have been seen.
        assert not remaining
def test_orchestrate_target_does_not_exist(salt_run_cli, salt_minion, salt_master):
    """
    When one state targets no existing minion, the orchestration must fail
    overall while the remaining states still run to completion.
    """
    orch_sls = """
    core:
      salt.state:
        - tgt: 'does-not-exist*'
        - sls:
          - core

    test-state:
      salt.state:
        - tgt: '{minion_id}*'
        - sls:
          - orch.target-test

    cmd.run:
      salt.function:
        - tgt: '{minion_id}*'
        - arg:
          - echo test
    """.format(
        minion_id=salt_minion.id
    )
    succeed_sls = """
    always_true:
      test.succeed_without_changes
    """
    with salt_master.state_tree.base.temp_file(
        "orch/target-does-not-exist.sls", orch_sls
    ), salt_master.state_tree.base.temp_file(
        "orch/target-test.sls", succeed_sls
    ), salt_master.state_tree.base.temp_file(
        "core.sls", succeed_sls
    ):
        ret = salt_run_cli.run("state.orchestrate", "orch.target-does-not-exist")
        assert ret.returncode != 0
        assert ret.data
        remaining = {"core", "test-state", "cmd.run"}
        for state_data in ret.data["data"][salt_master.id].values():
            name = state_data["name"]
            if name == "core":
                # The unmatched target must be reported as a failure.
                assert state_data["result"] is False
                assert state_data["comment"] == "No minions returned"
                remaining.remove(name)
            elif name == "test-state":
                assert state_data["result"] is True
                remaining.remove(name)
            elif name == "cmd.run":
                assert state_data["changes"] == {
                    "ret": {salt_minion.id: "test"},
                }
                remaining.remove(name)
        # All three orchestrated states must have been seen.
        assert not remaining
def test_orchestrate_retcode(salt_run_cli, salt_master):
    """
    Runner and wheel states must honor a nonzero retcode set in __context__.
    """
    orch_sls = """
    test_runner_success:
      salt.runner:
        - name: runtests_helpers.success

    test_runner_failure:
      salt.runner:
        - name: runtests_helpers.failure

    test_wheel_success:
      salt.wheel:
        - name: runtests_helpers.success

    test_wheel_failure:
      salt.wheel:
        - name: runtests_helpers.failure
    """
    with salt_master.state_tree.base.temp_file("orch/retcode.sls", orch_sls):
        # Make the runtests_helpers runner and wheel modules available.
        for sync_fn in ("saltutil.sync_runners", "saltutil.sync_wheel"):
            ret = salt_run_cli.run(sync_fn)
            assert ret.returncode == 0
        ret = salt_run_cli.run("state.orchestrate", "orch.retcode")
        assert ret.returncode != 0
        assert ret.data
        # Each state's result must match its helper's success/failure.
        expected_results = {
            "test_runner_success": True,
            "test_runner_failure": False,
            "test_wheel_success": True,
            "test_wheel_failure": False,
        }
        seen = set()
        for state_data in ret.data["data"][salt_master.id].values():
            state_id = state_data["__id__"]
            seen.add(state_id)
            assert state_data["result"] is expected_results[state_id]
        assert seen == set(expected_results)
def test_orchestrate_batch_with_failhard_error(
    salt_run_cli, salt_master, salt_minion, tmp_path
):
    """
    test orchestration properly stops with failhard and batch.
    """
    testfile = tmp_path / "test-file"
    # Orchestration that applies a failing sls in batches of 1 with failhard.
    sls_contents = """
    call_fail_state:
      salt.state:
        - tgt: {}
        - batch: 1
        - failhard: True
        - sls: fail
    """.format(
        salt_minion.id
    )
    # fail.sls always fails: the file source is a nonexistent salt:// path.
    fail_sls = """
    {}:
      file.managed:
        - source: salt://hnlcfsdjhkzkdhynclarkhmcls
    """.format(
        testfile
    )
    with salt_master.state_tree.base.temp_file(
        "orch/batch.sls", sls_contents
    ), salt_master.state_tree.base.temp_file("fail.sls", fail_sls):
        ret = salt_run_cli.run("state.orchestrate", "orch.batch")
        assert ret.returncode != 0
        data = ret.data["data"][salt_master.id]
        result = data["salt_|-call_fail_state_|-call_fail_state_|-state"]["result"]
        changes = data["salt_|-call_fail_state_|-call_fail_state_|-state"]["changes"]
        assert result is False
        # The execution should stop after first error, so return dict should contain only one minion
        assert len(changes["ret"]) == 1
def test_orchestrate_subset(
    salt_run_cli,
    salt_master,
    salt_minion,
    salt_sub_minion,
    grains,
):
    """
    test orchestration state using subset
    """
    # subset: 1 should pick exactly one of the minions matching '*minion*'.
    sls_contents = """
    test subset:
      salt.state:
        - tgt: '*minion*'
        - subset: 1
        - sls: test
    """
    test_sls = """
    test state:
      test.succeed_without_changes:
        - name: test
    """
    if os.environ.get("CI_RUN", "0") == "1":
        if grains["os"] == "Fedora" and int(grains["osrelease"]) == 35:
            # This test is flaky on Fedora 35 - Don't really know why, because,
            # of course, this test module passes when running locally on a
            # Fedora 35 container.
            pytest.skip("Skipping flaky Fedora 35 test for now, on CI runs.")
    with salt_master.state_tree.base.temp_file(
        "orch/subset.sls", sls_contents
    ), salt_master.state_tree.base.temp_file("test.sls", test_sls):
        ret = salt_run_cli.run("state.orchestrate", "orch.subset")
        assert ret.returncode == 0
        for state_data in ret.data["data"][salt_master.id].values():
            # Should only run in one of the minions: exactly one of the two
            # minion ids may appear in the state comment.
            comment = state_data["comment"]
            if salt_minion.id in comment:
                assert salt_sub_minion.id not in comment
            elif salt_sub_minion.id in comment:
                assert salt_minion.id not in comment
            else:
                pytest.fail(
                    "None of the targeted minions({}) show up in comment: '{}'".format(
                        ", ".join([salt_minion.id, salt_sub_minion.id]), comment
                    )
                )
| 30.480374 | 96 | 0.601398 | """
Tests for state.orchestrate
"""
import os
import pytest
pytestmark = [
pytest.mark.slow_test,
]
def test_orchestrate_output(salt_run_cli, salt_minion, salt_master):
"""
Ensure the orchestrate runner outputs useful state data.
In Issue #31330, the output only contains ['outputter:', ' highstate'],
and not the full stateful return. This tests ensures we don't regress in that
manner again.
Also test against some sample "good" output that would be included in a correct
orchestrate run.
"""
bad_out = ["outputter:", " highstate"]
good_out = [
" Function: salt.state",
" Result: True",
"Succeeded: 1 (changed=1)",
"Failed: 0",
"Total states run: 1",
]
sls_contents = """
call_sleep_state:
salt.state:
- tgt: {}
- sls: simple-ping
""".format(
salt_minion.id
)
simple_ping_sls = """
simple-ping:
module.run:
- name: test.ping
"""
with salt_master.state_tree.base.temp_file(
"orch-test.sls", sls_contents
), salt_master.state_tree.base.temp_file("simple-ping.sls", simple_ping_sls):
ret = salt_run_cli.run("--out=highstate", "state.orchestrate", "orch-test")
assert ret.returncode == 0
ret_output = ret.stdout.splitlines()
# First, check that we don't have the "bad" output that was displaying in
# Issue #31330 where only the highstate outputter was listed
assert bad_out != ret_output
assert len(ret_output) > 2
# Now test that some expected good sample output is present in the return.
for item in good_out:
assert item in ret_output
def test_orchestrate_state_output_with_salt_function(
salt_run_cli, salt_minion, salt_master
):
"""
Ensure that orchestration produces the correct output with salt.function.
A salt execution module function does not return highstate data, so we
should not try to recursively output it as such.
The outlier to this rule is state.apply, but that is handled by the salt.state.
See https://github.com/saltstack/salt/issues/60029 for more detail.
"""
sls_contents = """
arg_clean_test:
salt.function:
- name: test.arg_clean
- arg:
- B flat major
- has 2 flats
- tgt: {minion_id}
ping_test:
salt.function:
- name: test.ping
- tgt: {minion_id}
""".format(
minion_id=salt_minion.id
)
with salt_master.state_tree.base.temp_file("orch-function-test.sls", sls_contents):
ret = salt_run_cli.run(
"--out=highstate", "state.orchestrate", "orch-function-test"
)
assert ret.returncode == 0
ret_output = [line.strip() for line in ret.stdout.splitlines()]
assert "args:" in ret_output
assert "- B flat major" in ret_output
assert "- has 2 flats" in ret_output
assert "True" in ret_output
def test_orchestrate_nested(salt_run_cli, salt_minion, salt_master, tmp_path):
"""
test salt-run state.orchestrate and failhard with nested orchestration
"""
testfile = tmp_path / "ewu-2016-12-13"
inner_sls = """
cmd.run:
salt.function:
- tgt: {}
- arg:
- {}
- failhard: True
""".format(
salt_minion.id, pytest.helpers.shell_test_false()
)
outer_sls = """
state.orchestrate:
salt.runner:
- mods: nested.inner
- failhard: True
cmd.run:
salt.function:
- tgt: {}
- arg:
- touch {}
""".format(
salt_minion.id, testfile
)
with salt_master.state_tree.base.temp_file(
"nested/inner.sls", inner_sls
), salt_master.state_tree.base.temp_file("nested/outer.sls", outer_sls):
ret = salt_run_cli.run("state.orchestrate", "nested.outer")
assert ret.returncode != 0
assert testfile.exists() is False
def test_orchestrate_with_mine(salt_run_cli, salt_minion, salt_master):
"""
test salt-run state.orchestrate with mine.get call in sls
"""
sls_contents = (
"""
{% set minion = '"""
+ salt_minion.id
+ """' %}
{% set mine = salt.saltutil.runner('mine.get', tgt=minion, fun='test.ping') %}
{% if mine %}
test.ping:
salt.function:
- tgt: "{{ minion }}"
{% endif %}
"""
)
ret = salt_run_cli.run("mine.update", salt_minion.id)
assert ret.returncode == 0
with salt_master.state_tree.base.temp_file("orch/mine.sls", sls_contents):
ret = salt_run_cli.run("state.orchestrate", "orch.mine")
assert ret.returncode == 0
assert ret.data
assert ret.data["data"][salt_master.id]
for state_data in ret.data["data"][salt_master.id].values():
assert state_data["changes"]["ret"]
assert state_data["changes"]["ret"][salt_minion.id] is True
def test_orchestrate_state_and_function_failure(salt_run_cli, salt_master, salt_minion):
"""
Ensure that returns from failed minions are in the changes dict where
they belong, so they can be programmatically analyzed.
See https://github.com/saltstack/salt/issues/43204
"""
init_sls = """
Step01:
salt.state:
- tgt: {minion_id}
- sls:
- orch.issue43204.fail_with_changes
Step02:
salt.function:
- name: runtests_helpers.nonzero_retcode_return_false
- tgt: {minion_id}
- fail_function: runtests_helpers.fail_function
""".format(
minion_id=salt_minion.id
)
fail_sls = """
test fail with changes:
test.fail_with_changes
"""
with salt_master.state_tree.base.temp_file(
"orch/issue43204/init.sls", init_sls
), salt_master.state_tree.base.temp_file(
"orch/issue43204/fail_with_changes.sls", fail_sls
):
ret = salt_run_cli.run("saltutil.sync_modules")
assert ret.returncode == 0
ret = salt_run_cli.run("state.orchestrate", "orch.issue43204")
assert ret.returncode != 0
# Drill down to the changes dict
data = ret.data["data"][salt_master.id]
state_ret = data["salt_|-Step01_|-Step01_|-state"]["changes"]
func_ret = data[
"salt_|-Step02_|-runtests_helpers.nonzero_retcode_return_false_|-function"
]["changes"]
# Remove duration and start time from the results, since they would
# vary with each run and that would make it impossible to test.
for item in ("duration", "start_time"):
state_ret["ret"][salt_minion.id][
"test_|-test fail with changes_|-test fail with changes_|-fail_with_changes"
].pop(item)
expected = {
"out": "highstate",
"ret": {
salt_minion.id: {
"test_|-test fail with changes_|-test fail with changes_|-fail_with_changes": {
"__id__": "test fail with changes",
"__run_num__": 0,
"__sls__": "orch.issue43204.fail_with_changes",
"changes": {
"testing": {
"new": "Something pretended to change",
"old": "Unchanged",
}
},
"comment": "Failure!",
"name": "test fail with changes",
"result": False,
}
}
},
}
assert state_ret == expected
assert func_ret == {"ret": {salt_minion.id: False}}
def test_orchestrate_salt_function_return_false_failure(
salt_run_cli, salt_minion, salt_master
):
"""
Ensure that functions that only return False in the return
are flagged as failed when run as orchestrations.
See https://github.com/saltstack/salt/issues/30367
"""
sls_contents = """
deploy_check:
salt.function:
- name: test.false
- tgt: {}
""".format(
salt_minion.id
)
with salt_master.state_tree.base.temp_file("orch/issue30367.sls", sls_contents):
ret = salt_run_cli.run("saltutil.sync_modules")
assert ret.returncode == 0
ret = salt_run_cli.run("state.orchestrate", "orch.issue30367")
assert ret.returncode != 0
# Drill down to the changes dict
data = ret.data["data"][salt_master.id]
state_result = data["salt_|-deploy_check_|-test.false_|-function"]["result"]
func_ret = data["salt_|-deploy_check_|-test.false_|-function"]["changes"]
assert state_result is False
assert func_ret == {"ret": {salt_minion.id: False}}
def test_orchestrate_target_exists(salt_run_cli, salt_minion, salt_master):
"""
test orchestration when target exists while using multiple states
"""
sls_contents = """
core:
salt.state:
- tgt: '{minion_id}*'
- sls:
- core
test-state:
salt.state:
- tgt: '{minion_id}*'
- sls:
- orch.target-test
cmd.run:
salt.function:
- tgt: '{minion_id}*'
- arg:
- echo test
""".format(
minion_id=salt_minion.id
)
target_test_sls = """
always_true:
test.succeed_without_changes
"""
with salt_master.state_tree.base.temp_file(
"orch/target-exists.sls", sls_contents
), salt_master.state_tree.base.temp_file(
"orch/target-test.sls", target_test_sls
), salt_master.state_tree.base.temp_file(
"core.sls", target_test_sls
):
ret = salt_run_cli.run("state.orchestrate", "orch.target-exists")
assert ret.returncode == 0
assert ret.data
data = ret.data["data"][salt_master.id]
to_check = {"core", "test-state", "cmd.run"}
for state_data in data.values():
if state_data["name"] == "core":
to_check.remove("core")
assert state_data["result"] is True
if state_data["name"] == "test-state":
assert state_data["result"] is True
to_check.remove("test-state")
if state_data["name"] == "cmd.run":
assert state_data["changes"] == {
"ret": {salt_minion.id: "test"},
}
to_check.remove("cmd.run")
assert not to_check
def test_orchestrate_target_does_not_exist(salt_run_cli, salt_minion, salt_master):
"""
test orchestration when target does not exist while using multiple states
"""
sls_contents = """
core:
salt.state:
- tgt: 'does-not-exist*'
- sls:
- core
test-state:
salt.state:
- tgt: '{minion_id}*'
- sls:
- orch.target-test
cmd.run:
salt.function:
- tgt: '{minion_id}*'
- arg:
- echo test
""".format(
minion_id=salt_minion.id
)
target_test_sls = """
always_true:
test.succeed_without_changes
"""
with salt_master.state_tree.base.temp_file(
"orch/target-does-not-exist.sls", sls_contents
), salt_master.state_tree.base.temp_file(
"orch/target-test.sls", target_test_sls
), salt_master.state_tree.base.temp_file(
"core.sls", target_test_sls
):
ret = salt_run_cli.run("state.orchestrate", "orch.target-does-not-exist")
assert ret.returncode != 0
assert ret.data
data = ret.data["data"][salt_master.id]
to_check = {"core", "test-state", "cmd.run"}
for state_data in data.values():
if state_data["name"] == "core":
to_check.remove("core")
assert state_data["result"] is False
assert state_data["comment"] == "No minions returned"
if state_data["name"] == "test-state":
assert state_data["result"] is True
to_check.remove("test-state")
if state_data["name"] == "cmd.run":
assert state_data["changes"] == {
"ret": {salt_minion.id: "test"},
}
to_check.remove("cmd.run")
assert not to_check
def test_orchestrate_retcode(salt_run_cli, salt_master):
    """
    Test orchestration with nonzero retcode set in __context__
    """
    orch_sls = """
    test_runner_success:
      salt.runner:
        - name: runtests_helpers.success
    test_runner_failure:
      salt.runner:
        - name: runtests_helpers.failure
    test_wheel_success:
      salt.wheel:
        - name: runtests_helpers.success
    test_wheel_failure:
      salt.wheel:
        - name: runtests_helpers.failure
    """
    with salt_master.state_tree.base.temp_file("orch/retcode.sls", orch_sls):
        # make sure the helper runner/wheel modules are synced first
        for sync_fn in ("saltutil.sync_runners", "saltutil.sync_wheel"):
            sync_ret = salt_run_cli.run(sync_fn)
            assert sync_ret.returncode == 0
        ret = salt_run_cli.run("state.orchestrate", "orch.retcode")
        # the failure states must propagate a nonzero retcode
        assert ret.returncode != 0
        assert ret.data
        data = ret.data["data"][salt_master.id]
        successes = {"test_runner_success", "test_wheel_success"}
        failures = {"test_runner_failure", "test_wheel_failure"}
        pending = successes | failures
        for entry in data.values():
            name = entry["__id__"]
            pending.remove(name)
            if name in successes:
                assert entry["result"] is True
            elif name in failures:
                assert entry["result"] is False
        # all four states must have reported back
        assert not pending
def test_orchestrate_batch_with_failhard_error(
    salt_run_cli, salt_master, salt_minion, tmp_path
):
    """
    test orchestration properly stops with failhard and batch.
    """
    testfile = tmp_path / "test-file"
    batch_sls = """
    call_fail_state:
      salt.state:
        - tgt: {}
        - batch: 1
        - failhard: True
        - sls: fail
    """.format(salt_minion.id)
    fail_sls = """
    {}:
      file.managed:
        - source: salt://hnlcfsdjhkzkdhynclarkhmcls
    """.format(testfile)
    tree = salt_master.state_tree.base
    with tree.temp_file("orch/batch.sls", batch_sls), tree.temp_file(
        "fail.sls", fail_sls
    ):
        ret = salt_run_cli.run("state.orchestrate", "orch.batch")
        assert ret.returncode != 0
    state_key = "salt_|-call_fail_state_|-call_fail_state_|-state"
    state_entry = ret.data["data"][salt_master.id][state_key]
    assert state_entry["result"] is False
    # The execution should stop after first error, so return dict should contain only one minion
    assert len(state_entry["changes"]["ret"]) == 1
def test_orchestrate_subset(
    salt_run_cli,
    salt_master,
    salt_minion,
    salt_sub_minion,
    grains,
):
    """
    test orchestration state using subset
    """
    orch_sls = """
    test subset:
      salt.state:
        - tgt: '*minion*'
        - subset: 1
        - sls: test
    """
    test_sls = """
    test state:
      test.succeed_without_changes:
        - name: test
    """
    on_ci = os.environ.get("CI_RUN", "0") == "1"
    if on_ci and grains["os"] == "Fedora" and int(grains["osrelease"]) == 35:
        # This test is flaky on Fedora 35 - Don't really know why, because,
        # of course, this test module passes when running locally on a
        # Fedora 35 container.
        pytest.skip("Skipping flaky Fedora 35 test for now, on CI runs.")
    tree = salt_master.state_tree.base
    with tree.temp_file("orch/subset.sls", orch_sls), tree.temp_file(
        "test.sls", test_sls
    ):
        ret = salt_run_cli.run("state.orchestrate", "orch.subset")
        assert ret.returncode == 0
        targeted = (salt_minion.id, salt_sub_minion.id)
        for entry in ret.data["data"][salt_master.id].values():
            # subset: 1 -> exactly one of the two minions should run the state
            comment = entry["comment"]
            if salt_minion.id in comment:
                assert salt_sub_minion.id not in comment
            elif salt_sub_minion.id in comment:
                assert salt_minion.id not in comment
            else:
                pytest.fail(
                    "None of the targeted minions({}) show up in comment: '{}'".format(
                        ", ".join(targeted), comment
                    )
                )
| 0 | 0 | 0 |
ded002448cfa7e16c5ce5015ad96af4af0573fea | 3,274 | py | Python | modules/aerodyn/ad_BAR_RNAMotion/CreateMotion.py | OpenFAST/openfast-regression | 7892739f47f312ce014711192fd70253ea40c8e8 | [
"Apache-2.0"
] | null | null | null | modules/aerodyn/ad_BAR_RNAMotion/CreateMotion.py | OpenFAST/openfast-regression | 7892739f47f312ce014711192fd70253ea40c8e8 | [
"Apache-2.0"
] | null | null | null | modules/aerodyn/ad_BAR_RNAMotion/CreateMotion.py | OpenFAST/openfast-regression | 7892739f47f312ce014711192fd70253ea40c8e8 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Local
import weio
def vel_bump(time, A=1, half=False):
    """
    Quintic smoothstep "velocity bump": position goes from 0 to A between
    time[0] and time[-1].

    half is False: velocity 0 -> max -> 0 (position reaches A at time[-1])
    half is True : velocity 0 -> max (first half of a bump twice as long/tall)

    Parameters
    ----------
    time : array_like of float
        Sample times; only offsets from time[0] matter.
    A : float
        Total displacement at the end of the bump.
    half : bool
        Evaluate only the accelerating half of the bump.

    Returns
    -------
    x, v, a : ndarray
        Position, velocity and acceleration at each sample time.
    """
    # Bug fix: the original did `time -= time[0]`, mutating the caller's
    # array in place. Work on a shifted copy instead.
    time = np.asarray(time) - time[0]
    T = np.max(time)
    if half:
        # instead of going from t=0 to 1, we go from t=0 to 0.5
        A = 2 * A
        T = T * 2
    t = time / T
    x = A * t**3 * (6 * t**2 - 15 * t + 10)
    v = 1 / T * A * 30 * t**2 * (1 - t)**2
    a = 1 / T**2 * A * 60 * t * (2 * t**2 - 3 * t + 1)
    return x, v, a
# --- Rot Motion
tMax = 10
dt = 0.1
T = 2
time = np.arange(0,tMax+dt/2,dt)
Yaw = np.zeros((len(time), 3)) # angle, velocity, acc
Pitch = np.zeros((len(time), 3)) # angle, velocity, acc
Rot = np.zeros((len(time), 3)) # angle, velocity, acc
# --- First period is one rotation of yaw
I = time <= T
Ip= time > T
x,v,a = vel_bump(time[I], 2*np.pi)
Yaw[I,0]+=x
Yaw[I,1]=v
Yaw[I,2]=a
Yaw[Ip,0]+=Yaw[I,0][-1]
# --- Second period we pitch one rotation
I = np.logical_and(time >= T, time<=2*T)
Ip = time>2*T
x,v,a = vel_bump(time[I], 2*np.pi)
Pitch[I,0]+=x
Pitch[I,1]=v
Pitch[I,2]=a
Pitch[Ip,0]+=Pitch[I,0][-1]
# --- Third period we start rotating
I = np.logical_and(time >= 2*T, time<=3*T)
x,v,a = vel_bump(time[I], np.pi/4, half=True)
Rot[I,0]=x
Rot[I,1]=v
Rot[I,2]=a
# --- Constant RPM for the remaining
I=time>3*T
Rot[I,1]=v[-1]
Rot[I,0]=x[-1]+np.cumsum(dt*Rot[I,1])
# --- Fourth period we yaw with some sine motion
I = np.logical_and(time >= 3*T, time<=4*T)
x,v,a = sine(time[I], np.pi/4)
Yaw[I,0]+=x
Yaw[I,1]=v
Yaw[I,2]=a
# --- Fifth period we pitch with some sine motion
I = np.logical_and(time >= 4*T, time<=5*T)
x,v,a = sine(time[I], np.pi/6)
Pitch[I,0]+=x
Pitch[I,1]=v
Pitch[I,2]=a
# ---
data = np.column_stack((time, Rot))
df = pd.DataFrame( data=data, columns=['time_[s]', 'azimuth_[rad]','omega_[rad/s]','rotacc_[rad/s^2]'])
df.to_csv('RotMotion.csv', index=False, sep=',', float_format='%10.6f')
data = np.column_stack((time, Yaw))
df = pd.DataFrame( data=data, columns=['time_[s]', 'yaw_[rad]','yaw_rate_[rad/s]','yaw_acc_[rad/s^2]'])
df.to_csv('YawMotion.csv', index=False, sep=',', float_format='%10.6f')
data = np.column_stack((time, Pitch))
df = pd.DataFrame( data=data, columns=['time_[s]', 'pitch_[rad]','pitch_rate_[rad/s]','pitch_acc_[rad/s^2]'])
df.to_csv('PitchMotion.csv', index=False, sep=',', float_format='%10.6f')
# fig,ax = plt.subplots(1, 1, sharey=False, figsize=(6.4,4.8)) # (6.4,4.8)
# fig.subplots_adjust(left=0.12, right=0.95, top=0.95, bottom=0.11, hspace=0.20, wspace=0.20)
# ax.plot(time, data[:,1] , label='x')
# ax.plot(time, data[:,2] , label='v')
# ax.plot(time, np.concatenate(([0],np.diff(data[:,1])/dt)),'--', label='v2')
# ax.plot(time, data[:,3] , label='a')
# ax.plot(time, np.concatenate(([0],np.diff(data[:,2])/dt)),'--', label='a2')
# ax.set_xlabel('')
# ax.set_ylabel('')
# ax.legend()
# plt.show()
# #
| 25.184615 | 110 | 0.576359 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Local
import weio
def vel_bump(time, A=1, half=False):
    """
    Quintic smoothstep "velocity bump": position goes from 0 to A between
    time[0] and time[-1].

    half is False: velocity 0 -> max -> 0 (position reaches A at time[-1])
    half is True : velocity 0 -> max (first half of a bump twice as long/tall)

    Parameters
    ----------
    time : array_like of float
        Sample times; only offsets from time[0] matter.
    A : float
        Total displacement at the end of the bump.
    half : bool
        Evaluate only the accelerating half of the bump.

    Returns
    -------
    x, v, a : ndarray
        Position, velocity and acceleration at each sample time.
    """
    # Bug fix: the original did `time -= time[0]`, mutating the caller's
    # array in place. Work on a shifted copy instead.
    time = np.asarray(time) - time[0]
    T = np.max(time)
    if half:
        # instead of going from t=0 to 1, we go from t=0 to 0.5
        A = 2 * A
        T = T * 2
    t = time / T
    x = A * t**3 * (6 * t**2 - 15 * t + 10)
    v = 1 / T * A * 30 * t**2 * (1 - t)**2
    a = 1 / T**2 * A * 60 * t * (2 * t**2 - 3 * t + 1)
    return x, v, a
def sine(time, A=1):
    """
    Sinusoidal motion of amplitude A sampled at `time`.

    With T = time[-1] - time[0], t = (time - time[0]) / T and
    omega = 2*pi/T, returns:
        x = A*sin(omega*t)
        v = (1/T) * omega * A * cos(omega*t)        (dx/dtime)
        a = (1/T**2) * -omega**2 * A * sin(omega*t) (dv/dtime)

    NOTE(review): omega multiplies the *normalized* time t, so the segment
    spans omega*1 = 2*pi/T radians rather than a full cycle. Preserved
    from the original implementation -- confirm this is intended.
    """
    # Bug fix: the original did `time -= time[0]`, mutating the caller's
    # array in place. Work on a shifted copy instead.
    time = np.asarray(time) - time[0]
    T = np.max(time)
    omega = 2 * np.pi / T
    t = time / T
    x = A * np.sin(omega * t)
    v = 1 / T * omega * A * np.cos(omega * t)
    a = 1 / T**2 * -omega**2 * A * np.sin(omega * t)
    return x, v, a
# --- Rot Motion
# Build three (len(time) x 3) arrays of [angle, rate, acceleration] samples
# for yaw, pitch and rotor azimuth over a 10 s timeline split into 2 s
# periods, then write each channel to CSV (angles in rad, rates in rad/s).
# NOTE(review): the segment masks below use inclusive bounds (<= and >=),
# so the sample at each period boundary belongs to two segments and the
# later assignment wins; values look continuous there, but confirm intent.
tMax = 10
dt = 0.1
T = 2
time = np.arange(0,tMax+dt/2,dt)
Yaw = np.zeros((len(time), 3)) # angle, velocity, acc
Pitch = np.zeros((len(time), 3)) # angle, velocity, acc
Rot = np.zeros((len(time), 3)) # angle, velocity, acc
# --- First period is one rotation of yaw
I = time <= T
Ip= time > T
x,v,a = vel_bump(time[I], 2*np.pi)
Yaw[I,0]+=x
Yaw[I,1]=v
Yaw[I,2]=a
# hold the final yaw angle for the remainder of the timeline
Yaw[Ip,0]+=Yaw[I,0][-1]
# --- Second period we pitch one rotation
I = np.logical_and(time >= T, time<=2*T)
Ip = time>2*T
x,v,a = vel_bump(time[I], 2*np.pi)
Pitch[I,0]+=x
Pitch[I,1]=v
Pitch[I,2]=a
# hold the final pitch angle for the remainder of the timeline
Pitch[Ip,0]+=Pitch[I,0][-1]
# --- Third period we start rotating
I = np.logical_and(time >= 2*T, time<=3*T)
x,v,a = vel_bump(time[I], np.pi/4, half=True)
Rot[I,0]=x
Rot[I,1]=v
Rot[I,2]=a
# --- Constant RPM for the remaining
I=time>3*T
Rot[I,1]=v[-1]
# integrate the constant rate so azimuth keeps growing from its last value
Rot[I,0]=x[-1]+np.cumsum(dt*Rot[I,1])
# --- Fourth period we yaw with some sine motion
I = np.logical_and(time >= 3*T, time<=4*T)
x,v,a = sine(time[I], np.pi/4)
Yaw[I,0]+=x
Yaw[I,1]=v
Yaw[I,2]=a
# --- Fifth period we pitch with some sine motion
I = np.logical_and(time >= 4*T, time<=5*T)
x,v,a = sine(time[I], np.pi/6)
Pitch[I,0]+=x
Pitch[I,1]=v
Pitch[I,2]=a
# --- Write one CSV per motion channel: time + angle/rate/acceleration
data = np.column_stack((time, Rot))
df = pd.DataFrame( data=data, columns=['time_[s]', 'azimuth_[rad]','omega_[rad/s]','rotacc_[rad/s^2]'])
df.to_csv('RotMotion.csv', index=False, sep=',', float_format='%10.6f')
data = np.column_stack((time, Yaw))
df = pd.DataFrame( data=data, columns=['time_[s]', 'yaw_[rad]','yaw_rate_[rad/s]','yaw_acc_[rad/s^2]'])
df.to_csv('YawMotion.csv', index=False, sep=',', float_format='%10.6f')
data = np.column_stack((time, Pitch))
df = pd.DataFrame( data=data, columns=['time_[s]', 'pitch_[rad]','pitch_rate_[rad/s]','pitch_acc_[rad/s^2]'])
df.to_csv('PitchMotion.csv', index=False, sep=',', float_format='%10.6f')
# Optional debugging plot, kept disabled:
# fig,ax = plt.subplots(1, 1, sharey=False, figsize=(6.4,4.8)) # (6.4,4.8)
# fig.subplots_adjust(left=0.12, right=0.95, top=0.95, bottom=0.11, hspace=0.20, wspace=0.20)
# ax.plot(time, data[:,1] , label='x')
# ax.plot(time, data[:,2] , label='v')
# ax.plot(time, np.concatenate(([0],np.diff(data[:,1])/dt)),'--', label='v2')
# ax.plot(time, data[:,3] , label='a')
# ax.plot(time, np.concatenate(([0],np.diff(data[:,2])/dt)),'--', label='a2')
# ax.set_xlabel('')
# ax.set_ylabel('')
# ax.legend()
# plt.show()
# #
| 228 | 0 | 23 |
476bfdea3b5aa53f26a2194456c20aecdb4920ed | 79 | py | Python | data/datasets/utils/__init__.py | qinwang-ai/Contact-Distil | 5e98389de70e0d9c4d16bd91ca1326689dc220a6 | [
"Apache-2.0"
] | null | null | null | data/datasets/utils/__init__.py | qinwang-ai/Contact-Distil | 5e98389de70e0d9c4d16bd91ca1326689dc220a6 | [
"Apache-2.0"
] | null | null | null | data/datasets/utils/__init__.py | qinwang-ai/Contact-Distil | 5e98389de70e0d9c4d16bd91ca1326689dc220a6 | [
"Apache-2.0"
] | null | null | null | from .batch_converter import BatchConverter
from .data_reader import DataReader | 39.5 | 43 | 0.886076 | from .batch_converter import BatchConverter
from .data_reader import DataReader | 0 | 0 | 0 |
29c5a6df6defacf45ddb0a6ce456af9940b51621 | 3,572 | py | Python | example_scripts/dirLoader.py | KingsPM/pySQVD | e882e4c3a8d57226c124b52404898e92c9a1bb64 | [
"MIT"
] | null | null | null | example_scripts/dirLoader.py | KingsPM/pySQVD | e882e4c3a8d57226c124b52404898e92c9a1bb64 | [
"MIT"
] | null | null | null | example_scripts/dirLoader.py | KingsPM/pySQVD | e882e4c3a8d57226c124b52404898e92c9a1bb64 | [
"MIT"
] | null | null | null | import os
import re
import sys
import time
from pysqvd import SQVD
'''
Simple loading script from directory structure
root/<group>/workflow/panelid+version/sample/BAM+VCF+BEDGRAPH
'''
# CLI entry point. Usage: python dirLoader.py <DIRECTORY> [dwell_seconds]
# Endpoint and credentials come from SQVDUSER/SQVDPASS/SQVDHOST env vars.
if __name__ == "__main__":
    # grab username and password
    user = os.environ.get("SQVDUSER", default="admin")
    passwd = os.environ.get("SQVDPASS", default="Kings123")
    host = os.environ.get("SQVDHOST", default="localhost:3000/sqvd")
    try:
        # NOTE(review): `assert` is stripped under `python -O`; these
        # argument checks would then silently pass.
        assert user and passwd and host
        root = sys.argv[1].rstrip('/')
        assert os.path.isdir(root)
    except Exception:
        # missing/invalid CLI argument -> print usage and exit normally
        print("""
        python dirLoader.py <DIRECTORY>
        The directory structure must be like GROUP/WORKFLOW/TESTANDVERSION/SAMPLE/files.
        eg. genetics/dna_somatic/SWIFT1/ACCRO/*.(vcf.gz|bam|bed|bedgraph)
        Ensure SQVDUSER, SQVDPASS, SQVDHOST env variables are set!
        """)
    else:
        # dwell time between directories
        dwell = 0
        try:
            dwell = int(sys.argv[2])
        except Exception:
            # optional second argument absent or non-numeric -> default 0
            pass
        main(host, user, passwd, root, dwell)
| 40.590909 | 92 | 0.510078 | import os
import re
import sys
import time
from pysqvd import SQVD
'''
Simple loading script from directory structure
root/<group>/workflow/panelid+version/sample/BAM+VCF+BEDGRAPH
'''
def main(host, user, passwd, directory, dwell_time):
    """
    Walk `directory` and upload every GROUP/WORKFLOW/PANELvN/SAMPLE leaf
    folder to SQVD as a study.

    For each directory exactly four levels below `directory` whose panel
    component looks like e.g. "SWIFT1", the recognised result files
    (json/bam/vcf.gz/bed/bedgraph/bw/pdf) are uploaded under a study named
    "<sample>_<panel>". If creating the study fails (treated as "already
    exists"), the upload is skipped.

    Parameters
    ----------
    host, user, passwd : str
        SQVD API endpoint and credentials.
    directory : str
        Root of the directory tree to scan.
    dwell_time : int
        Seconds to sleep after each processed study.
    """
    # configure the API connection; the context manager logs in and out
    sqvd = SQVD(username=user, password=passwd, host=host)
    with sqvd:
        for root, _dirs, files in os.walk(directory, topdown=False):
            # path relative to the scan root, split into its 4 components
            parts = root[len(directory):].strip('/').split("/")
            if len(parts) != 4:
                continue
            # collect recognised result files, grouped by extension
            jsns = [f for f in files if f.endswith('.json')]
            bams = [f for f in files if f.endswith('.bam')]
            vcfs = [f for f in files if f.endswith('.vcf.gz')]
            beds = [f for f in files if f.endswith('.bed')]
            bedg = [f for f in files if f.endswith('.bedgraph')]
            bigw = [f for f in files if f.endswith('.bw')]
            pdfs = [f for f in files if f.endswith('.pdf')]
            upload_files = [f'{root}/{f}' for f in
                            jsns + bams + vcfs + beds + bedg + bigw + pdfs]
            # panel directory must be letters followed by a version number
            group, workflow, panel, sample = parts
            m = re.match(r'([A-Za-z]+)(\d+)$', panel)
            if not (m and upload_files):
                continue
            panel_name, panel_version = m.groups()
            study_name = f'{sample}_{panel}'
            study_object = {
                'study_name': study_name,
                'sample_id': sample,
                'panel_id': panel_name,
                'panel_version': int(panel_version),
                'workflow': workflow,
                'subpanels': [],
                'group': group,
                'dataset_name': ""
            }
            print(f"## {study_name} ({len(upload_files)} files)")
            # create or fetch study (by name)
            try:
                study = sqvd.createStudy(study_object)
                sqvd.upload(upload_files, study_name, {"skip": "processing"})
                print(f'Uploaded {len(upload_files)} files for {study_name}')
            except Exception:
                # was a bare `except:` - narrowed so Ctrl-C/SystemExit abort.
                # NOTE(review): any failure is treated as "study exists";
                # genuine upload errors are also swallowed here.
                studies = sqvd.rest('study', data={'study_name': study_name})
                study = studies['data'][0]
                print(f"Study {study_name} already exists! -> Skipping")
            time.sleep(dwell_time)
# CLI entry point. Usage: python dirLoader.py <DIRECTORY> [dwell_seconds]
# Endpoint and credentials come from SQVDUSER/SQVDPASS/SQVDHOST env vars.
if __name__ == "__main__":
    # grab username and password
    user = os.environ.get("SQVDUSER", default="admin")
    passwd = os.environ.get("SQVDPASS", default="Kings123")
    host = os.environ.get("SQVDHOST", default="localhost:3000/sqvd")
    try:
        # NOTE(review): `assert` is stripped under `python -O`; these
        # argument checks would then silently pass.
        assert user and passwd and host
        root = sys.argv[1].rstrip('/')
        assert os.path.isdir(root)
    except Exception:
        # missing/invalid CLI argument -> print usage and exit normally
        print("""
        python dirLoader.py <DIRECTORY>
        The directory structure must be like GROUP/WORKFLOW/TESTANDVERSION/SAMPLE/files.
        eg. genetics/dna_somatic/SWIFT1/ACCRO/*.(vcf.gz|bam|bed|bedgraph)
        Ensure SQVDUSER, SQVDPASS, SQVDHOST env variables are set!
        """)
    else:
        # dwell time between directories
        dwell = 0
        try:
            dwell = int(sys.argv[2])
        except Exception:
            # optional second argument absent or non-numeric -> default 0
            pass
        main(host, user, passwd, root, dwell)
| 2,446 | 0 | 23 |
eaa75f603aafdb6e54c92923660e65cf2356b31b | 147 | py | Python | pathfile.py | M4TH1EU/john-the-ia | 35db1430350e2144695baeef17a67819b9724497 | [
"Unlicense"
] | 2 | 2021-05-05T20:49:55.000Z | 2021-05-05T21:03:02.000Z | pathfile.py | M4TH1EU/john-the-ia | 35db1430350e2144695baeef17a67819b9724497 | [
"Unlicense"
] | null | null | null | pathfile.py | M4TH1EU/john-the-ia | 35db1430350e2144695baeef17a67819b9724497 | [
"Unlicense"
] | null | null | null | # THIS FILE DON'T DO ANYTHING EXCEPT GIVE ME THE PROJECT PATH (i'm listening for better ideas)
# Entry point: intentionally a no-op placeholder (see header comment above).
if __name__ == '__main__':
    print("DO NOTHING")
| 29.4 | 94 | 0.714286 | # THIS FILE DON'T DO ANYTHING EXCEPT GIVE ME THE PROJECT PATH (i'm listening for better ideas)
# Entry point: intentionally a no-op placeholder (see header comment above).
if __name__ == '__main__':
    print("DO NOTHING")
| 0 | 0 | 0 |
12cddfd703815a58c0e703ae7e1becdf862baa86 | 553 | py | Python | PythonClient/astrodrone/utils.py | jeremyhardy/AirSim | ddebcf1d9ad97dd93e248bcfd411e9cdb00e783b | [
"MIT"
] | null | null | null | PythonClient/astrodrone/utils.py | jeremyhardy/AirSim | ddebcf1d9ad97dd93e248bcfd411e9cdb00e783b | [
"MIT"
] | null | null | null | PythonClient/astrodrone/utils.py | jeremyhardy/AirSim | ddebcf1d9ad97dd93e248bcfd411e9cdb00e783b | [
"MIT"
] | null | null | null | import airsim
| 26.333333 | 95 | 0.717902 | import airsim
def qnorm(quaternion):
    """Return `quaternion` normalized to unit length via airsim's sgn()."""
    # NOTE(review): `get_length() != 1` is an exact float comparison; if
    # sgn() never yields a length of exactly 1.0 this loop will not
    # terminate. A tolerance check (e.g. math.isclose) would be safer --
    # confirm against the airsim Quaternionr API before changing.
    while(quaternion.get_length()!=1):
        quaternion = quaternion.sgn()
    return quaternion
def quat2vec(quaternion):
    """Drop the scalar part: build a Vector3r from the quaternion's x/y/z."""
    return airsim.Vector3r(quaternion.x_val, quaternion.y_val, quaternion.z_val)
def vec2quat(vector):
    """Embed a Vector3r as a pure quaternion (scalar part w = 0)."""
    return airsim.Quaternionr(vector.x_val, vector.y_val, vector.z_val, 0)
def mess2quat(message):
    """Build a Quaternionr from any message carrying x/y/z/w_val fields."""
    return airsim.Quaternionr(message.x_val, message.y_val, message.z_val, message.w_val)
| 431 | 0 | 101 |
4e69aacfba71738ff06579bf4a85e097b5391a41 | 20,553 | py | Python | vespa/interfaces/inline/philips/run_inline_vespa_philips.py | vespa-mrs/vespa | 6d3e84a206ec427ac1304e70c7fadf817432956b | [
"BSD-3-Clause"
] | null | null | null | vespa/interfaces/inline/philips/run_inline_vespa_philips.py | vespa-mrs/vespa | 6d3e84a206ec427ac1304e70c7fadf817432956b | [
"BSD-3-Clause"
] | 4 | 2021-04-17T13:58:31.000Z | 2022-01-20T14:19:57.000Z | vespa/interfaces/inline/philips/run_inline_vespa_philips.py | vespa-mrs/vespa | 6d3e84a206ec427ac1304e70c7fadf817432956b | [
"BSD-3-Clause"
] | 3 | 2021-06-05T16:34:57.000Z | 2022-01-19T16:13:22.000Z |
# Python modules
import os
import io
import base64
import traceback
import datetime
import pathlib
# 3rd party modules
import matplotlib
matplotlib.use('Agg')
import numpy as np
from pydicom import Dataset, FileDataset, dcmread, read_file
# Our modules
import vespa.interfaces.inline.vespa_inline_engine as vie
import vespa.analysis.figure_layouts as figure_layouts
import vespa.analysis.fileio.util_philips as util_philips
import vespa.common.util.time_ as util_time
import vespa.common.util.misc as util_misc
from vespa.interfaces.inline.vespa_inline_engine import VespaInlineError, VespaInlineSettings
VERSION = '0.1.0'
#==============================================================================
def run(settings, verbose=True):
    """
    There are 4 processing steps:
      1. collate all files from specified 'datadir', and sort into 'water', 'metab' etc.
      2. load file names into VIE dataset_filename and preset_filename dicts
      3. run files through the Vespa-Analysis inline engine
      3a. (optional) save provenance XML file and/or PNG/PDF images for debugging.
      4. output 'screenshot image' gets put into a pydicom secondary capture RGB DICOM

    On failure an error-report PNG replaces the normal results; no PDF
    exists in that case, so the DICOM-PDF output (step 4) is skipped.
    """
    msg = ''

    # these are set here for error checking reasons
    fdatasets = {'metab':None, 'water':None, 'ecc':None, 'coil':None}
    fpresets  = {'metab':None, 'water':None, 'ecc':None, 'coil':None}

    dcm_cur = ''

    # bug fix - initialize both buffers: the error paths below only produce
    # a PNG report, and previously left pdf_buf undefined (NameError when
    # settings.save_dcm_pdf was True after a failure).
    png_buf = None
    pdf_buf = None

    try:
        settings.vespa_version = util_misc.get_vespa_version()+'-VIE'   # not really up to the user

        # ---------------------------------------------------------------
        # 1. Get filenames from known DATADIR directory and sort
        #    - may move to separate module in future as formats accrue

        if settings.dataformat == 'philips_press28_dicom':

            settings.import_class = 'import_philips_dicom'

            mrs_files = []
            other_files = []
            for dirpath, dirnames, filenames in os.walk(settings.data_dir):
                for filename in filenames:
                    ftest = os.path.join(dirpath, filename)
                    if vie.is_dicom(ftest):
                        # defer bulk data so the MRS check stays cheap
                        dataset = read_file(ftest, defer_size=1024)
                        if util_philips.is_mrs_dicom(dataset):
                            mrs_files.append(ftest)
                            if verbose: print('Found DICOM MRS file - '+ftest)
                    else:
                        other_files.append(ftest)

            if (len(mrs_files) != 2):
                msg = 'Exception (do_main): Wrong number of DICOM MRS files found in - '+settings.data_dir
                if verbose: print(msg)
                raise VespaInlineError(msg)

            # convention: first found MRS file is water reference, second is metab
            fname_metab, fname_water, fname_ecc, fname_coil = None, None, None, None
            fname_water = mrs_files[0]
            fname_metab = mrs_files[1]

            fname_metab_preset, fname_water_preset, fname_ecc_preset, fname_coil_preset = None, None, None, None
            fname_metab_preset = os.path.join(settings.preset_dir,'preset_philips_dicom_press28_metab.xml')
            fname_water_preset = os.path.join(settings.preset_dir,'preset_philips_dicom_press28_water.xml')

            fname_mmol_basis = None

            # keep the metab DICOM header as template for the output DICOM
            dcm_cur = dcmread(fname_metab)

        elif settings.dataformat == 'philips_slaser30_cmrr_spar':

            settings.import_class = 'import_philips_spar'

            mrs_files = []
            other_files = []
            for dirpath, dirnames, filenames in os.walk(settings.data_dir):
                for filename in filenames:
                    ftest = os.path.join(dirpath, filename)
                    # bug fix - Path.suffix includes the leading dot, so the
                    # old list ['spar','sdat','.SPAR','.SDAT'] silently
                    # missed lowercase .spar/.sdat files entirely.
                    if pathlib.Path(ftest).suffix.lower() in ('.spar', '.sdat'):
                        mrs_files.append(ftest)
                        if verbose: print('Found Spar/Sdat MRS file - '+ftest)
                    else:
                        other_files.append(ftest)

            if len(mrs_files) != 4:
                msg = 'Exception (do_main): Wrong number of Spar/Sdat datasets found in - '+settings.data_dir
                if verbose: print(msg)
                raise VespaInlineError(msg)

            fname_metab, fname_water, fname_ecc, fname_coil = None, None, None, None
            for fname in mrs_files:
                if '_act.spar' in fname.lower():
                    fname_metab = fname
                if '_ref.spar' in fname.lower():
                    fname_water = fname
            if fname_metab is None: msg += '\nException (do_main): Metabolite data Spar/Sdat not found in - '+settings.data_dir
            if fname_water is None: msg += '\nException (do_main): Water reference Spar/Sdat not found in - '+settings.data_dir
            if msg:
                if verbose: print(msg)
                raise VespaInlineError(msg)

            fname_metab_preset, fname_water_preset, fname_ecc_preset, fname_coil_preset = None, None, None, None
            fname_metab_preset = os.path.join(settings.preset_dir,'preset_philips_berrington_spar_metab01.xml')
            fname_water_preset = os.path.join(settings.preset_dir,'preset_philips_berrington_spar_water01.xml')
            # fname_metab_preset = os.path.join(settings.preset_dir,'preset_philips_slaser30_cmrr_spar_metab.xml')
            # fname_water_preset = os.path.join(settings.preset_dir,'preset_philips_slaser30_cmrr_spar_water.xml')

            fname_mmol_basis = os.path.join(settings.preset_dir,'basis_mmol_simulated_from_seadMM2014_philips_128mhz_dataset.xml')

            dcm_cur = ''

        else:
            # robustness - previously an unknown dataformat fell through and
            # surfaced later as a confusing NameError; fail explicitly here.
            msg = 'Exception (do_main): Unknown dataformat - '+str(settings.dataformat)
            if verbose: print(msg)
            raise VespaInlineError(msg)

        # ----------------------------------------------------------
        # 2. load filenames into parameter dicts

        fdatasets['metab'] = fname_metab
        fdatasets['water'] = fname_water
        fdatasets['ecc']   = fname_ecc
        fdatasets['coil']  = fname_coil

        fpresets['metab'] = fname_metab_preset
        fpresets['water'] = fname_water_preset
        fpresets['ecc']   = fname_ecc_preset
        fpresets['coil']  = fname_coil_preset

        fbasis_mmol = fname_mmol_basis

        # ----------------------------------------------------------
        # 3. Run the processing

        params = [fdatasets, fpresets, fbasis_mmol, settings]
        png_buf, pdf_buf, _ = vie.analysis_kernel( params, verbose=verbose )
        buf_shape = 10.24*settings.png_dpi, 10.24*settings.png_dpi, 3

    except VespaInlineError as e:
        if verbose:
            print('Exception: VespaInlineError - see error report ', str(e))
        trace = ''          # returned in the VespaInlineError msg
        png_buf = do_error_processing(e, fdatasets, fpresets, trace, settings)
        buf_shape = 10.24*settings.png_dpi, 10.24*settings.png_dpi, 3

    except Exception as e:
        if verbose:
            print('Exception: GeneralException - see error report')
        trace = traceback.format_exc()
        png_buf = do_error_processing(e, fdatasets, fpresets, trace, settings)
        buf_shape = 10.24*settings.png_dpi, 10.24*settings.png_dpi, 3

    # ----------------------------------------------------------
    # 4. dump dcm_buf to a DICOM RGB file ....

    if settings.save_dcm:
        dcm_out = rgb2dcm(png_buf, dcm_cur, buf_shape)
        dcm_out.save_as(settings.dcm_fname)

    # bug fix - pdf_buf is None after an error path; guard instead of
    # raising NameError (was never assigned on failure before).
    if settings.save_dcm_pdf and pdf_buf is not None:
        dcm_pdf_out = pdf2dcm(pdf_buf, dcm_cur)
        dcm_pdf_out.save_as(settings.dcm_pdf_fname)

    if verbose: print('fname_dicom = ' + settings.dcm_fname)

    return
SSC_TEMPLATE = b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABESUNNAgAAAFVMBACSAAAAAgABAE9CAAACAAAAAAECAAIAVUkaADEuMi44NDAuMTAwMDguNS4xLjQuMS4xLjcAAgADAFVJAAACABAAVUkUADEuMi44NDAuMTAwMDguMS4yLjEAAgASAFVJHgAxLjMuNDYuNjcwNTg5LjExLjAuMC41MS40LjU2LjECABMAU0gQAFBoaWxpcHMgTVIgNTYuMSAIAAUAQ1MKAElTT19JUiAxMDAIAAgAQ1MAAAgAEgBEQQAACAATAFRNAAAIABQAVUkWADEuMy40Ni42NzA1ODkuMTEuODkuNQAIABYAVUkaADEuMi44NDAuMTAwMDguNS4xLjQuMS4xLjcACAAYAFVJAAAIACAAREEIADIwMTcwMTAxCAAhAERBAAAIACIAREEAAAgAIwBEQQAACAAwAFRNBgAxMTIyMzQIADEAVE0AAAgAMgBUTQAACAAzAFRNAAAIAFAAU0gAAAgAYABDUwIATVIIAGQAQ1MEAFdTRCAIAHAATE8AAAgAgABMTwAACACBAFNUAAAIAJAAUE4AAAgAEBBTSAAACAAwEExPAAAIADIQU1EAAP/////+/93gAAAAAAgAPhBMTwAACABAEExPAAAIAFAQUE4AAAgAcBBQTgAACACAEExPAAAIAJAQTE8IAEluZ2VuaWEgCAAQEVNRAAD//////v8A4P////8IAFARVUkYADEuMi44NDAuMTAwMDguMy4xLjIuMy4xAAgAVRFVSTwAMS4zLjQ2LjY3MDU4OS4xMS4xMDUxNjgwOTcxLjQxNTQzOTY3NjcuMjUzMjI2MzUyOC4yOTkyNjM0MDM1/v8N4AAAAAD+/93gAAAAAAgAERFTUQAA//////7/AOD/////CAAFAENTCgBJU09fSVIgMTAwCAASAERBCAAyMDIwMDQwMQgAEwBUTQoAMDcxMzIyLjM0NggAFABVSTwAMS4zLjQ2LjY3MDU4OS4xMS43NDE3OTIyNjEuMzY5NTY2OTE5Mi40MTg2NjIxNTEzLjQwODg2Njg5NDkACABQEVVJGAAxLjIuODQwLjEwMDA4LjMuMS4yLjMuMwAIAFURVUk6ADEuMy40Ni42NzA1ODkuMTEuMjUyNjE3MjA3OS41NDUzMTgxMjEuMTUxMjgwNjcwMy4yOTEyNjY5MTIgABMASVMCADAgBSAUAExPGgBQaGlsaXBzIE1SIEltYWdpbmcgREQgMDA1IAUgBBRTUwIAAQAFIAYUU1MCAAEA/v8N4AAAAAD+/93gAAAAABAAEABQTgAAEAAgAExPAAAQADAAREEAABAAQABDUwAAEAAQEEFTBAAwMzdZEAAwEERTAAAQAAAgTE8AABAAECFMTwAAEABgIVNIAAAQAIAhU0gAABAAsCFMVAAAEADAIVVTAgAAABAAAEBMVAAAGAAQAExPAAAYABUAQ1MAABgAABBMTwYAMDAwNzEgGAASEERBAAAYABQQVE0AABgAFhBMTwgAUGhpbGlwcyAYABgQTE8IAEluZ2VuaWEgGAAZEExPBgA1LjYuMSAYACAQTE8GADUuNi4xIBgAIxBMTwAAGAAwEExPAAAgAA0AVUkAACAADgBVSQAAIAAQAFNIAAAgABEASVMAACAAEgBJUwAAIAATAElTAgAtMSAAIABDUwAAIABgAENTAAAgAABATFQAACgAAgBVUwIAAwAoAAQAQ1MEAFJHQiAoAAYAVVMCAAAAKAAQAFVTAgAVBigAEQBVUwIAxAgoAAABVVMCAAgAKAABAVVTAgAIACgAAgFVUwIABwAoAAMBVVMCAAA
AKAAQIUNTAgAwMDIAMhBQTgAAMgAzEExPAAAyAGAQTE8AADIAcBBMTwAAMgAAQExUAAA4AFAATE8AADgAAAVMTwAAQAAGAFBOAABAAEECQUUOAFJBRFJFU0VBUkNIM1QgQABCAlNIAABAAEMCU0gAAEAARAJEQQgAMjAxNzAxMDFAAEUCVE0GADExMjIzNEAAUAJEQQgAMjAxNzAxMDFAAFECVE0GADExMjIzNEAAUgJDUwAAQABTAlNICgA1MzcyOTQxNTMgQABUAkxPAABAAFUCTE8AAEAAYAJTUQAA//////7/AOD/////CAAAAVNICgBVTkRFRklORUQgCAACAVNICgBVTkRFRklORUQgCAAEAUxPBgB4eHh4eCAIAAsBQ1MCAE4g/v8N4AAAAAD+/93gAAAAAEAAgAJTVAAAQAABEFNIAABAAAIQTE8AAEAAAxBTSAAAQAAEEExPAABAAAUQTE8AAEAAABRMVAAAQAABIExPAABAAAQgREEIADIwMTcwMTAxQAAFIFRNCgAxMTIyMzMuOTUxQAAJIFNIAABAABAgU0gAAEAAACRMVAAAASAQAExPFgBQaGlsaXBzIEltYWdpbmcgREQgMDAxASAdEElTAgAyIAEgThBDUwAAASBhEENTAgBOIAEgYhBDUwIATiABIGMQQ1MKAEVMU0VXSEVSRSABIHcQQ1MAAAEgehBGTAQAAAAAAAEgexBJUwIAOCABIMgQTE8IAEdvQnJhaW4gASDMEFNUAAAFIBAATE8aAFBoaWxpcHMgTVIgSW1hZ2luZyBERCAwMDEgBSARAExPGgBQaGlsaXBzIE1SIEltYWdpbmcgREQgMDAyIAUgEgBMTxoAUGhpbGlwcyBNUiBJbWFnaW5nIEREIDAwMyAFIBMATE8aAFBoaWxpcHMgTVIgSW1hZ2luZyBERCAwMDQgBSAUAExPGgBQaGlsaXBzIE1SIEltYWdpbmcgREQgMDA1IAUgFQBMTxoAUGhpbGlwcyBNUiBJbWFnaW5nIEREIDAwNiAFIDcQQ1MCAE4gBSBfEENTCABVTktOT1dOIAUgYBBJUwIALTEFIJkRVUwEAAAAAAAFIAASVUwEAAEAAAAFIAESVUwEAAAAAAAFIBMSVUwEAAEAAAAFIEUSU1MCAAEABSBJElNTAgAAAAUgURJTUwIAAAAFIFISU1MCAAAABSBTElNTAgAAAAUgVhJTUwIAAQAFIIITVUwEAAAAAAAFIJETUE4AAAUglxNMTwAABSABFFVMBAABAAAABSADFFVMBAAAAAAABSAEFFNTAgABAAUgBhRTUwIAAQAFIA8UU1EAAP/////+/wDg//////7/DeAAAAAA/v/d4AAAAAAFICoUQ1MIAElOSVRJQUwgBSArFENTCABJTklUSUFMIAUgLBRDUwgASU5JVElBTCAFIC0UQ1MKAENPTVBMRVRFRCAFIDoUTFQaAGRhdGFkZWZzICRSZXZpc2lvbjogNTYuMCAkBSB0FURTAgAwIAUgdRVEUwIAMCAFIHYVTFQAAAUgeBVDUwQATk9ORQUggRVDUwoARklSU1RMRVZFTAUgghVJUwIAMCAFIIMVTFQAAAUghRVEUwIAMCAFIIYVTFQIAEdhdXNzL2NtBSCHFURTAgAwIOB/EABPVwAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA='
# Script entry point: configure a VespaInlineSettings with all paths
# relative to this file's directory, then run the inline pipeline.
if __name__ == '__main__':
    # NOTE(review): fname_tstamp is computed but never used below --
    # confirm before removing.
    fname_tstamp = util_time.filename_timestamp() # yyyymmdd.hhmmss.usecs
    # get defaults
    settings = VespaInlineSettings()
    # reset relative to 'this' filename, not to vespa_inline_engine location
    settings.base_path = os.path.dirname(os.path.abspath(__file__))
    settings.data_dir = os.path.join(settings.base_path, 'datadir')
    settings.preset_dir = os.path.join(settings.base_path, 'presets')
    settings.output_dir = os.path.join(settings.base_path, 'output')
    settings.debug_dir = os.path.join(settings.base_path, 'debug')
    # select which acquisition format run() should expect in data_dir
    settings.dataformat = 'philips_press28_dicom'
    #settings.dataformat = 'philips_slaser30_cmrr_spar'
    # which debug/result artifacts to write
    settings.save_err = True
    settings.save_xml = True
    settings.save_pdf = True
    settings.save_png = True
    settings.save_dcm = True
    settings.save_dcm_pdf = True
    # make output filenames unique (timestamped) rather than overwriting
    settings.err_fname_unique = True
    settings.xml_fname_unique = True
    settings.pdf_fname_unique = True
    settings.png_fname_unique = True
    settings.dcm_fname_unique = True
    settings.dcm_pdf_fname_unique = True
    settings.err_fname = os.path.join(settings.debug_dir, "debug_vespa_viff.png")
    settings.xml_fname = os.path.join(settings.debug_dir, "debug_xml_last_run.xml")
    settings.pdf_fname = os.path.join(settings.debug_dir, "debug_pdf_philips.pdf")
    settings.png_fname = os.path.join(settings.output_dir, "results_vespa_inline_philips.png")
    settings.dcm_fname = os.path.join(settings.output_dir, "results_vespa_inline_dicom.dcm")
    settings.dcm_pdf_fname = os.path.join(settings.output_dir, "results_vespa_inline_dicom_pdf.dcm")
    # PDF figure layout options
    settings.pdf_plotstyle = 'lcm_multi'
    settings.pdf_file_label = 'Analysis- Philips PRIDE Inline'
    settings.pdf_minppm = 0.5
    settings.pdf_maxppm = 4.2
    settings.pdf_apply_phase = False
    settings.pdf_remove_base = False
    settings.pdf_fontname = 'Courier New'
    settings.pdf_dpi = 300
    settings.pdf_pad_inches = 0.5
    # PNG figure layout options
    settings.png_plotstyle = 'lcm_square'
    settings.png_file_label = 'Analysis- Philips PRIDE Inline'
    settings.png_minppm = 0.5
    settings.png_maxppm = 4.2
    settings.png_apply_phase = False
    settings.png_remove_base = False
    settings.png_fontname = 'Courier New'
    settings.png_dpi = 100
    settings.png_pad_inches = 0.5
    # error-report image options
    settings.err_dpi = 100
    settings.err_pad_inches = 0.5
    settings.debug = False
    run(settings)
# Python modules
import os
import io
import base64
import traceback
import datetime
import pathlib
# 3rd party modules
import matplotlib
matplotlib.use('Agg')
import numpy as np
from pydicom import Dataset, FileDataset, dcmread, read_file
# Our modules
import vespa.interfaces.inline.vespa_inline_engine as vie
import vespa.analysis.figure_layouts as figure_layouts
import vespa.analysis.fileio.util_philips as util_philips
import vespa.common.util.time_ as util_time
import vespa.common.util.misc as util_misc
from vespa.interfaces.inline.vespa_inline_engine import VespaInlineError, VespaInlineSettings
VERSION = '0.1.0'
#==============================================================================
def run(settings, verbose=True):
    """
    There are 4 processing steps:
    1. collate all files from specified 'datadir', and sort into 'water', 'metab' etc.
    2. load file names into VIE dataset_filename and preset_filename dicts
    3. run files through the Vespa-Analysis inline engine
    3a. (optional) save provenance XML file and/or PNG/PDF images for debugging.
    4. output 'screenshot image' gets put into a pydicom secondary capture RGB DICOM

    Args:
        settings: VespaInlineSettings instance; directories, output names and
            per-format options are read from it (see __main__ for the fields used).
        verbose (bool): echo progress/diagnostics to stdout.

    Any failure inside step 1-3 is converted into an error-report image via
    do_error_processing(), so step 4 always has a png_buf to write.
    """
    msg = ''
    # these are set here for error checking reasons
    fdatasets = {'metab':None, 'water':None, 'ecc':None, 'coil':None}
    fpresets = {'metab':None, 'water':None, 'ecc':None, 'coil':None}
    dcm_cur = ''
    # FIX: pdf_buf was previously unbound when the engine raised before
    # assigning it, causing a NameError below if settings.save_dcm_pdf is True.
    pdf_buf = None
    try:
        settings.vespa_version = util_misc.get_vespa_version()+'-VIE'  # not really up to the user

        # ---------------------------------------------------------------
        # 1. Get filenames from known DATADIR directory and sort
        # - may move to separate module in future as formats accrue

        if settings.dataformat == 'philips_press28_dicom':
            settings.import_class = 'import_philips_dicom'
            mrs_files = []
            other_files = []
            # Walk data_dir and keep only DICOM files that are MRS objects.
            for dirpath, dirnames, filenames in os.walk(settings.data_dir):
                for filename in filenames:
                    ftest = os.path.join(dirpath, filename)
                    if vie.is_dicom(ftest):
                        dataset = read_file(ftest, defer_size=1024)
                        if util_philips.is_mrs_dicom(dataset):
                            mrs_files.append(ftest)
                            if verbose: print('Found DICOM MRS file - '+ftest)
                        else:
                            other_files.append(ftest)
            # PRESS28 export is expected to contain exactly water + metab.
            if (len(mrs_files) != 2):
                msg = 'Exception (do_main): Wrong number of DICOM MRS files found in - '+settings.data_dir
                if verbose: print(msg)
                raise VespaInlineError(msg)
            fname_metab, fname_water, fname_ecc, fname_coil = None, None, None, None
            # NOTE(review): relies on os.walk() ordering putting water first -
            # confirm this assumption holds for the scanner's export layout.
            fname_water = mrs_files[0]
            fname_metab = mrs_files[1]
            fname_metab_preset, fname_water_preset, fname_ecc_preset, fname_coil_preset = None, None, None, None
            fname_metab_preset = os.path.join(settings.preset_dir,'preset_philips_dicom_press28_metab.xml')
            fname_water_preset = os.path.join(settings.preset_dir,'preset_philips_dicom_press28_water.xml')
            fname_mmol_basis = None
            # keep the metab DICOM header around to seed the output DICOM
            dcm_cur = dcmread(fname_metab)

        elif settings.dataformat == 'philips_slaser30_cmrr_spar':
            settings.import_class = 'import_philips_spar'
            mrs_files = []
            other_files = []
            for dirpath, dirnames, filenames in os.walk(settings.data_dir):
                for filename in filenames:
                    ftest = os.path.join(dirpath, filename)
                    # FIX: pathlib.Path.suffix includes the leading dot, so the
                    # previous entries 'spar'/'sdat' (no dot) could never match
                    # and lowercase '.spar'/'.sdat' files were silently skipped.
                    if pathlib.Path(ftest).suffix in ['.spar','.sdat','.SPAR','.SDAT']:
                        mrs_files.append(ftest)
                        if verbose: print('Found Spar/Sdat MRS file - '+ftest)
                    else:
                        other_files.append(ftest)
            # two SPAR/SDAT pairs expected: metab (_act) and water ref (_ref)
            if len(mrs_files) != 4:
                msg = 'Exception (do_main): Wrong number of Spar/Sdat datasets found in - '+settings.data_dir
                if verbose: print(msg)
                raise VespaInlineError(msg)
            fname_metab, fname_water, fname_ecc, fname_coil = None, None, None, None
            for fname in mrs_files:
                if '_act.spar' in fname.lower():
                    fname_metab = fname
                if '_ref.spar' in fname.lower():
                    fname_water = fname
            if fname_metab is None: msg += '\nException (do_main): Metabolite data Spar/Sdat not found in - '+settings.data_dir
            if fname_water is None: msg += '\nException (do_main): Water reference Spar/Sdat not found in - '+settings.data_dir
            if msg:
                if verbose: print(msg)
                raise VespaInlineError(msg)
            fname_metab_preset, fname_water_preset, fname_ecc_preset, fname_coil_preset = None, None, None, None
            fname_metab_preset = os.path.join(settings.preset_dir,'preset_philips_berrington_spar_metab01.xml')
            fname_water_preset = os.path.join(settings.preset_dir,'preset_philips_berrington_spar_water01.xml')
            # fname_metab_preset = os.path.join(settings.preset_dir,'preset_philips_slaser30_cmrr_spar_metab.xml')
            # fname_water_preset = os.path.join(settings.preset_dir,'preset_philips_slaser30_cmrr_spar_water.xml')
            fname_mmol_basis = os.path.join(settings.preset_dir,'basis_mmol_simulated_from_seadMM2014_philips_128mhz_dataset.xml')
            # no DICOM source for SPAR data; rgb2dcm()/pdf2dcm() substitute a template
            dcm_cur = ''

        # NOTE(review): any other settings.dataformat value falls through with
        # fname_metab undefined and is reported via the generic except below.

        # ----------------------------------------------------------
        # 2. load filenames into parameter dicts

        fdatasets['metab'] = fname_metab
        fdatasets['water'] = fname_water
        fdatasets['ecc'] = fname_ecc
        fdatasets['coil'] = fname_coil
        fpresets['metab'] = fname_metab_preset
        fpresets['water'] = fname_water_preset
        fpresets['ecc'] = fname_ecc_preset
        fpresets['coil'] = fname_coil_preset
        fbasis_mmol = fname_mmol_basis # None

        # ----------------------------------------------------------
        # 3. Run the processing

        params = [fdatasets, fpresets, fbasis_mmol, settings]
        png_buf, pdf_buf, _ = vie.analysis_kernel( params, verbose=verbose )
        # NOTE(review): these are floats (10.24 * dpi); rgb2dcm() writes them
        # straight into Rows/Columns - confirm pydicom coerces them as intended.
        buf_shape = 10.24*settings.png_dpi, 10.24*settings.png_dpi, 3
    except VespaInlineError as e:
        if verbose:
            print('Exception: VespaInlineError - see error report ', str(e))
        trace = '' # returned in the VespaInlineError msg
        png_buf = do_error_processing(e, fdatasets, fpresets, trace, settings)
        buf_shape = 10.24*settings.png_dpi, 10.24*settings.png_dpi, 3
    except Exception as e:
        if verbose:
            print('Exception: GeneralException - see error report')
        trace = traceback.format_exc()
        png_buf = do_error_processing(e, fdatasets, fpresets, trace, settings)
        buf_shape = 10.24*settings.png_dpi, 10.24*settings.png_dpi, 3

    # ----------------------------------------------------------
    # 4. dump dcm_buf to a DICOM RGB file ....

    if settings.save_dcm:
        dcm_out = rgb2dcm(png_buf, dcm_cur, buf_shape)
        dcm_out.save_as(settings.dcm_fname)
    # FIX: guard against pdf_buf being None when an exception short-circuited
    # the engine (the error path only produces a PNG report).
    if settings.save_dcm_pdf and pdf_buf is not None:
        dcm_pdf_out = pdf2dcm(pdf_buf, dcm_cur)
        dcm_pdf_out.save_as(settings.dcm_pdf_fname)
    if verbose: print('fname_dicom = ' + settings.dcm_fname)

    return
def do_error_processing(e, fdatasets, fpresets, trace, settings):
    """Render an exception into an error-report figure and return its RGB bytes.

    Builds the report via figure_layouts.inline_error(), optionally saves it
    to disk as a PNG (settings.save_err), and returns the canvas contents as
    a flat numpy uint8 array for downstream DICOM packaging.
    """
    figures = figure_layouts.inline_error(e, fdatasets, fpresets, trace,
                                          fontname='Courier New',
                                          dpi=settings.err_dpi)
    report_fig = figures[0]
    rgb_bytes = report_fig.canvas.tostring_rgb()
    if settings.save_err:
        base, _ = os.path.splitext(settings.err_fname)
        if settings.err_fname_unique:
            base = base + util_time.filename_timestamp()  # yyyymmdd.hhmmss.usecs
        report_fig.savefig(base + '.png',
                           dpi=settings.err_dpi,
                           pad_inches=settings.err_pad_inches)
    return np.frombuffer(rgb_bytes, dtype=np.uint8)
SSC_TEMPLATE = b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABESUNNAgAAAFVMBACSAAAAAgABAE9CAAACAAAAAAECAAIAVUkaADEuMi44NDAuMTAwMDguNS4xLjQuMS4xLjcAAgADAFVJAAACABAAVUkUADEuMi44NDAuMTAwMDguMS4yLjEAAgASAFVJHgAxLjMuNDYuNjcwNTg5LjExLjAuMC41MS40LjU2LjECABMAU0gQAFBoaWxpcHMgTVIgNTYuMSAIAAUAQ1MKAElTT19JUiAxMDAIAAgAQ1MAAAgAEgBEQQAACAATAFRNAAAIABQAVUkWADEuMy40Ni42NzA1ODkuMTEuODkuNQAIABYAVUkaADEuMi44NDAuMTAwMDguNS4xLjQuMS4xLjcACAAYAFVJAAAIACAAREEIADIwMTcwMTAxCAAhAERBAAAIACIAREEAAAgAIwBEQQAACAAwAFRNBgAxMTIyMzQIADEAVE0AAAgAMgBUTQAACAAzAFRNAAAIAFAAU0gAAAgAYABDUwIATVIIAGQAQ1MEAFdTRCAIAHAATE8AAAgAgABMTwAACACBAFNUAAAIAJAAUE4AAAgAEBBTSAAACAAwEExPAAAIADIQU1EAAP/////+/93gAAAAAAgAPhBMTwAACABAEExPAAAIAFAQUE4AAAgAcBBQTgAACACAEExPAAAIAJAQTE8IAEluZ2VuaWEgCAAQEVNRAAD//////v8A4P////8IAFARVUkYADEuMi44NDAuMTAwMDguMy4xLjIuMy4xAAgAVRFVSTwAMS4zLjQ2LjY3MDU4OS4xMS4xMDUxNjgwOTcxLjQxNTQzOTY3NjcuMjUzMjI2MzUyOC4yOTkyNjM0MDM1/v8N4AAAAAD+/93gAAAAAAgAERFTUQAA//////7/AOD/////CAAFAENTCgBJU09fSVIgMTAwCAASAERBCAAyMDIwMDQwMQgAEwBUTQoAMDcxMzIyLjM0NggAFABVSTwAMS4zLjQ2LjY3MDU4OS4xMS43NDE3OTIyNjEuMzY5NTY2OTE5Mi40MTg2NjIxNTEzLjQwODg2Njg5NDkACABQEVVJGAAxLjIuODQwLjEwMDA4LjMuMS4yLjMuMwAIAFURVUk6ADEuMy40Ni42NzA1ODkuMTEuMjUyNjE3MjA3OS41NDUzMTgxMjEuMTUxMjgwNjcwMy4yOTEyNjY5MTIgABMASVMCADAgBSAUAExPGgBQaGlsaXBzIE1SIEltYWdpbmcgREQgMDA1IAUgBBRTUwIAAQAFIAYUU1MCAAEA/v8N4AAAAAD+/93gAAAAABAAEABQTgAAEAAgAExPAAAQADAAREEAABAAQABDUwAAEAAQEEFTBAAwMzdZEAAwEERTAAAQAAAgTE8AABAAECFMTwAAEABgIVNIAAAQAIAhU0gAABAAsCFMVAAAEADAIVVTAgAAABAAAEBMVAAAGAAQAExPAAAYABUAQ1MAABgAABBMTwYAMDAwNzEgGAASEERBAAAYABQQVE0AABgAFhBMTwgAUGhpbGlwcyAYABgQTE8IAEluZ2VuaWEgGAAZEExPBgA1LjYuMSAYACAQTE8GADUuNi4xIBgAIxBMTwAAGAAwEExPAAAgAA0AVUkAACAADgBVSQAAIAAQAFNIAAAgABEASVMAACAAEgBJUwAAIAATAElTAgAtMSAAIABDUwAAIABgAENTAAAgAABATFQAACgAAgBVUwIAAwAoAAQAQ1MEAFJHQiAoAAYAVVMCAAAAKAAQAFVTAgAVBigAEQBVUwIAxAgoAAABVVMCAAgAKAABAVVTAgAIACgAAgFVUwIABwAoAAMBVVMCAAA
AKAAQIUNTAgAwMDIAMhBQTgAAMgAzEExPAAAyAGAQTE8AADIAcBBMTwAAMgAAQExUAAA4AFAATE8AADgAAAVMTwAAQAAGAFBOAABAAEECQUUOAFJBRFJFU0VBUkNIM1QgQABCAlNIAABAAEMCU0gAAEAARAJEQQgAMjAxNzAxMDFAAEUCVE0GADExMjIzNEAAUAJEQQgAMjAxNzAxMDFAAFECVE0GADExMjIzNEAAUgJDUwAAQABTAlNICgA1MzcyOTQxNTMgQABUAkxPAABAAFUCTE8AAEAAYAJTUQAA//////7/AOD/////CAAAAVNICgBVTkRFRklORUQgCAACAVNICgBVTkRFRklORUQgCAAEAUxPBgB4eHh4eCAIAAsBQ1MCAE4g/v8N4AAAAAD+/93gAAAAAEAAgAJTVAAAQAABEFNIAABAAAIQTE8AAEAAAxBTSAAAQAAEEExPAABAAAUQTE8AAEAAABRMVAAAQAABIExPAABAAAQgREEIADIwMTcwMTAxQAAFIFRNCgAxMTIyMzMuOTUxQAAJIFNIAABAABAgU0gAAEAAACRMVAAAASAQAExPFgBQaGlsaXBzIEltYWdpbmcgREQgMDAxASAdEElTAgAyIAEgThBDUwAAASBhEENTAgBOIAEgYhBDUwIATiABIGMQQ1MKAEVMU0VXSEVSRSABIHcQQ1MAAAEgehBGTAQAAAAAAAEgexBJUwIAOCABIMgQTE8IAEdvQnJhaW4gASDMEFNUAAAFIBAATE8aAFBoaWxpcHMgTVIgSW1hZ2luZyBERCAwMDEgBSARAExPGgBQaGlsaXBzIE1SIEltYWdpbmcgREQgMDAyIAUgEgBMTxoAUGhpbGlwcyBNUiBJbWFnaW5nIEREIDAwMyAFIBMATE8aAFBoaWxpcHMgTVIgSW1hZ2luZyBERCAwMDQgBSAUAExPGgBQaGlsaXBzIE1SIEltYWdpbmcgREQgMDA1IAUgFQBMTxoAUGhpbGlwcyBNUiBJbWFnaW5nIEREIDAwNiAFIDcQQ1MCAE4gBSBfEENTCABVTktOT1dOIAUgYBBJUwIALTEFIJkRVUwEAAAAAAAFIAASVUwEAAEAAAAFIAESVUwEAAAAAAAFIBMSVUwEAAEAAAAFIEUSU1MCAAEABSBJElNTAgAAAAUgURJTUwIAAAAFIFISU1MCAAAABSBTElNTAgAAAAUgVhJTUwIAAQAFIIITVUwEAAAAAAAFIJETUE4AAAUglxNMTwAABSABFFVMBAABAAAABSADFFVMBAAAAAAABSAEFFNTAgABAAUgBhRTUwIAAQAFIA8UU1EAAP/////+/wDg//////7/DeAAAAAA/v/d4AAAAAAFICoUQ1MIAElOSVRJQUwgBSArFENTCABJTklUSUFMIAUgLBRDUwgASU5JVElBTCAFIC0UQ1MKAENPTVBMRVRFRCAFIDoUTFQaAGRhdGFkZWZzICRSZXZpc2lvbjogNTYuMCAkBSB0FURTAgAwIAUgdRVEUwIAMCAFIHYVTFQAAAUgeBVDUwQATk9ORQUggRVDUwoARklSU1RMRVZFTAUgghVJUwIAMCAFIIMVTFQAAAUghRVEUwIAMCAFIIYVTFQIAEdhdXNzL2NtBSCHFURTAgAwIOB/EABPVwAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA='
def rgb2dcm(png_buf, dcm_cur, buf_shape):
    """Wrap an RGB screenshot buffer in a DICOM Secondary Capture dataset.

    Args:
        png_buf (np.ndarray): raw RGB canvas bytes (H*W*3 uint8 values).
        dcm_cur: pydicom Dataset of the source MRS data, or '' to fall back
            to the embedded SSC_TEMPLATE for patient/study fields.
        buf_shape (tuple): (rows, cols, channels) of the image buffer.

    Returns:
        pydicom dataset ready for save_as().

    Raises:
        VespaInlineError: if png_buf is not a numpy array.
    """
    dcm_ssc = dcmread(io.BytesIO(base64.b64decode(SSC_TEMPLATE)))
    if not isinstance(png_buf, np.ndarray):
        raise VespaInlineError('rgb2dcm(): png_buf was not a Numpy array, returning!')
    png_buf = png_buf.tobytes()
    if dcm_cur == '':
        dcm_cur = dcmread(io.BytesIO(base64.b64decode(SSC_TEMPLATE)))  # just a dummy default
    dt = datetime.datetime.now()  # Create time format strings and ids
    date_str = dt.strftime('%Y%m%d')
    time_str = dt.strftime('%H%M%S.%f')[:-3]  # long format with milliseconds
    unique_ssc_str = '1.3.46.670589.11.71.5.0.10236.' + date_str + time_str.replace('.', '')
    dcm_ssc.file_meta.MediaStorageSOPInstanceUID = unique_ssc_str  # (0002,0003)
    dcm_ssc.InstanceCreationDate = dt.strftime('%Y%m%d')  # (0008,0012)
    dcm_ssc.InstanceCreationTime = time_str  # (0008,0013)
    dcm_ssc.SOPInstanceUID = dcm_ssc.file_meta.MediaStorageSOPInstanceUID  # (0008,0018)
    dcm_ssc.SeriesDate = date_str  # (0008,0021)
    dcm_ssc.SeriesTime = time_str  # (0008,0031)
    dcm_ssc.AcquisitionDate = date_str  # (0008,0022)
    dcm_ssc.AcquisitionTime = time_str  # (0008,0032)
    dcm_ssc.ContentDate = date_str  # (0008,0023)
    dcm_ssc.ContentTime = time_str  # (0008,0033)
    dcm_ssc.Manufacturer = dcm_cur.Manufacturer  # (0008,0070)
    dcm_ssc.SeriesDescription = "xReport_"  # (0008,103E)
    dcm_ssc.PatientName = dcm_cur.PatientName  # (0010,0010)
    dcm_ssc.PatientID = dcm_cur.PatientID  # (0010,0020)
    dcm_ssc.PatientBirthDate = dcm_cur.PatientBirthDate  # (0010,0030)
    dcm_ssc.PatientSex = dcm_cur.PatientSex  # (0010,0040) - FIX: comment said (0010,0030)
    dcm_ssc.PatientWeight = dcm_cur.PatientWeight  # (0010,1030)
    dcm_ssc.DateOfSecondaryCapture = date_str  # (0018,1012)
    dcm_ssc.TimeOfSecondaryCapture = time_str  # (0018,1014)
    dcm_ssc.ProtocolName = dcm_cur.ProtocolName + time_str  # (0018,1030)
    dcm_ssc.StudyID = dcm_cur.StudyID  # do not change  # (0020,0010)
    dcm_ssc.StudyInstanceUID = dcm_cur.StudyInstanceUID  # do not change  # (0020,000D)
    # FIX: reuse the 'dt' captured above instead of calling datetime.now()
    # twice more - keeps all timestamps in this dataset consistent and avoids
    # a date/time mismatch when run across a midnight boundary.
    new_id = unique_ssc_str + date_str + dt.strftime('%H%M%S%f')
    dcm_ssc.SeriesInstanceUID = new_id  # (0020,000E) - FIX: comment said (0020,0010)
    dcm_ssc.SeriesNumber = ''
    dcm_ssc.AcquisitionNumber = ''  # (0020,0012)
    # dcm_ssc.AcquisitionNumber = getattr(dcm_cur,'AcquisitionNumber',1)  # (0020,0012)
    dcm_ssc.SamplesPerPixel = 3  # (0028,0002)  (was redundant int(3))
    dcm_ssc.PhotometricInterpretation = 'RGB'  # (0028,0004)
    # NOTE(review): buf_shape values arrive as floats (10.24*dpi) from run();
    # Rows/Columns are integer (US) tags - confirm pydicom coerces correctly.
    dcm_ssc.Rows = buf_shape[0]  # (0028,0010)
    dcm_ssc.Columns = buf_shape[1]  # (0028,0011)
    dcm_ssc.PlanarConfiguration = 0  # np shape=[H,W,Ch]  # (0028,0006)
    # dcm_ssc.SecondaryCaptureDeviceManufacturer = 'Philips'  # (0018,1016)
    # dcm_ssc.SecondaryCaptureDeviceManufacturerModelName = 'ISD v3 CNode'  # (0018,1018)
    # dcm_ssc.SecondaryCaptureDeviceSoftwareVersions = '5.6.1'  # (0018,1019)
    dcm_ssc.PixelData = png_buf
    return dcm_ssc
def pdf2dcm(pdf_buf, dcm_cur):
    """Wrap a PDF report in a DICOM Encapsulated PDF dataset.

    Args:
        pdf_buf (io.BytesIO): in-memory PDF document.
        dcm_cur: pydicom Dataset of the source MRS data, or '' to fall back
            to the embedded SSC_TEMPLATE for patient/study fields.

    Returns:
        pydicom FileDataset ready for save_as().

    Raises:
        VespaInlineError: if pdf_buf is not a BytesIO object.
    """
    if not isinstance(pdf_buf, io.BytesIO):
        raise VespaInlineError('pdf2dcm(): pdf_buf was not a byte array, returning!')
    if dcm_cur == '':
        dcm_cur = dcmread(io.BytesIO(base64.b64decode(SSC_TEMPLATE)))  # just a dummy default
    dt = datetime.datetime.now()  # Create time format strings and ids
    date_str = dt.strftime('%Y%m%d')
    time_str = dt.strftime('%H%M%S.%f')[:-3]  # long format with milliseconds
    unique_ssc_str = '1.3.46.670589.11.71.5.0.10236.' + date_str + time_str.replace('.', '')
    meta = Dataset()
    meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.104.1'  # Encapsulated PDF Storage
    meta.MediaStorageSOPInstanceUID = '2.16.840.1.114430.287196081618142314176776725491661159509.60.1'
    meta.ImplementationClassUID = '1.3.46.670589.50.1.8.0'
    ds = FileDataset(None, {}, file_meta=meta, preamble=b"\0" * 128)
    # NOTE(review): file_meta carries no TransferSyntaxUID; pydicom derives it
    # from the two flags below on save - confirm against the receiving PACS.
    ds.is_little_endian = True
    ds.is_implicit_VR = True
    ds.ContentDate = dt.strftime('%Y%m%d')
    ds.ContentTime = dt.strftime('%H%M%S.%f')
    ds.SOPClassUID = '1.2.840.10008.5.1.4.1.1.104.1'
    ds.MIMETypeOfEncapsulatedDocument = 'application/pdf'
    # FIX: reuse 'dt' instead of two fresh datetime.now() calls (consistent
    # timestamps, no midnight-boundary mismatch); also removed a duplicate
    # MIMETypeOfEncapsulatedDocument assignment further down.
    new_id = unique_ssc_str + date_str + dt.strftime('%H%M%S%f')
    ds.Modality = dcm_cur.Modality
    ds.PatientName = dcm_cur.PatientName
    ds.PatientID = dcm_cur.PatientID
    ds.PatientSex = dcm_cur.PatientSex
    ds.PatientWeight = dcm_cur.PatientWeight
    ds.PatientBirthDate = dcm_cur.PatientBirthDate
    ds.StudyInstanceUID = dcm_cur.StudyInstanceUID  # do not change
    ds.SeriesInstanceUID = new_id
    ds.SOPInstanceUID = ds.file_meta.MediaStorageSOPInstanceUID
    ds.Manufacturer = dcm_cur.Manufacturer
    ds.SeriesDescription = "xReport_encap_pdf"
    ds.StudyID = dcm_cur.StudyID  # do not change
    ds.SeriesNumber = ''
    ds.AcquisitionNumber = ''
    ds.EncapsulatedDocument = pdf_buf.getvalue()
    return ds
if __name__ == '__main__':
    # Stand-alone debug/demo entry point: build a default settings object,
    # point every path at directories next to this file, and run the engine once.
    # NOTE(review): fname_tstamp is computed here but never used.
    fname_tstamp = util_time.filename_timestamp()  # yyyymmdd.hhmmss.usecs
    # get defaults
    settings = VespaInlineSettings()
    # reset relative to 'this' filename, not to vespa_inline_engine location
    settings.base_path = os.path.dirname(os.path.abspath(__file__))
    settings.data_dir = os.path.join(settings.base_path, 'datadir')
    settings.preset_dir = os.path.join(settings.base_path, 'presets')
    settings.output_dir = os.path.join(settings.base_path, 'output')
    settings.debug_dir = os.path.join(settings.base_path, 'debug')
    # which on-disk layout run() should expect inside data_dir
    settings.dataformat = 'philips_press28_dicom'
    #settings.dataformat = 'philips_slaser30_cmrr_spar'
    # which debug/result artifacts to write
    settings.save_err = True
    settings.save_xml = True
    settings.save_pdf = True
    settings.save_png = True
    settings.save_dcm = True
    settings.save_dcm_pdf = True
    # append a timestamp to each output name so reruns do not overwrite
    settings.err_fname_unique = True
    settings.xml_fname_unique = True
    settings.pdf_fname_unique = True
    settings.png_fname_unique = True
    settings.dcm_fname_unique = True
    settings.dcm_pdf_fname_unique = True
    # output file locations (debug artifacts vs shipped results)
    settings.err_fname = os.path.join(settings.debug_dir, "debug_vespa_viff.png")
    settings.xml_fname = os.path.join(settings.debug_dir, "debug_xml_last_run.xml")
    settings.pdf_fname = os.path.join(settings.debug_dir, "debug_pdf_philips.pdf")
    settings.png_fname = os.path.join(settings.output_dir, "results_vespa_inline_philips.png")
    settings.dcm_fname = os.path.join(settings.output_dir, "results_vespa_inline_dicom.dcm")
    settings.dcm_pdf_fname = os.path.join(settings.output_dir, "results_vespa_inline_dicom_pdf.dcm")
    # PDF report layout options
    settings.pdf_plotstyle = 'lcm_multi'
    settings.pdf_file_label = 'Analysis- Philips PRIDE Inline'
    settings.pdf_minppm = 0.5
    settings.pdf_maxppm = 4.2
    settings.pdf_apply_phase = False
    settings.pdf_remove_base = False
    settings.pdf_fontname = 'Courier New'
    settings.pdf_dpi = 300
    settings.pdf_pad_inches = 0.5
    # PNG report layout options
    settings.png_plotstyle = 'lcm_square'
    settings.png_file_label = 'Analysis- Philips PRIDE Inline'
    settings.png_minppm = 0.5
    settings.png_maxppm = 4.2
    settings.png_apply_phase = False
    settings.png_remove_base = False
    settings.png_fontname = 'Courier New'
    settings.png_dpi = 100
    settings.png_pad_inches = 0.5
    # error-report rendering options
    settings.err_dpi = 100
    settings.err_pad_inches = 0.5
    settings.debug = False
    run(settings)
| 6,801 | 0 | 69 |
cbdc4378671115d23a90b8ecd17b6a7d7dd767c5 | 905 | py | Python | stellargraph/core/utils.py | anonymnous-gituser/stellargraph | 8c2872a8907f8ccef79256238c6c0d21b94cf2f3 | [
"Apache-2.0"
] | 1 | 2019-07-15T08:56:05.000Z | 2019-07-15T08:56:05.000Z | stellargraph/core/utils.py | subpath/stellargraph | 60edf4a6268f29b49b7c768c382e235af4108506 | [
"Apache-2.0"
] | null | null | null | stellargraph/core/utils.py | subpath/stellargraph | 60edf4a6268f29b49b7c768c382e235af4108506 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2018 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
def is_real_iterable(x):
    """
    Tests if x is an iterable and is not a string.

    Args:
        x: object to test.

    Returns:
        True if x is an iterable (but not a string) and False otherwise
    """
    # FIX: collections.Iterable was a deprecated alias removed in Python 3.10;
    # the canonical location is collections.abc. Imported locally so this fix
    # does not depend on the module-level import block.
    from collections.abc import Iterable
    return isinstance(x, Iterable) and not isinstance(x, (str, bytes))
| 30.166667 | 82 | 0.711602 | # -*- coding: utf-8 -*-
#
# Copyright 2018 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
def is_real_iterable(x):
    """
    Tests if x is an iterable and is not a string.

    Args:
        x: object to test.

    Returns:
        True if x is an iterable (but not a string) and False otherwise
    """
    # FIX: collections.Iterable was a deprecated alias removed in Python 3.10;
    # the canonical location is collections.abc. Imported locally so this fix
    # does not depend on the module-level import block.
    from collections.abc import Iterable
    return isinstance(x, Iterable) and not isinstance(x, (str, bytes))
| 0 | 0 | 0 |
294ada3ece65f13dbc7fbda4391ac82e9c40d818 | 2,706 | py | Python | src/AppiumLibrary/keywords/_android_utils.py | stevenkcolin/AppiumLibrarySteve | 87e5d09be749542d0a3ab16ad1b1522a9570e4da | [
"Apache-2.0"
] | 2 | 2016-07-11T08:01:37.000Z | 2020-07-30T07:20:27.000Z | src/AppiumLibrary/keywords/_android_utils.py | stevenkcolin/AppiumLibrarySteve | 87e5d09be749542d0a3ab16ad1b1522a9570e4da | [
"Apache-2.0"
] | null | null | null | src/AppiumLibrary/keywords/_android_utils.py | stevenkcolin/AppiumLibrarySteve | 87e5d09be749542d0a3ab16ad1b1522a9570e4da | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import base64
from keywordgroup import KeywordGroup
from appium.webdriver.connectiontype import ConnectionType
| 36.08 | 102 | 0.578344 | # -*- coding: utf-8 -*-
import base64
from keywordgroup import KeywordGroup
from appium.webdriver.connectiontype import ConnectionType
class _AndroidUtilsKeywords(KeywordGroup):
    """Android-only utility keywords: network connection control plus
    file/folder transfer to and from the device."""

    # Public
    def get_network_connection_status(self):
        """Returns an integer bitmask specifying the network connection type.
        Android only.
        See `set network connection status` for more details.
        """
        return self._current_application().network_connection

    def set_network_connection_status(self, connectionStatus):
        """Sets the network connection Status.
        Android only.
        Possible values:
            Value |(Alias)          | Data | Wifi | Airplane Mode
            -------------------------------------------------
            0     |(None)           | 0    | 0    | 0
            1     |(Airplane Mode)  | 0    | 0    | 1
            2     |(Wifi only)      | 0    | 1    | 0
            4     |(Data only)      | 1    | 0    | 0
            6     |(All network on) | 1    | 1    | 0
        """
        target = ConnectionType(int(connectionStatus))
        return self._current_application().set_network_connection(target)

    def pull_file(self, path, decode=False):
        """Retrieves the file at `path` and return it's content.
        Android only.
        :Args:
         - path - the path to the file on the device
         - decode - True/False decode the data (base64) before returning it (default=False)
        """
        contents = self._current_application().pull_file(path)
        if not decode:
            return contents
        return base64.b64decode(contents)

    def pull_folder(self, path, decode=False):
        """Retrieves a folder at `path`. Returns the folder's contents zipped.
        Android only.
        :Args:
         - path - the path to the folder on the device
         - decode - True/False decode the data (base64) before returning it (default=False)
        """
        zipped = self._current_application().pull_folder(path)
        if not decode:
            return zipped
        return base64.b64decode(zipped)

    def push_file(self, path, data, encode=False):
        """Puts the data in the file specified as `path`.
        Android only.
        :Args:
         - path - the path on the device
         - data - data to be written to the file
         - encode - True/False encode the data as base64 before writing it to the file (default=False)
        """
        payload = base64.b64encode(data) if encode else data
        self._current_application().push_file(path, payload)
| 0 | 2,547 | 23 |
e066197b442ba032e2f568218267b4a10c8feeef | 2,402 | py | Python | examples/RuleBasedEngine.py | MATHEMA-GmbH/Owl-Racer-AI-Client-Python | 3a16a254710e4a2e868e8569e7d6a67050cbc180 | [
"MIT"
] | null | null | null | examples/RuleBasedEngine.py | MATHEMA-GmbH/Owl-Racer-AI-Client-Python | 3a16a254710e4a2e868e8569e7d6a67050cbc180 | [
"MIT"
] | null | null | null | examples/RuleBasedEngine.py | MATHEMA-GmbH/Owl-Racer-AI-Client-Python | 3a16a254710e4a2e868e8569e7d6a67050cbc180 | [
"MIT"
] | null | null | null | import time
from owlracer.env import Env as Owlracer_Env
from owlracer import owlParser
@owlParser
if __name__ == '__main__':
main_loop()
| 27.609195 | 145 | 0.568693 | import time
from owlracer.env import Env as Owlracer_Env
from owlracer import owlParser
def calculate_action(step_result, list):
    """Pick the next driving action from the car's range sensors.

    Args:
        step_result: last environment step; only ``step_result.distance``
            (left/right/front/frontLeft/frontRight) is read.
        list: mutable state dict with 'fixed_left'/'fixed_right' countdown
            counters for an in-progress turn maneuver. The counters are
            decremented / set in place. (The name shadows the builtin
            ``list``; kept unchanged for keyword-call compatibility.)

    Returns:
        int: an action code understood by the Owlracer environment.
    """
    distance_right = step_result.distance.right
    distance_front_right = step_result.distance.frontRight
    distance_left = step_result.distance.left
    distance_front_left = step_result.distance.frontLeft

    # A previously triggered maneuver is still running: count it down and
    # keep steering in the committed direction.
    if list["fixed_left"] > 0:
        list["fixed_left"] = list["fixed_left"]-1
        if list["fixed_left"] > 30:
            return 2
        else:
            return 3
    elif list["fixed_right"] > 0:
        list["fixed_right"] = list["fixed_right"]-1
        if list["fixed_right"] > 30:
            return 2
        return 4
    # Large side openings trigger a new fixed-length maneuver.
    elif distance_left > 200 and list["fixed_left"] == 0:
        list["fixed_left"] = 80
        print("distance left big!")
        return 2
    elif distance_right > 200 and list["fixed_right"] == 0:
        list["fixed_right"] = 80
        # FIX: this branch previously printed "distance left big!" (copy-paste)
        print("distance right big!")
        return 2
    else:
        # Steer by the front-left/front-right ratio; guard divide-by-zero
        # with a tiny epsilon exactly as before.
        if distance_front_left == 0:
            ratio = distance_front_right/(distance_front_left + 0.00001)
        else:
            ratio = float(distance_front_right)/distance_front_left

        if step_result.distance.front >= 50:
            if ratio < 1:
                return 3
            elif ratio > 1:
                return 4
            else:
                return 1
        else:
            if ratio < 1:
                return 5
            elif ratio > 1:
                return 6
            else:
                return 2
@owlParser
def main_loop(args):
    """Connect to the Owlracer server and drive the car with the rule engine.

    ``args`` (ip/port/spectator/session) is supplied by the ``@owlParser``
    decorator, which is why the guarded call below passes no arguments.
    Runs until interrupted.
    """
    env = Owlracer_Env(ip=args.ip, port=args.port, spectator=args.spectator, session=args.session, carName="Rule-based (Py)", carColor="#07f036")
    step_result = env.step(0)
    # Maneuver countdown state. FIX: renamed from 'list', which shadowed
    # the builtin; calculate_action() is called positionally so this is safe.
    state = {
        "fixed_left": 0,
        "fixed_right": 0
    }
    while True:
        # waiting for game to start
        while env.isPrerace or env.isPaused:
            env.updateSession()
            time.sleep(0.1)

        action = calculate_action(step_result, state)
        step_result = env.step(action)
        print("Car Left/right: {} {}, Vel: {} forward distance {}".format(step_result.distance.left, step_result.distance.right,
                                                                          step_result.velocity, step_result.distance.front))
        # sleep for human
        time.sleep(0.01)


if __name__ == '__main__':
    main_loop()
| 2,211 | 0 | 45 |
a941edd32a80157885771b3caed6ba002cfcd163 | 876 | py | Python | config.py | red-green/youtube-to-podcast | 569cdbbaca95a287a34dbcabb1782b23de3f42d4 | [
"Unlicense"
] | null | null | null | config.py | red-green/youtube-to-podcast | 569cdbbaca95a287a34dbcabb1782b23de3f42d4 | [
"Unlicense"
] | null | null | null | config.py | red-green/youtube-to-podcast | 569cdbbaca95a287a34dbcabb1782b23de3f42d4 | [
"Unlicense"
] | null | null | null | ### channel configuration
CHANNEL_NAME = 'ThreatWire'
CHANNEL_PLAYLIST_ID = 'PLW5y1tjAOzI0Sx4UU2fncEwQ9BQLr5Vlu'  # YouTube playlist id to mirror
ITEMS_TO_SCAN = 5  # presumably the number of newest playlist items fetched per refresh - confirm against API_PLAYLIST_URL usage
FG_YOUTUBE = 'https://www.youtube.com/channel/UC3s0BtrBJpwNDaflRSoiieQ' # channel link
FG_AUTHOR = {'name':'Shannon Morse','email':'shannon@hak5.org'}
### data storage and history
ITEMS_TO_KEEP = 25  # presumably caps how many episodes stay in feed/history - confirm in consumer code
HISTORY_JSON = 'history.json'
PODCAST_FILE = 'podcast.rss'
### web hosting
WEB_HOST_DIRECTORY = '/var/www/html/ytp'
WEB_BASE_URL = 'http://10.0.1.25/ytp/'  # public URL prefix; should mirror WEB_HOST_DIRECTORY
### api stuff
# NOTE(review): placeholder value - supply a real YouTube Data API key and
# keep it out of version control.
API_KEY = 'insert your api key here so you won’t get rate-limited'
API_PLAYLIST_URL = 'https://www.googleapis.com/youtube/v3/playlistItems?key={}&part=snippet&contentDetails&status&maxResults={}&playlistId={}'
### other config items
REFRESH_TIME = 7200 # in seconds, this is 2 hours
FFMPEG_CMD = 'ffmpeg -i {} -b:a 192K -vn {}'  # strips video (-vn), 192k audio; output container set by caller's filename
TEMP_DIRECTORY = '/tmp/yt-podcast/'
| 26.545455 | 142 | 0.736301 | ### channel configuration
CHANNEL_NAME = 'ThreatWire'
CHANNEL_PLAYLIST_ID = 'PLW5y1tjAOzI0Sx4UU2fncEwQ9BQLr5Vlu'  # YouTube playlist id to mirror
ITEMS_TO_SCAN = 5  # presumably the number of newest playlist items fetched per refresh - confirm against API_PLAYLIST_URL usage
FG_YOUTUBE = 'https://www.youtube.com/channel/UC3s0BtrBJpwNDaflRSoiieQ' # channel link
FG_AUTHOR = {'name':'Shannon Morse','email':'shannon@hak5.org'}
### data storage and history
ITEMS_TO_KEEP = 25  # presumably caps how many episodes stay in feed/history - confirm in consumer code
HISTORY_JSON = 'history.json'
PODCAST_FILE = 'podcast.rss'
### web hosting
WEB_HOST_DIRECTORY = '/var/www/html/ytp'
WEB_BASE_URL = 'http://10.0.1.25/ytp/'  # public URL prefix; should mirror WEB_HOST_DIRECTORY
### api stuff
# NOTE(review): placeholder value - supply a real YouTube Data API key and
# keep it out of version control.
API_KEY = 'insert your api key here so you won’t get rate-limited'
API_PLAYLIST_URL = 'https://www.googleapis.com/youtube/v3/playlistItems?key={}&part=snippet&contentDetails&status&maxResults={}&playlistId={}'
### other config items
REFRESH_TIME = 7200 # in seconds, this is 2 hours
FFMPEG_CMD = 'ffmpeg -i {} -b:a 192K -vn {}'  # strips video (-vn), 192k audio; output container set by caller's filename
TEMP_DIRECTORY = '/tmp/yt-podcast/'
| 0 | 0 | 0 |
25659b11d01bf9d7aa939a9d338807126094201c | 3,640 | py | Python | examples/sas_interconnects.py | Manoj-M-97/python-hpOneView | 134f158f4fd857e7454383186f2975e8bb0568c8 | [
"MIT"
] | null | null | null | examples/sas_interconnects.py | Manoj-M-97/python-hpOneView | 134f158f4fd857e7454383186f2975e8bb0568c8 | [
"MIT"
] | null | null | null | examples/sas_interconnects.py | Manoj-M-97/python-hpOneView | 134f158f4fd857e7454383186f2975e8bb0568c8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2019) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from pprint import pprint
from hpOneView.oneview_client import OneViewClient
from config_loader import try_load_from_file
# This resource is only available on HPE Synergy
# Example script: list SAS interconnects, then exercise the power / UID /
# refresh / reset operations against the first one found.
config = {
    "ip": "<oneview_ip>",
    "credentials": {
        "userName": "<username>",
        "password": "<password>"
    }
}

# Try load config from a file (if there is a config file)
config = try_load_from_file(config)
oneview_client = OneViewClient(config)
sas_interconnects = oneview_client.sas_interconnects

# Get all, with defaults
print("\nGet all SAS Interconnects")
all_sas_interconnects = sas_interconnects.get_all()
pprint(all_sas_interconnects)

# Get the first 10 records
print("\nGet the first ten SAS Interconnects")
sas_interconnects_limited = sas_interconnects.get_all(0, 10)
pprint(sas_interconnects_limited)

if all_sas_interconnects:
    # Use the first interconnect found for the remaining operations.
    sas_interconnect_uri = all_sas_interconnects[0]['uri']

    # Get by Uri
    print("\nGet a SAS Interconnect by uri")
    sas_interconnect_by_uri = sas_interconnects.get_by_uri(sas_interconnect_uri)
    pprint(sas_interconnect_by_uri.data)

    # Power the interconnect on first (only if it is currently off) so the
    # operations below run against a powered device.
    if sas_interconnect_by_uri.data["powerState"] == 'Off':
        print("\nTurn on power for SAS interconnect %s" % sas_interconnect_by_uri.data['name'])
        sas_interconnect_by_uri.patch(
            operation='replace',
            path='/powerState',
            value='On'
        )
        print("Done!")

    print("\nRefresh a SAS interconnect")
    sas_interconnect_by_uri.refresh_state(
        configuration={"refreshState": "RefreshPending"}
    )
    print("Done!")

    print("\nTurn 'On' UID light on SAS interconnect %s" % sas_interconnect_by_uri.data['name'])
    sas_interconnect_by_uri.patch(
        operation='replace',
        path='/uidState',
        value='On'
    )
    print("Done!")

    print("\nSoft Reset SAS interconnect %s" % sas_interconnect_by_uri.data['name'])
    sas_interconnect_by_uri.patch(
        operation='replace',
        path='/softResetState',
        value='Reset'
    )
    print("Done!")

    print("\nReset SAS interconnect %s" % sas_interconnect_by_uri.data['name'])
    sas_interconnect_by_uri.patch(
        operation='replace',
        path='/hardResetState',
        value='Reset'
    )
    print("Done!")

    # Leave the device powered off when the demo finishes.
    print("\nTurn off power for SAS interconnect %s" % sas_interconnect_by_uri.data['name'])
    sas_interconnect_by_uri.patch(
        operation='replace',
        path='/powerState',
        value='Off'
    )
    print("Done!")
| 34.018692 | 96 | 0.709066 | # -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2019) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from pprint import pprint
from hpOneView.oneview_client import OneViewClient
from config_loader import try_load_from_file
# This resource is only available on HPE Synergy
config = {
"ip": "<oneview_ip>",
"credentials": {
"userName": "<username>",
"password": "<password>"
}
}
# Try load config from a file (if there is a config file)
config = try_load_from_file(config)
oneview_client = OneViewClient(config)
sas_interconnects = oneview_client.sas_interconnects
# Get all, with defaults
print("\nGet all SAS Interconnects")
all_sas_interconnects = sas_interconnects.get_all()
pprint(all_sas_interconnects)
# Get the first 10 records
print("\nGet the first ten SAS Interconnects")
sas_interconnects_limited = sas_interconnects.get_all(0, 10)
pprint(sas_interconnects_limited)
if all_sas_interconnects:
sas_interconnect_uri = all_sas_interconnects[0]['uri']
# Get by Uri
print("\nGet a SAS Interconnect by uri")
sas_interconnect_by_uri = sas_interconnects.get_by_uri(sas_interconnect_uri)
pprint(sas_interconnect_by_uri.data)
if sas_interconnect_by_uri.data["powerState"] == 'Off':
print("\nTurn on power for SAS interconnect %s" % sas_interconnect_by_uri.data['name'])
sas_interconnect_by_uri.patch(
operation='replace',
path='/powerState',
value='On'
)
print("Done!")
print("\nRefresh a SAS interconnect")
sas_interconnect_by_uri.refresh_state(
configuration={"refreshState": "RefreshPending"}
)
print("Done!")
print("\nTurn 'On' UID light on SAS interconnect %s" % sas_interconnect_by_uri.data['name'])
sas_interconnect_by_uri.patch(
operation='replace',
path='/uidState',
value='On'
)
print("Done!")
print("\nSoft Reset SAS interconnect %s" % sas_interconnect_by_uri.data['name'])
sas_interconnect_by_uri.patch(
operation='replace',
path='/softResetState',
value='Reset'
)
print("Done!")
print("\nReset SAS interconnect %s" % sas_interconnect_by_uri.data['name'])
sas_interconnect_by_uri.patch(
operation='replace',
path='/hardResetState',
value='Reset'
)
print("Done!")
print("\nTurn off power for SAS interconnect %s" % sas_interconnect_by_uri.data['name'])
sas_interconnect_by_uri.patch(
operation='replace',
path='/powerState',
value='Off'
)
print("Done!")
| 0 | 0 | 0 |
a83d95d0bdb34524a4d9657cff33c959ab96b482 | 19,189 | py | Python | src/apscheduler/datastores/async_/sqlalchemy.py | spaceack/apscheduler | ce5262c05a663677fd74a43c7a315bd5e3def902 | [
"MIT"
] | null | null | null | src/apscheduler/datastores/async_/sqlalchemy.py | spaceack/apscheduler | ce5262c05a663677fd74a43c7a315bd5e3def902 | [
"MIT"
] | null | null | null | src/apscheduler/datastores/async_/sqlalchemy.py | spaceack/apscheduler | ce5262c05a663677fd74a43c7a315bd5e3def902 | [
"MIT"
] | null | null | null | from __future__ import annotations
import json
import logging
from contextlib import AsyncExitStack, closing
from datetime import datetime, timedelta, timezone
from json import JSONDecodeError
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Type, Union
from uuid import UUID
import sniffio
from anyio import TASK_STATUS_IGNORED, create_task_group, sleep
from attr import asdict
from sqlalchemy import (
Column, DateTime, Integer, LargeBinary, MetaData, Table, Unicode, and_, bindparam, func, or_,
select)
from sqlalchemy.engine import URL
from sqlalchemy.exc import CompileError, IntegrityError
from sqlalchemy.ext.asyncio import AsyncConnection, create_async_engine
from sqlalchemy.ext.asyncio.engine import AsyncConnectable
from sqlalchemy.sql.ddl import DropTable
from ... import events as events_module
from ...abc import AsyncDataStore, Job, Schedule, Serializer
from ...events import (
AsyncEventHub, DataStoreEvent, Event, JobAdded, JobDeserializationFailed, ScheduleAdded,
ScheduleDeserializationFailed, ScheduleRemoved, ScheduleUpdated, SubscriptionToken)
from ...exceptions import ConflictingIdError, SerializationError
from ...policies import ConflictPolicy
from ...serializers.pickle import PickleSerializer
from ...util import reentrant
logger = logging.getLogger(__name__)
@reentrant
| 45.471564 | 99 | 0.614675 | from __future__ import annotations
import json
import logging
from contextlib import AsyncExitStack, closing
from datetime import datetime, timedelta, timezone
from json import JSONDecodeError
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Type, Union
from uuid import UUID
import sniffio
from anyio import TASK_STATUS_IGNORED, create_task_group, sleep
from attr import asdict
from sqlalchemy import (
Column, DateTime, Integer, LargeBinary, MetaData, Table, Unicode, and_, bindparam, func, or_,
select)
from sqlalchemy.engine import URL
from sqlalchemy.exc import CompileError, IntegrityError
from sqlalchemy.ext.asyncio import AsyncConnection, create_async_engine
from sqlalchemy.ext.asyncio.engine import AsyncConnectable
from sqlalchemy.sql.ddl import DropTable
from ... import events as events_module
from ...abc import AsyncDataStore, Job, Schedule, Serializer
from ...events import (
AsyncEventHub, DataStoreEvent, Event, JobAdded, JobDeserializationFailed, ScheduleAdded,
ScheduleDeserializationFailed, ScheduleRemoved, ScheduleUpdated, SubscriptionToken)
from ...exceptions import ConflictingIdError, SerializationError
from ...policies import ConflictPolicy
from ...serializers.pickle import PickleSerializer
from ...util import reentrant
logger = logging.getLogger(__name__)
def default_json_handler(obj: Any) -> Any:
if isinstance(obj, datetime):
return obj.timestamp()
elif isinstance(obj, UUID):
return obj.hex
elif isinstance(obj, frozenset):
return list(obj)
raise TypeError(f'Cannot JSON encode type {type(obj)}')
def json_object_hook(obj: Dict[str, Any]) -> Any:
for key, value in obj.items():
if key == 'timestamp':
obj[key] = datetime.fromtimestamp(value, timezone.utc)
elif key == 'job_id':
obj[key] = UUID(value)
elif key == 'tags':
obj[key] = frozenset(value)
return obj
@reentrant
class SQLAlchemyDataStore(AsyncDataStore):
_metadata = MetaData()
t_metadata = Table(
'metadata',
_metadata,
Column('schema_version', Integer, nullable=False)
)
t_schedules = Table(
'schedules',
_metadata,
Column('id', Unicode, primary_key=True),
Column('task_id', Unicode, nullable=False),
Column('serialized_data', LargeBinary, nullable=False),
Column('next_fire_time', DateTime(timezone=True), index=True),
Column('acquired_by', Unicode),
Column('acquired_until', DateTime(timezone=True))
)
t_jobs = Table(
'jobs',
_metadata,
Column('id', Unicode(32), primary_key=True),
Column('task_id', Unicode, nullable=False, index=True),
Column('serialized_data', LargeBinary, nullable=False),
Column('created_at', DateTime(timezone=True), nullable=False),
Column('acquired_by', Unicode),
Column('acquired_until', DateTime(timezone=True))
)
def __init__(self, bind: AsyncConnectable, *, schema: Optional[str] = None,
serializer: Optional[Serializer] = None,
lock_expiration_delay: float = 30, max_poll_time: Optional[float] = 1,
max_idle_time: float = 60, start_from_scratch: bool = False,
notify_channel: Optional[str] = 'apscheduler'):
self.bind = bind
self.schema = schema
self.serializer = serializer or PickleSerializer()
self.lock_expiration_delay = lock_expiration_delay
self.max_poll_time = max_poll_time
self.max_idle_time = max_idle_time
self.start_from_scratch = start_from_scratch
self._logger = logging.getLogger(__name__)
self._exit_stack = AsyncExitStack()
self._events = AsyncEventHub()
# Find out if the dialect supports RETURNING
statement = self.t_jobs.update().returning(self.t_schedules.c.id)
try:
statement.compile(bind=self.bind)
except CompileError:
self._supports_update_returning = False
else:
self._supports_update_returning = True
self.notify_channel = notify_channel
if notify_channel:
if self.bind.dialect.name != 'postgresql' or self.bind.dialect.driver != 'asyncpg':
self.notify_channel = None
@classmethod
def from_url(cls, url: Union[str, URL], **options) -> 'SQLAlchemyDataStore':
engine = create_async_engine(url, future=True)
return cls(engine, **options)
async def __aenter__(self):
asynclib = sniffio.current_async_library() or '(unknown)'
if asynclib != 'asyncio':
raise RuntimeError(f'This data store requires asyncio; currently running: {asynclib}')
# Verify that the schema is in place
async with self.bind.begin() as conn:
if self.start_from_scratch:
for table in self._metadata.sorted_tables:
await conn.execute(DropTable(table, if_exists=True))
await conn.run_sync(self._metadata.create_all)
query = select(self.t_metadata.c.schema_version)
result = await conn.execute(query)
version = result.scalar()
if version is None:
await conn.execute(self.t_metadata.insert(values={'schema_version': 1}))
elif version > 1:
raise RuntimeError(f'Unexpected schema version ({version}); '
f'only version 1 is supported by this version of APScheduler')
await self._exit_stack.enter_async_context(self._events)
if self.notify_channel:
task_group = create_task_group()
await self._exit_stack.enter_async_context(task_group)
await task_group.start(self._listen_notifications)
self._exit_stack.callback(task_group.cancel_scope.cancel)
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self._exit_stack.__aexit__(exc_type, exc_val, exc_tb)
async def _publish(self, conn: AsyncConnection, event: DataStoreEvent) -> None:
if self.notify_channel:
event_type = event.__class__.__name__
event_data = json.dumps(asdict(event), ensure_ascii=False,
default=default_json_handler)
notification = event_type + ' ' + event_data
if len(notification) < 8000:
await conn.execute(func.pg_notify(self.notify_channel, notification))
return
self._logger.warning(
'Could not send %s notification because it is too long (%d >= 8000)',
event_type, len(notification))
self._events.publish(event)
async def _listen_notifications(self, *, task_status=TASK_STATUS_IGNORED) -> None:
def callback(connection, pid, channel: str, payload: str) -> None:
self._logger.debug('Received notification on channel %s: %s', channel, payload)
event_type, _, json_data = payload.partition(' ')
try:
event_data = json.loads(json_data, object_hook=json_object_hook)
except JSONDecodeError:
self._logger.exception('Failed decoding JSON payload of notification: %s', payload)
return
event_class = getattr(events_module, event_type)
event = event_class(**event_data)
self._events.publish(event)
task_started_sent = False
while True:
with closing(await self.bind.raw_connection()) as conn:
asyncpg_conn = conn.connection._connection
await asyncpg_conn.add_listener(self.notify_channel, callback)
if not task_started_sent:
task_status.started()
task_started_sent = True
try:
while True:
await sleep(self.max_idle_time)
await asyncpg_conn.execute('SELECT 1')
finally:
await asyncpg_conn.remove_listener(self.notify_channel, callback)
def _deserialize_jobs(self, serialized_jobs: Iterable[Tuple[UUID, bytes]]) -> List[Job]:
jobs: List[Job] = []
for job_id, serialized_data in serialized_jobs:
try:
jobs.append(self.serializer.deserialize(serialized_data))
except SerializationError as exc:
self._events.publish(JobDeserializationFailed(job_id=job_id, exception=exc))
return jobs
def _deserialize_schedules(
self, serialized_schedules: Iterable[Tuple[str, bytes]]) -> List[Schedule]:
jobs: List[Schedule] = []
for schedule_id, serialized_data in serialized_schedules:
try:
jobs.append(self.serializer.deserialize(serialized_data))
except SerializationError as exc:
self._events.publish(
ScheduleDeserializationFailed(schedule_id=schedule_id, exception=exc))
return jobs
def subscribe(self, callback: Callable[[Event], Any],
event_types: Optional[Iterable[Type[Event]]] = None) -> SubscriptionToken:
return self._events.subscribe(callback, event_types)
def unsubscribe(self, token: SubscriptionToken) -> None:
self._events.unsubscribe(token)
async def clear(self) -> None:
async with self.bind.begin() as conn:
await conn.execute(self.t_schedules.delete())
await conn.execute(self.t_jobs.delete())
async def add_schedule(self, schedule: Schedule, conflict_policy: ConflictPolicy) -> None:
serialized_data = self.serializer.serialize(schedule)
statement = self.t_schedules.insert().\
values(id=schedule.id, task_id=schedule.task_id, serialized_data=serialized_data,
next_fire_time=schedule.next_fire_time)
try:
async with self.bind.begin() as conn:
await conn.execute(statement)
event = ScheduleAdded(schedule_id=schedule.id,
next_fire_time=schedule.next_fire_time)
await self._publish(conn, event)
except IntegrityError:
if conflict_policy is ConflictPolicy.exception:
raise ConflictingIdError(schedule.id) from None
elif conflict_policy is ConflictPolicy.replace:
statement = self.t_schedules.update().\
where(self.t_schedules.c.id == schedule.id).\
values(serialized_data=serialized_data,
next_fire_time=schedule.next_fire_time)
async with self.bind.begin() as conn:
await conn.execute(statement)
event = ScheduleUpdated(schedule_id=schedule.id,
next_fire_time=schedule.next_fire_time)
await self._publish(conn, event)
async def remove_schedules(self, ids: Iterable[str]) -> None:
async with self.bind.begin() as conn:
now = datetime.now(timezone.utc)
conditions = and_(self.t_schedules.c.id.in_(ids),
or_(self.t_schedules.c.acquired_until.is_(None),
self.t_schedules.c.acquired_until < now))
statement = self.t_schedules.delete(conditions)
if self._supports_update_returning:
statement = statement.returning(self.t_schedules.c.id)
removed_ids = [row[0] for row in await conn.execute(statement)]
else:
await conn.execute(statement)
for schedule_id in removed_ids:
await self._publish(conn, ScheduleRemoved(schedule_id=schedule_id))
async def get_schedules(self, ids: Optional[Set[str]] = None) -> List[Schedule]:
query = select([self.t_schedules.c.id, self.t_schedules.c.serialized_data]).\
order_by(self.t_schedules.c.id)
if ids:
query = query.where(self.t_schedules.c.id.in_(ids))
async with self.bind.begin() as conn:
result = await conn.execute(query)
return self._deserialize_schedules(result)
async def acquire_schedules(self, scheduler_id: str, limit: int) -> List[Schedule]:
async with self.bind.begin() as conn:
now = datetime.now(timezone.utc)
acquired_until = datetime.fromtimestamp(
now.timestamp() + self.lock_expiration_delay, timezone.utc)
schedules_cte = select(self.t_schedules.c.id).\
where(and_(self.t_schedules.c.next_fire_time.isnot(None),
self.t_schedules.c.next_fire_time <= now,
or_(self.t_schedules.c.acquired_until.is_(None),
self.t_schedules.c.acquired_until < now))).\
limit(limit).cte()
subselect = select([schedules_cte.c.id])
statement = self.t_schedules.update().where(self.t_schedules.c.id.in_(subselect)).\
values(acquired_by=scheduler_id, acquired_until=acquired_until)
if self._supports_update_returning:
statement = statement.returning(self.t_schedules.c.id,
self.t_schedules.c.serialized_data)
result = await conn.execute(statement)
else:
await conn.execute(statement)
statement = select([self.t_schedules.c.id, self.t_schedules.c.serialized_data]).\
where(and_(self.t_schedules.c.acquired_by == scheduler_id))
result = await conn.execute(statement)
return self._deserialize_schedules(result)
async def release_schedules(self, scheduler_id: str, schedules: List[Schedule]) -> None:
update_events: List[ScheduleUpdated] = []
finished_schedule_ids: List[str] = []
async with self.bind.begin() as conn:
update_args: List[Dict[str, Any]] = []
for schedule in schedules:
if schedule.next_fire_time is not None:
try:
serialized_data = self.serializer.serialize(schedule)
except SerializationError:
self._logger.exception('Error serializing schedule %r – '
'removing from data store', schedule.id)
finished_schedule_ids.append(schedule.id)
continue
update_args.append({
'p_id': schedule.id,
'p_serialized_data': serialized_data,
'p_next_fire_time': schedule.next_fire_time
})
else:
finished_schedule_ids.append(schedule.id)
# Update schedules that have a next fire time
if update_args:
p_id = bindparam('p_id')
p_serialized = bindparam('p_serialized_data')
p_next_fire_time = bindparam('p_next_fire_time')
statement = self.t_schedules.update().\
where(and_(self.t_schedules.c.id == p_id,
self.t_schedules.c.acquired_by == scheduler_id)).\
values(serialized_data=p_serialized, next_fire_time=p_next_fire_time)
next_fire_times = {arg['p_id']: arg['p_next_fire_time'] for arg in update_args}
if self._supports_update_returning:
statement = statement.returning(self.t_schedules.c.id)
updated_ids = [row[0] for row in await conn.execute(statement, update_args)]
for schedule_id in updated_ids:
event = ScheduleUpdated(schedule_id=schedule_id,
next_fire_time=next_fire_times[schedule_id])
update_events.append(event)
# Remove schedules that have no next fire time or failed to serialize
if finished_schedule_ids:
statement = self.t_schedules.delete().\
where(and_(self.t_schedules.c.id.in_(finished_schedule_ids),
self.t_schedules.c.acquired_by == scheduler_id))
await conn.execute(statement)
for event in update_events:
await self._publish(conn, event)
for schedule_id in finished_schedule_ids:
await self._publish(conn, ScheduleRemoved(schedule_id=schedule_id))
async def add_job(self, job: Job) -> None:
now = datetime.now(timezone.utc)
serialized_data = self.serializer.serialize(job)
statement = self.t_jobs.insert().values(id=job.id.hex, task_id=job.task_id,
created_at=now, serialized_data=serialized_data)
async with self.bind.begin() as conn:
await conn.execute(statement)
event = JobAdded(job_id=job.id, task_id=job.task_id, schedule_id=job.schedule_id,
tags=job.tags)
await self._publish(conn, event)
async def get_jobs(self, ids: Optional[Iterable[UUID]] = None) -> List[Job]:
query = select([self.t_jobs.c.id, self.t_jobs.c.serialized_data]).\
order_by(self.t_jobs.c.id)
if ids:
job_ids = [job_id.hex for job_id in ids]
query = query.where(self.t_jobs.c.id.in_(job_ids))
async with self.bind.begin() as conn:
result = await conn.execute(query)
return self._deserialize_jobs(result)
async def acquire_jobs(self, worker_id: str, limit: Optional[int] = None) -> List[Job]:
async with self.bind.begin() as conn:
now = datetime.now(timezone.utc)
acquired_until = now + timedelta(seconds=self.lock_expiration_delay)
query = select([self.t_jobs.c.id, self.t_jobs.c.serialized_data]).\
where(or_(self.t_jobs.c.acquired_until.is_(None),
self.t_jobs.c.acquired_until < now)).\
order_by(self.t_jobs.c.created_at).\
limit(limit)
serialized_jobs: Dict[str, bytes] = {row[0]: row[1]
for row in await conn.execute(query)}
if serialized_jobs:
query = self.t_jobs.update().\
values(acquired_by=worker_id, acquired_until=acquired_until).\
where(self.t_jobs.c.id.in_(serialized_jobs))
await conn.execute(query)
return self._deserialize_jobs(serialized_jobs.items())
async def release_jobs(self, worker_id: str, jobs: List[Job]) -> None:
job_ids = [job.id.hex for job in jobs]
statement = self.t_jobs.delete().\
where(and_(self.t_jobs.c.acquired_by == worker_id, self.t_jobs.c.id.in_(job_ids)))
async with self.bind.begin() as conn:
await conn.execute(statement)
| 16,222 | 1,549 | 68 |
054cd7fa94295f629465d6c8d8d5104a5b922b0f | 5,505 | py | Python | analysis/ConsensusAnalysis.py | glatard/narps | c5d0700de6dabfa9c090761ef4aa7c26f1c066c2 | [
"MIT"
] | null | null | null | analysis/ConsensusAnalysis.py | glatard/narps | c5d0700de6dabfa9c090761ef4aa7c26f1c066c2 | [
"MIT"
] | null | null | null | analysis/ConsensusAnalysis.py | glatard/narps | c5d0700de6dabfa9c090761ef4aa7c26f1c066c2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
"""
run consensus analysis to identify overall pattern
analysis method developed by T Nichols and J Mumford
"""
import os
import sys
import glob
import numpy
import nibabel
import nilearn.plotting
import nilearn.input_data
import matplotlib.pyplot as plt
from statsmodels.stats.multitest import multipletests
import scipy.stats
from narps import Narps, hypnums, hypotheses
from narps import NarpsDirs # noqa, flake8 issue
from utils import log_to_file
def t_corr(y, res_mean=None, res_var=None, Q=None):
"""
perform a one-sample t-test on correlated data
y = data (n observations X n vars)
res_mean = Common mean over voxels and results
res_var = Common variance over voxels and results
Q = "known" correlation across observations
- (use empirical correlation based on maps)
"""
npts = y.shape[0]
X = numpy.ones((npts, 1))
if res_mean is None:
res_mean = 0
if res_var is None:
res_var = 1
if Q is None:
Q = numpy.eye(npts)
VarMean = res_var * X.T.dot(Q).dot(X) / npts**2
# T = mean(y,0)/s-hat-2
# use diag to get s_hat2 for each variable
T = (numpy.mean(y, 0)-res_mean
)/numpy.sqrt(VarMean)*numpy.sqrt(res_var) + res_mean
# Assuming variance is estimated on whole image
# and assuming infinite df
p = 1 - scipy.stats.norm.cdf(T)
return(T, p)
if __name__ == "__main__":
# set an environment variable called NARPS_BASEDIR
# with location of base directory
if 'NARPS_BASEDIR' in os.environ:
basedir = os.environ['NARPS_BASEDIR']
else:
basedir = '/data'
# setup main class
narps = Narps(basedir)
narps.load_data()
narps.dirs.dirs['consensus'] = os.path.join(
narps.dirs.dirs['output'],
'consensus_analysis')
logfile = os.path.join(
narps.dirs.dirs['logs'],
'%s.txt' % sys.argv[0].split('.')[0])
log_to_file(
logfile, 'running %s' %
sys.argv[0].split('.')[0],
flush=True)
if not os.path.exists(narps.dirs.dirs['consensus']):
os.mkdir(narps.dirs.dirs['consensus'])
run_ttests(narps, logfile)
mk_figures(narps, logfile)
| 29.438503 | 78 | 0.592189 | #!/usr/bin/env python
# coding: utf-8
"""
run consensus analysis to identify overall pattern
analysis method developed by T Nichols and J Mumford
"""
import os
import sys
import glob
import numpy
import nibabel
import nilearn.plotting
import nilearn.input_data
import matplotlib.pyplot as plt
from statsmodels.stats.multitest import multipletests
import scipy.stats
from narps import Narps, hypnums, hypotheses
from narps import NarpsDirs # noqa, flake8 issue
from utils import log_to_file
def t_corr(y, res_mean=None, res_var=None, Q=None):
"""
perform a one-sample t-test on correlated data
y = data (n observations X n vars)
res_mean = Common mean over voxels and results
res_var = Common variance over voxels and results
Q = "known" correlation across observations
- (use empirical correlation based on maps)
"""
npts = y.shape[0]
X = numpy.ones((npts, 1))
if res_mean is None:
res_mean = 0
if res_var is None:
res_var = 1
if Q is None:
Q = numpy.eye(npts)
VarMean = res_var * X.T.dot(Q).dot(X) / npts**2
# T = mean(y,0)/s-hat-2
# use diag to get s_hat2 for each variable
T = (numpy.mean(y, 0)-res_mean
)/numpy.sqrt(VarMean)*numpy.sqrt(res_var) + res_mean
# Assuming variance is estimated on whole image
# and assuming infinite df
p = 1 - scipy.stats.norm.cdf(T)
return(T, p)
def run_ttests(narps, logfile,
overwrite=True):
masker = nilearn.input_data.NiftiMasker(
mask_img=narps.dirs.MNI_mask)
results_dir = narps.dirs.dirs['consensus']
func_name = sys._getframe().f_code.co_name
log_to_file(
logfile, '%s' %
func_name)
if not os.path.exists(results_dir):
os.mkdir(results_dir)
for hyp in hypnums:
if not overwrite and os.path.exists(os.path.join(
results_dir,
'hypo%d_1-fdr.nii.gz' % hyp)):
print('using existing results')
continue
print('running consensus analysis for hypothesis', hyp)
maps = glob.glob(os.path.join(
narps.dirs.dirs['output'],
'zstat/*/hypo%d_unthresh.nii.gz' % hyp))
maps.sort()
data = masker.fit_transform(maps)
# get estimated mean, variance, and correlation for t_corr
img_mean = numpy.mean(data)
img_var = numpy.mean(numpy.var(data, 1))
cc = numpy.corrcoef(data)
log_to_file(
logfile,
'mean = %f, var = %f, mean_cc = %f' %
(img_mean, img_var,
numpy.mean(cc[numpy.triu_indices_from(cc, 1)])))
# perform t-test
tvals, pvals = t_corr(data,
res_mean=img_mean,
res_var=img_var,
Q=cc)
# move back into image format
timg = masker.inverse_transform(tvals)
timg.to_filename(os.path.join(results_dir, 'hypo%d_t.nii.gz' % hyp))
pimg = masker.inverse_transform(1-pvals)
pimg.to_filename(os.path.join(results_dir, 'hypo%d_1-p.nii.gz' % hyp))
fdr_results = multipletests(pvals[0, :], 0.05, 'fdr_tsbh')
log_to_file(
logfile,
"%d voxels significant at FDR corrected p<.05" %
numpy.sum(fdr_results[0]))
fdrimg = masker.inverse_transform(1 - fdr_results[1])
fdrimg.to_filename(os.path.join(
results_dir,
'hypo%d_1-fdr.nii.gz' % hyp))
def mk_figures(narps, logfile, thresh=0.95):
func_name = sys._getframe().f_code.co_name
log_to_file(
logfile, '%s' %
func_name)
fig, ax = plt.subplots(7, 1, figsize=(12, 24))
cut_coords = [-24, -10, 4, 18, 32, 52, 64]
for i, hyp in enumerate(hypnums):
pmap = os.path.join(
narps.dirs.dirs['consensus'],
'hypo%d_1-fdr.nii.gz' % hyp)
tmap = os.path.join(
narps.dirs.dirs['consensus'],
'hypo%d_t.nii.gz' % hyp)
pimg = nibabel.load(pmap)
timg = nibabel.load(tmap)
pdata = pimg.get_fdata()
tdata = timg.get_fdata()[:, :, :, 0]
threshdata = (pdata > thresh)*tdata
threshimg = nibabel.Nifti1Image(threshdata, affine=timg.affine)
nilearn.plotting.plot_stat_map(
threshimg,
threshold=0.1,
display_mode="z",
colorbar=True,
title='hyp %d:' % hyp+hypotheses[hyp],
vmax=8,
cmap='jet',
cut_coords=cut_coords,
axes=ax[i])
plt.savefig(os.path.join(
narps.dirs.dirs['figures'],
'consensus_map.pdf'))
plt.close(fig)
if __name__ == "__main__":
# set an environment variable called NARPS_BASEDIR
# with location of base directory
if 'NARPS_BASEDIR' in os.environ:
basedir = os.environ['NARPS_BASEDIR']
else:
basedir = '/data'
# setup main class
narps = Narps(basedir)
narps.load_data()
narps.dirs.dirs['consensus'] = os.path.join(
narps.dirs.dirs['output'],
'consensus_analysis')
logfile = os.path.join(
narps.dirs.dirs['logs'],
'%s.txt' % sys.argv[0].split('.')[0])
log_to_file(
logfile, 'running %s' %
sys.argv[0].split('.')[0],
flush=True)
if not os.path.exists(narps.dirs.dirs['consensus']):
os.mkdir(narps.dirs.dirs['consensus'])
run_ttests(narps, logfile)
mk_figures(narps, logfile)
| 3,236 | 0 | 46 |
48111bf76386d688236fee2e8fec8f616bedd277 | 687 | py | Python | tests/conftest.py | treyhunner/countdown | bee05652893aa3472c001a3eb270c9139bafe32c | [
"MIT"
] | 1 | 2022-01-12T07:28:25.000Z | 2022-01-12T07:28:25.000Z | tests/conftest.py | treyhunner/countdown | bee05652893aa3472c001a3eb270c9139bafe32c | [
"MIT"
] | 33 | 2021-12-30T00:16:08.000Z | 2022-03-31T07:33:26.000Z | tests/conftest.py | treyhunner/countdown-cli | bee05652893aa3472c001a3eb270c9139bafe32c | [
"MIT"
] | null | null | null | """PyTest configuration."""
from __future__ import annotations
from typing import Any
from _pytest.assertion import truncate
truncate.DEFAULT_MAX_LINES = 40
truncate.DEFAULT_MAX_CHARS = 40 * 80
| 24.535714 | 88 | 0.588064 | """PyTest configuration."""
from __future__ import annotations
from typing import Any
from _pytest.assertion import truncate
truncate.DEFAULT_MAX_LINES = 40
truncate.DEFAULT_MAX_CHARS = 40 * 80
def pytest_assertrepr_compare(
op: str,
left: Any,
right: Any,
) -> list[str] | None: # pragma: nocover
if isinstance(left, str) and isinstance(right, str) and "█" in right and op == "==":
return [
"Big number string comparison doesn't match",
"Got:",
*left.splitlines(),
"Expected:",
*right.splitlines(),
"",
f"Repr Comparison: {left!r} != {right!r}",
]
return None
| 468 | 0 | 23 |
5c13847f5b6842c147a8bcfea3eed5cfff7826be | 3,444 | py | Python | src/scenepic/mesh.py | microsoft/scenepic | e3fd2c6312fa670a92b7888962b6812c262c6759 | [
"MIT"
] | 28 | 2021-10-05T08:51:26.000Z | 2022-03-18T11:19:23.000Z | src/scenepic/mesh.py | microsoft/scenepic | e3fd2c6312fa670a92b7888962b6812c262c6759 | [
"MIT"
] | 17 | 2021-10-05T11:36:17.000Z | 2022-02-10T13:33:43.000Z | src/scenepic/mesh.py | microsoft/scenepic | e3fd2c6312fa670a92b7888962b6812c262c6759 | [
"MIT"
] | 2 | 2021-12-12T16:42:51.000Z | 2022-02-23T11:50:14.000Z | """Module which extends the scenepic Mesh type."""
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
from ._scenepic import Mesh, MeshUpdate
class VertexBuffer:
"""Class which provides dictionary access to vertex buffer blocks.
Description:
The different parts of the vertex buffer can be accessed via a dictionary
interface:
- "pos": the vertex positions
- "norm": the vertex normals
- "rgb": the vertex colors (if present)
- "uv": the vertex uvs (if present)
Args:
values (np.ndarray): The raw vertex buffer values
"""
def __init__(self, values: np.ndarray):
"""Initializer."""
self._values = values
self._lookup = {
"pos": slice(0, 3),
"norm": slice(3, 6),
"rgb": slice(6, 9),
"uv": slice(6, 8)
}
@property
def shape(self) -> tuple:
"""Shape of the entire vertex buffer."""
return self._values.shape
def copy(self) -> "VertexBuffer":
"""Returns a copy of the vertex buffer."""
return VertexBuffer(self._values.copy())
def __repr__(self) -> str:
"""Return a string representation of the vertex buffer."""
return str(self._values)
def __getitem__(self, key: str) -> np.ndarray:
"""Returns a sub-section of the buffer given the key.
Args:
key (str): one of ["pos", "norm", "rgb", "uv"]
Returns:
np.ndarray: a subsection of the buffer
"""
assert key in self._lookup, "Invalid vertex buffer key " + key
return self._values[:, self._lookup[key]]
def __setitem__(self, key: str, data: np.ndarray):
"""Sets a subsection of the buffer specified by the key.
Args:
key (str): one of ["pos", "norm", "rgb", "uv"]
data (np.ndarray): The new values to place in the buffer
"""
assert key in self._lookup, "Invalid vertex buffer key " + key
self._values[:, self._lookup[key]] = data
def vertex_buffer(self):
"""VertexBuffer: a reference to the vertex buffer."""
return VertexBuffer(self.get_vertex_buffer())
def quantize(self, keyframe_index: int, fixed_point_range: float, keyframe_vertex_buffer: VertexBuffer):
"""Quantize the mesh update.
Args:
self (MeshUpdate): self reference
keyframe_index (int): Index of the keyframe to use in quantizing this update
fixed_point_range (float): The range to use for the fixed point representation.
keyframe_vertex_buffer (VertexBuffer): The keyframe vertex buffer
"""
self.quantize_(keyframe_index, fixed_point_range, keyframe_vertex_buffer._values)
def difference_range(self, vertex_buffer: VertexBuffer) -> float:
"""Returns the absolute range of values in the difference between this update and the buffer.
Args:
self (MeshUpdate): self reference
vertex_buffer (VertexBuffer): the buffer to use in the comparison
Return:
float: the absolute range (from minimum to maximum) in the per-index difference between
this update and the reference.
"""
self.difference_range_(vertex_buffer._values)
Mesh.vertex_buffer = property(vertex_buffer)
MeshUpdate.vertex_buffer = property(vertex_buffer)
MeshUpdate.quantize = quantize
MeshUpdate.difference_range = difference_range
| 31.888889 | 104 | 0.64489 | """Module which extends the scenepic Mesh type."""
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
from ._scenepic import Mesh, MeshUpdate
class VertexBuffer:
"""Class which provides dictionary access to vertex buffer blocks.
Description:
The different parts of the vertex buffer can be accessed via a dictionary
interface:
- "pos": the vertex positions
- "norm": the vertex normals
- "rgb": the vertex colors (if present)
- "uv": the vertex uvs (if present)
Args:
values (np.ndarray): The raw vertex buffer values
"""
def __init__(self, values: np.ndarray):
"""Initializer."""
self._values = values
self._lookup = {
"pos": slice(0, 3),
"norm": slice(3, 6),
"rgb": slice(6, 9),
"uv": slice(6, 8)
}
@property
def shape(self) -> tuple:
"""Shape of the entire vertex buffer."""
return self._values.shape
def copy(self) -> "VertexBuffer":
"""Returns a copy of the vertex buffer."""
return VertexBuffer(self._values.copy())
def __repr__(self) -> str:
"""Return a string representation of the vertex buffer."""
return str(self._values)
def __getitem__(self, key: str) -> np.ndarray:
"""Returns a sub-section of the buffer given the key.
Args:
key (str): one of ["pos", "norm", "rgb", "uv"]
Returns:
np.ndarray: a subsection of the buffer
"""
assert key in self._lookup, "Invalid vertex buffer key " + key
return self._values[:, self._lookup[key]]
def __setitem__(self, key: str, data: np.ndarray):
"""Sets a subsection of the buffer specified by the key.
Args:
key (str): one of ["pos", "norm", "rgb", "uv"]
data (np.ndarray): The new values to place in the buffer
"""
assert key in self._lookup, "Invalid vertex buffer key " + key
self._values[:, self._lookup[key]] = data
def vertex_buffer(self):
"""VertexBuffer: a reference to the vertex buffer."""
return VertexBuffer(self.get_vertex_buffer())
def quantize(self, keyframe_index: int, fixed_point_range: float, keyframe_vertex_buffer: VertexBuffer):
"""Quantize the mesh update.
Args:
self (MeshUpdate): self reference
keyframe_index (int): Index of the keyframe to use in quantizing this update
fixed_point_range (float): The range to use for the fixed point representation.
keyframe_vertex_buffer (VertexBuffer): The keyframe vertex buffer
"""
self.quantize_(keyframe_index, fixed_point_range, keyframe_vertex_buffer._values)
def difference_range(self, vertex_buffer: VertexBuffer) -> float:
"""Returns the absolute range of values in the difference between this update and the buffer.
Args:
self (MeshUpdate): self reference
vertex_buffer (VertexBuffer): the buffer to use in the comparison
Return:
float: the absolute range (from minimum to maximum) in the per-index difference between
this update and the reference.
"""
self.difference_range_(vertex_buffer._values)
Mesh.vertex_buffer = property(vertex_buffer)
MeshUpdate.vertex_buffer = property(vertex_buffer)
MeshUpdate.quantize = quantize
MeshUpdate.difference_range = difference_range
| 0 | 0 | 0 |
dee147bb1bd19ae8509dfa5dacb3a056e3445202 | 1,279 | py | Python | admin.py | zhengxinxing/bespeak_meal | 0c64d9389afb408e74353051569c5e1018752223 | [
"MIT"
] | null | null | null | admin.py | zhengxinxing/bespeak_meal | 0c64d9389afb408e74353051569c5e1018752223 | [
"MIT"
] | null | null | null | admin.py | zhengxinxing/bespeak_meal | 0c64d9389afb408e74353051569c5e1018752223 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.db import models
from django import forms
from bespeak_meal.models import Person, Person_category, Week_menu
admin.site.register(Person, PersonAdmin)
admin.site.register(Person_category, Person_categoryAdmin)
admin.site.register(Week_menu, Week_menuAdmin) | 29.068182 | 66 | 0.648944 | from django.contrib import admin
from django.db import models
from django import forms
from bespeak_meal.models import Person, Person_category, Week_menu
class PersonAdmin(admin.ModelAdmin):
    """Admin options for Person: list columns, grouped edit form, filter."""
    list_display = ('name', 'category', 'remarks')
    fieldsets = [
        (None, {'fields': ['name']}),
        ('按哪种收费方式', {'fields': ['category']}),
        ('备注', {'fields': ['remarks']}),
    ]
    list_filter = ('category',)  # category filter sidebar on the right
class PersonInline(admin.TabularInline):
    """Tabular inline of Person rows, for embedding in a related admin page."""
    model = Person
class Person_categoryAdmin(admin.ModelAdmin):
    """Admin for Person_category with a computed member-count column."""
    list_display = ('name', 'breakfast_charge', 'lunch_charge',
                    'dinner_charge', 'remarks', 'count')
    # computed column used by list_display
    def count(self, obj):
        # number of Person rows pointing at this category (reverse FK)
        return obj.person_set.count()
    count.short_description = '人员数量'  # human-readable header for the count column
    # let admins add/remove members of this category directly on its page
    inlines = [PersonInline,]
class Week_menuAdmin(admin.ModelAdmin):
    """Admin for Week_menu; shrinks its text areas to 3 rows x 64 cols."""
    formfield_overrides = {
        # resize the text widgets; consider moving this configuration elsewhere later
        models.TextField: {
            'widget': forms.Textarea(
                attrs={ 'rows': 3, 'cols': 64, } )
        },
    }
admin.site.register(Person, PersonAdmin)
admin.site.register(Person_category, Person_categoryAdmin)
admin.site.register(Week_menu, Week_menuAdmin) | 38 | 1,009 | 92 |
21977069d3e5088253cd63c286fd5c97ffb85f66 | 204 | py | Python | dashboard/urls.py | dt-self-service/self-service-app | 36e1608c08917972344341886241c3d51ff6c885 | [
"MIT"
] | 2 | 2020-08-14T15:46:51.000Z | 2020-08-20T07:43:50.000Z | dashboard/urls.py | dt-self-service/self-service-app | 36e1608c08917972344341886241c3d51ff6c885 | [
"MIT"
] | 16 | 2020-06-14T17:16:48.000Z | 2021-12-13T20:48:58.000Z | dashboard/urls.py | dt-self-service/self-service-app | 36e1608c08917972344341886241c3d51ff6c885 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='admin-home'),
path('tables/', views.tables, name='tables'),
path('get_tenant', views.get_tenant),
] | 25.5 | 49 | 0.666667 | from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='admin-home'),
path('tables/', views.tables, name='tables'),
path('get_tenant', views.get_tenant),
] | 0 | 0 | 0 |
07b277e7996093cb49581853b24656c92d364351 | 5,660 | py | Python | lib/python2.7/site-packages/appionlib/apRadon.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | null | null | null | lib/python2.7/site-packages/appionlib/apRadon.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | null | null | null | lib/python2.7/site-packages/appionlib/apRadon.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | 1 | 2019-09-05T20:58:37.000Z | 2019-09-05T20:58:37.000Z | #!/usr/bin/env python
import sys
import time
import math
import numpy
from scipy import ndimage
from pyami import imagefun
from appionlib import apDisplay
from appionlib.apImage import imagefile
#=========================
def classicradon(image, stepsize=2):
"""
computes Radon transform of image
"""
radonlist = []
nsteps = int(math.ceil(180/stepsize))
blackcircle = imagefun.filled_circle(image.shape, image.shape[0]/2*0.75)
mask = 1 - blackcircle
maskline = mask.sum(axis=0) + 1
for i in range(nsteps):
rotated = ndimage.rotate(image, -i*stepsize, reshape=False, order=1)
rotated = mask*rotated
line = rotated.sum(axis=0)
radonlist.append(line/maskline)
radon = numpy.array(radonlist)
radon = radon/radon.std()
#radonlr = numpy.fliplr(radon)
#radon = numpy.vstack((radon, radonlr))
return radon
#=========================
def classicradonlist(imagelist, stepsize=2, maskrad=None, msg=None):
"""
computes Radon transform of image list
"""
t0 = time.time()
if msg is None and len(imagelist) > 50:
msg = True
elif msg is None:
msg = False
radonimagelist = []
if msg is True:
apDisplay.printMsg("Performing Radon transforms with one processor")
for imageid in range(len(imagelist)):
if msg is True and imageid % 50 == 0:
### FUTURE: add time estimate
sys.stderr.write(".")
image = imagelist[imageid]
radonimage = classicradon(image, stepsize)
radonimagelist.append(radonimage)
if msg is True:
sys.stderr.write("\n")
print "Classic Radon images complete in %s"%(apDisplay.timeString(time.time()-t0))
return radonimagelist
#=========================
#=========================
#=========================
def radonImage(image, imageid, stepsize, mask, queue):
"""
computes Radon transform of single image,
requires multiprocessing queue
"""
radonlist = []
nsteps = int(math.ceil(180/float(stepsize)))
maskline = mask.sum(axis=0) + 1
### rotate image and assemble radon image
for i in range(nsteps):
angle = -i*stepsize
rotated = ndimage.rotate(image, angle, reshape=False, order=1)
rotated = mask*rotated
line = rotated.sum(axis=0)
radonlist.append(line/maskline)
radon = numpy.array(radonlist)
### normalize standard deviation
radon = radon/radon.std()
### this does not work with shifting
#radonlr = numpy.fliplr(radon)
#radon = numpy.vstack((radon, radonlr))
queue.put([imageid, radon])
return
#=========================
def radonlist(imagelist, stepsize=2, maskrad=None, msg=None):
"""
computes Radon transform of image list
"""
if msg is None and len(imagelist) > 50:
msg = True
elif msg is None:
msg = False
### Note: multiprocessing version not compatible with python 2.4
from multiprocessing import Queue, Process
t0 = time.time()
### prepare mask
shape = imagelist[0].shape
if maskrad is None:
maskrad = shape[0]/2
blackcircle = imagefun.filled_circle(shape, maskrad)
mask = 1 - blackcircle
### preform radon transform for each image
queuelist = []
if msg is True:
apDisplay.printMsg("Performing Radon transforms with multiprocessor")
for imageid in range(len(imagelist)):
if msg is True and imageid % 50 == 0:
### FUTURE: add time estimate
sys.stderr.write(".")
image = imagelist[imageid]
queue = Queue()
queuelist.append(queue)
#below is equivalent to "radonImage(image, imageid, stepsize, mask, queue)"
proc = Process(target=radonImage, args=(image, imageid, stepsize, mask, queue))
proc.start()
proc.join()
### assemble radon image list
radonimagelist = range(len(imagelist))
for queue in queuelist:
imageid, radonimage = queue.get()
radonimagelist[imageid] = radonimage
if msg is True:
sys.stderr.write("\n")
print "Multi Radon images complete in %s"%(apDisplay.timeString(time.time()-t0))
return radonimagelist
#=========================
#=========================
if __name__ == "__main__":
t0 = time.time()
a = numpy.zeros((512,512))
a[128:256,256:384] = 1
a += numpy.random.random((512,512))
radon(a, 0.5)
radon2(a, 0.5)
print "Completed in %s"%(apDisplay.timeString(time.time() - t0))
| 28.019802 | 84 | 0.682155 | #!/usr/bin/env python
import sys
import time
import math
import numpy
from scipy import ndimage
from pyami import imagefun
from appionlib import apDisplay
from appionlib.apImage import imagefile
#=========================
def classicradon(image, stepsize=2):
	"""
	computes Radon transform of image

	Rotates ``image`` in ``stepsize``-degree steps over 180 degrees, applies
	a circular mask (radius argument 0.75 * width/2) and sums the columns,
	giving one projection line per angle.  The stacked projections are
	normalized to unit standard deviation before being returned.
	"""
	radonlist = []
	nsteps = int(math.ceil(180/stepsize))
	# circular mask; presumably keeps only the central disk -- confirm
	# imagefun.filled_circle's inside/outside convention
	blackcircle = imagefun.filled_circle(image.shape, image.shape[0]/2*0.75)
	mask = 1 - blackcircle
	# per-column sum of mask weights (+1 avoids division by zero)
	maskline = mask.sum(axis=0) + 1
	for i in range(nsteps):
		rotated = ndimage.rotate(image, -i*stepsize, reshape=False, order=1)
		rotated = mask*rotated
		line = rotated.sum(axis=0)
		radonlist.append(line/maskline)
	radon = numpy.array(radonlist)
	radon = radon/radon.std()
	#radonlr = numpy.fliplr(radon)
	#radon = numpy.vstack((radon, radonlr))
	return radon
#=========================
def classicradonlist(imagelist, stepsize=2, maskrad=None, msg=None):
	"""
	computes Radon transform of image list

	Single-process driver around classicradon().  ``msg`` defaults to True
	for lists longer than 50 images; progress is shown as one dot per 50
	images.  NOTE(review): ``maskrad`` is accepted but never used here --
	classicradon() always applies its own fixed-radius mask.
	"""
	t0 = time.time()
	if msg is None and len(imagelist) > 50:
		msg = True
	elif msg is None:
		msg = False
	radonimagelist = []
	if msg is True:
		apDisplay.printMsg("Performing Radon transforms with one processor")
	for imageid in range(len(imagelist)):
		if msg is True and imageid % 50 == 0:
			### FUTURE: add time estimate
			sys.stderr.write(".")
		image = imagelist[imageid]
		radonimage = classicradon(image, stepsize)
		radonimagelist.append(radonimage)
	if msg is True:
		sys.stderr.write("\n")
		print "Classic Radon images complete in %s"%(apDisplay.timeString(time.time()-t0))
	return radonimagelist
#=========================
def project(image, row, angle, mask, queue):
	"""
	Rotate ``image`` by ``angle`` degrees, apply ``mask`` and sum the columns
	into one projection line; push [row, line] onto ``queue`` so the parent
	process can place it into row ``row`` of the assembled radon image.
	"""
	#print "%d, angle=%.3f"%(row, angle)
	### prepare mask
	if mask is None:
		maskrad = image.shape[0]/2
		blackcircle = imagefun.filled_circle(image.shape, maskrad)
		mask = 1 - blackcircle
	# per-column sum of mask weights (+1 avoids division by zero)
	maskline = mask.sum(axis=0) + 1
	### rotate and project image
	rotated = ndimage.rotate(image, angle, reshape=False, order=1)
	rotated = mask*rotated
	#imagefile.arrayToJpeg(rotated, "rotated%02d.jpg"%(row))
	line = rotated.sum(axis=0)
	### insert into radon array
	#print "insert %d, %.3f"%(row, line.mean())
	line = line/maskline
	queue.put([row, line])
	return
#=========================
def radon(image, stepsize=2, maskrad=None):
	"""
	Multiprocessing Radon transform of a single image: one child process per
	projection angle via project().

	NOTE(review): proc.join() directly after proc.start() means each child
	finishes before the next is launched, so the angles are in fact processed
	serially despite using multiprocessing.
	"""
	from multiprocessing import Queue, Process
	t0 = time.time()
	### prepare mask
	if maskrad is None:
		maskrad = image.shape[0]/2
	blackcircle = imagefun.filled_circle(image.shape, maskrad)
	mask = 1 - blackcircle
	nsteps = int(math.ceil(180/stepsize))
	queuelist = []
	for row in range(nsteps):
		angle = -row*stepsize
		queue = Queue()
		queuelist.append(queue)
		#below is equivalent to "project(image, row, angle, mask, queue)"
		proc = Process(target=project, args=(image, row, angle, mask, queue))
		proc.start()
		proc.join()
	### assemble radon image
	radonimage = numpy.zeros( (nsteps, image.shape[0]) )
	for queue in queuelist:
		row, line = queue.get()
		radonimage[row, :] = line
	#radonlr = numpy.fliplr(radonimage)
	#radonimage = numpy.vstack((radonimage, radonlr))
	# side effect: writes a JPEG preview of the transform to the CWD
	imagefile.arrayToJpeg(radonimage, "radonimage.jpg", msg=False)
	print "Multi radon completed in %s"%(apDisplay.timeString(time.time() - t0))
	return radonimage
#=========================
def radonImage(image, imageid, stepsize, mask, queue):
	"""
	computes Radon transform of single image,
	requires multiprocessing queue

	Rotates the image in ``stepsize``-degree steps over 180 degrees, summing
	masked columns into one projection per angle; the finished transform is
	pushed onto ``queue`` together with ``imageid`` so the parent process can
	reassemble results in their original order.
	"""
	radonlist = []
	nsteps = int(math.ceil(180/float(stepsize)))
	# per-column sum of mask weights (+1 avoids division by zero)
	maskline = mask.sum(axis=0) + 1
	### rotate image and assemble radon image
	for i in range(nsteps):
		angle = -i*stepsize
		rotated = ndimage.rotate(image, angle, reshape=False, order=1)
		rotated = mask*rotated
		line = rotated.sum(axis=0)
		radonlist.append(line/maskline)
	radon = numpy.array(radonlist)
	### normalize standard deviation
	radon = radon/radon.std()
	### this does not work with shifting
	#radonlr = numpy.fliplr(radon)
	#radon = numpy.vstack((radon, radonlr))
	queue.put([imageid, radon])
	return
#=========================
def radonlist(imagelist, stepsize=2, maskrad=None, msg=None):
	"""
	computes Radon transform of image list

	Multiprocessing driver around radonImage(): one child process per image.
	NOTE(review): proc.join() is called immediately after proc.start(), so
	the children actually run one at a time rather than in parallel.
	"""
	if msg is None and len(imagelist) > 50:
		msg = True
	elif msg is None:
		msg = False
	### Note: multiprocessing version not compatible with python 2.4
	from multiprocessing import Queue, Process
	t0 = time.time()
	### prepare mask
	shape = imagelist[0].shape
	if maskrad is None:
		maskrad = shape[0]/2
	blackcircle = imagefun.filled_circle(shape, maskrad)
	mask = 1 - blackcircle
	### preform radon transform for each image
	queuelist = []
	if msg is True:
		apDisplay.printMsg("Performing Radon transforms with multiprocessor")
	for imageid in range(len(imagelist)):
		if msg is True and imageid % 50 == 0:
			### FUTURE: add time estimate
			sys.stderr.write(".")
		image = imagelist[imageid]
		queue = Queue()
		queuelist.append(queue)
		#below is equivalent to "radonImage(image, imageid, stepsize, mask, queue)"
		proc = Process(target=radonImage, args=(image, imageid, stepsize, mask, queue))
		proc.start()
		proc.join()
	### assemble radon image list
	# range() returns a list under Python 2; used here as a pre-sized result list
	radonimagelist = range(len(imagelist))
	for queue in queuelist:
		imageid, radonimage = queue.get()
		radonimagelist[imageid] = radonimage
	if msg is True:
		sys.stderr.write("\n")
		print "Multi Radon images complete in %s"%(apDisplay.timeString(time.time()-t0))
	return radonimagelist
#=========================
#=========================
if __name__ == "__main__":
t0 = time.time()
a = numpy.zeros((512,512))
a[128:256,256:384] = 1
a += numpy.random.random((512,512))
radon(a, 0.5)
radon2(a, 0.5)
print "Completed in %s"%(apDisplay.timeString(time.time() - t0))
| 1,547 | 0 | 44 |
033d1509e978b1a962b5e1fddcf5d23c3c27e8e6 | 8,101 | py | Python | lagou/middlewares.py | JairusTse/lagou_spider | 37887bf8eb0fb80df4abd117dc5eb2f24b8a5312 | [
"MIT"
] | null | null | null | lagou/middlewares.py | JairusTse/lagou_spider | 37887bf8eb0fb80df4abd117dc5eb2f24b8a5312 | [
"MIT"
] | null | null | null | lagou/middlewares.py | JairusTse/lagou_spider | 37887bf8eb0fb80df4abd117dc5eb2f24b8a5312 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# ua_list = [
# 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0',
# 'Mozilla/5.0 (Macintosh; Intel Mac OS X x.y; rv:42.0) Gecko/20100101 Firefox/42.0',
# 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
# 'Opera/9.80 (Macintosh; Intel Mac OS X; U; en) Presto/2.2.15 Version/10.00'
# ]
#
# custom_settings = {
# "COOKIES_ENABLED": False,
# "DOWNLOAD_DELAY": 1,
# 'DEFAULT_REQUEST_HEADERS': {
# 'Accept': 'application/json, text/javascript, */*; q=0.01',
# 'Accept-Encoding': 'gzip, deflate, br',
# 'Accept-Language': 'zh-CN,zh;q=0.8',
# 'Connection': 'keep-alive',
# 'Cookie': 'user_trace_token=20171015132411-12af3b52-3a51-466f-bfae-a98fc96b4f90; LGUID=20171015132412-13eaf40f-b169-11e7-960b-525400f775ce; SEARCH_ID=070e82cdbbc04cc8b97710c2c0159ce1; ab_test_random_num=0; X_HTTP_TOKEN=d1cf855aacf760c3965ee017e0d3eb96; showExpriedIndex=1; showExpriedCompanyHome=1; showExpriedMyPublish=1; hasDeliver=0; PRE_UTM=; PRE_HOST=www.baidu.com; PRE_SITE=https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DsXIrWUxpNGLE2g_bKzlUCXPTRJMHxfCs6L20RqgCpUq%26wd%3D%26eqid%3Dee53adaf00026e940000000559e354cc; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2F; index_location_city=%E5%85%A8%E5%9B%BD; TG-TRACK-CODE=index_hotjob; login=false; unick=""; _putrc=""; JSESSIONID=ABAAABAAAFCAAEG50060B788C4EED616EB9D1BF30380575; _gat=1; _ga=GA1.2.471681568.1508045060; LGSID=20171015203008-94e1afa5-b1a4-11e7-9788-525400f775ce; LGRID=20171015204552-c792b887-b1a6-11e7-9788-525400f775ce',
# 'Host': 'www.lagou.com',
# 'Origin': 'https://www.lagou.com',
# 'Referer': 'https://www.lagou.com/',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
# }
# } | 56.65035 | 1,789 | 0.710283 | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class LagouSpiderMiddleware(object):
    """Spider middleware for the lagou project.

    This is the default scrapy template behaviour: every hook passes
    responses, items and requests through unchanged.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Build the middleware and register the spider_opened log hook.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Accept every response; returning None lets processing continue.
        return None

    def process_spider_output(self, response, result, spider):
        # Forward whatever the spider produced, unmodified.
        for item in result:
            yield item

    def process_spider_exception(self, response, exception, spider):
        # No special handling; fall back to scrapy's default behaviour.
        return None

    def process_start_requests(self, start_requests, spider):
        # Forward the start requests untouched.
        for request in start_requests:
            yield request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class LagouDownloaderMiddleware(object):
    """Downloader middleware for the lagou project.

    Pure pass-through: requests, responses and exceptions are handed back
    to scrapy's default processing chain unchanged.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Build the middleware and register the spider_opened log hook.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # Returning None tells scrapy to keep processing this request.
        return None

    def process_response(self, request, response, spider):
        # Hand the downloaded response back untouched.
        return response

    def process_exception(self, request, exception, spider):
        # No recovery logic; let other middlewares / the default handler run.
        return None

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class CookieMiddleware(object):
    """Downloader middleware that stamps every outgoing request with a fixed
    set of browser-like headers, including a captured lagou.com login cookie.

    NOTE(review): the cookie below was captured from a real session and will
    eventually expire; refresh it (or load it from settings) when requests
    start failing.
    """

    # Header values captured from a logged-in browser session.
    # Bug fix: the original assigned each header with a trailing comma
    # (``request.headers[k] = 'v',``), which stored 1-tuples instead of
    # plain strings.
    _HEADERS = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7',
        'cache-control': 'max-age=0',
        'cookie': 'JSESSIONID=ABAAAECABIEACCADB90A86A71E6DAD2ECF2B38727C77577; WEBTJ-ID=20200502161006-171d46f74bba8-021a03d728c4-153f6554-1440000-171d46f74bc406; user_trace_token=20200502161006-5f64092b-68ad-49bb-85a1-952defce0136; LGUID=20200502161006-24b8a27b-853e-42e1-9809-59c326101987; _ga=GA1.2.1074256822.1588407007; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1587110728; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22171d47186695ae-07036821372d5c-153f6554-1440000-171d471866ab1b%22%2C%22%24device_id%22%3A%22171d47186695ae-07036821372d5c-153f6554-1440000-171d471866ab1b%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%7D%7D; X_MIDDLE_TOKEN=02fc7f6174fa05f29534d0042e6e367e; _gid=GA1.2.1621370232.1589458129; RECOMMEND_TIP=true; showExpriedIndex=1; showExpriedCompanyHome=1; showExpriedMyPublish=1; privacyPolicyPopup=false; TG-TRACK-CODE=index_navigation; SEARCH_ID=3ba76d5b9b864e779f1d0273fb7905d9; index_location_city=%E5%85%A8%E5%9B%BD; LGSID=20200517011232-2aa8e8d6-da46-46b4-85fb-5cedd9d0786b; PRE_UTM=; PRE_HOST=; PRE_SITE=; PRE_LAND=https%3A%2F%2Fpassport.lagou.com%2Flogin%2Flogin.html%3Fmsg%3Dvalidation%26uStatus%3D2%26clientIp%3D101.104.53.73; gate_login_token=f1b9dbaba1d5b52e5a9da435b228976eea3cb5c1fae38c6329dae2ac4305d11b; _putrc=43F69FDFC6C99C97123F89F2B170EADC; login=true; unick=%E7%94%A8%E6%88%B78772; _gat=1; hasDeliver=0; X_HTTP_TOKEN=1a6f741dbfa808232059469851197b66cad4d9d31a; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1589649504; LGRID=20200517011822-317e4c36-5f46-4051-a7a9-3880a2af4d31',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X x.y; rv:42.0) Gecko/20100101 Firefox/42.0',
    }

    def process_request(self, request, spider):
        # Apply the static headers to the outgoing request; returning None
        # lets scrapy continue processing it.
        for name, value in self._HEADERS.items():
            request.headers[name] = value
        return None

    def process_response(self, request, response, spider):
        # Responses pass through unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # No recovery logic; defer to scrapy's default handling.
        return None
970e618696198330d69062cee61245ffe5bebcc4 | 2,475 | py | Python | backintime/market_data_storage/market_data_storage.py | akim-mukhtarov/backtesting | 2d0491b919885eeddd62c4079c9c7292381cb4f9 | [
"MIT"
] | null | null | null | backintime/market_data_storage/market_data_storage.py | akim-mukhtarov/backtesting | 2d0491b919885eeddd62c4079c9c7292381cb4f9 | [
"MIT"
] | null | null | null | backintime/market_data_storage/market_data_storage.py | akim-mukhtarov/backtesting | 2d0491b919885eeddd62c4079c9c7292381cb4f9 | [
"MIT"
] | null | null | null | from ..candles_providers import CandlesProvider
from ..candle_properties import CandleProperties
from ..timeframes import Timeframes
from .timeframe_values import TimeframeValues
from .float_generator import FloatGenerator
class MarketDataStorage:
"""
Stores historical market data that was reserved by oscillators
It can be accessed by providing desired timeframe, property and size
"""
def get(
self,
timeframe: Timeframes,
property: CandleProperties,
max_size: int
) -> FloatGenerator:
"""
Return at most `max_size` of `property` values
of `timeframe` candles
:param timeframe:
buffer will be associated with this timeframe
:param property:
OHLCV property to store
:param size:
max size of buffer
"""
timeframe_values = self._timeframes_values[timeframe]
return timeframe_values.get(property, max_size)
def reserve(
self,
timeframe: Timeframes,
property: CandleProperties,
size: int
) -> None:
"""
Reserves buffer to store at most `size` of `property` values
of `timeframe` candles
If already has one, will be resized if needed
:param timeframe:
buffer will be associated with this timeframe
:param property:
OHLCV property to store
:param size:
max size of buffer
"""
if not timeframe in self._timeframes_values:
self._timeframes_values[timeframe] = TimeframeValues(timeframe, self._market_data)
timeframe_values = self._timeframes_values[timeframe]
if not property in timeframe_values:
timeframe_values.add_property_buffer(property, size)
property_buffer = timeframe_values.get_property_buffer(property)
if property_buffer.capacity() < size:
property_buffer.resize(size)
def update(self) -> None:
"""
Runs each time a new candle closes
Each value buffer will be updated by the new candle if needed
"""
for timeframe_values in self._timeframes_values.values():
timeframe_values.update()
| 33 | 95 | 0.621818 | from ..candles_providers import CandlesProvider
from ..candle_properties import CandleProperties
from ..timeframes import Timeframes
from .timeframe_values import TimeframeValues
from .float_generator import FloatGenerator
class MarketDataStorage:
    """
    Stores historical market data that was reserved by oscillators

    Values are looked up by timeframe, OHLCV property and maximum size.
    """

    def __init__(self, market_data: CandlesProvider):
        """Initialize with the candle source shared by all buffers."""
        self._market_data = market_data
        # Maps a Timeframes value to its TimeframeValues container.
        self._timeframes_values = {}

    def get(self, timeframe: Timeframes, property: CandleProperties, max_size: int) -> FloatGenerator:
        """
        Return at most `max_size` of `property` values of `timeframe` candles.

        :param timeframe: timeframe the buffer is associated with
        :param property: OHLCV property to read
        :param max_size: max number of values to return
        """
        return self._timeframes_values[timeframe].get(property, max_size)

    def reserve(self, timeframe: Timeframes, property: CandleProperties, size: int) -> None:
        """
        Reserve a buffer for at most `size` of `property` values of
        `timeframe` candles; an existing buffer is grown when too small.

        :param timeframe: timeframe the buffer is associated with
        :param property: OHLCV property to store
        :param size: max size of buffer
        """
        storage = self._timeframes_values
        if timeframe not in storage:
            storage[timeframe] = TimeframeValues(timeframe, self._market_data)
        values = storage[timeframe]
        if property not in values:
            values.add_property_buffer(property, size)
        buffer = values.get_property_buffer(property)
        if buffer.capacity() < size:
            buffer.resize(size)

    def update(self) -> None:
        """
        Runs each time a new candle closes; propagates the update to every
        per-timeframe value container.
        """
        for values in self._timeframes_values.values():
            values.update()
| 107 | 0 | 27 |
5c8c52a3029a1ee7c398f84a284a8491329cff3d | 763 | py | Python | django/tiantian/utils/usermiddleware.py | zhang15780/web_project | 820708ae68f4d1bc06cdde4a86e40a5457c11df8 | [
"Apache-2.0"
] | null | null | null | django/tiantian/utils/usermiddleware.py | zhang15780/web_project | 820708ae68f4d1bc06cdde4a86e40a5457c11df8 | [
"Apache-2.0"
] | null | null | null | django/tiantian/utils/usermiddleware.py | zhang15780/web_project | 820708ae68f4d1bc06cdde4a86e40a5457c11df8 | [
"Apache-2.0"
] | null | null | null | import datetime
from django.http import HttpResponseRedirect
from django.utils.deprecation import MiddlewareMixin
from users.models import UserSession
| 29.346154 | 64 | 0.593709 | import datetime
from django.http import HttpResponseRedirect
from django.utils.deprecation import MiddlewareMixin
from users.models import UserSession
class AuthMiddleware(MiddlewareMixin):
    """Ticket-cookie authentication middleware.

    Resolves the ``ticket`` cookie to a ``UserSession`` row and attaches the
    matching user to ``request.user`` (empty string when not authenticated).
    Expired sessions are deleted and the client is redirected to the login
    page.
    """

    def process_request(self, request):
        ticket = request.COOKIES.get('ticket')
        if not ticket:
            # No ticket cookie: anonymous request.
            request.user = ''
            return None
        sessions = UserSession.objects.filter(ticket=ticket)
        if not sessions:
            # Bug fix: a stale/unknown ticket previously left request.user
            # unset, so later attribute access raised AttributeError.
            request.user = ''
            return None
        session = sessions[0]
        # Sessions store aware datetimes; compare as naive UTC, matching the
        # original utcnow() comparison.
        out_time = session.out_time.replace(tzinfo=None)
        if datetime.datetime.utcnow() >= out_time:
            # Session expired: drop it and force a fresh login.
            session.delete()
            return HttpResponseRedirect('/user/login/')
        request.user = session.user
        return None
| 542 | 17 | 49 |
5d74b09cb62947a39695f78253f454a7bc34429e | 5,816 | py | Python | pyscf/gto/ecp.py | mtreinish/pyscf | b3c86bc145c180230cb6aba81e9c47b5764aeec4 | [
"Apache-2.0"
] | 1 | 2021-01-24T13:35:42.000Z | 2021-01-24T13:35:42.000Z | pyscf/gto/ecp.py | holy0213/pyscf | aff8a94003cc47ff5e741ce648d877b008a0c59e | [
"Apache-2.0"
] | 36 | 2018-08-22T19:44:03.000Z | 2020-05-09T10:02:36.000Z | pyscf/gto/ecp.py | holy0213/pyscf | aff8a94003cc47ff5e741ce648d877b008a0c59e | [
"Apache-2.0"
] | 4 | 2018-02-14T16:28:28.000Z | 2019-08-12T16:40:30.000Z | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Effective core potential (ECP)
This module exposes some ecp integration functions from the C implementation.
Reference for ecp integral computation
* Analytical integration
J. Chem. Phys. 65, 3826
J. Chem. Phys. 111, 8778
J. Comput. Phys. 44, 289
* Numerical integration
J. Comput. Chem. 27, 1009
Chem. Phys. Lett. 296, 445
'''
import ctypes
import numpy
from pyscf import lib
from pyscf.gto import moleintor
libecp = moleintor.libcgto
libecp.ECPscalar_cache_size.restype = ctypes.c_int
AS_ECPBAS_OFFSET= 18
AS_NECPBAS = 19
def so_by_shell(mol, shls):
    '''Spin-orbit coupling ECP in spinor basis
    i/2 <Pauli_matrix dot l U(r)>

    Args:
        mol : Mole object carrying ECP data in mol._ecpbas
        shls : two shell indices (bra, ket) into mol._bas

    Returns:
        complex128 ndarray of shape (di, dj), Fortran order, where di/dj are
        the spinor dimensions of the two shells.

    Note:
        mutates mol._env in place (ECP basis offset/length slots).
    '''
    li = mol.bas_angular(shls[0])
    lj = mol.bas_angular(shls[1])
    # spinor dimension: (4l+2) components times the number of contractions
    di = (li*4+2) * mol.bas_nctr(shls[0])
    dj = (lj*4+2) * mol.bas_nctr(shls[1])
    # append the ECP basis after the regular basis and record its offset and
    # length in _env so the C integral code can locate it
    bas = numpy.vstack((mol._bas, mol._ecpbas))
    mol._env[AS_ECPBAS_OFFSET] = len(mol._bas)
    mol._env[AS_NECPBAS] = len(mol._ecpbas)
    buf = numpy.empty((di,dj), order='F', dtype=numpy.complex128)
    # scratch workspace for the C routine
    cache = numpy.empty(buf.size*48)
    fn = libecp.ECPso_spinor
    fn(buf.ctypes.data_as(ctypes.c_void_p),
       (ctypes.c_int*2)(di, dj),
       (ctypes.c_int*2)(*shls),
       mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
       bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
       mol._env.ctypes.data_as(ctypes.c_void_p), lib.c_null_ptr(),
       cache.ctypes.data_as(ctypes.c_void_p))
    return buf
if __name__ == '__main__':
    from pyscf import gto, scf
    # Smoke test 1: CuH2(-) with the LANL2DZ ECP on Cu. The second value
    # printed is the reference energy for manual comparison.
    mol = gto.M(atom='''
Cu 0. 0. 0.
H 0. 0. -1.56
H 0. 0. 1.56
''',
                basis={'Cu':'lanl2dz', 'H':'sto3g'},
                ecp = {'cu':'lanl2dz'},
                #basis={'Cu':'crenbs', 'H':'sto3g'},
                #ecp = {'cu':'crenbs'},
                charge=-1,
                verbose=4)
    mf = scf.RHF(mol)
    print(mf.kernel(), -196.09477546034623)
    # Smoke test 2: NaH with the LANL2DZ ECP on Na.
    mol = gto.M(atom='''
Na 0. 0. 0.
H 0. 0. 1.
''',
                basis={'Na':'lanl2dz', 'H':'sto3g'},
                ecp = {'Na':'lanl2dz'},
                verbose=0)
    mf = scf.RHF(mol)
    print(mf.kernel(), -0.45002315562861461)
| 33.234286 | 88 | 0.627407 | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Effective core potential (ECP)
This module exposes some ecp integration functions from the C implementation.
Reference for ecp integral computation
* Analytical integration
J. Chem. Phys. 65, 3826
J. Chem. Phys. 111, 8778
J. Comput. Phys. 44, 289
* Numerical integration
J. Comput. Chem. 27, 1009
Chem. Phys. Lett. 296, 445
'''
import ctypes
import numpy
from pyscf import lib
from pyscf.gto import moleintor
libecp = moleintor.libcgto
libecp.ECPscalar_cache_size.restype = ctypes.c_int
def type1_by_shell(mol, shls, cart=False):
    """Type-1 ECP integrals for one shell pair via the C library.

    :param mol: Mole-like object with _atm/_bas/_env and _ecpbas attached.
    :param shls: pair of shell indices (ish, jsh).
    :param cart: if True use Cartesian GTOs, otherwise spherical.
    :return: (di, dj) Fortran-ordered float array of integrals.
    """
    li = mol.bas_angular(shls[0])
    lj = mol.bas_angular(shls[1])
    if cart:
        fn = libecp.ECPtype1_cart
        # Cartesian components: (l+1)(l+2)/2 per contraction.
        di = (li+1)*(li+2)//2 * mol.bas_nctr(shls[0])
        dj = (lj+1)*(lj+2)//2 * mol.bas_nctr(shls[1])
    else:
        fn = libecp.ECPtype1_sph
        # Spherical components: 2l+1 per contraction.
        di = (li*2+1) * mol.bas_nctr(shls[0])
        dj = (lj*2+1) * mol.bas_nctr(shls[1])
    # Ask the C library how much scratch space the kernel needs.
    cache_size = libecp.ECPscalar_cache_size(
        ctypes.c_int(1), (ctypes.c_int*2)(*shls),
        mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
        mol._bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
        mol._env.ctypes.data_as(ctypes.c_void_p))
    cache = numpy.empty(cache_size)
    buf = numpy.empty((di,dj), order='F')
    fn(buf.ctypes.data_as(ctypes.c_void_p),
       (ctypes.c_int*2)(*shls),
       mol._ecpbas.ctypes.data_as(ctypes.c_void_p),
       ctypes.c_int(len(mol._ecpbas)),
       mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
       mol._bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
       mol._env.ctypes.data_as(ctypes.c_void_p), lib.c_null_ptr(),
       cache.ctypes.data_as(ctypes.c_void_p))
    return buf
def type2_by_shell(mol, shls, cart=False):
    """Type-2 ECP integrals for one shell pair via the C library.

    Mirrors :func:`type1_by_shell` but dispatches to the ECPtype2 kernels.

    :param mol: Mole-like object with _atm/_bas/_env and _ecpbas attached.
    :param shls: pair of shell indices (ish, jsh).
    :param cart: if True use Cartesian GTOs, otherwise spherical.
    :return: (di, dj) Fortran-ordered float array of integrals.
    """
    li = mol.bas_angular(shls[0])
    lj = mol.bas_angular(shls[1])
    if cart:
        fn = libecp.ECPtype2_cart
        # Cartesian components: (l+1)(l+2)/2 per contraction.
        di = (li+1)*(li+2)//2 * mol.bas_nctr(shls[0])
        dj = (lj+1)*(lj+2)//2 * mol.bas_nctr(shls[1])
    else:
        fn = libecp.ECPtype2_sph
        # Spherical components: 2l+1 per contraction.
        di = (li*2+1) * mol.bas_nctr(shls[0])
        dj = (lj*2+1) * mol.bas_nctr(shls[1])
    # Ask the C library how much scratch space the kernel needs.
    cache_size = libecp.ECPscalar_cache_size(
        ctypes.c_int(1), (ctypes.c_int*2)(*shls),
        mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
        mol._bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
        mol._env.ctypes.data_as(ctypes.c_void_p))
    cache = numpy.empty(cache_size)
    buf = numpy.empty((di,dj), order='F')
    fn(buf.ctypes.data_as(ctypes.c_void_p),
       (ctypes.c_int*2)(*shls),
       mol._ecpbas.ctypes.data_as(ctypes.c_void_p),
       ctypes.c_int(len(mol._ecpbas)),
       mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
       mol._bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
       mol._env.ctypes.data_as(ctypes.c_void_p), lib.c_null_ptr(),
       cache.ctypes.data_as(ctypes.c_void_p))
    return buf
# Slots in mol._env where so_by_shell records the ECP basis offset and length
# so the C routine can locate the ECP shells appended after the regular basis.
AS_ECPBAS_OFFSET= 18
AS_NECPBAS = 19
def so_by_shell(mol, shls):
    '''Spin-orbit coupling ECP in spinor basis
    i/2 <Pauli_matrix dot l U(r)>

    :param mol: Mole-like object providing _atm/_bas/_env and _ecpbas.
    :param shls: pair of shell indices (ish, jsh).
    :return: complex128 (di, dj) Fortran-ordered integral block.
    '''
    li = mol.bas_angular(shls[0])
    lj = mol.bas_angular(shls[1])
    # Spinor dimension: (4l+2) components per contraction.
    di = (li*4+2) * mol.bas_nctr(shls[0])
    dj = (lj*4+2) * mol.bas_nctr(shls[1])
    # Append the ECP shells after the regular basis and record where they
    # start and how many there are, via the AS_* slots of _env.
    # NOTE(review): this mutates mol._env in place and is not restored on
    # return — confirm callers do not rely on those slots.
    bas = numpy.vstack((mol._bas, mol._ecpbas))
    mol._env[AS_ECPBAS_OFFSET] = len(mol._bas)
    mol._env[AS_NECPBAS] = len(mol._ecpbas)
    buf = numpy.empty((di,dj), order='F', dtype=numpy.complex128)
    cache = numpy.empty(buf.size*48)  # scratch workspace for the C kernel
    fn = libecp.ECPso_spinor
    fn(buf.ctypes.data_as(ctypes.c_void_p),
       (ctypes.c_int*2)(di, dj),
       (ctypes.c_int*2)(*shls),
       mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
       bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
       mol._env.ctypes.data_as(ctypes.c_void_p), lib.c_null_ptr(),
       cache.ctypes.data_as(ctypes.c_void_p))
    return buf
def core_configuration(nelec_core):
    """Return the closed-shell core configuration for a given core size.

    :param nelec_core: number of core electrons replaced by the ECP.
    :return: list ``[ns, np, nd, nf]`` giving the number of core shells of
        each angular momentum.
    :raises RuntimeError: if ``nelec_core`` does not correspond to a known
        closed-shell core.
    """
    conf_dic = {
        0 : '0s0p0d0f',
        2 : '1s0p0d0f',
        10: '2s1p0d0f',
        18: '3s2p0d0f',
        28: '3s2p1d0f',
        36: '4s3p1d0f',
        46: '4s3p2d0f',
        54: '5s4p2d0f',
        60: '4s3p2d1f',
        68: '5s4p2d1f',
        78: '5s4p3d1f',
        92: '5s4p3d2f',
    }
    if nelec_core not in conf_dic:
        # Bug fix: the original raised with a bare '%d' placeholder that was
        # never formatted with the actual electron count.
        raise RuntimeError('Core configuration for %d core electrons is not '
                           'available.' % nelec_core)
    # E.g. '3s2p1d0f' -> [3, 2, 1, 0]: every other character is a shell count.
    coreshell = [int(x) for x in conf_dic[nelec_core][::2]]
    return coreshell
if __name__ == '__main__':
    from pyscf import gto, scf
    # Smoke test 1: CuH2(-) with the LANL2DZ ECP on Cu. The second value
    # printed is the reference energy for manual comparison.
    mol = gto.M(atom='''
Cu 0. 0. 0.
H 0. 0. -1.56
H 0. 0. 1.56
''',
                basis={'Cu':'lanl2dz', 'H':'sto3g'},
                ecp = {'cu':'lanl2dz'},
                #basis={'Cu':'crenbs', 'H':'sto3g'},
                #ecp = {'cu':'crenbs'},
                charge=-1,
                verbose=4)
    mf = scf.RHF(mol)
    print(mf.kernel(), -196.09477546034623)
    # Smoke test 2: NaH with the LANL2DZ ECP on Na.
    mol = gto.M(atom='''
Na 0. 0. 0.
H 0. 0. 1.
''',
                basis={'Na':'lanl2dz', 'H':'sto3g'},
                ecp = {'Na':'lanl2dz'},
                verbose=0)
    mf = scf.RHF(mol)
    print(mf.kernel(), -0.45002315562861461)
| 2,916 | 0 | 69 |
f73abc1e615309be1749df31916bc425ccea0619 | 352 | py | Python | setup.py | Ykobe/fingerid | 9c7cbeb3f0350c64a210c262e47264246dde4997 | [
"Apache-2.0"
] | 11 | 2015-10-08T07:19:05.000Z | 2020-05-27T12:10:31.000Z | setup.py | Ykobe/fingerid | 9c7cbeb3f0350c64a210c262e47264246dde4997 | [
"Apache-2.0"
] | 7 | 2016-05-25T21:37:28.000Z | 2018-10-03T09:37:31.000Z | setup.py | Ykobe/fingerid | 9c7cbeb3f0350c64a210c262e47264246dde4997 | [
"Apache-2.0"
] | 4 | 2018-11-20T01:07:05.000Z | 2020-01-12T11:36:14.000Z |
from setuptools import setup, find_packages

# Package metadata, passed unchanged to setuptools.setup() below.
config = {
    'description':'fingerid-package',
    'author':'Huibin Shen',
    'url':'project https://github.com/icdishb/fingerid',
    'author_email':'huibin.shen@aalto.fi',
    'version':'1.4',
    'install_requires':['nose'],
    'packages':find_packages(),  # auto-discover all packages in the tree
    'name':'fingerid',
}
setup(**config)
| 20.705882 | 56 | 0.644886 |
from setuptools import setup, find_packages

# Package metadata, passed unchanged to setuptools.setup() below.
config = {
    'description':'fingerid-package',
    'author':'Huibin Shen',
    'url':'project https://github.com/icdishb/fingerid',
    'author_email':'huibin.shen@aalto.fi',
    'version':'1.4',
    'install_requires':['nose'],
    'packages':find_packages(),  # auto-discover all packages in the tree
    'name':'fingerid',
}
setup(**config)
| 0 | 0 | 0 |
f7900d5a9d979ce44979ab77dce6b668305c892e | 10,251 | py | Python | brainspace/mesh/mesh_operations.py | josemariamoreira/BrainSpace | d7e8e65c6463a81146e7fcfcca902feef04d329d | [
"BSD-3-Clause"
] | null | null | null | brainspace/mesh/mesh_operations.py | josemariamoreira/BrainSpace | d7e8e65c6463a81146e7fcfcca902feef04d329d | [
"BSD-3-Clause"
] | null | null | null | brainspace/mesh/mesh_operations.py | josemariamoreira/BrainSpace | d7e8e65c6463a81146e7fcfcca902feef04d329d | [
"BSD-3-Clause"
] | null | null | null | """
Basic functions on surface meshes.
"""
# Author: Oualid Benkarim <oualid.benkarim@mcgill.ca>
# License: BSD 3 clause
import warnings
import numpy as np
from vtk import (vtkDataObject, vtkThreshold, vtkGeometryFilter,
vtkAppendPolyData)
from .array_operations import get_connected_components
from ..vtk_interface import wrap_vtk, serial_connect, get_output
from ..vtk_interface.pipeline import connect
from ..vtk_interface.decorators import wrap_input
ASSOC_CELLS = vtkDataObject.FIELD_ASSOCIATION_CELLS
ASSOC_POINTS = vtkDataObject.FIELD_ASSOCIATION_POINTS
@wrap_input(0)
def _surface_selection(surf, array_name, low=-np.inf, upp=np.inf,
                       use_cell=False, keep=True):
    """Selection of points or cells meeting some thresholding criteria.

    Parameters
    ----------
    surf : vtkPolyData or BSPolyData
        Input surface.
    array_name : str or ndarray
        Array used to perform selection.
    low : float or -np.inf
        Lower threshold. Default is -np.inf.
    upp : float or np.inf
        Upper threshold. Default is +np.inf.
    use_cell : bool, optional
        If True, apply selection to cells. Otherwise, use points.
        Default is False.
    keep : bool, optional
        If True, elements within the thresholds (inclusive) are kept.
        Otherwise, are discarded. Default is True.
        NOTE(review): only ``keep=True`` is actually implemented; passing
        ``keep=False`` raises ValueError below, so the drop_* wrappers
        currently fail — confirm whether invert support should be restored.

    Returns
    -------
    surf_selected : BSPolyData
        Surface after thresholding.
    """
    if low > upp:
        raise ValueError('Threshold limits are not valid: {0} -- {1}'.
                         format(low, upp))
    at = 'c' if use_cell else 'p'
    if isinstance(array_name, np.ndarray):
        # Ad-hoc array: append it temporarily so VTK can threshold on it,
        # then remove it again at the end (drop_array flag).
        drop_array = True
        array = array_name
        array_name = surf.append_array(array, at=at)
    else:
        drop_array = False
        array = surf.get_array(name=array_name, at=at, return_name=False)
    if array.ndim > 1:
        raise ValueError('Arrays has more than one dimension.')
    # Open-ended bounds default to the data range.
    if low == -np.inf:
        low = array.min()
    if upp == np.inf:
        upp = array.max()
    if keep is False:
        raise ValueError("Don't support 'keep=False'.")
    # tf = wrap_vtk(vtkThreshold, invert=not keep)
    tf = wrap_vtk(vtkThreshold)
    tf.ThresholdBetween(low, upp)
    if use_cell:
        tf.SetInputArrayToProcess(0, 0, 0, ASSOC_CELLS, array_name)
    else:
        tf.SetInputArrayToProcess(0, 0, 0, ASSOC_POINTS, array_name)
    # Thresholding outputs an unstructured grid; convert back to polydata.
    gf = wrap_vtk(vtkGeometryFilter(), merging=False)
    surf_sel = serial_connect(surf, tf, gf)
    # Sanity check: compare against a direct numpy count and warn when the
    # VTK pipeline kept a different number of elements (topology effects).
    mask = np.logical_and(array >= low, array <= upp)
    if keep:
        n_expected = np.count_nonzero(mask)
    else:
        n_expected = np.count_nonzero(~mask)
    n_sel = surf_sel.n_cells if use_cell else surf_sel.n_points
    if n_expected != n_sel:
        element = 'cells' if use_cell else 'points'
        warnings.warn('The number of selected {0} is different than expected. '
                      'This may be due to the topology after after selection: '
                      'expected={1}, selected={2}.'.
                      format(element, n_expected, n_sel))
    if drop_array:
        surf.remove_array(name=array_name, at=at)
        surf_sel.remove_array(name=array_name, at=at)
    return surf_sel
@wrap_input(0)
def _surface_mask(surf, mask, use_cell=False):
    """Select the points or cells of `surf` flagged by a binary mask.

    Parameters
    ----------
    surf : vtkPolyData or BSPolyData
        Input surface.
    mask : str or ndarray
        Binary boolean or integer array, or the name of such an array on
        `surf`. Zero/False elements are discarded.
    use_cell : bool, optional
        If True, the mask applies to cells; otherwise to points.
        Default is False.

    Returns
    -------
    BSPolyData
        Surface restricted to the masked elements.
    """
    if not isinstance(mask, np.ndarray):
        # Mask given by name: read it from the matching attribute set.
        mask = surf.get_array(name=mask, at='c' if use_cell else 'p')
    elif np.issubdtype(mask.dtype, np.bool_):
        # VTK thresholding needs a numeric array.
        mask = mask.astype(np.uint8)
    if np.any(np.unique(mask) > 1):
        raise ValueError('Cannot work with non-binary mask.')
    # Keeping values in [1, 1] is exactly "keep the flagged elements".
    return _surface_selection(surf, mask, low=1, upp=1, use_cell=use_cell,
                              keep=True)
def drop_points(surf, array_name, low=-np.inf, upp=np.inf):
    """Remove the points of `surf` whose values fall within [low, upp].

    Cells touching a removed point are removed as well.

    Parameters
    ----------
    surf : vtkPolyData or BSPolyData
        Input surface.
    array_name : str or 1D ndarray
        Point-data array (or its name) used for thresholding.
    low : float, optional
        Inclusive lower bound. Default is -np.inf.
    upp : float, optional
        Inclusive upper bound. Default is np.inf.

    Returns
    -------
    vtkPolyData or BSPolyData
        Surface with the thresholded points dropped.

    See Also
    --------
    :func:`drop_cells`
    :func:`select_points`
    :func:`mask_points`
    """
    return _surface_selection(surf, array_name, low=low, upp=upp,
                              use_cell=False, keep=False)
def drop_cells(surf, array_name, low=-np.inf, upp=np.inf):
    """Remove the cells of `surf` whose values fall within [low, upp].

    Points belonging only to removed cells are removed as well.

    Parameters
    ----------
    surf : vtkPolyData or BSPolyData
        Input surface.
    array_name : str or 1D ndarray
        Cell-data array (or its name) used for thresholding.
    low : float, optional
        Inclusive lower bound. Default is -np.inf.
    upp : float, optional
        Inclusive upper bound. Default is np.inf.

    Returns
    -------
    vtkPolyData or BSPolyData
        Surface with the thresholded cells dropped.

    See Also
    --------
    :func:`drop_points`
    :func:`select_cells`
    :func:`mask_cells`
    """
    return _surface_selection(surf, array_name, low=low, upp=upp,
                              keep=False, use_cell=True)
def select_points(surf, array_name, low=-np.inf, upp=np.inf):
    """Keep the points of `surf` whose values fall within [low, upp].

    Cells whose points survive the selection are kept as well.

    Parameters
    ----------
    surf : vtkPolyData or BSPolyData
        Input surface.
    array_name : str or 1D ndarray
        Point-data array (or its name) used for thresholding.
    low : float, optional
        Inclusive lower bound. Default is -np.inf.
    upp : float, optional
        Inclusive upper bound. Default is np.inf.

    Returns
    -------
    vtkPolyData or BSPolyData
        Surface restricted to the selected points.

    See Also
    --------
    :func:`select_cells`
    :func:`drop_points`
    :func:`mask_points`
    """
    return _surface_selection(surf, array_name, low=low, upp=upp,
                              use_cell=False, keep=True)
def select_cells(surf, array_name, low=-np.inf, upp=np.inf):
    """Keep the cells of `surf` whose values fall within [low, upp].

    Points of the surviving cells are kept as well.

    Parameters
    ----------
    surf : vtkPolyData or BSPolyData
        Input surface.
    array_name : str or 1D ndarray
        Cell-data array (or its name) used for thresholding.
    low : float, optional
        Inclusive lower bound. Default is -np.inf.
    upp : float, optional
        Inclusive upper bound. Default is np.inf.

    Returns
    -------
    vtkPolyData or BSPolyData
        Surface restricted to the selected cells.

    See Also
    --------
    :func:`select_points`
    :func:`drop_cells`
    :func:`mask_cells`
    """
    return _surface_selection(surf, array_name, low=low, upp=upp,
                              keep=True, use_cell=True)
def mask_points(surf, mask):
    """Keep only the points of `surf` flagged by `mask`.

    Cells whose points survive the masking are kept as well.

    Parameters
    ----------
    surf : vtkPolyData or BSPolyData
        Input surface.
    mask : 1D ndarray
        Binary boolean array; zero/False entries are discarded.

    Returns
    -------
    vtkPolyData or BSPolyData
        Surface restricted to the masked points.

    See Also
    --------
    :func:`mask_cells`
    :func:`drop_points`
    :func:`select_points`
    """
    return _surface_mask(surf, mask, use_cell=False)
def mask_cells(surf, mask):
    """Keep only the cells of `surf` flagged by `mask`.

    Points of the surviving cells are kept as well.

    Parameters
    ----------
    surf : vtkPolyData or BSPolyData
        Input surface.
    mask : 1D ndarray
        Binary boolean array; zero/False entries are discarded.

    Returns
    -------
    vtkPolyData or BSPolyData
        Surface restricted to the masked cells.

    See Also
    --------
    :func:`mask_points`
    :func:`drop_cells`
    :func:`select_cells`
    """
    return _surface_mask(surf, mask=mask, use_cell=True)
def combine_surfaces(*surfs):
    """Append several surfaces into a single one.

    Parameters
    ----------
    surfs : sequence of vtkPolyData and/or BSPolyData
        Input surfaces.

    Returns
    -------
    BSPolyData
        All input surfaces merged into one polydata.

    See Also
    --------
    :func:`split_surface`
    """
    # Feed every surface into one vtkAppendPolyData sink, then run it.
    appender = vtkAppendPolyData()
    for surface in surfs:
        appender = connect(surface, appender, add_conn=True)
    return get_output(appender)
@wrap_input(0)
def split_surface(surf, labeling=None):
    """Split `surf` into one sub-surface per label.

    Parameters
    ----------
    surf : vtkPolyData or BSPolyData
        Input surface.
    labeling : str, 1D ndarray or None, optional
        Point labels driving the split. A string names a PointData array of
        `surf`; None splits the surface into its connected components.
        Default is None.

    Returns
    -------
    dict[int, BSPolyData]
        Mapping from each label to its sub-surface.

    See Also
    --------
    :func:`combine_surfaces`
    :func:`mask_points`
    """
    if labeling is None:
        labeling = get_connected_components(surf)
    elif isinstance(labeling, str):
        labeling = surf.get_array(labeling, at='p')
    parts = {}
    for lab in np.unique(labeling):
        parts[lab] = mask_points(surf, labeling == lab)
    return parts
| 26.083969 | 80 | 0.63067 | """
Basic functions on surface meshes.
"""
# Author: Oualid Benkarim <oualid.benkarim@mcgill.ca>
# License: BSD 3 clause
import warnings
import numpy as np
from vtk import (vtkDataObject, vtkThreshold, vtkGeometryFilter,
vtkAppendPolyData)
from .array_operations import get_connected_components
from ..vtk_interface import wrap_vtk, serial_connect, get_output
from ..vtk_interface.pipeline import connect
from ..vtk_interface.decorators import wrap_input
ASSOC_CELLS = vtkDataObject.FIELD_ASSOCIATION_CELLS
ASSOC_POINTS = vtkDataObject.FIELD_ASSOCIATION_POINTS
@wrap_input(0)
def _surface_selection(surf, array_name, low=-np.inf, upp=np.inf,
                       use_cell=False, keep=True):
    """Selection of points or cells meeting some thresholding criteria.

    Parameters
    ----------
    surf : vtkPolyData or BSPolyData
        Input surface.
    array_name : str or ndarray
        Array used to perform selection.
    low : float or -np.inf
        Lower threshold. Default is -np.inf.
    upp : float or np.inf
        Upper threshold. Default is +np.inf.
    use_cell : bool, optional
        If True, apply selection to cells. Otherwise, use points.
        Default is False.
    keep : bool, optional
        If True, elements within the thresholds (inclusive) are kept.
        Otherwise, are discarded. Default is True.
        NOTE(review): only ``keep=True`` is actually implemented; passing
        ``keep=False`` raises ValueError below, so the drop_* wrappers
        currently fail — confirm whether invert support should be restored.

    Returns
    -------
    surf_selected : BSPolyData
        Surface after thresholding.
    """
    if low > upp:
        raise ValueError('Threshold limits are not valid: {0} -- {1}'.
                         format(low, upp))
    at = 'c' if use_cell else 'p'
    if isinstance(array_name, np.ndarray):
        # Ad-hoc array: append it temporarily so VTK can threshold on it,
        # then remove it again at the end (drop_array flag).
        drop_array = True
        array = array_name
        array_name = surf.append_array(array, at=at)
    else:
        drop_array = False
        array = surf.get_array(name=array_name, at=at, return_name=False)
    if array.ndim > 1:
        raise ValueError('Arrays has more than one dimension.')
    # Open-ended bounds default to the data range.
    if low == -np.inf:
        low = array.min()
    if upp == np.inf:
        upp = array.max()
    if keep is False:
        raise ValueError("Don't support 'keep=False'.")
    # tf = wrap_vtk(vtkThreshold, invert=not keep)
    tf = wrap_vtk(vtkThreshold)
    tf.ThresholdBetween(low, upp)
    if use_cell:
        tf.SetInputArrayToProcess(0, 0, 0, ASSOC_CELLS, array_name)
    else:
        tf.SetInputArrayToProcess(0, 0, 0, ASSOC_POINTS, array_name)
    # Thresholding outputs an unstructured grid; convert back to polydata.
    gf = wrap_vtk(vtkGeometryFilter(), merging=False)
    surf_sel = serial_connect(surf, tf, gf)
    # Sanity check: compare against a direct numpy count and warn when the
    # VTK pipeline kept a different number of elements (topology effects).
    mask = np.logical_and(array >= low, array <= upp)
    if keep:
        n_expected = np.count_nonzero(mask)
    else:
        n_expected = np.count_nonzero(~mask)
    n_sel = surf_sel.n_cells if use_cell else surf_sel.n_points
    if n_expected != n_sel:
        element = 'cells' if use_cell else 'points'
        warnings.warn('The number of selected {0} is different than expected. '
                      'This may be due to the topology after after selection: '
                      'expected={1}, selected={2}.'.
                      format(element, n_expected, n_sel))
    if drop_array:
        surf.remove_array(name=array_name, at=at)
        surf_sel.remove_array(name=array_name, at=at)
    return surf_sel
@wrap_input(0)
def _surface_mask(surf, mask, use_cell=False):
    """Select the points or cells of `surf` flagged by a binary mask.

    Parameters
    ----------
    surf : vtkPolyData or BSPolyData
        Input surface.
    mask : str or ndarray
        Binary boolean or integer array, or the name of such an array on
        `surf`. Zero/False elements are discarded.
    use_cell : bool, optional
        If True, the mask applies to cells; otherwise to points.
        Default is False.

    Returns
    -------
    BSPolyData
        Surface restricted to the masked elements.
    """
    if not isinstance(mask, np.ndarray):
        # Mask given by name: read it from the matching attribute set.
        mask = surf.get_array(name=mask, at='c' if use_cell else 'p')
    elif np.issubdtype(mask.dtype, np.bool_):
        # VTK thresholding needs a numeric array.
        mask = mask.astype(np.uint8)
    if np.any(np.unique(mask) > 1):
        raise ValueError('Cannot work with non-binary mask.')
    # Keeping values in [1, 1] is exactly "keep the flagged elements".
    return _surface_selection(surf, mask, low=1, upp=1, use_cell=use_cell,
                              keep=True)
def drop_points(surf, array_name, low=-np.inf, upp=np.inf):
    """Remove the points of `surf` whose values fall within [low, upp].

    Cells touching a removed point are removed as well.

    Parameters
    ----------
    surf : vtkPolyData or BSPolyData
        Input surface.
    array_name : str or 1D ndarray
        Point-data array (or its name) used for thresholding.
    low : float, optional
        Inclusive lower bound. Default is -np.inf.
    upp : float, optional
        Inclusive upper bound. Default is np.inf.

    Returns
    -------
    vtkPolyData or BSPolyData
        Surface with the thresholded points dropped.

    See Also
    --------
    :func:`drop_cells`
    :func:`select_points`
    :func:`mask_points`
    """
    return _surface_selection(surf, array_name, low=low, upp=upp,
                              use_cell=False, keep=False)
def drop_cells(surf, array_name, low=-np.inf, upp=np.inf):
    """Remove the cells of `surf` whose values fall within [low, upp].

    Points belonging only to removed cells are removed as well.

    Parameters
    ----------
    surf : vtkPolyData or BSPolyData
        Input surface.
    array_name : str or 1D ndarray
        Cell-data array (or its name) used for thresholding.
    low : float, optional
        Inclusive lower bound. Default is -np.inf.
    upp : float, optional
        Inclusive upper bound. Default is np.inf.

    Returns
    -------
    vtkPolyData or BSPolyData
        Surface with the thresholded cells dropped.

    See Also
    --------
    :func:`drop_points`
    :func:`select_cells`
    :func:`mask_cells`
    """
    return _surface_selection(surf, array_name, low=low, upp=upp,
                              keep=False, use_cell=True)
def select_points(surf, array_name, low=-np.inf, upp=np.inf):
    """Keep the points of `surf` whose values fall within [low, upp].

    Cells whose points survive the selection are kept as well.

    Parameters
    ----------
    surf : vtkPolyData or BSPolyData
        Input surface.
    array_name : str or 1D ndarray
        Point-data array (or its name) used for thresholding.
    low : float, optional
        Inclusive lower bound. Default is -np.inf.
    upp : float, optional
        Inclusive upper bound. Default is np.inf.

    Returns
    -------
    vtkPolyData or BSPolyData
        Surface restricted to the selected points.

    See Also
    --------
    :func:`select_cells`
    :func:`drop_points`
    :func:`mask_points`
    """
    return _surface_selection(surf, array_name, low=low, upp=upp,
                              use_cell=False, keep=True)
def select_cells(surf, array_name, low=-np.inf, upp=np.inf):
    """Keep the cells of `surf` whose values fall within [low, upp].

    Points of the surviving cells are kept as well.

    Parameters
    ----------
    surf : vtkPolyData or BSPolyData
        Input surface.
    array_name : str or 1D ndarray
        Cell-data array (or its name) used for thresholding.
    low : float, optional
        Inclusive lower bound. Default is -np.inf.
    upp : float, optional
        Inclusive upper bound. Default is np.inf.

    Returns
    -------
    vtkPolyData or BSPolyData
        Surface restricted to the selected cells.

    See Also
    --------
    :func:`select_points`
    :func:`drop_cells`
    :func:`mask_cells`
    """
    return _surface_selection(surf, array_name, low=low, upp=upp,
                              keep=True, use_cell=True)
def mask_points(surf, mask):
    """Keep only the points of `surf` flagged by `mask`.

    Cells whose points survive the masking are kept as well.

    Parameters
    ----------
    surf : vtkPolyData or BSPolyData
        Input surface.
    mask : 1D ndarray
        Binary boolean array; zero/False entries are discarded.

    Returns
    -------
    vtkPolyData or BSPolyData
        Surface restricted to the masked points.

    See Also
    --------
    :func:`mask_cells`
    :func:`drop_points`
    :func:`select_points`
    """
    return _surface_mask(surf, mask, use_cell=False)
def mask_cells(surf, mask):
    """Keep only the cells of `surf` flagged by `mask`.

    Points of the surviving cells are kept as well.

    Parameters
    ----------
    surf : vtkPolyData or BSPolyData
        Input surface.
    mask : 1D ndarray
        Binary boolean array; zero/False entries are discarded.

    Returns
    -------
    vtkPolyData or BSPolyData
        Surface restricted to the masked cells.

    See Also
    --------
    :func:`mask_points`
    :func:`drop_cells`
    :func:`select_cells`
    """
    return _surface_mask(surf, mask=mask, use_cell=True)
def combine_surfaces(*surfs):
    """Append several surfaces into a single one.

    Parameters
    ----------
    surfs : sequence of vtkPolyData and/or BSPolyData
        Input surfaces.

    Returns
    -------
    BSPolyData
        All input surfaces merged into one polydata.

    See Also
    --------
    :func:`split_surface`
    """
    # Feed every surface into one vtkAppendPolyData sink, then run it.
    appender = vtkAppendPolyData()
    for surface in surfs:
        appender = connect(surface, appender, add_conn=True)
    return get_output(appender)
@wrap_input(0)
def split_surface(surf, labeling=None):
    """Split `surf` into one sub-surface per label.

    Parameters
    ----------
    surf : vtkPolyData or BSPolyData
        Input surface.
    labeling : str, 1D ndarray or None, optional
        Point labels driving the split. A string names a PointData array of
        `surf`; None splits the surface into its connected components.
        Default is None.

    Returns
    -------
    dict[int, BSPolyData]
        Mapping from each label to its sub-surface.

    See Also
    --------
    :func:`combine_surfaces`
    :func:`mask_points`
    """
    if labeling is None:
        labeling = get_connected_components(surf)
    elif isinstance(labeling, str):
        labeling = surf.get_array(labeling, at='p')
    parts = {}
    for lab in np.unique(labeling):
        parts[lab] = mask_points(surf, labeling == lab)
    return parts
| 0 | 0 | 0 |
6e6c4bb0d8e1c54ef117d7ffe34d100e86242340 | 6,117 | py | Python | netdata/importer/protocol_graph.py | mincode/netdata | 4369a3bfb473509eff92083e03f214d5b75f6074 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | netdata/importer/protocol_graph.py | mincode/netdata | 4369a3bfb473509eff92083e03f214d5b75f6074 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | netdata/importer/protocol_graph.py | mincode/netdata | 4369a3bfb473509eff92083e03f214d5b75f6074 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import psycopg2
import networkx
import logging
import math
from datetime import datetime, timezone
def log_info(msg):
    """
    Emit an info-level log message.

    :params msg: message text (logged with a single leading space).
    """
    text = ' ' + msg
    logging.info(text)
def log_debug(msg):
    """
    Print debug log message via the root logger.

    :params msg: message text.
    """
    logging.debug(msg)
def add_record(graph, record):
    """
    Add one record to a graph as a directed edge.

    :params graph: networkx graph (multi-edge capable).
    :params record: record for an edge; must expose sip, dip, sport, dport,
        stime_epoch_secs and etime_epoch_secs attributes.
    :return: graph, as modified in place.
    """
    edge_attrs = {'sport': record.sport,
                  'dport': record.dport,
                  'stime_epoch_secs': record.stime_epoch_secs,
                  'etime_epoch_secs': record.etime_epoch_secs}
    graph.add_edge(record.sip, record.dip, attr_dict=edge_attrs)
    return graph
def start_end_epoch(graph):
    """
    Start and end epoch over all edges of the graph.

    :return: (start epoch, end epoch); (0, 0) for a graph with no edges.
    """
    start = 0
    end = 0
    for edge in graph.edges_iter():
        src, dst = edge[0], edge[1]
        # Multi-edge dict: iterate every parallel edge's attribute set.
        for _, attrs in graph[src][dst].items():
            end = max(end, attrs['etime_epoch_secs'])
            s = attrs['stime_epoch_secs']
            start = s if start == 0 else min(start, s)
    return (start, end)
def epoch_utc(s):
    """
    Convert epoch seconds to an aware UTC DateTime object.

    :params s: epoch seconds.
    :return: corresponding DateTime object.
    """
    return datetime.fromtimestamp(s, tz=timezone.utc)
class _Frame_Iter:
    """
    Iterator over frames in the protocol graph database.
    """

    _db = None              # owning ProtocolGraph
    _iter_start = 0         # start epoch for iterating
    _iter_end = -1          # end epoch of time for frames
    _iter_frame_secs = 0    # frame length in seconds for iterating over frames
    _iter_index = 0         # index of the iterator
    _iter_finished = False  # indicates whether iterator is finished

    def __init__(self, db, name, seconds, start, minutes):
        """
        Create iterator with start time over given frame length;

        :param db: ProtocolGraph database.
        :param name: name of the frame
        :param seconds: seconds for the frame;
        :param start: start epoch of iterator; start of dataset if < 0.
        :param minutes: minutes for the frame;
        whole dataset length if minutes and seconds are 0.
        :return: iterator.
        """
        self._db = db
        # NOTE(review): relies on db.frame(name) returning a 4-tuple
        # (name, start_epoch, end_epoch, sink_port); that method is not
        # defined on the ProtocolGraph in this file — confirm.
        frame = self._db.frame(name)
        if not frame:
            # Unknown frame name: iterator yields nothing.
            self._iter_finished = True
        else:
            self._iter_finished = False
            (_, start_epoch, end_epoch, sink_port) = frame
            self._iter_end = end_epoch
            if start >= 0:
                self._iter_start = start
            else:
                self._iter_start = start_epoch
            if minutes == 0 and seconds == 0:
                # Zero-length request means one frame spanning the dataset.
                self._iter_frame_secs = math.ceil(
                    self._iter_end - self._iter_start)
                # print('iter_frame_secs: {}'.format(self._iter_frame_secs))
            else:
                self._iter_frame_secs = minutes * 60 + seconds
    # NOTE(review): this copy defines no __iter__/__next__, so instances are
    # not actually iterable — confirm against the full implementation.
class ProtocolGraph:
    """
    Protocol graph obtained from a database with edges.
    Typical usage:
        db = ProtocolGraph(flow_connection)
        g = db.fetch_all()
    or
        for g in db.iter(sim_name, frame_secs)
            process g
    """

    _database = None  # connection to a database (FlowConnection)

    def __init__(self, flow_connection):
        """
        Initialize with connection.

        :param flow_connection: FlowConnection; opened immediately.
        """
        self._database = flow_connection
        self._database.open()

    def __del__(self):
        """
        Close database if appropriate.
        """
        # NOTE(review): finalizer timing is interpreter-dependent; an
        # explicit close() or context manager would be more deterministic.
        if self._database:
            self._database.close()

    def fetch_all(self):
        """
        Fetch the whole protocol graph.

        :return: networkx.MultiDiGraph.
        """
        with self._database:
            g = networkx.MultiDiGraph()
            with self._database.cursor() as cur:
                cur.execute("select * from edges;")
                # Each database row becomes one edge with flow attributes.
                for rec in cur:
                    add_record(g, rec)
            return g

    def fetch_frame(self, start, minutes=0, seconds=0):
        """
        Fetch graph from one frame; include streams that start in the frame.

        :param start: epoch start time.
        :param minutes: minutes for the frame.
        :param seconds: seconds for the frame.
        :return: graph.
        """
        total_secs = minutes * 60 + seconds
        end = start + total_secs
        with self._database:
            g = networkx.MultiDiGraph()
            with self._database.cursor() as cur:
                # Half-open window: start <= stime_epoch_secs < end.
                cur.execute('select * from edges where \
                            (%s<=stime_epoch_secs and stime_epoch_secs<%s);',
                            (start, end))
                for rec in cur:
                    add_record(g, rec)
            return g

    def iter(self, name, seconds=0, start=-1, minutes=0):
        """
        Create iterator with start time over given frame length;

        :param name: name of the frame
        :param seconds: seconds for the frame;
        :param start: start epoch of iterator; start of dataset if < 0.
        :param minutes: minutes for the frame;
        whole dataset length if minutes and seconds are 0.
        :return: iterator.
        """
        return _Frame_Iter(self, name, seconds=seconds, start=start,
                           minutes=minutes)
| 30.738693 | 79 | 0.571849 | import psycopg2
import networkx
import logging
import math
from datetime import datetime, timezone
def log_info(msg):
    """
    Emit an info-level log message.

    :params msg: message text (logged with a single leading space).
    """
    text = ' ' + msg
    logging.info(text)
def log_debug(msg):
    """
    Print debug log message via the root logger.

    :params msg: message text.
    """
    logging.debug(msg)
def add_record(graph, record):
    """
    Add one record to a graph as a directed edge.

    :params graph: networkx graph (multi-edge capable).
    :params record: record for an edge; must expose sip, dip, sport, dport,
        stime_epoch_secs and etime_epoch_secs attributes.
    :return: graph, as modified in place.
    """
    edge_attrs = {'sport': record.sport,
                  'dport': record.dport,
                  'stime_epoch_secs': record.stime_epoch_secs,
                  'etime_epoch_secs': record.etime_epoch_secs}
    graph.add_edge(record.sip, record.dip, attr_dict=edge_attrs)
    return graph
def start_end_epoch(graph):
    """
    Start and end epoch over all edges of the graph.

    :return: (start epoch, end epoch); (0, 0) for a graph with no edges.
    """
    start = 0
    end = 0
    for edge in graph.edges_iter():
        src, dst = edge[0], edge[1]
        # Multi-edge dict: iterate every parallel edge's attribute set.
        for _, attrs in graph[src][dst].items():
            end = max(end, attrs['etime_epoch_secs'])
            s = attrs['stime_epoch_secs']
            start = s if start == 0 else min(start, s)
    return (start, end)
def epoch_utc(s):
    """
    Convert epoch seconds to an aware UTC DateTime object.

    :params s: epoch seconds.
    :return: corresponding DateTime object.
    """
    return datetime.fromtimestamp(s, tz=timezone.utc)
class _Frame_Iter:
    """
    Iterator over frames in the protocol graph database.
    """

    _db = None              # owning ProtocolGraph
    _iter_start = 0         # start epoch for iterating
    _iter_end = -1          # end epoch of time for frames
    _iter_frame_secs = 0    # frame length in seconds for iterating over frames
    _iter_index = 0         # index of the iterator
    _iter_finished = False  # indicates whether iterator is finished

    def __init__(self, db, name, seconds, start, minutes):
        """
        Create iterator with start time over given frame length;

        :param db: ProtocolGraph database.
        :param name: name of the frame
        :param seconds: seconds for the frame;
        :param start: start epoch of iterator; start of dataset if < 0.
        :param minutes: minutes for the frame;
        whole dataset length if minutes and seconds are 0.
        :return: iterator.
        """
        self._db = db
        # NOTE(review): relies on db.frame(name) returning a 4-tuple
        # (name, start_epoch, end_epoch, sink_port) — confirm it exists on
        # ProtocolGraph.
        frame = self._db.frame(name)
        if not frame:
            # Unknown frame name: iterator yields nothing.
            self._iter_finished = True
        else:
            self._iter_finished = False
            (_, start_epoch, end_epoch, sink_port) = frame
            self._iter_end = end_epoch
            if start >= 0:
                self._iter_start = start
            else:
                self._iter_start = start_epoch
            if minutes == 0 and seconds == 0:
                # Zero-length request means one frame spanning the dataset.
                self._iter_frame_secs = math.ceil(
                    self._iter_end - self._iter_start)
                # print('iter_frame_secs: {}'.format(self._iter_frame_secs))
            else:
                self._iter_frame_secs = minutes * 60 + seconds

    def __iter__(self):
        return self

    def __next__(self):
        # Advance one frame at a time; each frame's graph is fetched lazily.
        if not self._iter_finished:
            start = self._iter_start + self._iter_index * self._iter_frame_secs
            if start <= self._iter_end:
                # print('Fetch frame at {} with {} secs'.format(
                #     str(epoch_utc(start)), self._iter_frame_secs))
                g = self._db.fetch_frame(start, seconds=self._iter_frame_secs)
                self._iter_index += 1
                return g
            else:
                # Past the dataset end: remember exhaustion and stop.
                self._iter_finished = True
                raise StopIteration
        else:
            raise StopIteration
class ProtocolGraph:
    """
    Protocol graph obtained from a database with edges.

    Typical usage:
        db = ProtocolGraph(flow_connection)
        g = db.fetch_all()
    or
        for g in db.iter(sim_name, frame_secs)
            process g
    """
    _database = None  # connection to a database

    def __init__(self, flow_connection):
        """
        Initialize with connection; the connection is opened immediately.

        :param flow_connection: FlowConnection.
        """
        self._database = flow_connection
        self._database.open()

    def __del__(self):
        """Close database if appropriate."""
        if self._database:
            self._database.close()

    def fetch_all(self):
        """
        Fetch the whole protocol graph.

        :return: networkx.MultiDiGraph with one edge per edges-table row.
        """
        with self._database:
            graph = networkx.MultiDiGraph()
            with self._database.cursor() as cur:
                cur.execute("select * from edges;")
                for row in cur:
                    add_record(graph, row)
            return graph

    def fetch_frame(self, start, minutes=0, seconds=0):
        """
        Fetch graph from one frame; include streams that start in the frame.

        :param start: epoch start time.
        :param minutes: minutes for the frame.
        :param seconds: seconds for the frame.
        :return: graph.
        """
        # The frame covers [start, start + minutes*60 + seconds).
        end = start + minutes * 60 + seconds
        with self._database:
            graph = networkx.MultiDiGraph()
            with self._database.cursor() as cur:
                cur.execute('select * from edges where \
                    (%s<=stime_epoch_secs and stime_epoch_secs<%s);',
                    (start, end))
                for row in cur:
                    add_record(graph, row)
            return graph

    def iter(self, name, seconds=0, start=-1, minutes=0):
        """
        Create iterator with start time over given frame length.

        :param name: name of the frame.
        :param seconds: seconds for the frame.
        :param start: start epoch of iterator; start of dataset if < 0.
        :param minutes: minutes for the frame;
            whole dataset length if minutes and seconds are 0.
        :return: iterator yielding one graph per frame.
        """
        return _Frame_Iter(self, name, seconds=seconds, start=start,
                           minutes=minutes)
| 590 | 0 | 54 |
151570e8807be229fb2172ad445b66e7ea21f516 | 7,410 | py | Python | markov_pilot/wrappers/varySetpointsWrapper.py | opt12/gym-jsbsim-eee | fa61d0d4679fd65b5736fc562fe268714b4e08d8 | [
"MIT"
] | 7 | 2020-11-10T07:33:40.000Z | 2021-06-23T07:25:43.000Z | markov_pilot/wrappers/varySetpointsWrapper.py | opt12/gym-jsbsim-eee | fa61d0d4679fd65b5736fc562fe268714b4e08d8 | [
"MIT"
] | null | null | null | markov_pilot/wrappers/varySetpointsWrapper.py | opt12/gym-jsbsim-eee | fa61d0d4679fd65b5736fc562fe268714b4e08d8 | [
"MIT"
] | 5 | 2020-07-12T00:10:59.000Z | 2021-06-22T09:13:13.000Z | #!/usr/bin/env python3
import sys
sys.path.append(r'/home/felix/git/gym-jsbsim-eee/') #TODO: Is this a good idea? Dunno! It works!
import gym
import numpy as np
import math
import random
import markov_pilot.environment.properties as prp
from abc import ABC, abstractmethod
from markov_pilot.environment.properties import BoundedProperty
from typing import Tuple, List
class VarySetpointsWrapper(gym.Wrapper):
    """
    A wrapper to vary the setpoints at the beginning of each episode
    This can be used during training to have bigger variance in the training data
    """
    class SetpointVariator(ABC):
        """
        A helper that can vary a setpoint between two extreme values following a specific pattern
        """
        @abstractmethod
        def vary(self):
            ''' outputs the setpoint for the next step or None if there is nothing to do'''
            ...
        @abstractmethod
        def start_variation(self):
            ''' starts the setpoint variation for the first time in the upcoming interval
            :return: the first setpoint to be passed to the env'''
            ...
    # NOTE(review): mutable default argument `initial_conditions=[]` is created
    # once at definition time and shared across calls — consider a None sentinel.
    def __init__(self, env, setpoint_property: BoundedProperty,
                setpoint_range: Tuple[float, float],
                interval_length: Tuple[float, float] = (5., 120.),
                ramp_time:Tuple[float, float] = (0,0), sine_frequ: Tuple[float, float] = (0,0),
                initial_conditions: List[Tuple] = []):
        """
        :param setpoint_property: The property which describes the setpoint.
        :param setpoint_range: the range the setpoint may be chosen from (min, max)
        :param interval_length: the time in seconds for the interval till the next change
        :param ramp_time: the time, a ramp may last from current setpoint to target setpoint; (0, 0) disables ramps
        :param sine_frequ: the frequency range from which sine modulation may be chosen; (0,0) disables sine modulation
        :param initial_conditions: TODO: specify the initial conditions that may be varied and their ranges.
        """
        # Keep the wrapped env and the variation configuration on the instance.
        self.env = env
        self.setpoint_property = setpoint_property
        self.setpoint_range = setpoint_range
        self.interval_length = interval_length
        self.ramp_time = ramp_time
        self.sine_frequ = sine_frequ
        self.initial_conditions = initial_conditions
        #don't restore the VarySetpoints wrapper automatically
        # #append the restore data
        # self.env_init_dicts.append({
        #     'setpoint_property': setpoint_property,
        #     'setpoint_range': setpoint_range,
        #     'interval_length': interval_length,
        #     'ramp_time': ramp_time,
        #     'sine_frequ': sine_frequ,
        #     'initial_conditions': initial_conditions,
        # })
        # self.env_classes.append(self.__class__.__name__)
        # NOTE(review): StepVariator / RampVariator / SineVariator are referenced
        # below but their definitions are not visible in this copy of the class —
        # confirm they exist as nested SetpointVariator subclasses.
        # NOTE(review): self.dt presumably resolves to the wrapped env's timestep
        # via gym.Wrapper attribute delegation — confirm.
        step_variator = self.StepVariator(setpoint_range)
        ramp_variator = self.RampVariator(setpoint_range, ramp_time, self.dt)
        sine_variator = self.SineVariator(setpoint_range, sine_frequ, self.dt)
        # The step variator is always enabled; ramp and sine only when their
        # configuration tuples are non-trivial.
        self.enabled_variators = [step_variator]
        if ramp_time != (0, 0): self.enabled_variators.append(ramp_variator)
        if sine_frequ != (0, 0): self.enabled_variators.append(sine_variator)
        # All environments whose setpoints are changed synchronously.
        self.envs_to_vary = [self.env]
    def inject_other_env(self, env):
        '''if the setpoint changes shall affect more than one environment synchronously; e. g. for benchmarking'''
        self.envs_to_vary.append(env)
| 42.83237 | 124 | 0.648988 | #!/usr/bin/env python3
import sys
sys.path.append(r'/home/felix/git/gym-jsbsim-eee/') #TODO: Is this a good idea? Dunno! It works!
import gym
import numpy as np
import math
import random
import markov_pilot.environment.properties as prp
from abc import ABC, abstractmethod
from markov_pilot.environment.properties import BoundedProperty
from typing import Tuple, List
class VarySetpointsWrapper(gym.Wrapper):
    """
    A wrapper to vary the setpoints at the beginning of each episode.

    This can be used during training to have bigger variance in the training
    data. At the start of each randomly sized interval one of the enabled
    variators (step / ramp / sine) is picked and drives the setpoint until
    the next interval begins.
    """

    class SetpointVariator(ABC):
        """
        A helper that can vary a setpoint between two extreme values following a specific pattern
        """
        @abstractmethod
        def vary(self):
            ''' outputs the setpoint for the next step or None if there is nothing to do'''
            ...

        @abstractmethod
        def start_variation(self, current_value):
            ''' starts the setpoint variation for the first time in the upcoming interval
            :param current_value: the current setpoint value (implementations may ignore it)
            :return: the first setpoint to be passed to the env'''
            ...

    class StepVariator(SetpointVariator):
        """Jumps to a new random setpoint instantly (step function)."""
        def __init__(self, setpoint_range):
            self.min = setpoint_range[0]
            self.max = setpoint_range[1]

        def vary(self):
            # A step is complete after the initial jump; nothing more to do.
            return None

        def start_variation(self, _):
            return random.uniform(self.min, self.max)

    class RampVariator(SetpointVariator):
        """Moves linearly from the current setpoint to a random target."""
        def __init__(self, setpoint_range, ramp_time, dt):
            self.min = setpoint_range[0]
            self.max = setpoint_range[1]
            self.t_min = ramp_time[0]
            self.t_max = ramp_time[1]
            self.dt = dt  # the time interval between two subsequent calls

        def vary(self):
            if self.steps_left > 0:
                self.current_value += self.delta
                self.steps_left -= 1
                return self.current_value
            return None  # ramp finished; hold the target value

        def start_variation(self, current_value):
            self.target_value = random.uniform(self.min, self.max)
            ramp_length = random.uniform(self.t_min, self.t_max)
            # Use a whole number of simulation steps (at least one). The
            # previous fractional step count made the final increment overshoot
            # the target (up to 2x the intended change when ramp_length < dt).
            self.steps_left = max(1, round(ramp_length / self.dt))
            self.delta = (self.target_value - current_value) / self.steps_left
            self.current_value = current_value
            return self.vary()

    class SineVariator(SetpointVariator):
        """Modulates the setpoint sinusoidally around the current value."""
        def __init__(self, setpoint_range, sine_frequ, dt):
            self.min = setpoint_range[0]
            self.max = setpoint_range[1]
            self.freq_min = sine_frequ[0]
            self.freq_max = sine_frequ[1]
            self.dt = dt

        def vary(self):
            self.step += 1
            return self.mean_value + math.sin(self.step * self.sine_increment)*self.amplitude

        def start_variation(self, current_value):
            frequency = random.uniform(self.freq_min, self.freq_max)
            # Phase advance of the sine argument within one dt.
            self.sine_increment = self.dt * frequency * 2*math.pi
            self.mean_value = current_value
            if self.min > self.mean_value or self.mean_value > self.max:
                # May happen due to unfortunate initial conditions.
                self.mean_value = random.uniform(self.min, self.max)
            # Largest amplitude that keeps the oscillation inside the range.
            max_amplitude = min(self.mean_value - self.min, self.max - self.mean_value)
            self.amplitude = random.uniform(0, max_amplitude)
            self.amplitude *= random.choice((-1, 1))  # random initial direction
            self.step = 0
            return self.vary()

    def __init__(self, env, setpoint_property: BoundedProperty,
                setpoint_range: Tuple[float, float],
                interval_length: Tuple[float, float] = (5., 120.),
                ramp_time: Tuple[float, float] = (0, 0), sine_frequ: Tuple[float, float] = (0, 0),
                initial_conditions: List[Tuple] = None):
        """
        :param env: the environment to wrap.
        :param setpoint_property: The property which describes the setpoint.
        :param setpoint_range: the range the setpoint may be chosen from (min, max)
        :param interval_length: the time in seconds for the interval till the next change
        :param ramp_time: the time, a ramp may last from current setpoint to target setpoint; (0, 0) disables ramps
        :param sine_frequ: the frequency range from which sine modulation may be chosen; (0,0) disables sine modulation
        :param initial_conditions: TODO: specify the initial conditions that may be varied and their ranges.
            None (the default) behaves like an empty list; the former
            mutable default ``[]`` was shared across instances.
        """
        self.env = env
        self.setpoint_property = setpoint_property
        self.setpoint_range = setpoint_range
        self.interval_length = interval_length
        self.ramp_time = ramp_time
        self.sine_frequ = sine_frequ
        # Fresh list per instance: avoids the shared-mutable-default pitfall.
        self.initial_conditions = [] if initial_conditions is None else initial_conditions
        # Allow step() to trigger the first variation even before reset().
        self.steps_till_next_variation = 0
        #don't restore the VarySetpoints wrapper automatically
        # #append the restore data
        # self.env_init_dicts.append({
        #     'setpoint_property': setpoint_property,
        #     'setpoint_range': setpoint_range,
        #     'interval_length': interval_length,
        #     'ramp_time': ramp_time,
        #     'sine_frequ': sine_frequ,
        #     'initial_conditions': initial_conditions,
        # })
        # self.env_classes.append(self.__class__.__name__)
        # The step variator is always enabled; ramp and sine only when their
        # configuration tuples are non-trivial. self.dt is delegated to the
        # wrapped env by gym.Wrapper.
        self.enabled_variators = [self.StepVariator(setpoint_range)]
        if ramp_time != (0, 0):
            self.enabled_variators.append(
                self.RampVariator(setpoint_range, ramp_time, self.dt))
        if sine_frequ != (0, 0):
            self.enabled_variators.append(
                self.SineVariator(setpoint_range, sine_frequ, self.dt))
        # All environments whose setpoints are changed synchronously.
        self.envs_to_vary = [self.env]

    def inject_other_env(self, env):
        '''if the setpoint changes shall affect more than one environment synchronously; e. g. for benchmarking'''
        self.envs_to_vary.append(env)

    def _initialize_next_variation(self):
        """Pick the next interval length and variator; return the first setpoint."""
        interval = random.uniform(self.interval_length[0], self.interval_length[1])
        self.steps_till_next_variation = int(interval / self.dt)
        self.active_variator = random.choice(self.enabled_variators)
        current_value = self.env.sim[self.setpoint_property]
        return self.active_variator.start_variation(current_value)

    def step(self, action):
        # pylint: disable=method-hidden
        """Advance the wrapped env one step, varying the setpoint on the way."""
        if not self.steps_till_next_variation:
            varied_setpoint = self._initialize_next_variation()
        else:
            varied_setpoint = self.active_variator.vary()
        self.steps_till_next_variation -= 1
        # Compare against None explicitly: variators use None as the
        # "nothing to do" sentinel, and 0.0 is a perfectly valid setpoint
        # that a truthiness check would silently drop.
        if varied_setpoint is not None:
            for env in self.envs_to_vary:
                env.change_setpoints({self.setpoint_property: varied_setpoint})
        return self.env.step(action)

    def reset(self):
        # pylint: disable=method-hidden
        """Reset the wrapped env and start a fresh setpoint variation."""
        varied_setpoint = self._initialize_next_variation()
        for env in self.envs_to_vary:
            env.change_setpoints({self.setpoint_property: varied_setpoint})
        # TODO: here goes the modification of the initial conditions
        # not now [self.envs_to_vary.set_initial_conditions( {})]
        return self.env.reset()
| 3,372 | 48 | 432 |