hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0f796c47a793c714f545f83a640dc9c73918faf1 | 2,116 | py | Python | pyqt_tag_manager/qt_market/animations.py | Nevolutionize/pyqt_tag_manager | 917d209924271c464d6a99ba1c768445e2436032 | [
"MIT"
] | null | null | null | pyqt_tag_manager/qt_market/animations.py | Nevolutionize/pyqt_tag_manager | 917d209924271c464d6a99ba1c768445e2436032 | [
"MIT"
] | null | null | null | pyqt_tag_manager/qt_market/animations.py | Nevolutionize/pyqt_tag_manager | 917d209924271c464d6a99ba1c768445e2436032 | [
"MIT"
] | null | null | null | # Import external modules.
from pyqt_tag_manager import QtCore
from pyqt_tag_manager import QtGui
from pyqt_tag_manager import QtWidgets
class FailColorAnimation(QtCore.QPropertyAnimation):
    """Property animation to indicate errors.

    Displays a color transition as the color red fading out to 0 alpha.
    """

    def __init__(self, parent):
        """Attach a colorize effect to *parent* and configure the defaults.

        Args:
            parent (QtWidgets.QWidget): Widget to flash when an error occurs.
        """
        super(FailColorAnimation, self).__init__(parent)
        # The colorize effect does the actual tinting; the animation drives
        # the effect's 'color' property.
        effect = QtWidgets.QGraphicsColorizeEffect(parent)
        effect.setStrength(1)
        parent.setGraphicsEffect(effect)

        # Defaults.
        self.setPropertyName(b'color')
        self.setTargetObject(effect)
        self.__start_color = QtGui.QColor(255, 0, 0, 225)  # near-opaque red
        self.__end_color = QtGui.QColor(255, 0, 0, 0)  # fully transparent red
        self.__duration = 500  # milliseconds
        self.__curve = QtCore.QEasingCurve.InQuint

        self.finished.connect(self._on_finish)

    # Public.
    def play(self, color=None, duration=None, curve=None):
        """Play the property animation.

        Args:
            color (QtCore.QColor): Override the default "red" color to
                indicate errors.
            duration (int): Override the default duration of the animation.
            curve (QtCore.QEasingCurve): Override the default easing curve
                for the animation.
        """
        if color:
            start_color = color
            # Fade the override color out to full transparency.
            end_color = QtGui.QColor(start_color)
            end_color.setAlpha(0)
        else:
            start_color = self.__start_color
            end_color = self.__end_color

        dur = self.__duration if duration is None else duration
        curve = self.__curve if curve is None else curve

        # Play animation.
        self.setStartValue(start_color)
        self.setEndValue(end_color)
        self.setEasingCurve(curve)
        self.setDuration(dur)
        self.start()

    # Slots.
    @QtCore.Slot()
    def _on_finish(self):
        """Triggered when animation is finished playing."""
        # Remove the graphic effect from the widget so that its base
        # styling isn't affected.
        self.parent().setGraphicsEffect(None)
| 33.0625 | 75 | 0.645085 | 1,976 | 0.933837 | 0 | 0 | 249 | 0.117675 | 0 | 0 | 680 | 0.321361 |
0f7a4cb016b7e5ab674e68efb9b6f228d4fe3595 | 1,252 | py | Python | img.py | NavneetSurana/Animal-Classification-Using-ResNet | 763608adb448cfa3ef6106825f4a1e8e6568035e | [
"MIT"
] | 2 | 2018-10-08T03:31:59.000Z | 2018-10-27T18:53:27.000Z | img.py | NavneetSurana/Animal-Classification-Using-ResNet | 763608adb448cfa3ef6106825f4a1e8e6568035e | [
"MIT"
] | null | null | null | img.py | NavneetSurana/Animal-Classification-Using-ResNet | 763608adb448cfa3ef6106825f4a1e8e6568035e | [
"MIT"
] | 1 | 2021-07-05T05:04:55.000Z | 2021-07-05T05:04:55.000Z | import os,sys
import shutil
import pandas as pd
# Build a mapping from image file name to its animal label.
data=pd.read_csv('D:/MachineLearning/AnimalClassification/train.csv')
Im_id=data['Image_id']
Animal=data['Animal']
dic_data=dict()
for i in range(0,len(Im_id)):
    dic_data[Im_id[i].strip()]=Animal[i].strip()

# Copy source images into per-class folders: the first 9000 listed files go
# to Images/train/<label>/, the remainder to Images/test/<label>/.
# NOTE(review): os.listdir order is arbitrary, so the train/test split is
# not deterministic across runs — confirm this is acceptable.
source_dir='D:/MachineLearning/AnimalClassification/Images/train'
folder='D:/MachineLearning/AnimalClassificationUsingCNN'
lis=os.listdir(source_dir)
j=0
for filename in lis:
    src_filename=os.path.join(source_dir,filename)
    temp=os.path.join(folder,'Images/test/'+dic_data[filename])
    if j<9000:
        temp=os.path.join(folder,'Images/train/'+dic_data[filename])
    if not os.path.exists(temp):
        os.makedirs(temp)
    dst_filename=os.path.join(temp,filename)
    # Skip files that already exist at the destination.
    if os.path.isfile(src_filename) and not os.path.isfile(dst_filename):
        shutil.copy(src_filename,temp)
    j=j+1;

# Fold any validation images back into the matching train class folders.
lis=os.listdir(folder+'/Images/val');
for i in lis:
    temp=folder+'/Images/train/'+i.strip();
    temp1=folder+'/Images/val/'+i.strip();
    if not os.path.exists(temp1):
        os.makedirs(temp1)
    k=os.listdir(temp1)
    for j in range(0,int(len(k))):
        shutil.move(temp1+'/'+k[j],temp+'/'+k[j])

# Report the resulting per-class image counts.
# NOTE(review): `min` below shadows the builtin and is never used — likely
# leftover from an abandoned min-count computation.
lis=os.listdir(folder+'/Images/train')
min=None
for i in lis:
    temp=folder+'/Images/train/'+i.strip()
    temp=os.listdir(temp)
    print(len(temp))
0f7cba4a8767d9b6e3513c79b2ee5313540ef278 | 3,498 | py | Python | tests/test_mgxs_library_ce_to_mg/test_mgxs_library_ce_to_mg.py | scopatz/openmc | 689d7b31677bc945aa21fa710801ac85562f1e87 | [
"MIT"
] | null | null | null | tests/test_mgxs_library_ce_to_mg/test_mgxs_library_ce_to_mg.py | scopatz/openmc | 689d7b31677bc945aa21fa710801ac85562f1e87 | [
"MIT"
] | null | null | null | tests/test_mgxs_library_ce_to_mg/test_mgxs_library_ce_to_mg.py | scopatz/openmc | 689d7b31677bc945aa21fa710801ac85562f1e87 | [
"MIT"
] | 1 | 2020-12-18T08:22:51.000Z | 2020-12-18T08:22:51.000Z | #!/usr/bin/env python
import os
import sys
import glob
import hashlib
sys.path.insert(0, os.pardir)
from testing_harness import PyAPITestHarness
from input_set import PinCellInputSet
import openmc
import openmc.mgxs
class MGXSTestHarness(PyAPITestHarness):
    """Test harness that runs a continuous-energy (CE) OpenMC calculation,
    collapses its tallies into a multi-group (MG) cross-section library,
    and re-runs OpenMC in multi-group mode."""

    def _build_inputs(self):
        """Build pincell inputs and attach a two-group MGXS tally library."""
        # Set the input set to use the pincell model
        self._input_set = PinCellInputSet()

        # Generate inputs using parent class routine
        super(MGXSTestHarness, self)._build_inputs()

        # Initialize a two-group structure
        energy_groups = openmc.mgxs.EnergyGroups(group_edges=[0, 0.625e-6,
                                                              20.])

        # Initialize MGXS Library for a few cross section types
        self.mgxs_lib = openmc.mgxs.Library(self._input_set.geometry)
        self.mgxs_lib.by_nuclide = False
        self.mgxs_lib.mgxs_types = ['total', 'absorption', 'nu-fission matrix',
                                    'nu-scatter matrix', 'multiplicity matrix']
        self.mgxs_lib.energy_groups = energy_groups
        self.mgxs_lib.correction = None
        self.mgxs_lib.legendre_order = 3
        self.mgxs_lib.domain_type = 'material'
        self.mgxs_lib.build_library()

        # Initialize a tallies file
        self._input_set.tallies = openmc.Tallies()
        self.mgxs_lib.add_to_tallies_file(self._input_set.tallies, merge=False)
        self._input_set.tallies.export_to_xml()

    def _run_openmc(self):
        """Run OpenMC in CE mode, convert to MG inputs, then run MG mode."""
        # Initial run
        if self._opts.mpi_exec is not None:
            returncode = openmc.run(mpi_procs=self._opts.mpi_np,
                                    openmc_exec=self._opts.exe,
                                    mpi_exec=self._opts.mpi_exec)
        else:
            returncode = openmc.run(openmc_exec=self._opts.exe)

        # Fixed: adjacent string literals previously concatenated to
        # "did not exitsuccessfully." (missing space).
        assert returncode == 0, 'CE OpenMC calculation did not exit ' \
            'successfully.'

        # Build MG Inputs
        # Get data needed to execute Library calculations.
        statepoint = glob.glob(os.path.join(os.getcwd(), self._sp_name))[0]
        sp = openmc.StatePoint(statepoint)
        self.mgxs_lib.load_from_statepoint(sp)
        self._input_set.mgxs_file, self._input_set.materials, \
            self._input_set.geometry = self.mgxs_lib.create_mg_mode()

        # Modify settings so we can run in MG mode
        self._input_set.settings.cross_sections = './mgxs.xml'
        self._input_set.settings.energy_mode = 'multi-group'

        # Write modified input files
        self._input_set.settings.export_to_xml()
        self._input_set.geometry.export_to_xml()
        self._input_set.materials.export_to_xml()
        self._input_set.mgxs_file.export_to_xml()

        # Dont need tallies.xml, so remove the file
        if os.path.exists('./tallies.xml'):
            os.remove('./tallies.xml')

        # Re-run MG mode.
        if self._opts.mpi_exec is not None:
            returncode = openmc.run(mpi_procs=self._opts.mpi_np,
                                    openmc_exec=self._opts.exe,
                                    mpi_exec=self._opts.mpi_exec)
        else:
            returncode = openmc.run(openmc_exec=self._opts.exe)

        # Fixed: the MG run's exit status was previously ignored; check it
        # the same way as the CE run so failures surface here.
        assert returncode == 0, 'MG OpenMC calculation did not exit ' \
            'successfully.'

    def _cleanup(self):
        """Remove the generated MGXS library file after the base cleanup."""
        super(MGXSTestHarness, self)._cleanup()
        f = os.path.join(os.getcwd(), 'mgxs.xml')
        if os.path.exists(f):
            os.remove(f)
if __name__ == '__main__':
    # Run the regression harness against the 10-batch statepoint pattern
    # when this file is executed directly.
    harness = MGXSTestHarness('statepoint.10.*', False)
    harness.main()
| 36.821053 | 79 | 0.620926 | 3,174 | 0.907376 | 0 | 0 | 0 | 0 | 0 | 0 | 666 | 0.190395 |
0f7d75f8317a602b1be813bd3c36c2bb2d625290 | 1,287 | py | Python | setup.py | random1st/cloudwatch-metrics | 33094115abd595f3af472008e0002498aaddf33b | [
"MIT"
] | 1 | 2021-03-24T20:32:06.000Z | 2021-03-24T20:32:06.000Z | setup.py | random1st/cloudwatch-metrics | 33094115abd595f3af472008e0002498aaddf33b | [
"MIT"
] | null | null | null | setup.py | random1st/cloudwatch-metrics | 33094115abd595f3af472008e0002498aaddf33b | [
"MIT"
] | 2 | 2020-04-30T12:56:00.000Z | 2020-05-18T09:57:06.000Z | import os
from setuptools import setup, find_packages
from cloudwatch_metrics.version import VERSION
# Read the package README so it can be reused as the long description.
with open(os.path.join(os.path.dirname(__file__),
                       'README.md')) as readme:
    README = readme.read()

# Package metadata and install configuration for cloudwatch_metrics.
setup(
    name='cloudwatch_metrics',
    version=VERSION,
    description='The Cloudwatch Metrics package enables Python developers to record'
                ' and emit information from within their applications to the Cloudwatch service.',
    long_description=README,
    long_description_content_type='text/markdown',
    url='https://github.com/random1st/cloudwatch-metrics',
    author='Amazon Web Services',
    license="GPLv3",
    classifiers=[
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    install_requires=[
        'aiobotocore',
        'boto3<=1.16.52',
        'async-property'
    ],
    keywords='aws cloudwatch metrics',
    packages=find_packages(exclude=['tests*']),
    include_package_data=True
)
| 28.6 | 98 | 0.647242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 636 | 0.494172 |
0f7e825a057f5ebc72d9fb6d24195fedaea8c685 | 216 | py | Python | app/ref/test-ttyP.py | lucid281/pyEfi | c8a5b69820a3c1e7c4c652f7e4194cd8ce8a6e18 | [
"Apache-2.0"
] | 7 | 2017-07-30T20:23:58.000Z | 2021-12-10T19:38:04.000Z | app/ref/test-ttyP.py | lucid281/pyEfi | c8a5b69820a3c1e7c4c652f7e4194cd8ce8a6e18 | [
"Apache-2.0"
] | 2 | 2017-07-31T23:03:39.000Z | 2021-03-26T21:06:02.000Z | app/ref/test-ttyP.py | lucid281/pyEfi | c8a5b69820a3c1e7c4c652f7e4194cd8ce8a6e18 | [
"Apache-2.0"
] | null | null | null | from .. app.pyefi.ttyp import ttyP
# Exercise each ttyP style code (0-7) with a labelled sample line.
ttyP(0, "0 - ttyP test")
ttyP(1, "1 - header")
ttyP(2, "2 - bold")
ttyP(3, "3 - okblue")
ttyP(4, "4 - okgreen")
ttyP(5, "5 - underline")
ttyP(6, "6 - warning")
ttyP(7, "7 - fail")
| 19.636364 | 34 | 0.578704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.462963 |
0f80a9e3135560f0591099bad1bb7a5ddbb51087 | 3,826 | py | Python | src/dirbs/dimensions/duplicate_threshold.py | bryang-qti-qualcomm/DIRBS-Core | 6b48457715338cce4eb6b3948940297ebd789189 | [
"BSD-3-Clause-Clear"
] | null | null | null | src/dirbs/dimensions/duplicate_threshold.py | bryang-qti-qualcomm/DIRBS-Core | 6b48457715338cce4eb6b3948940297ebd789189 | [
"BSD-3-Clause-Clear"
] | null | null | null | src/dirbs/dimensions/duplicate_threshold.py | bryang-qti-qualcomm/DIRBS-Core | 6b48457715338cce4eb6b3948940297ebd789189 | [
"BSD-3-Clause-Clear"
] | null | null | null | """
DIRBS dimension function for duplicate threshold within a time period.
Copyright (c) 2018 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from psycopg2 import sql
from .duplicate_abstract_base import DuplicateAbstractBase
class DuplicateThreshold(DuplicateAbstractBase):
    """Implementation of the DuplicateThreshold classification dimension."""

    def __init__(self, *, threshold, period_days=None, period_months=None, **kwargs):
        """Constructor.

        :param threshold: minimum number of distinct IMSIs an IMEI must be
            seen with (within the analysis window) to be classified a
            duplicate; coerced to ``int``.
        :param period_days: length of the analysis window in days
            (mutually interpreted by the base class with period_months).
        :param period_months: length of the analysis window in months.
        :raises ValueError: if threshold cannot be converted to an integer.
        """
        super().__init__(period_days=period_days, period_months=period_months, **kwargs)

        try:
            self._threshold = int(threshold)
        except (TypeError, ValueError):
            raise ValueError('\'threshold\' parameter must be an integer, got \'{0}\' instead...'.format(threshold))

    @property
    def algorithm_name(self):
        """Overrides Dimension.algorithm_name."""
        return 'Duplicate threshold'

    def _matching_imeis_sql(self, conn, app_config, virt_imei_range_start, virt_imei_range_end, curr_date=None):
        """Overrides Dimension._matching_imeis_sql.

        Returns a SQL string selecting IMEIs (within the given virtual IMEI
        shard range) that were observed with at least ``threshold`` distinct
        valid IMSIs during the computed analysis window.
        """
        analysis_start_date, analysis_end_date = self._calc_analysis_window(conn, curr_date)

        # The inner query collects distinct (imei_norm, imsi) pairs seen in
        # the window; the outer GROUP BY counts distinct IMSIs per IMEI.
        return sql.SQL(
            """SELECT imei_norm
                 FROM (SELECT DISTINCT imei_norm, imsi
                         FROM monthly_network_triplets_country
                        WHERE imei_norm IS NOT NULL
                          AND last_seen >= {analysis_start_date}
                          AND first_seen < {analysis_end_date}
                          AND virt_imei_shard >= {virt_imei_range_start}
                          AND virt_imei_shard < {virt_imei_range_end}
                          AND is_valid_imsi(imsi)) all_seen_imei_imsis
                GROUP BY imei_norm HAVING COUNT(*) >= {threshold}
            """).format(analysis_start_date=sql.Literal(analysis_start_date),
                        analysis_end_date=sql.Literal(analysis_end_date),
                        virt_imei_range_start=sql.Literal(virt_imei_range_start),
                        virt_imei_range_end=sql.Literal(virt_imei_range_end),
                        threshold=sql.Literal(self._threshold)).as_string(conn)


# Module-level hook: the dimension loader looks up this name.
dimension = DuplicateThreshold
| 50.342105 | 118 | 0.715369 | 1,960 | 0.512284 | 0 | 0 | 126 | 0.032933 | 0 | 0 | 2,619 | 0.684527 |
0f80f105f05bcfcfbae2166d1100ff00893f4f18 | 2,112 | py | Python | scripts/common/__init__.py | Innixma/kaggle2017 | e92a2c1c2bf071343247722187a1c1088de88e37 | [
"MIT"
] | null | null | null | scripts/common/__init__.py | Innixma/kaggle2017 | e92a2c1c2bf071343247722187a1c1088de88e37 | [
"MIT"
] | null | null | null | scripts/common/__init__.py | Innixma/kaggle2017 | e92a2c1c2bf071343247722187a1c1088de88e37 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from skimage import measure, morphology
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import numpy as np
import pandas as pd
def plot_slice(img, slice=80):
    """Display a single 2-D slice of a 3-D volume with matplotlib.

    Args:
        img: 3-D array-like indexed as [slice, height, width].
        slice (int): index of the slice to show (note: shadows the builtin
            ``slice``).
    """
    # Show some slice in the middle
    plt.imshow(img[slice])
    plt.show()
def plot_3d(image, threshold=-100):
    """Render a 3-D surface of *image* via marching cubes and save it.

    Args:
        image: 3-D array volume (e.g. a CT scan).
        threshold: iso-surface level passed to marching cubes; the default
            of -100 presumably targets Hounsfield units — TODO confirm.

    Saves the rendered figure to 'plot3d.png' in the working directory.
    """
    # Position the scan upright,
    # so the head of the patient would be at the top facing the camera
    # p = image.transpose(2,1,0)
    p = image
    results = measure.marching_cubes(p, threshold)
    verts = results[0]
    faces = results[1]

    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111, projection='3d')

    # Fancy indexing: `verts[faces]` to generate a collection of triangles
    mesh = Poly3DCollection(verts[faces], alpha=0.70)
    face_color = [0.45, 0.45, 0.75]
    mesh.set_facecolor(face_color)
    ax.add_collection3d(mesh)

    ax.set_xlim(0, p.shape[0])
    ax.set_ylim(0, p.shape[1])
    ax.set_zlim(0, p.shape[2])

    plt.savefig('plot3d.png')
def save(arr, pth):
    """Write *arr* to *pth* as a compressed ``.npz`` archive under key 'data'."""
    handle = open(pth, 'wb+')
    try:
        np.savez_compressed(handle, data=arr)
    finally:
        handle.close()
def load(pth):
    """Read back the array stored under key 'data' in the ``.npz`` at *pth*."""
    archive = np.load(pth)
    return archive['data']
def read_mapping_file(pth):
    """Load a CSV mapping file from *pth* into a pandas DataFrame."""
    mapping = pd.read_csv(pth)
    return mapping
def shuffle_weights(model, weights=None):
    """Randomly permute the weights in `model`, or the given `weights`.

    This is a fast approximation of re-initializing the weights of a model.

    Assumes weights are distributed independently of the dimensions of the
    weight tensors (i.e., the weights have the same distribution along each
    dimension).

    :param Model model: Modify the weights of the given model.
    :param list(ndarray) weights: The model's weights will be replaced by a
        random permutation of these weights. If `None`, permute the model's
        current weights.
    """
    if weights is None:
        weights = model.get_weights()
    # Flatten each tensor, permute its entries, and restore its shape.
    shuffled = [np.random.permutation(w.flat).reshape(w.shape) for w in weights]
    model.set_weights(shuffled)
| 30.608696 | 112 | 0.693655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 920 | 0.435606 |
0f814ba26527117e6b84e9f229772b778a6cda9e | 537 | py | Python | host/Productivity.py | chehansivaruban/Cyber---SDGP | 6676c284c34c4c15279bf3e5cfb73fd4e5b7391a | [
"CC0-1.0"
] | 1 | 2021-05-18T10:55:32.000Z | 2021-05-18T10:55:32.000Z | host/Productivity.py | chehansivaruban/Cyber---SDGP | 6676c284c34c4c15279bf3e5cfb73fd4e5b7391a | [
"CC0-1.0"
] | null | null | null | host/Productivity.py | chehansivaruban/Cyber---SDGP | 6676c284c34c4c15279bf3e5cfb73fd4e5b7391a | [
"CC0-1.0"
] | 2 | 2021-03-29T19:00:55.000Z | 2021-04-02T13:18:07.000Z |
class Productivity:
def __init__(self, irradiance, hours,capacity):
self.irradiance = irradiance
self.hours = hours
self.capacity = capacity
def getUnits(self):
print(self.irradiance)
totalpower = 0
print(totalpower)
for i in self.irradiance:
power = int(self.capacity) * int(i) /1000
totalpower = totalpower+power
# units= (self.irradiance*self.area*self.hours)/1000
print(totalpower)
return totalpower
| 28.263158 | 61 | 0.590317 | 531 | 0.988827 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.098696 |
0f81775989722fac86c0501dba1ff10529d51ca2 | 1,313 | py | Python | sample/all_methods/setNoteApplicationDataEntry.py | matthewayne/evernote-sdk-python | 53b0c1263d250b88a1810987b51bc9def586db02 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2016-02-14T13:39:46.000Z | 2021-09-03T16:02:18.000Z | sample/all_methods/setNoteApplicationDataEntry.py | matthewayne/evernote-sdk-python | 53b0c1263d250b88a1810987b51bc9def586db02 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | sample/all_methods/setNoteApplicationDataEntry.py | matthewayne/evernote-sdk-python | 53b0c1263d250b88a1810987b51bc9def586db02 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2018-03-28T01:08:25.000Z | 2018-03-28T01:08:25.000Z | # Import the Evernote client
from evernote.api.client import EvernoteClient
# Define access token either:
# Developer Tokens (https://dev.evernote.com/doc/articles/dev_tokens.php)
# or OAuth (https://dev.evernote.com/doc/articles/authentication.php)
access_token = "insert dev or oauth token here"
# Setup the client
client = EvernoteClient(token = access_token, sandbox = True)
# Get note store object
note_store = client.get_note_store()
# GUID of the note to attach the application data to
note_guid = "insert note GUID to attach key-value storage to here"
# Value of the key for the storage
# 3rd party apps are only allowed 1
key = "your-consumer-key"
# the value of the application data entry
# containg a string arbitray length
value = "this is the value of the application data"
# Each note is given this 4kb map of arbitrary data, shared by all third-party applications.
# adding new data may cause the field's value to exceed the the 4kb limit.
# In this case, an instance of EDAMUserException is thrown with the BAD_DATA_FORMAT error code.
# Setting this value will overwrite any existing data
usn = note_store.setNoteApplicationDataEntry(note_guid, key, value)
print "Application data set for note with GUID, '%s' with the key '%s' and value '%s' (USN %s)" % (note_guid, key, value, usn)
| 37.514286 | 126 | 0.763899 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 991 | 0.75476 |
0f817ae1f45415f9be3a1a93f0558743b747ab70 | 1,431 | py | Python | setup.py | davidcarboni/cryptolite-python | e40ce2f9584fdd9015caad9bf443a50201df743a | [
"MIT"
] | null | null | null | setup.py | davidcarboni/cryptolite-python | e40ce2f9584fdd9015caad9bf443a50201df743a | [
"MIT"
] | null | null | null | setup.py | davidcarboni/cryptolite-python | e40ce2f9584fdd9015caad9bf443a50201df743a | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
import os
import unittest
def test_suite():
    """Discover and return the unit-test suite under ``tests/``."""
    discovered = unittest.TestLoader().discover('tests', pattern='test_*.py')
    return discovered
def readme():
    """
    Utility function to read the README file.

    Used for the long_description. It's nice, because now 1) we have a top level
    README file and 2) it's easier to type in the README file than to put a raw
    string in below.
    :return: The contents of `README.md`
    """
    # Fixed: the file handle was previously opened and never closed; use a
    # context manager so it is released deterministically.
    readme_path = os.path.join(os.path.dirname(__file__), "README.md")
    with open(readme_path) as fh:
        return fh.read()
# Package metadata and install configuration for Cryptolite.
setup(name='Cryptolite',
      version='0.0.3',
      description='Simple, "right" cryptography.',
      author='David Carboni',
      author_email='david@carboni.io',
      classifiers=[
          'Development Status :: 1 - Planning',
          'Programming Language :: Python :: 3.2',
          'Topic :: Security :: Cryptography',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
      ],
      keywords=['cryptography', 'encryption', 'digital signature', 'key generation', 'key management'],
      url='https://github.com/davidcarboni/cryptolite/tree/python',
      license='MIT',
      packages=find_packages(),
      test_suite='setup.test_suite',
      include_package_data=True,
      zip_safe=True,
      long_description=readme(),
      long_description_content_type="text/markdown",
      )
| 31.8 | 103 | 0.645702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 747 | 0.522013 |
0f8389ee228b8ef136ad33164ea0700cfe706102 | 842 | py | Python | final_project/machinetranslation/tests.py | cabrera-carlos/xzceb-flask_eng_fr | 6f3b643102f2d9fa70231a663b43a1348c0f7cc1 | [
"Apache-2.0"
] | null | null | null | final_project/machinetranslation/tests.py | cabrera-carlos/xzceb-flask_eng_fr | 6f3b643102f2d9fa70231a663b43a1348c0f7cc1 | [
"Apache-2.0"
] | null | null | null | final_project/machinetranslation/tests.py | cabrera-carlos/xzceb-flask_eng_fr | 6f3b643102f2d9fa70231a663b43a1348c0f7cc1 | [
"Apache-2.0"
] | null | null | null | import unittest
from translator import french_to_english, english_to_french
class TestFrenchToEnglish(unittest.TestCase):
    """Unit tests for the french_to_english translation helper."""

    def test1(self):
        """Known phrase translates; None input raises ValueError."""
        self.assertEqual(french_to_english("Bonjour"), "Hello") # test when "Bonjour" is given as input the output is "Hello".
        with self.assertRaises(ValueError): # test when None/null is given as input a ValueError is returned.
            french_to_english(None)
class TestEnglishToFrench(unittest.TestCase):
    """Unit tests for the english_to_french translation helper."""

    def test1(self):
        """Known phrase translates; None input raises ValueError."""
        self.assertEqual(english_to_french("Hello"), "Bonjour") # test when "Hello" is given as input the output is "Bonjour".
        with self.assertRaises(ValueError): # test when None/null is given as input a ValueError is returned.
            english_to_french(None)

# Run the tests when this module is executed directly.
unittest.main()
| 42.1 | 131 | 0.672209 | 744 | 0.88361 | 0 | 0 | 0 | 0 | 0 | 0 | 286 | 0.339667 |
0f854456a91aac536aec2fb17db51a2f8344e01c | 74 | py | Python | augur/metrics/platform/routes.py | Nayan-Das/augur | 857f4a4e7d688fd54356aa0f546834071fbabbf2 | [
"MIT"
] | 1 | 2020-02-28T18:51:43.000Z | 2020-02-28T18:51:43.000Z | augur/metrics/platform/routes.py | Nayan-Das/augur | 857f4a4e7d688fd54356aa0f546834071fbabbf2 | [
"MIT"
] | 1 | 2021-06-02T00:58:41.000Z | 2021-06-02T00:58:41.000Z | augur/metrics/platform/routes.py | Nayan-Das/augur | 857f4a4e7d688fd54356aa0f546834071fbabbf2 | [
"MIT"
] | null | null | null |
def create_platform_routes(server):
metrics = server._augur.metrics
| 14.8 | 35 | 0.77027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0f8689ae827d935b48b38f4d0615ffaedb18fca7 | 49,355 | py | Python | dataset.py | DerryHub/the-TaobaoLive-Commodity-Identify-Competition | 7e5e5c4fbddd9949fe01810d58bd7994889c007c | [
"MIT"
] | 4 | 2020-08-15T14:49:37.000Z | 2022-01-16T08:34:07.000Z | dataset.py | weilin-droid/the-TaobaoLive-Commodity-Identify-Competition | 7e5e5c4fbddd9949fe01810d58bd7994889c007c | [
"MIT"
] | null | null | null | dataset.py | weilin-droid/the-TaobaoLive-Commodity-Identify-Competition | 7e5e5c4fbddd9949fe01810d58bd7994889c007c | [
"MIT"
] | 2 | 2021-05-26T05:16:09.000Z | 2021-06-09T09:07:49.000Z | import os
import torch
import numpy as np
from tqdm import tqdm
import json
from torch.utils.data import Dataset, DataLoader
from arcface.resnet import ResNet
from arcface.googlenet import GoogLeNet
from arcface.inception_v4 import InceptionV4
from arcface.inceptionresnet_v2 import InceptionResNetV2
from arcface.densenet import DenseNet
from arcface.resnet_cbam import ResNetCBAM
import torchvision.transforms as transforms
import cv2
import random
import jieba
from autoaugment import rand_augment_transform
from PIL import Image
'''
for image-text match
'''
class ITMatchTrain(Dataset):
    """Training dataset for image-text matching.

    Iterates the arcface training set once up front to cache the title token
    ids and instance labels for every sample.

    NOTE(review): the image-feature extraction path is commented out, so
    ``self.features`` remains all zeros and items expose only text/instance;
    the backbone model is still loaded (and unused) — confirm intended.
    """

    def __init__(self, opt):
        """Cache texts and instance ids from the arcface video dataset.

        Args:
            opt: namespace of run options (data_path, size, network, saved
                model paths, embedding_size, workers, ...).
        """
        arcfaceDataset = ArcfaceDataset(root_dir=opt.data_path, mode="train", size=(opt.size, opt.size), imgORvdo='video')

        batch_size = 256
        training_params = {"batch_size": batch_size,
                            "shuffle": False,
                            "drop_last": False,
                            "num_workers": opt.workers}

        arcfaceLoader = DataLoader(arcfaceDataset, **training_params)

        self.vocab_size = arcfaceDataset.vocab_size

        # Select the backbone and the checkpoint base name it was saved under.
        if opt.network == 'resnet':
            model = ResNet(opt)
            b_name = opt.network+'_'+opt.mode+'_{}'.format(opt.num_layers_r)
        elif opt.network == 'googlenet':
            model = GoogLeNet(opt)
            b_name = opt.network
        elif opt.network == 'inceptionv4':
            model = InceptionV4(opt)
            b_name = opt.network
        elif opt.network == 'inceptionresnetv2':
            model = InceptionResNetV2(opt)
            b_name = opt.network
        elif opt.network == 'densenet':
            model = DenseNet(opt)
            b_name = opt.network+'_{}'.format(opt.num_layers_d)
        elif opt.network == 'resnet_cbam':
            model = ResNetCBAM(opt)
            b_name = opt.network+'_{}'.format(opt.num_layers_c)
        else:
            raise RuntimeError('Cannot Find the Model: {}'.format(opt.network))

        model.load_state_dict(torch.load(os.path.join(opt.saved_path, b_name+'.pth')))
        model.cuda()
        model.eval()

        self.model_name = b_name

        # Pre-allocated caches filled batch-by-batch below.
        self.features = torch.zeros((len(arcfaceDataset), opt.embedding_size))
        self.texts = torch.zeros((len(arcfaceDataset), 64)).long()
        self.instances = torch.zeros((len(arcfaceDataset))).long()
        print('Calculating features...')
        for i, d in enumerate(tqdm(arcfaceLoader)):
            # img = d['img'].cuda()
            text = d['text']
            instance = d['instance']
            # with torch.no_grad():
            #     feature = model(img).cpu()
            # self.features[i*batch_size:(i+1)*batch_size] = feature
            self.texts[i*batch_size:(i+1)*batch_size] = text
            self.instances[i*batch_size:(i+1)*batch_size] = instance

    def __len__(self):
        """Number of cached samples."""
        return self.texts.size(0)

    def __getitem__(self, index):
        """Return the token-id tensor and instance label for *index*."""
        text = self.texts[index]
        # feature = self.features[index]
        feature = None
        instance = self.instances[index]
        # return {'feature': feature, 'text':text, 'instance':instance}
        return {'text':text, 'instance':instance}
class ITMatchValidation(Dataset):
    """Validation dataset pairing a center-cropped frame image with its
    encoded title text, one pair per instance directory."""

    def __init__(self, size=(224, 224), root_dir='data/validation_instance/', maxLen=64, PAD=0, imgORvdo='video'):
        """Index (image path, encoded text) pairs under *root_dir*.

        Args:
            size: (height, width) of the center crop returned per item.
            root_dir: directory containing one sub-directory per instance.
            maxLen: fixed token-sequence length for text encoding.
            PAD: padding token id.
            imgORvdo: which modality's files to pick ('image' or 'video').
        """
        self.root_dir = root_dir
        self.size = size
        text2num = Text2Num(maxLen=maxLen, root_dir='data', PAD=PAD)
        self.vocab_size = text2num.vocab_size
        assert imgORvdo in ['image', 'video']

        tat = 'validation_'+imgORvdo+'s'
        # tat = 'train_'+imgORvdo+'s'

        with open(os.path.join('data', tat+'_text.json'), 'r') as f:
            textDic = json.load(f)

        # Pre-encode every title to fixed-length token ids.
        for k in textDic.keys():
            textDic[k] = text2num(textDic[k])

        instances = os.listdir(root_dir)
        self.items = []
        print('Loading Data...')
        for instance in tqdm(instances):
            imgs = os.listdir(root_dir+instance)
            l = []
            # Take the first matching file per instance; skip instances
            # without one.
            for img in imgs:
                if imgORvdo in img:
                    l.append(os.path.join(instance, img))
                    text_name = img.split(instance)[-1].split('_')[0]
                    l.append(textDic[text_name])
                    break
            if len(l) < 2:
                continue
            self.items.append(l)
        print('Done')

        # Channel-wise normalization; constants presumably dataset
        # statistics — TODO confirm.
        self.transform = transforms.Normalize(
            mean=[0.55574415, 0.51230767, 0.51123354],
            std=[0.21303795, 0.21604613, 0.21273348])

    def __len__(self):
        """Number of (image, text) pairs."""
        return len(self.items)

    def __getitem__(self, index):
        """Load, center-crop, normalize the image; return it with the text."""
        imgPath, text = self.items[index]
        text = torch.Tensor(text).long()
        # img = np.load(os.path.join(self.root_dir, imgPath))
        img = cv2.imread(os.path.join(self.root_dir, imgPath))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img.astype(np.float32) / 255
        # Center crop to self.size.
        hi, wi, ci = img.shape
        rh = (hi-self.size[0])//2
        rw = (wi-self.size[1])//2
        img = img[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
        img = torch.from_numpy(img)
        img = img.permute(2, 0, 1)
        img = self.transform(img)

        return {
            'img': img,
            'text': text
        }
'''
for text
'''
class Text2Num:
    """Encode a raw text string as a fixed-length list of vocabulary ids.

    Tokenizes with jieba, keeps only in-vocabulary tokens, then truncates or
    pads (with ``PAD``) to exactly ``maxLen`` entries.
    """

    def __init__(self, maxLen, root_dir='data', PAD=0):
        """Load the vocabulary from ``<root_dir>/vocab.json``."""
        vocab_path = os.path.join(root_dir, 'vocab.json')
        with open(vocab_path, 'r') as f:
            self.vocab = json.load(f)
        self.PAD = PAD
        self.maxLen = maxLen
        self.vocab_size = len(self.vocab)

    def __call__(self, text):
        """Return exactly ``maxLen`` token ids for *text*."""
        tokens = jieba.cut(text, cut_all=False, HMM=True)
        ids = [self.vocab[t.strip()] for t in tokens if t.strip() in self.vocab]
        if len(ids) > self.maxLen:
            ids = ids[:self.maxLen]
        else:
            ids.extend([self.PAD] * (self.maxLen - len(ids)))
        assert len(ids) == self.maxLen
        return ids
'''
for efficientdet
'''
class EfficientdetDataset(Dataset):
    """Detection dataset for EfficientDet training/validation.

    Each item is an image, its bounding-box annotations, and the encoded
    title text of the source image/video.
    """

    def __init__(self, root_dir='data', mode='train', imgORvdo='all', transform=None, maxLen=64, PAD=0):
        """Index annotated frames and pre-encode their title texts.

        Args:
            root_dir: dataset root containing label/annotation/text JSONs.
            mode: 'train' or 'validation'.
            imgORvdo: 'image', 'video', or 'all' (both).
            transform: optional callable applied to each sample dict.
            maxLen: fixed token-sequence length for text encoding.
            PAD: padding token id.
        """
        assert mode in ['train', 'validation']
        assert imgORvdo in ['image', 'video', 'all']

        self.root_dir = root_dir
        self.transform = transform
        text2num = Text2Num(maxLen=maxLen, root_dir=root_dir, PAD=PAD)
        self.vocab_size = text2num.vocab_size

        label_file = 'label.json'
        with open(os.path.join(root_dir, label_file), 'r') as f:
            self.labelDic = json.load(f)

        self.num_classes = len(self.labelDic['label2index'])

        # Source sub-directories / JSON prefixes for the chosen modalities.
        if imgORvdo == 'image':
            tats = [mode + '_images']
        elif imgORvdo == 'video':
            tats = [mode + '_videos']
        else:
            tats = [mode + '_images', mode + '_videos']

        self.textDic = {}
        ds = []
        for t in tats:
            with open(os.path.join(root_dir, t+'_annotation.json'), 'r') as f:
                ds.append(json.load(f))
            with open(os.path.join(root_dir, t+'_text.json'), 'r') as f:
                self.textDic[t] = json.load(f)

        # Pre-encode every title to fixed-length token ids.
        for k in self.textDic.keys():
            for kk in self.textDic[k].keys():
                self.textDic[k][kk] = text2num(self.textDic[k][kk])

        ls = [d['annotations'] for d in ds]

        # Keep only frames that have at least one annotation.
        self.images = []
        print('Loading {} {} data...'.format(mode, imgORvdo))
        for i, l in enumerate(ls):
            for d in l:
                if len(d['annotations']) == 0:
                    continue
                t = []
                t.append(os.path.join(tats[i], d['img_name']))
                t.append(d['annotations'])
                t.append(d['img_name'])
                t.append(tats[i])
                self.images.append(t)
        # print(len(self.images))
        # self.images = self.images[:1000]
        print('Done')

    def __len__(self):
        """Number of annotated frames."""
        return len(self.images)

    def __getitem__(self, index):
        """Return {'img', 'annot', 'text'} for the frame at *index*.

        'annot' is an (N, 6) array per box: [x1, y1, x2, y2, label,
        has_instance_flag], where the flag is 1 iff instance_id > 0.
        """
        imgPath, annotationsList, imgName, t = self.images[index]
        text_name = imgName.split('_')[0]
        text = self.textDic[t][text_name]
        text = torch.Tensor(text).long()
        img = cv2.imread(os.path.join(self.root_dir, imgPath))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img.astype(np.float32) / 255

        annotations = np.zeros((len(annotationsList), 6))
        for i, annotationDic in enumerate(annotationsList):
            annotation = np.zeros((1, 6))
            annotation[0, :4] = annotationDic['box']
            annotation[0, 4] = annotationDic['label']
            if annotationDic['instance_id'] > 0:
                annotation[0, 5] = 1
            else:
                annotation[0, 5] = 0
            annotations[i:i+1, :] = annotation
            # annotations = np.append(annotations, annotation, axis=0)

        sample = {'img': img, 'annot': annotations, 'text': text}
        if self.transform:
            sample = self.transform(sample)
        return sample

    def label2index(self, label):
        """Map a class label string to its integer index."""
        return self.labelDic['label2index'][label]

    def index2label(self, index):
        """Map an integer class index back to its label string."""
        return self.labelDic['index2label'][str(index)]

    def getImagePath(self, index):
        """Return the relative image path for the frame at *index*."""
        imgPath, annotationsList, imgName, t = self.images[index]
        return imgPath

    def getImageInfo(self, index):
        """Return (path, image id, frame id) parsed from the file name."""
        imgPath, annotationsList, imgName, t = self.images[index]
        imgID, frame = imgName[:-4].split('_')
        return imgPath, imgID, frame
class EfficientdetDatasetVideo(Dataset):
    """Like EfficientdetDataset, but yields whole videos (10 frames each).

    Frames are grouped by the first six characters of the file name (the
    video id); every group is asserted to contain exactly 10 frames.
    """
    def __init__(self, root_dir='data', mode='train', imgORvdo='video', transform=None, maxLen=64, PAD=0):
        assert mode in ['train', 'validation']
        assert imgORvdo in ['video']
        self.root_dir = root_dir
        self.transform = transform
        text2num = Text2Num(maxLen=maxLen, root_dir=root_dir, PAD=PAD)
        self.vocab_size = text2num.vocab_size
        label_file = 'label.json'
        with open(os.path.join(root_dir, label_file), 'r') as f:
            self.labelDic = json.load(f)
        self.num_classes = len(self.labelDic['label2index'])
        tats = [mode + '_videos']
        self.textDic = {}
        ds = []
        for t in tats:
            with open(os.path.join(root_dir, t+'_annotation.json'), 'r') as f:
                ds.append(json.load(f))
            with open(os.path.join(root_dir, t+'_text.json'), 'r') as f:
                self.textDic[t] = json.load(f)
        # Pre-encode all titles once.
        for k in self.textDic.keys():
            for kk in self.textDic[k].keys():
                self.textDic[k][kk] = text2num(self.textDic[k][kk])
        ls = [d['annotations'] for d in ds]
        self.images = []
        self.videos = {}
        print('Loading {} {} data...'.format(mode, imgORvdo))
        for i, l in enumerate(ls):
            for d in l:
                # Group frames by video id (first 6 chars of the file name).
                if d['img_name'][:6] not in self.videos:
                    self.videos[d['img_name'][:6]] = []
                # if len(d['annotations']) == 0:
                #     continue
                t = []
                t.append(os.path.join(tats[i], d['img_name']))
                t.append(d['annotations'])
                t.append(d['img_name'])
                t.append(tats[i])
                self.videos[d['img_name'][:6]].append(t)
                # self.images.append(t)
        self.videos = list(self.videos.values())
        for l in self.videos:
            assert len(l) == 10
        # print(len(self.images))
        # NOTE(review): truncates to the first 100 videos — looks like leftover
        # debug sampling (similar lines elsewhere are commented out); confirm.
        self.videos = self.videos[:100]
        print('Done')
    def __len__(self):
        return len(self.videos)
    def __getitem__(self, index):
        """Return a list of 10 per-frame samples for one video."""
        lst = self.videos[index]
        datas = []
        for imgPath, annotationsList, imgName, t in lst:
            # imgPath, annotationsList, imgName, t = self.images[index]
            text_name = imgName.split('_')[0]
            text = self.textDic[t][text_name]
            text = torch.Tensor(text).long()
            img = cv2.imread(os.path.join(self.root_dir, imgPath))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = img.astype(np.float32) / 255
            annotations = np.zeros((len(annotationsList), 6))
            for i, annotationDic in enumerate(annotationsList):
                annotation = np.zeros((1, 6))
                annotation[0, :4] = annotationDic['box']
                annotation[0, 4] = annotationDic['label']
                # Column 5 flags boxes that carry a positive instance id.
                if annotationDic['instance_id'] > 0:
                    annotation[0, 5] = 1
                else:
                    annotation[0, 5] = 0
                annotations[i:i+1, :] = annotation
                # annotations = np.append(annotations, annotation, axis=0)
            sample = {'img': img, 'annot': annotations, 'text': text}
            datas.append(sample)
        # The transform here receives the whole list of frame samples.
        if self.transform:
            datas = self.transform(datas)
        return datas
# def label2index(self, label):
# return self.labelDic['label2index'][label]
# def index2label(self, index):
# return self.labelDic['index2label'][str(index)]
# def getImagePath(self, index):
# imgPath, annotationsList, imgName, t = self.images[index]
# return imgPath
# def getImageInfo(self, index):
# imgPath, annotationsList, imgName, t = self.images[index]
# imgID, frame = imgName[:-4].split('_')
# return imgPath, imgID, frame
'''
for arcface
'''
class ArcfaceDataset(Dataset):
    """Instance-recognition dataset for ArcFace training.

    Serves pre-cropped instance images (saved as ``.npy`` under
    ``<split>_instance/``) with a random 256x256 crop, optional horizontal
    flip, normalization, the encoded title text, and the instance/label ids.

    Fix vs. original: 'train_2' was handled by the mode dispatch below but
    rejected by the entry assert, making that branch unreachable dead code.
    The assert now accepts it (backward compatible — previously valid modes
    still work).
    """
    def __init__(self, root_dir='data', mode='train', size=(112, 112), flip_x=0.5, maxLen=64, PAD=0, imgORvdo='all'):
        # 'train_2' added: the dispatch below has always had a branch for it.
        assert mode in ['train', 'train_2', 'all']
        assert imgORvdo in ['all', 'image', 'video']
        mean=[0.55574415, 0.51230767, 0.51123354]
        aa_params = dict(
            translate_const=int(size[0] * 0.40),
            img_mean=tuple([min(255, round(255 * x)) for x in mean]),
        )
        # RandAugment policy (currently unused in __getitem__, kept available).
        self.randAug = rand_augment_transform('rand-m9-n3-mstd0.5', aa_params)
        self.root_dir = root_dir
        self.size = size
        self.flip_x = flip_x
        # Each mode maps to the splits it aggregates and the instance-id file.
        if mode == 'train':
            modes = ['train']
            instanceFile = 'instanceID.json'
        elif mode == 'train_2':
            modes = ['train', 'validation_2']
            instanceFile = 'instanceID_2.json'
        elif mode == 'all':
            modes = ['train', 'validation_2', 'validation']
            instanceFile = 'instanceID_all.json'
        with open(os.path.join(root_dir, instanceFile), 'r') as f:
            self.clsDic = json.load(f)
        with open(os.path.join(root_dir, 'instance2label.json'), 'r') as f:
            self.instance2label = json.load(f)
        text2num = Text2Num(maxLen=maxLen, root_dir=root_dir, PAD=PAD)
        self.vocab_size = text2num.vocab_size
        self.images = []
        self.textDics = {}
        # Renamed from `mode` to avoid shadowing the constructor parameter.
        for split in modes:
            if imgORvdo == 'all':
                tats = [split + '_images', split + '_videos']
            elif imgORvdo == 'image':
                tats = [split + '_images']
            elif imgORvdo == 'video':
                tats = [split + '_videos']
            savePath = split + '_instance'
            # NOTE: self.savePath ends up pointing at the last split processed;
            # per-image paths below already embed the correct split directory.
            self.savePath = os.path.join(root_dir, savePath)
            d = []
            textDic = []
            for tat in tats:
                with open(os.path.join(root_dir, tat+'_annotation.json'), 'r') as f:
                    d.append(json.load(f))
                with open(os.path.join(root_dir, tat+'_text.json'), 'r') as f:
                    textDic.append(json.load(f))
            # Pre-encode all titles for this split.
            for i in range(len(textDic)):
                for k in textDic[i].keys():
                    textDic[i][k] = text2num(textDic[i][k])
            self.textDics[split] = textDic
            l = [dd['annotations'] for dd in d]
            print('Loading data...')
            for i, ll in enumerate(l):
                for d in ll:
                    for dd in d['annotations']:
                        # Keep only boxes with a known positive instance id.
                        if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
                            t = []
                            t.append(os.path.join(self.savePath, str(dd['instance_id']), tats[i]+str(dd['instance_id'])+d['img_name']))
                            t.append(dd['instance_id'])
                            t.append(d['img_name'].split('_')[0])
                            t.append(i)  # index into textDics[split] (image/video)
                            t.append(split)
                            self.images.append(t)
        self.num_classes = len(self.clsDic)
        self.num_labels = len(set(self.instance2label.values()))
        print('Done')
        self.transform = transforms.Normalize(
            mean=[0.55574415, 0.51230767, 0.51123354],
            std=[0.21303795, 0.21604613, 0.21273348])
    def __len__(self):
        return len(self.images)
    def __getitem__(self, index):
        """Return one crop: {'img', 'instance', 'label', 'text', 'iORv'}."""
        imgName, instance_id, textName, iORv, mode = self.images[index]
        # Crops are stored as pre-scaled float arrays; swap extension to .npy.
        img = np.load(imgName[:-4]+'.npy')
        text = self.textDics[mode][iORv][textName]
        text = torch.tensor(text).long()
        iORv = torch.tensor(iORv).long()
        h, w, c = img.shape
        # Random 256x256 crop — assumes stored crops are at least 256 on a side.
        rh = random.randint(0, h-256)
        rw = random.randint(0, w-256)
        img = img[rh:256+rh, rw:256+rw, :]
        img = cv2.resize(img, self.size)
        instance = torch.tensor(self.clsDic[str(instance_id)])
        label = torch.tensor(self.instance2label[str(instance_id)])
        # Random horizontal flip.
        if np.random.rand() < self.flip_x:
            img = img[:, ::-1, :].copy()
        img = torch.from_numpy(img)
        img = img.permute(2, 0, 1)
        img = self.transform(img)
        return {'img':img, 'instance':instance, 'label':label, 'text': text, 'iORv': iORv}
class ArcfaceDatasetSeparate(Dataset):
    """Paired image/video ArcFace dataset.

    Each item is one instance id served as a (product-image crop, video-frame
    crop) pair with the shared instance class.  Instances missing either an
    image crop or a video crop are dropped.

    Fix vs. original: the filtering loop deleted keys from ``self.dic`` while
    iterating ``self.dic.keys()``, which raises ``RuntimeError: dictionary
    changed size during iteration`` on Python 3 whenever any instance is
    incomplete.  It now iterates over a snapshot of the keys.
    """
    def __init__(self, root_dir='data', mode='train', size=(112, 112), flip_x=0.5, maxLen=64, PAD=0, imgORvdo='all'):
        assert mode in ['train']
        assert imgORvdo in ['all']
        self.root_dir = root_dir
        self.size = size
        self.flip_x = flip_x
        if imgORvdo == 'all':
            tats = [mode + '_images', mode + '_videos']
        elif imgORvdo == 'image':
            tats = [mode + '_images']
        elif imgORvdo == 'video':
            tats = [mode + '_videos']
        savePath = mode + '_instance'
        self.savePath = os.path.join(root_dir, savePath)
        text2num = Text2Num(maxLen=maxLen, root_dir=root_dir, PAD=PAD)
        self.vocab_size = text2num.vocab_size
        d = []
        self.textDic = []
        for tat in tats:
            with open(os.path.join(root_dir, tat+'_annotation.json'), 'r') as f:
                d.append(json.load(f))
            with open(os.path.join(root_dir, tat+'_text.json'), 'r') as f:
                self.textDic.append(json.load(f))
        # Pre-encode all titles.
        for i in range(len(self.textDic)):
            for k in self.textDic[i].keys():
                self.textDic[i][k] = text2num(self.textDic[i][k])
        l = [dd['annotations'] for dd in d]
        self.images = []
        with open(os.path.join(root_dir, 'instanceID.json'), 'r') as f:
            self.clsDic = json.load(f)
        with open(os.path.join(root_dir, 'instance2label.json'), 'r') as f:
            self.instance2label = json.load(f)
        names = ['image', 'video']
        print('Loading data...')
        for i, ll in enumerate(l):
            for d in ll:
                for dd in d['annotations']:
                    # Keep only boxes with a known positive instance id.
                    if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
                        t = []
                        t.append(os.path.join(str(dd['instance_id']), tats[i]+str(dd['instance_id'])+d['img_name']))
                        t.append(dd['instance_id'])
                        t.append(d['img_name'].split('_')[0])
                        t.append(names[i])  # media source: 'image' or 'video'
                        self.images.append(t)
        self.num_classes = len(self.clsDic)
        self.num_labels = len(set(self.instance2label.values()))
        # Group crop indices per instance, separated by media source.
        self.dic = {}
        for i in range(len(self.images)):
            imgName, instance_id, textName, iORv = self.images[i]
            if instance_id not in self.dic:
                self.dic[instance_id] = {}
                self.dic[instance_id]['image'] = []
                self.dic[instance_id]['video'] = []
            self.dic[instance_id][iORv].append(i)
        # Drop instances lacking either side of the pair.  Iterate a snapshot
        # of the keys: deleting from a dict while iterating it raises
        # RuntimeError on Python 3.
        for k in list(self.dic.keys()):
            if len(self.dic[k]['image']) == 0 or len(self.dic[k]['video']) == 0:
                del self.dic[k]
        self.dic = list(self.dic.items())
        print('Done')
        self.transform = transforms.Normalize(
            mean=[0.55574415, 0.51230767, 0.51123354],
            std=[0.21303795, 0.21604613, 0.21273348])
    def __len__(self):
        return len(self.dic)
    def __getitem__(self, index):
        """Return {'img', 'vdo', 'instance'} for one instance id."""
        imgIndex = random.choice(self.dic[index][1]['image'])
        vdoIndex = random.choice(self.dic[index][1]['video'])
        sample = []
        instances = []
        # Renamed loop variable (was `index`, shadowing the parameter).
        for crop_index in [imgIndex, vdoIndex]:
            imgName, instance_id, textName, iORv = self.images[crop_index]
            img = np.load(os.path.join(self.savePath, imgName)[:-4]+'.npy')
            h, w, c = img.shape
            # Random crop with jittered corners, then resize to self.size.
            rh_1 = random.randint(0, h-224)
            rh_2 = random.randint(224, h)
            rw_1 = random.randint(0, w-224)
            rw_2 = random.randint(224, w)
            img = img[rh_1:rh_2, rw_1:rw_2, :]
            img = cv2.resize(img, self.size)
            instances.append(torch.tensor(self.clsDic[str(instance_id)]))
            # Random horizontal flip.
            if np.random.rand() < self.flip_x:
                img = img[:, ::-1, :].copy()
            img = torch.from_numpy(img)
            img = img.permute(2, 0, 1)
            img = self.transform(img)
            sample.append(img)
        assert instances[0] == instances[1]
        return {'img': sample[0], 'vdo':sample[1], 'instance':instances[0]}
class TripletDataset(Dataset):
    """Triplet-sampling dataset: (query, positive, negative) instance crops.

    The positive shares the query's instance id; the negative shares the
    query's category label but has a different instance id.
    """
    def __init__(self, root_dir='data', mode='train', size=(112, 112), flip_x=0.5):
        assert mode in ['train']
        self.root_dir = root_dir
        self.size = size
        self.flip_x = flip_x
        img_tat = mode + '_images'
        vdo_tat = mode + '_videos'
        savePath = mode + '_instance'
        self.savePath = os.path.join(root_dir, savePath)
        with open(os.path.join(root_dir, img_tat+'_annotation.json'), 'r') as f:
            d_i = json.load(f)
        with open(os.path.join(root_dir, vdo_tat+'_annotation.json'), 'r') as f:
            d_v = json.load(f)
        with open(os.path.join(root_dir, 'instanceID.json'), 'r') as f:
            self.clsDic = json.load(f)
        with open(os.path.join(root_dir, 'instance2label.json'), 'r') as f:
            instance2label = json.load(f)
        l_i = d_i['annotations']
        l_v = d_v['annotations']
        self.images = []
        print('Loading data...')
        # Collect image crops: [relative path, instance index, category label].
        for d in l_i:
            for dd in d['annotations']:
                if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
                    t = []
                    t.append(os.path.join(str(dd['instance_id']), img_tat+str(dd['instance_id'])+d['img_name']))
                    t.append(self.clsDic[str(dd['instance_id'])])
                    t.append(instance2label[str(dd['instance_id'])])
                    self.images.append(t)
        # Same for video-frame crops.
        for d in l_v:
            for dd in d['annotations']:
                if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
                    t = []
                    t.append(os.path.join(str(dd['instance_id']), vdo_tat+str(dd['instance_id'])+d['img_name']))
                    t.append(self.clsDic[str(dd['instance_id'])])
                    t.append(instance2label[str(dd['instance_id'])])
                    self.images.append(t)
        self.num_classes = len(self.clsDic)
        self.num_labels = len(set(instance2label.values()))
        # cls_ins_dic: label -> instance_id -> list of indices into self.images.
        self.cls_ins_dic = {}
        for i, l in enumerate(self.images):
            imgName, instance_id, label = l
            if label not in self.cls_ins_dic:
                self.cls_ins_dic[label] = {}
            if instance_id not in self.cls_ins_dic[label]:
                self.cls_ins_dic[label][instance_id] = []
            self.cls_ins_dic[label][instance_id].append(i)
        # Negative sampling needs at least two instances per label.
        for k in self.cls_ins_dic.keys():
            if len(self.cls_ins_dic[k]) < 2:
                raise RuntimeError('size of self.cls_ins_dic[k] must be larger than 1')
        print('Done')
        self.transform = transforms.Normalize(
            mean=[0.55574415, 0.51230767, 0.51123354],
            std=[0.21303795, 0.21604613, 0.21273348])
    def __len__(self):
        return len(self.images)
    def __getitem__(self, index):
        """Return a (query, positive, negative) triplet of normalized crops."""
        imgName_q, instance_id_q, label_q = self.images[index]
        # Positive: a different crop of the same instance.
        p_index = index
        while p_index == index:
            p_index = random.choice(self.cls_ins_dic[label_q][instance_id_q])
        # Negative: same label, different instance.
        instance_id_n = instance_id_q
        while instance_id_n == instance_id_q:
            instance_id_n = random.choice(list(self.cls_ins_dic[label_q].keys()))
        n_index = random.choice(self.cls_ins_dic[label_q][instance_id_n])
        imgName_p, instance_id_p, label_p = self.images[p_index]
        imgName_n, instance_id_n, label_n = self.images[n_index]
        # Sanity: all three share a label; query/positive share an instance.
        assert len(set([label_q, label_p, label_n])) == 1
        assert len(set([instance_id_q, instance_id_p])) == 1
        instance_id_q = torch.tensor(instance_id_q)
        instance_id_p = torch.tensor(instance_id_p)
        instance_id_n = torch.tensor(instance_id_n)
        img_q = np.load(os.path.join(self.savePath, imgName_q)[:-4]+'.npy')
        img_p = np.load(os.path.join(self.savePath, imgName_p)[:-4]+'.npy')
        img_n = np.load(os.path.join(self.savePath, imgName_n)[:-4]+'.npy')
        hq, wq, cq = img_q.shape
        hp, wp, cp = img_p.shape
        hn, wn, cn = img_n.shape
        # Independent random crops to self.size for each member.
        rh = random.randint(0, hq-self.size[0])
        rw = random.randint(0, wq-self.size[1])
        img_q = img_q[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
        rh = random.randint(0, hp-self.size[0])
        rw = random.randint(0, wp-self.size[1])
        img_p = img_p[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
        rh = random.randint(0, hn-self.size[0])
        rw = random.randint(0, wn-self.size[1])
        img_n = img_n[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
        # Independent random horizontal flips.
        if np.random.rand() < self.flip_x:
            img_q = img_q[:, ::-1, :].copy()
        if np.random.rand() < self.flip_x:
            img_p = img_p[:, ::-1, :].copy()
        if np.random.rand() < self.flip_x:
            img_n = img_n[:, ::-1, :].copy()
        img_q = torch.from_numpy(img_q).permute(2, 0, 1)
        img_p = torch.from_numpy(img_p).permute(2, 0, 1)
        img_n = torch.from_numpy(img_n).permute(2, 0, 1)
        img_q = self.transform(img_q)
        img_p = self.transform(img_p)
        img_n = self.transform(img_n)
        return {
            'img_q':img_q,
            'img_p':img_p,
            'img_n':img_n,
            'img_q_instance':instance_id_q,
            'img_p_instance':instance_id_p,
            'img_n_instance':instance_id_n,
        }
class HardTripletDataset(Dataset):
    """Batch-hard triplet mining dataset: n_samples crops per instance.

    Each item is one instance id; __getitem__ returns ``n_samples`` random
    crops of that instance stacked along dim 0, for in-batch hard mining.
    """
    def __init__(self, root_dir='data', mode='train', size=(112, 112), flip_x=0.5, n_samples=4):
        assert mode in ['train', 'all', 'train_2']
        mean=[0.55574415, 0.51230767, 0.51123354]
        aa_params = dict(
            translate_const=int(size[0] * 0.40),
            img_mean=tuple([min(255, round(255 * x)) for x in mean]),
        )
        # RandAugment policy (currently unused in __getitem__, kept available).
        self.randAug = rand_augment_transform('rand-m9-n3-mstd0.5', aa_params)
        self.root_dir = root_dir
        self.size = size
        self.flip_x = flip_x
        self.n_samples = n_samples
        # Each mode maps to the splits it aggregates and the instance-id file.
        if mode == 'train':
            modes = ['train']
            instanceFile = 'instanceID.json'
        elif mode == 'train_2':
            modes = ['train', 'validation_2']
            instanceFile = 'instanceID_2.json'
        elif mode == 'all':
            modes = ['train', 'validation_2', 'validation']
            instanceFile = 'instanceID_all.json'
        with open(os.path.join(root_dir, instanceFile), 'r') as f:
            self.clsDic = json.load(f)
        # samples: instance index -> list of crop file paths (images + videos).
        self.samples = {}
        for mode in modes:
            img_tat = mode + '_images'
            vdo_tat = mode + '_videos'
            savePath = mode + '_instance'
            savePath = os.path.join(root_dir, savePath)
            with open(os.path.join(root_dir, img_tat+'_annotation.json'), 'r') as f:
                d_i = json.load(f)
            with open(os.path.join(root_dir, vdo_tat+'_annotation.json'), 'r') as f:
                d_v = json.load(f)
            l_i = d_i['annotations']
            l_v = d_v['annotations']
            print('Loading data...')
            for d in l_i:
                for dd in d['annotations']:
                    if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
                        instance = self.clsDic[str(dd['instance_id'])]
                        if instance not in self.samples:
                            self.samples[instance] = []
                        self.samples[instance].append(
                            os.path.join(savePath, str(dd['instance_id']), img_tat+str(dd['instance_id'])+d['img_name']))
            for d in l_v:
                for dd in d['annotations']:
                    if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
                        instance = self.clsDic[str(dd['instance_id'])]
                        if instance not in self.samples:
                            self.samples[instance] = []
                        self.samples[instance].append(
                            os.path.join(savePath, str(dd['instance_id']), vdo_tat+str(dd['instance_id'])+d['img_name']))
        self.num_classes = len(self.clsDic)
        # Duplicate short lists so random.sample(n_samples) is always valid.
        for k in self.samples.keys():
            while len(self.samples[k]) < n_samples:
                self.samples[k] *= 2
            assert len(self.samples[k]) >= n_samples
        self.instances = list(self.samples.keys())
        print('Done')
        self.transform = transforms.Normalize(
            mean=[0.55574415, 0.51230767, 0.51123354],
            std=[0.21303795, 0.21604613, 0.21273348])
    def __len__(self):
        return len(self.instances)
    def __getitem__(self, index):
        """Return {'img': (n_samples,C,H,W), 'instance': (n_samples,)}."""
        instance = self.instances[index]
        imgPaths = random.sample(self.samples[instance], self.n_samples)
        imgs = []
        instances = []
        for imgPath in imgPaths:
            img = np.load(imgPath[:-4]+'.npy')
            # NOTE(review): the assert below forces size[0] == 256, so the
            # rescale branch after it is dead code — confirm which is intended.
            assert self.size[0] == 256
            if self.size[0] != 256:
                r = 256 / self.size[0]
                img = cv2.resize(img, (int(270/r), int(270/r)))
            h, w, c = img.shape
            # Random crop to self.size.
            rh = random.randint(0, h-self.size[0])
            rw = random.randint(0, w-self.size[1])
            img = img[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
            instance_t = torch.tensor(instance)
            # Random horizontal flip.
            if np.random.rand() < self.flip_x:
                img = img[:, ::-1, :].copy()
            img = torch.from_numpy(img)
            img = img.permute(2, 0, 1)
            img = self.transform(img)
            imgs.append(img)
            instances.append(instance_t)
        imgs = torch.stack(imgs, dim=0)
        instances = torch.stack(instances, dim=0)
        return {'img': imgs, 'instance': instances}
'''
for validation
'''
class ValidationArcfaceDataset(Dataset):
    """Validation pairs for ArcFace: one image crop + one video crop per instance.

    Only instances that have both an image-side and a video-side crop are
    kept.  The dataset is virtually doubled: indices past ``len(items)``
    return the horizontally flipped versions of the same pair.
    """
    def __init__(self, size=(112, 112), root_dir='data/validation_instance/', maxLen=64, PAD=0):
        self.root_dir = root_dir
        self.size = size
        text2num = Text2Num(maxLen=maxLen, root_dir='data', PAD=PAD)
        self.vocab_size = text2num.vocab_size
        img_tat = 'validation_images'
        vdo_tat = 'validation_videos'
        with open(os.path.join('data', img_tat+'_text.json'), 'r') as f:
            self.textDic_i = json.load(f)
        with open(os.path.join('data', vdo_tat+'_text.json'), 'r') as f:
            self.textDic_v = json.load(f)
        # Pre-encode titles for both sides.
        for k in self.textDic_i.keys():
            self.textDic_i[k] = text2num(self.textDic_i[k])
        for k in self.textDic_v.keys():
            self.textDic_v[k] = text2num(self.textDic_v[k])
        instances = os.listdir(root_dir)
        self.items = []
        # s = ''
        print('Loading Data...')
        for instance in tqdm(instances):
            imgs = os.listdir(root_dir+instance)
            if len(imgs) < 2:
                continue
            # Build [img path, img text key, vdo path, vdo text key, instance];
            # take the first image-side and first video-side crop found.
            l = []
            for img in imgs:
                if 'images' in img:
                    l.append(os.path.join(instance, img))
                    text_name = img.split(instance)[-1].split('_')[0]
                    l.append(text_name)
                    break
            if len(l) == 0:
                continue
            for img in imgs:
                if 'videos' in img:
                    l.append(os.path.join(instance, img))
                    text_name = img.split(instance)[-1].split('_')[0]
                    l.append(text_name)
                    break
            # Require both sides to be present (4 entries so far).
            if len(l) < 4:
                continue
            l.append(instance)
            # s += '{}\t{}\n'.format(l[0], l[2])
            self.items.append(l)
        # with open('validation_path.txt', 'w') as f:
        #     f.write(s)
        self.length = len(self.items)
        print('Done')
        self.transform = transforms.Normalize(
            mean=[0.55574415, 0.51230767, 0.51123354],
            std=[0.21303795, 0.21604613, 0.21273348])
    def __len__(self):
        # Doubled: second half serves flipped copies.
        return len(self.items) * 2
    def __getitem__(self, index):
        """Return {'img', 'vdo', 'img_text', 'vdo_text', 'instance', 'img_e', 'vdo_e'}."""
        imgPath, textName_img, vdoPath, textName_vdo, instance = self.items[index%self.length]
        img_text = self.textDic_i[textName_img]
        vdo_text = self.textDic_v[textName_vdo]
        img_text = torch.Tensor(img_text).long()
        vdo_text = torch.Tensor(vdo_text).long()
        # img = np.load(os.path.join(self.root_dir, imgPath))
        # vdo = np.load(os.path.join(self.root_dir, vdoPath))
        img = cv2.imread(os.path.join(self.root_dir, imgPath))
        vdo = cv2.imread(os.path.join(self.root_dir, vdoPath))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img.astype(np.float32) / 255
        vdo = cv2.cvtColor(vdo, cv2.COLOR_BGR2RGB)
        vdo = vdo.astype(np.float32) / 255
        hi, wi, ci = img.shape
        hv, wv, cv = vdo.shape
        # Rescale when the target size is not 256, then center-crop.
        if self.size[0] != 256:
            r = 256 / self.size[0]
            img = cv2.resize(img, (int(hi/r), int(wi/r)))
            vdo = cv2.resize(vdo, (int(hv/r), int(wv/r)))
            hi, wi, ci = img.shape
            hv, wv, cv = vdo.shape
        rh = (hi-self.size[0])//2
        rw = (wi-self.size[1])//2
        img = img[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
        rh = (hv-self.size[0])//2
        rw = (wv-self.size[1])//2
        vdo = vdo[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
        # Second half of the doubled index space: mirror both crops.
        if index >= self.length:
            img = img[:, ::-1, :].copy()
            vdo = vdo[:, ::-1, :].copy()
        img = torch.from_numpy(img)
        img = img.permute(2, 0, 1)
        vdo = torch.from_numpy(vdo)
        vdo = vdo.permute(2, 0, 1)
        img = self.transform(img)
        vdo = self.transform(vdo)
        return {
            'img': img,
            'vdo': vdo,
            'img_text': img_text,
            'vdo_text': vdo_text,
            'instance':instance,
            'img_e': torch.tensor(0),
            'vdo_e': torch.tensor(1)
        }
class ValidationDataset(Dataset):
    """Serves cropped detections out of validation frames.

    ``items`` rows are (frame, imgID, imgPath, xmin, ymin, xmax, ymax,
    classes).  The most recently decoded frame is cached so consecutive
    detections from the same image do not re-read the file.
    """
    def __init__(self, root_dir, items, size):
        self.size = size
        self.root_dir = root_dir
        # One-frame cache: path of the last decoded image plus its pixels.
        self.imgPath = None
        self.img = None
        self.items = items
        self.transform = transforms.Normalize(
            mean=[0.55574415, 0.51230767, 0.51123354],
            std=[0.21303795, 0.21604613, 0.21273348])

    def __len__(self):
        return len(self.items)

    def __getitem__(self, index):
        """Return one normalized crop with its id, frame, box and class info."""
        frame, imgID, imgPath, xmin, ymin, xmax, ymax, classes = self.items[index]
        # Decode the frame only when it differs from the cached one.
        if self.imgPath != imgPath:
            self.imgPath = imgPath
            self.img = cv2.imread(os.path.join(self.root_dir, imgPath))
        crop = self.img[ymin:ymax, xmin:xmax, :].copy()
        crop = cv2.cvtColor(cv2.resize(crop, self.size), cv2.COLOR_BGR2RGB)
        crop = crop.astype(np.float32) / 255
        tensor = self.transform(torch.from_numpy(crop).permute(2, 0, 1))
        return {
            'img': tensor,
            'imgID': imgID,
            'frame': frame,
            'box': np.array([xmin, ymin, xmax, ymax]),
            'classes': classes}
'''
for test
'''
class TestImageDataset(Dataset):
    """Test-time dataset over raw product images plus their title text."""
    # NOTE(review): dir_list is a mutable default argument; it is only read,
    # never mutated, so this is harmless — but worth normalizing eventually.
    def __init__(self, root_dir='data', dir_list=['validation_dataset_part1', 'validation_dataset_part2'], transform=None, maxLen=64, PAD=0):
        self.root_dir = root_dir
        self.transform = transform
        self.mode = 'image'
        label_file = 'label.json'
        with open(os.path.join(root_dir, label_file), 'r') as f:
            self.labelDic = json.load(f)
        self.num_classes = len(self.labelDic['label2index'])
        dirs = [os.path.join(root_dir, d) for d in dir_list]
        text2num = Text2Num(maxLen=maxLen, PAD=PAD)
        self.vocab_size = text2num.vocab_size
        self.images = []
        self.ids = []
        self.frames = []
        self.textDic = {}
        # Walk <part>/image/<id>/<frame>.jpg; the matching title lives in
        # <part>/image_text/<id>.txt (first line only).
        for di in dirs:
            img_dir_list = os.listdir(os.path.join(di, 'image'))
            for img_dir in img_dir_list:
                img_names = os.listdir(os.path.join(di, 'image', img_dir))
                for img_name in img_names:
                    self.images.append(os.path.join(di, 'image', img_dir, img_name))
                    self.frames.append(img_name.split('.')[0])
                    self.ids.append(img_dir)
                    textPath = os.path.join(di, 'image_text', img_dir+'.txt')
                    with open(textPath, 'r') as f:
                        self.textDic[img_dir] = text2num(f.readline())
        # self.images = self.images[:100]
    def __len__(self):
        return len(self.images)
    def __getitem__(self, index):
        """Return {'img': HxWx3 float RGB in [0,1], 'text': LongTensor}."""
        imgPath = self.images[index]
        img = cv2.imread(imgPath)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img.astype(np.float32) / 255
        img_id = self.ids[index]
        text = self.textDic[img_id]
        text = torch.Tensor(text).long()
        sample = {'img': img, 'text': text}
        if self.transform:
            sample = self.transform(sample)
        return sample
    def getImageInfo(self, index):
        # Expose (path, id, frame) without decoding the image.
        imgPath = self.images[index]
        img_id = self.ids[index]
        frame = self.frames[index]
        return imgPath, img_id, frame
# class TestVideoDataset(Dataset):
# def __init__(self, root_dir, transform=None, n=20, maxLen=64, PAD=0):
# self.root_dir = root_dir
# self.transform = transform
# self.n = n
# self.mode = 'video'
# label_file = 'label.json'
# with open(label_file, 'r') as f:
# self.labelDic = json.load(f)
# self.num_classes = len(self.labelDic['label2index'])
# text2num = Text2Num(maxLen=maxLen, PAD=PAD)
# self.vocab_size = text2num.vocab_size
# # gap = 400 // n
# # self.frames_ids = [i*gap for i in range(n)]
# self.videos = []
# self.ids = []
# self.textDic = {}
# vdo_names = os.listdir(os.path.join(root_dir, 'video'))
# for vdo_name in vdo_names:
# self.videos.append(os.path.join(root_dir, 'video', vdo_name))
# self.ids.append(vdo_name.split('.')[0])
# textPath = os.path.join(root_dir, 'video_text', vdo_name.split('.')[0]+'.txt')
# with open(textPath, 'r') as f:
# self.textDic[vdo_name.split('.')[0]] = text2num(f.readline())
# # self.videos = self.videos[:100]
# def __len__(self):
# return len(self.videos)*self.n
# def __getitem__(self, index):
# v_index = index // self.n
# # f_index = self.frames_ids[index % self.n]
# vdo_name = self.videos[v_index]
# cap = cv2.VideoCapture(vdo_name)
# frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
# f_index = int((frames // self.n) * (index % self.n))
# cap.set(cv2.CAP_PROP_POS_FRAMES, f_index)
# ret, img = cap.read()
# cap.release()
# vdo_id = self.ids[v_index]
# text = self.textDic[vdo_id]
# text = torch.tensor(text).long()
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# img = img.astype(np.float32) / 255
# sample = {'img': img, 'text': text}
# if self.transform:
# sample = self.transform(sample)
# return sample
class TestVideoDataset(Dataset):
    """Test-time dataset sampling ``n`` evenly spaced frames from each video."""
    # NOTE(review): dir_list is a mutable default argument; it is only read,
    # never mutated, so this is harmless — but worth normalizing eventually.
    def __init__(self, root_dir, transform=None, n=20, dir_list=['validation_dataset_part1', 'validation_dataset_part2'], maxLen=64, PAD=0):
        self.root_dir = root_dir
        self.transform = transform
        self.n = n
        self.mode = 'video'
        label_file = 'label.json'
        with open(os.path.join(root_dir, label_file), 'r') as f:
            self.labelDic = json.load(f)
        self.num_classes = len(self.labelDic['label2index'])
        text2num = Text2Num(maxLen=maxLen, PAD=PAD)
        self.vocab_size = text2num.vocab_size
        dirs = [os.path.join(root_dir, d) for d in dir_list]
        # gap = 400 // n
        # self.frames_ids = [i*gap for i in range(n)]
        self.videos = []
        self.ids = []
        self.textDic = {}
        # Walk <part>/video/<id>.<ext>; the title lives in
        # <part>/video_text/<id>.txt (first line only).
        for di in dirs:
            vdo_names = os.listdir(os.path.join(di, 'video'))
            for vdo_name in vdo_names:
                self.videos.append(os.path.join(di, 'video', vdo_name))
                self.ids.append(vdo_name.split('.')[0])
                textPath = os.path.join(di, 'video_text', vdo_name.split('.')[0]+'.txt')
                with open(textPath, 'r') as f:
                    self.textDic[vdo_name.split('.')[0]] = text2num(f.readline())
        # self.videos = self.videos[:10]
    def __len__(self):
        # Each video contributes n sampled frames.
        return len(self.videos)*self.n
    def __getitem__(self, index):
        """Decode the (index % n)-th of n evenly spaced frames of one video."""
        v_index = index // self.n
        # f_index = self.frames_ids[index % self.n]
        vdo_name = self.videos[v_index]
        cap = cv2.VideoCapture(vdo_name)
        frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        # Evenly spaced frame index within this video.
        f_index = int((frames // self.n) * (index % self.n))
        cap.set(cv2.CAP_PROP_POS_FRAMES, f_index)
        ret, img = cap.read()
        cap.release()
        vdo_id = self.ids[v_index]
        text = self.textDic[vdo_id]
        text = torch.Tensor(text).long()
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img.astype(np.float32) / 255
        sample = {'img': img, 'text': text}
        if self.transform:
            sample = self.transform(sample)
        return sample
    def getImageInfo(self, index):
        # Recompute the sampled frame number without decoding pixels.
        v_index = index // self.n
        # frame = self.frames_ids[index % self.n]
        vdoPath = self.videos[v_index]
        cap = cv2.VideoCapture(vdoPath)
        frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        frame = int((frames // self.n) * (index % self.n))
        cap.release()
        vdo_id = self.ids[v_index]
        return vdoPath, vdo_id, str(frame)
class TestDataset(Dataset):
    """Crops detections for test-time feature extraction.

    ``items`` rows are (frame, imgID, imgPath, xmin, ymin, xmax, ymax,
    classes, text).  The dataset is virtually doubled: indices past
    ``len(items)`` return the horizontally flipped version of the same crop.
    """
    def __init__(self, root_dir, items, size, mode):
        assert mode in ['image', 'video']
        self.mode = mode
        self.size = size
        self.root_dir = root_dir
        self.items = items
        self.length = len(items)
        self.transform = transforms.Normalize(
            mean=[0.55574415, 0.51230767, 0.51123354],
            std=[0.21303795, 0.21604613, 0.21273348])

    def _read_frame(self, imgPath, frame):
        # Decode the source pixels: a still image, or one frame of a video.
        if self.mode == 'image':
            return cv2.imread(imgPath)
        cap = cv2.VideoCapture(imgPath)
        cap.set(cv2.CAP_PROP_POS_FRAMES, int(frame))
        ret, img = cap.read()
        cap.release()
        return img

    def __len__(self):
        return len(self.items) * 2

    def __getitem__(self, index):
        """Return one normalized crop plus its id, frame, box, class and text."""
        frame, imgID, imgPath, xmin, ymin, xmax, ymax, classes, text = self.items[index % self.length]
        img = self._read_frame(imgPath, frame)
        crop = img[ymin:ymax, xmin:xmax, :]
        # Second half of the doubled index space serves mirrored crops.
        if index >= self.length:
            crop = crop[:, ::-1, :].copy()
        crop = cv2.cvtColor(cv2.resize(crop, self.size), cv2.COLOR_BGR2RGB)
        crop = crop.astype(np.float32) / 255
        tensor = self.transform(torch.from_numpy(crop).permute(2, 0, 1))
        return {
            'img': tensor,
            'imgID': imgID,
            'frame': frame,
            'box': np.array([xmin, ymin, xmax, ymax]),
            'classes': classes,
            'text': text}
if __name__ == "__main__":
from config import get_args_arcface
opt = get_args_arcface()
dataset = ArcfaceDataset()
# print(len(dataset))
print(dataset[0])
# from utils import collater_HardTriplet
# from torch.utils.data import DataLoader
# training_params = {"batch_size": 20,
# "shuffle": True,
# "drop_last": True,
# "collate_fn": collater_HardTriplet,
# "num_workers": 4}
# from PIL import Image
# dataset = ArcfaceDataset()
# print(dataset[0])
# loader = DataLoader(dataset, **training_params)
# for data in loader:
# print(data['img'].size())
# break
# print(len(dataset))
# for d in tqdm(dataset):
# pass
# img = dataset[100]['img']
# mi = min(img.view(-1))
# ma = max(img.view(-1))
# img = (img-mi)/(ma-mi)
# img = img*256
# img = img.permute(1, 2, 0)
# img = img.numpy()
# img = Image.fromarray(img.astype(np.uint8))
# img.save('aaa.jpg')
# img = dataset[0]['vdo']
# mi = min(img.view(-1))
# ma = max(img.view(-1))
# img = (img-mi)/(ma-mi)
# img = img*256
# img = img.permute(1, 2, 0)
# img = img.numpy()
# img = Image.fromarray(img.astype(np.uint8))
# img.save('bbb.jpg')
# mean = np.zeros(3)
# std = np.zeros(3)
# for d in tqdm(dataset):
# img = d['img']
# for i in range(3):
# mean[i] += img[:, :, i].mean()
# std[i] += img[:, :, i].std()
# mean = mean / len(dataset)
# std = std / len(dataset)
# print(mean, std)
| 36.025547 | 141 | 0.532975 | 44,896 | 0.909655 | 0 | 0 | 0 | 0 | 0 | 0 | 10,120 | 0.205045 |
0f86f6bca80816832afd66dee019b511e4eb11bc | 2,325 | py | Python | WebMirror/util/StatusUpdater/Updater.py | awesome-archive/ReadableWebProxy | 360104694a21bc14c7756f29205c95823387e30b | [
"BSD-3-Clause"
] | 193 | 2016-08-02T22:04:35.000Z | 2022-03-09T20:45:41.000Z | WebMirror/util/StatusUpdater/Updater.py | awesome-archive/ReadableWebProxy | 360104694a21bc14c7756f29205c95823387e30b | [
"BSD-3-Clause"
] | 533 | 2016-08-23T20:48:23.000Z | 2022-03-28T15:55:13.000Z | WebMirror/util/StatusUpdater/Updater.py | awesome-archive/ReadableWebProxy | 360104694a21bc14c7756f29205c95823387e30b | [
"BSD-3-Clause"
] | 19 | 2015-08-13T18:01:08.000Z | 2021-07-12T17:13:09.000Z |
if __name__ == "__main__":
    import logSetup
    logSetup.initLogging()

    import pickle

    from common import database
    import config
    import common.LogBase
    import WebMirror.rules
    from WebMirror.OutputFilters.util.MessageConstructors import pack_message
    import WebMirror.TimedTriggers.TriggerBase
    import common.get_rpyc
    # import WebMirror.OutputFilters.AmqpInterface

    class MetaUpdater(WebMirror.TimedTriggers.TriggerBase.TriggerBaseClass):
        """Pushes feed-count and scheduler-timing status messages to the
        FeedUpdater RPC interface.

        NOTE(review): instances are driven via ``_go()``, which is assumed to
        be provided by TriggerBaseClass and to wrap ``go()`` — confirm.
        """

        pluginName = "Meta Updater"
        loggerPath = 'MetaUpdater'

        def __init__(self):
            super().__init__()
            # RPC handle used to hand the status messages to the feed worker.
            self.rpc_interface = common.get_rpyc.RemoteJobInterface("FeedUpdater")

        def get_feed_count_message(self):
            """Build a packed message carrying the number of distinct feed URLs."""
            # Union across rule-sets so duplicate feed URLs count once.
            feeds = set()
            for ruleset in WebMirror.rules.load_rules():
                feeds |= set(ruleset['feedurls'])
            data = {
                "feed-count": len(feeds)
            }
            return pack_message("system-feed-counts", data)

        def get_times(self):
            """Build a packed message listing each scheduler job's next run time."""
            # APScheduler stores each job as a pickled blob in job_state;
            # unpickle to recover the job id and its next_run_time.
            with common.database.session_context() as conn:
                aps = conn.execute("SELECT job_state FROM apscheduler_jobs;")
                update_times = []
                for blob, in aps:
                    job_dict = pickle.loads(blob)
                    update_times.append((
                        job_dict['id'],
                        job_dict['next_run_time'].isoformat()
                    ))
            data = {
                "update-times": update_times,
            }
            database.delete_db_session()
            return pack_message("system-update-times", data)

        def go(self):
            feeds = self.get_feed_count_message()
            times = self.get_times()
            self.rpc_interface.put_feed_job(feeds)
            self.rpc_interface.put_feed_job(times)

    def do_meta_update():
        """Run the meta update three times with fresh updater instances.

        The original repeated the instantiate-and-run pair verbatim three
        times; the loop keeps that behaviour without the duplication.
        """
        for _ in range(3):
            updator = MetaUpdater()
            updator._go()

    if __name__ == '__main__':
        do_meta_update()
| 22.142857 | 92 | 0.712688 | 1,765 | 0.75914 | 0 | 0 | 0 | 0 | 0 | 0 | 853 | 0.366882 |
0f87054df9c74ee2c34ad2435d0071591357f748 | 93 | py | Python | dynamic_databases/__init__.py | sligodave/dynamic_databases | 8bb85d59ef5ad6b0176b192a82ec457fb00601c5 | [
"MIT"
] | 4 | 2017-05-14T23:45:42.000Z | 2020-12-21T06:33:15.000Z | dynamic_databases/__init__.py | sligodave/dynamic_databases | 8bb85d59ef5ad6b0176b192a82ec457fb00601c5 | [
"MIT"
] | 2 | 2017-05-14T22:47:40.000Z | 2018-04-19T11:01:29.000Z | dynamic_databases/__init__.py | sligodave/dynamic_databases | 8bb85d59ef5ad6b0176b192a82ec457fb00601c5 | [
"MIT"
] | 3 | 2017-05-26T22:42:25.000Z | 2021-10-21T01:14:23.000Z |
# Package version string, exposed for packaging/introspection tools.
__version__ = '0.1.7'
# Pre-Django-3.2 hook naming the package's default AppConfig.
default_app_config = 'dynamic_databases.apps.DynamicDatabasesConfig'
| 18.6 | 68 | 0.806452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.580645 |
0f884a672a35bf47e7942c640cb7243b3a973258 | 2,886 | py | Python | Practica3PR3/PYTHON/PartitionProblemBruteForceRecursive.py | Prashant-JT/PartitionProblem | 3922bba85dac40e3780cf90cf0b687d07458a910 | [
"MIT"
] | null | null | null | Practica3PR3/PYTHON/PartitionProblemBruteForceRecursive.py | Prashant-JT/PartitionProblem | 3922bba85dac40e3780cf90cf0b687d07458a910 | [
"MIT"
] | null | null | null | Practica3PR3/PYTHON/PartitionProblemBruteForceRecursive.py | Prashant-JT/PartitionProblem | 3922bba85dac40e3780cf90cf0b687d07458a910 | [
"MIT"
] | null | null | null | import time
import argparse
import sys
class Auxiliar:
    """Shared mutable state for the brute-force partition search."""

    # Input integers read from the file.
    vector = []
    # Set True once some subset sums to the target.
    found = False
    # Target sum: half of sum(vector).
    s = 0
    # Scratch inclusion mask (mirrored by a module-level `combination`).
    combination = []
    # Number of elements in `vector`.
    size = 0
def readFile(v):
    """Append the integers listed in the file ``args.folder`` to ``v``.

    Expected file format: first line is the element count N, followed by
    one integer per line.  Exits the program with a help hint when the
    path is missing or the file is malformed.
    """
    try:
        file = args.folder
    except Exception as e:
        print(e)
        sys.exit("Directory Not Found, for help use the -h option")
    try:
        # `with` guarantees the handle is closed even on a parse error.
        # Fix: the original closed the file in a `finally` block, which
        # raised NameError when open() itself failed (f never bound).
        with open(file, 'r') as f:
            N = int(f.readline())
            for j in range(N):
                element = f.readline()
                v.append(int(element))
    except Exception as e:
        print(e)
        sys.exit("Vector is not named in the correct way, for help use the -h option")
def printInput(v):
    """Echo the parsed input vector to stdout."""
    print("\nVector is: ", v)
def printTime(initial, final, vector):
    """Report the elapsed wall-clock time for a run over `vector`."""
    runtime = final - initial
    n = str(len(vector))
    print("For", n, "elements -> Taken time: ", str(runtime) + " s.")
def printOutput(v, found):
    """Print whether `v` admits an equal-sum partition (and the target sum)."""
    if not found:
        print("\nVector can not be partitioned")
        return
    print("\nVector can be partitioned")
    print("Results: ", int((sum(v)) / 2))
def partitionProblem(num):
    """Set up (but not run) a brute-force search over `num`; returns None.

    NOTE(review): only local state is built here and then discarded — this
    looks like dead/unfinished code; the actual search lives in
    can_partition_Recursive().
    """
    size = len(num)
    found = False
    combination = [False] * size
def searchedCombination():
    """Mark Auxiliar.found when the currently-included elements hit the target.

    Reads the module-level `combination` mask and the shared Auxiliar state;
    sets Auxiliar.found on success and never resets it.
    """
    total = 0
    for idx in range(Auxiliar.size):
        if combination[idx]:
            total += Auxiliar.vector[idx]
    if total == Auxiliar.s:
        Auxiliar.found = True
def canPartition(num):
    """Return True iff sum(num) is even — a necessary condition for an
    equal-sum two-way partition."""
    return sum(num) % 2 == 0
def can_partition_Recursive(pos):
    """Enumerate inclusion masks for elements pos..1 and test sums at the leaf.

    Element 0 is never toggled, which is safe here: for a half-sum target,
    the complement of any valid subset also sums to the target and excludes
    element 0, so restricting the search loses no solutions.
    Returns the (sticky) Auxiliar.found flag.
    """
    if pos == 0:
        searchedCombination()
    else:
        combination[pos] = True
        can_partition_Recursive(pos - 1)
        combination[pos] = False
        can_partition_Recursive(pos - 1)
    return Auxiliar.found
if __name__ == '__main__':
    # Command line library implementation
    parser = argparse.ArgumentParser(description="Partition problem is to determine whether a given set can be partitioned into two subsets such that the sum of elements in both subsets is same. For further information, use the option '-h'.")
    parser.add_argument('-di', '--input', action='store_true', help='Display input data')
    parser.add_argument('-do', '--output', action='store_true', help='Display output data')
    parser.add_argument('-dt', '--time', action='store_true', help='Display taken time in seconds')
    parser.add_argument('-f', '--folder', metavar='', type=str, required=True, help='Read elements from a file')
    args = parser.parse_args()

    readFile(Auxiliar.vector)
    Auxiliar.s = int(sum(Auxiliar.vector) / 2)
    Auxiliar.size = len(Auxiliar.vector)
    combination = [False for i in range(Auxiliar.size)]

    # Fix: `found`, `initial` and `final` were only bound on the
    # partitionable path, so the reporting options below raised NameError
    # for odd-sum inputs.  Bind them up front.
    found = False
    initial = final = 0.0
    if canPartition(Auxiliar.vector):
        initial = time.time()
        found = can_partition_Recursive(Auxiliar.size - 1)
        final = time.time()
    else:
        print("Vector can not be partitioned")

    # Control of options
    if args.input: printInput(Auxiliar.vector)
    if args.output: printOutput(Auxiliar.vector, found)
    if args.time: printTime(initial, final, Auxiliar.vector)
0f89c68a648bbf2920a3ee4f9f43df0ba5e7f56b | 800 | py | Python | sasquatch/error/exec.py | tmacro/s4 | 58729b61f56ff928c8fe928fb02816e5025ad588 | [
"BSD-3-Clause"
] | 6 | 2018-12-04T01:13:10.000Z | 2019-06-18T23:25:59.000Z | sasquatch/error/exec.py | tmacro/s4 | 58729b61f56ff928c8fe928fb02816e5025ad588 | [
"BSD-3-Clause"
] | 15 | 2018-12-01T00:13:41.000Z | 2021-06-01T23:05:08.000Z | sasquatch/error/exec.py | tmacro/sasquatch | 58729b61f56ff928c8fe928fb02816e5025ad588 | [
"BSD-3-Clause"
] | null | null | null | from .base import SQError, BaseErrorHelper
from .context import ContextAwareError
class ExecutionError(SQError):
    """Raised when an error is encountered during script execution."""
class ExecErrorHelper(BaseErrorHelper):
    """Error helper whose default error type is ExecutionError."""

    _default = ExecutionError

    @staticmethod
    def throw(cls=ExecutionError, **kwargs):
        # When a context object is supplied, merge its fields into the
        # keyword arguments before delegating to the base helper.
        # NOTE(review): when no ctx is given this returns None without
        # delegating at all — confirm that is intentional.
        if 'ctx' in kwargs and kwargs['ctx'] is not None:
            ctx = kwargs.get('ctx')
            return BaseErrorHelper.throw(cls, **ctx._asdict(), **kwargs)
class MissingKeywordError(ExecutionError):
    """Raised when a required keyword argument can not be collected."""

    # Interpolated by the error machinery with the offending keyword name.
    _msg = 'Unable to collect keyword {keyword}'
class InvalidFilePathError(ContextAwareError, ExecutionError):
    """Raised when a file path passed as input does not exist or is invalid."""

    # Interpolated by the error machinery with the offending path.
    _msg = 'Path {filepath} is not a valid location'
| 34.782609 | 75 | 0.76125 | 709 | 0.88625 | 0 | 0 | 200 | 0.25 | 0 | 0 | 298 | 0.3725 |
0f89d4ca8644c99769b346ed9af4e2842f02189a | 19,484 | py | Python | Pr_LandMarkDetection_pix2pixArc+HeatMap.py | mohaEs/Train-Predict-Landmarks-by-Autoencoder | 5bc1bc4b0679ae465335f6ca76595b0accba57af | [
"MIT"
] | null | null | null | Pr_LandMarkDetection_pix2pixArc+HeatMap.py | mohaEs/Train-Predict-Landmarks-by-Autoencoder | 5bc1bc4b0679ae465335f6ca76595b0accba57af | [
"MIT"
] | null | null | null | Pr_LandMarkDetection_pix2pixArc+HeatMap.py | mohaEs/Train-Predict-Landmarks-by-Autoencoder | 5bc1bc4b0679ae465335f6ca76595b0accba57af | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 13:44:34 2018
@author: Moha-Thinkpad
"""
from tensorflow.keras import optimizers
from tensorflow.keras.models import Model
import datetime
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import tensorflow.keras
import argparse
import tensorflow as tf
from tensorflow.keras import backend as K
#cfg = K.tf.ConfigProto()
#cfg.gpu_options.allow_growth = True
#K.set_session(K.tf.Session(config=cfg))
####################################
########################################################################
####################################
def custom_loss_seg(y_true, y_pred):
    """Heatmap regression loss: plain mean absolute error.

    (A mean-squared-error variant existed here previously and was dropped.)
    """
    mae = tensorflow.keras.losses.mean_absolute_error(y_true, y_pred)
    return mae
from tensorflow.keras.layers import Lambda
sum_dim_channel = Lambda(lambda xin: K.sum(xin, axis=3))
def lrelu(x):  # from pix2pix code
    """Leaky ReLU with negative slope 0.2, expressed via abs().

    0.5*(1+a)*x + 0.5*(1-a)*|x|  ==  x for x > 0, a*x otherwise.
    """
    slope = 0.2
    # tf.identity keeps the op from appearing to have two graph inputs.
    x = tf.identity(x)
    return (0.5 * (1 + slope)) * x + (0.5 * (1 - slope)) * tf.abs(x)
def lrelu_output_shape(input_shape):
    """Element-wise lrelu leaves the shape unchanged; return it as a tuple."""
    return tuple(input_shape)
layer_lrelu=Lambda(lrelu, output_shape=lrelu_output_shape)
def PreProcess(InputImages):
    """Scale each sample of an image stack to the 0-1 float range.

    Accepts a 4-D (N, H, W, C) or 3-D (N, H, W) array; each sample is
    divided by its own maximum.  Returns the converted (float) array.
    """
    # Fix: np.float was removed in NumPy 1.24; use the builtin float.
    InputImages = InputImages.astype(float)
    for i in range(InputImages.shape[0]):
        try:
            InputImages[i, :, :, :] = InputImages[i, :, :, :] / np.max(InputImages[i, :, :, :])
            # output[i,:,:,:] = (output[i,:,:,:]* 2)-1
        except IndexError:
            # 3-D input (no channel axis): normalise the 2-D slice instead.
            # The original used a bare `except:`; IndexError is the only
            # error the fallback is meant to handle.
            InputImages[i, :, :] = InputImages[i, :, :] / np.max(InputImages[i, :, :])
            # output[i,:,:] = (output[i,:,:]* 2) -1
    return InputImages
####################################
########################################################################
####################################
# CLI definition.  NOTE(review): several help strings are truncated
# ("where to", "where to p") — left untouched here as they are runtime text.
parser = argparse.ArgumentParser()
parser.add_argument("--mode", choices=["train", "test", "export"])
parser.add_argument("--input_dir", help="path to folder containing images")
parser.add_argument("--target_dir", help="where to")
parser.add_argument("--checkpoint", help="where to ")
parser.add_argument("--output_dir", help="where to p")
parser.add_argument("--landmarks", help=" -,-,-")
parser.add_argument("--lr", help="adam learning rate")
parser.add_argument("--ngf", type=int, default=64, help="number of generator filters in first conv layer")
# export options
a = parser.parse_args()
# Hard-coded hyper-parameters, set after parsing (they override/extend the
# CLI namespace; --ngf in particular is overwritten below).
a.batch_size=40
a.max_epochs_seg=1
a.lr_seg=0.0001
a.beta1=0.5
a.ngf=64
#a.seed=1
# a.mode="train"
# a.input_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/temp_train_png/'
# a.target_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/temp_train_lm/'
# a.checkpoint='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/Models_lm/'
# a.output_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/Models_lm/'
# a.landmarks='43,43,43'
#a.mode="test"
#a.batch_size=1
#a.input_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/temp_test_png/'
#a.target_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/temp_test_lm/'
#a.checkpoint='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/Models_lm/'
#a.output_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/Models_lm/'
#a.landmarks='43,43,43'
######## ------------ Config
#Ind_impo_landmarks_matlab=np.array([5, 6, 15,16,17,18,20,21,22,23,24,25,26,27,28,29,30,32,33,34,35,36,37,38,41])
#Ind_impo_landmarks_python=Ind_impo_landmarks_matlab-1
#Num_landmarks=25
# 33,23,16 - 29,15, - 30,20,26 - 5,18,21 - 44,17,41 - 28,22,34, - 27,43,37
# Parse the --landmarks option ("i,j,k") into three landmark ids.
StrLandmarks=a.landmarks
StrLandmarks=StrLandmarks.split(",")
# Placeholder array; the three slots are filled immediately below.
Ind_impo_landmarks_matlab=np.array([0,0,0])
Ind_impo_landmarks_matlab[0]=int(StrLandmarks[0])
Ind_impo_landmarks_matlab[1]=int(StrLandmarks[1])
Ind_impo_landmarks_matlab[2]=int(StrLandmarks[2])
# Convert 1-based (MATLAB-style) ids to 0-based Python indices.
Ind_impo_landmarks_python=Ind_impo_landmarks_matlab-1
Num_landmarks=3
print('============================')
print('============================')
print(datetime.datetime.now())
print('============================')
print('============================')
#########----------------------DATA
from os import listdir
ImageFileNames=[]
FileNames=listdir(a.input_dir)
for names in FileNames:
if names.endswith(".png"):
ImageFileNames.append(names)
#LMFileNames=listdir(a.target_dir)
from skimage import io as ioSK
from numpy import genfromtxt
Images=np.zeros((len(ImageFileNames),256,256,3),dtype=np.uint8)
#Images_seg=np.zeros((len(ImageFileNames),256,256),dtype=np.uint8)
LandmarkLocations=np.zeros((len(ImageFileNames),2,44),dtype=np.uint8)
for i in range(len(ImageFileNames)):
Image = ioSK.imread(a.input_dir+'/'+ImageFileNames[i])
Images[i,:,:,:]=Image
FileName=ImageFileNames[i]
FileName=FileName[:-4]
# Image = ioSK.imread(a.target_dir_seg+'/'+ImageFileNames[i])
# Images_seg[i,:,:]=Image
Landmarks0 = genfromtxt(a.target_dir+'/'+FileName+'.csv', delimiter=',')
Landmarks0 = Landmarks0.astype(int)
LandmarkLocations[i,0,:]=Landmarks0[:,0]
LandmarkLocations[i,1,:]=Landmarks0[:,1]
#Landmarks = np.flip(Landmarks0, axis=1)
#plt.figure()
#plt.imshow(Images[100,:,:,:])
#plt.scatter(LandmarkLocations[100,0,:],LandmarkLocations[100,1,:])
X_train = PreProcess(Images)
del Images
import gc
gc.collect()
LandmarkLocations_row=LandmarkLocations[:,0,:]
LandmarkLocations_col=LandmarkLocations[:,1,:]
LandmarkLocations_row=LandmarkLocations_row[:,Ind_impo_landmarks_python]
LandmarkLocations_col=LandmarkLocations_col[:,Ind_impo_landmarks_python]
from scipy.ndimage import gaussian_filter
Images_HeatMaps=np.zeros((X_train.shape[0],X_train.shape[1],X_train.shape[2],Num_landmarks),dtype=np.float)
Image_heatmap=np.zeros((256,256),dtype=np.float)
for i in range(X_train.shape[0]):
for k in range(Num_landmarks):
# h=np.argwhere(Images_seg[i,:,:]==2*Ind_impo_landmarks_matlab[k])
lms_1=LandmarkLocations_row[i,k]
lms_2=LandmarkLocations_col[i,k]
Image_heatmap[:,:]=0
Image_heatmap[lms_2,lms_1]=1
Image_heatmap=gaussian_filter(Image_heatmap, sigma=10)
Image_heatmap=(Image_heatmap/np.max(Image_heatmap))
Images_HeatMaps[i,:,:,k]=Image_heatmap
gc.collect()
#plt.figure()
#plt.imshow(np.squeeze(Images_HeatMaps[2,:,:,5]), cmap='gray')
#plt.imshow(Images[2,:,:,:],cmap='jet', alpha=0.5)
#plt.show()
Y_train_heatmap = PreProcess(Images_HeatMaps)
del Images_HeatMaps
gc.collect()
# del Images_seg
import os
if not os.path.exists(a.checkpoint):
os.makedirs(a.checkpoint)
if not os.path.exists(a.output_dir):
os.makedirs(a.output_dir)
if a.mode=='test':
checkpoint_model_file=a.checkpoint+'LandMarkModel'
from tensorflow.keras.models import load_model
print('loading model ...')
model_final=load_model(checkpoint_model_file+'_weights.h5', custom_objects={
'custom_loss_seg': custom_loss_seg,
'layer_lrelu':layer_lrelu,
'lrelu':lrelu,
'lrelu_output_shape':lrelu_output_shape,
'tf': tf})
print('model is loaded ')
Images=np.zeros((len(ImageFileNames),256,256,3),dtype=np.float)
newLandmarks=np.zeros((Num_landmarks,2),dtype=np.float16)
Y_test_heatmap=Y_train_heatmap
X_test=X_train
# fig = plt.figure()
# plt.imshow(X_train[0,:,:,:],cmap='gray', alpha=0.95)
# plt.imshow(Y_train_heatmap[0,:,:,:],cmap='jet', alpha=0.5)
# plt.grid(True)
pred_example_heatmaps=model_final.predict(X_test[:,:,:,:])
print('writing results ...')
for i in range(len(ImageFileNames)):
# print(i)
FileName=ImageFileNames[i]
FileName=FileName[:-4]
lms_pred_all=np.zeros((Num_landmarks,2),dtype=np.int)
lms_True_all=np.zeros((Num_landmarks,2),dtype=np.int)
for k in range(Num_landmarks):
# plt.figure()
# plt.imshow(example_segmentation[0,:,:,i], cmap='gray')
# plt.imshow(Y_train_heatmap[0,:,:,:],cmap='jet', alpha=0.5)
# plt.show()
True_chan=np.squeeze(Y_test_heatmap[i,:,:,k])
lms_True=np.unravel_index(np.argmax(True_chan, axis=None), True_chan.shape)
lms_True_all[k,:]=lms_True
Pred_chan=np.squeeze(pred_example_heatmaps[i,:,:,k])
lms_pred=np.unravel_index(np.argmax(Pred_chan, axis=None), Pred_chan.shape)
lms_pred_all[k,:]=lms_pred
# fig, ax = plt.subplots(1, 2)
# ax[0].imshow(Y_test_heatmap[i,:,:,i])
# ax[1].imshow(pred_example_heatmaps[i,:,:,i])
# plt.show()
np.savetxt(a.output_dir+FileName+'_pred.csv',
lms_pred_all , delimiter=",", fmt='%i')
np.savetxt(a.output_dir+FileName+'_true.csv',
lms_True_all , delimiter=",", fmt='%i')
fig = plt.figure()
plt.imshow(X_test[i,:,:,:],cmap='jet', alpha=0.9)
plt.scatter(lms_True_all[:,1],lms_True_all[:,0], marker='+', color='red')
plt.scatter(lms_pred_all[:,1],lms_pred_all[:,0], marker='x', color='blue')
# plt.grid(True)
fig.savefig(a.output_dir+FileName+'.png')
plt.close(fig)
if a.mode=='train':
# plt.figure()
# plt.imshow(X_train[90,:,:,:])
# plt.figure()
# plt.imshow(Y_train_heatmap[90,:,:,4])
try: # continue training
checkpoint_model_file=a.checkpoint+'LandMarkModel'
from tensorflow.keras.models import load_model
print('======== loading model ...')
model_4_heatmap=load_model(checkpoint_model_file+'_weights.h5', custom_objects={
'custom_loss_seg': custom_loss_seg,
'layer_lrelu':layer_lrelu,
'lrelu':lrelu,
'lrelu_output_shape':lrelu_output_shape,
'tf': tf})
print('======== continue training ...')
except: # new training
print('======== new training ...')
checkpoint_model_file=a.output_dir+'LandMarkModel'
########### network
kernelSize=(4,4)
InputLayer=tensorflow.keras.layers.Input(shape=(256,256,3))
e_1=tensorflow.keras.layers.Conv2D(a.ngf, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(InputLayer)
e_2=layer_lrelu(e_1)
e_2=tensorflow.keras.layers.Conv2D(a.ngf * 2, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_2)
e_2=tensorflow.keras.layers.BatchNormalization()(e_2)
e_3=layer_lrelu(e_2)
e_3=tensorflow.keras.layers.Conv2D(a.ngf * 4, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_3)
e_3=tensorflow.keras.layers.BatchNormalization()(e_3)
e_4=layer_lrelu(e_3)
e_4=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_4)
e_4=tensorflow.keras.layers.BatchNormalization()(e_4)
e_5=layer_lrelu(e_4)
e_5=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_5)
e_5=tensorflow.keras.layers.BatchNormalization()(e_5)
e_6=layer_lrelu(e_5)
e_6=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_6)
e_6=tensorflow.keras.layers.BatchNormalization()(e_6)
e_7=layer_lrelu(e_6)
e_7=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_7)
e_7=tensorflow.keras.layers.BatchNormalization()(e_7)
e_8=layer_lrelu(e_7)
e_8=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_8)
e_8=tensorflow.keras.layers.BatchNormalization()(e_8)
d_8=e_8
d_8=tensorflow.keras.layers.Activation('relu')(d_8)
d_8=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_8)
d_8=tensorflow.keras.layers.BatchNormalization()(d_8)
d_8=tensorflow.keras.layers.Dropout(0.5)(d_8)
d_7=tensorflow.keras.layers.concatenate(inputs=[d_8, e_7], axis=3)
d_7=tensorflow.keras.layers.Activation('relu')(d_7)
d_7=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_7)
d_7=tensorflow.keras.layers.BatchNormalization()(d_7)
d_7=tensorflow.keras.layers.Dropout(0.5)(d_7)
d_6=tensorflow.keras.layers.concatenate(inputs=[d_7, e_6], axis=3)
d_6=tensorflow.keras.layers.Activation('relu')(d_6)
d_6=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_6)
d_6=tensorflow.keras.layers.BatchNormalization()(d_6)
d_6=tensorflow.keras.layers.Dropout(0.5) (d_6)
d_5=tensorflow.keras.layers.concatenate(inputs=[d_6, e_5], axis=3)
d_5=tensorflow.keras.layers.Activation('relu')(d_5)
d_5=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_5)
d_5=tensorflow.keras.layers.BatchNormalization()(d_5)
d_5=tensorflow.keras.layers.Dropout(0.5) (d_5)
d_4=tensorflow.keras.layers.concatenate(inputs=[d_5, e_4], axis=3)
d_4=tensorflow.keras.layers.Activation('relu')(d_4)
d_4=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 4, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_4)
d_4=tensorflow.keras.layers.BatchNormalization()(d_4)
d_3=tensorflow.keras.layers.concatenate(inputs=[d_4, e_3], axis=3)
d_3=tensorflow.keras.layers.Activation('relu')(d_3)
d_3=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 2, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_3)
d_3=tensorflow.keras.layers.BatchNormalization()(d_3)
d_2=tensorflow.keras.layers.concatenate(inputs=[d_3, e_2], axis=3)
d_2=tensorflow.keras.layers.Activation('relu')(d_2)
# d_2=tensorflow.keras.layers.Conv2DTranspose(a.ngf, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_2)
d_2=tensorflow.keras.layers.Conv2DTranspose(a.ngf, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_2)
d_2=tensorflow.keras.layers.BatchNormalization()(d_2)
d_1=tensorflow.keras.layers.concatenate(inputs=[d_2, e_1], axis=3)
d_1=tensorflow.keras.layers.Activation('relu')(d_1)
d_1=tensorflow.keras.layers.Conv2DTranspose(Num_landmarks, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_1)
HeatMaps=tensorflow.keras.layers.Activation('sigmoid', name='last_layer_of_decoder')(d_1)
model_4_heatmap=Model(inputs=InputLayer, outputs=HeatMaps)
###########Train
print('trainable_count =',int(np.sum([K.count_params(p) for p in set(model_4_heatmap.trainable_weights)])))
print('non_trainable_count =', int(np.sum([K.count_params(p) for p in set(model_4_heatmap.non_trainable_weights)])))
# fix random seed for reproducibility
seed = 1
import random
tf.set_random_seed(seed)
np.random.seed(seed)
random.seed(seed)
#### compile and train the model
UsedOptimizer=optimizers.Adam(lr=a.lr_seg, beta_1=a.beta1)
model_4_heatmap.compile(loss=custom_loss_seg, optimizer=UsedOptimizer)
History=model_4_heatmap.fit(X_train, Y_train_heatmap,
batch_size=a.batch_size, shuffle=True, validation_split=0.05,
epochs=a.max_epochs_seg,
verbose=1)
plt.plot(History.history['loss'])
plt.plot(History.history['val_loss'])
plt.grid()
plt.savefig(a.output_dir+'History_'+str(a.lr)+'.png')
plt.close()
import pickle
Dict={'History_loss_train':History.history['loss'],
'History_loss_val':History.history['val_loss'],}
pickle.dump( Dict, open(a.output_dir+'History_'+str(a.lr)+'.pkl', "wb" ) )
# show an exemplary result
Num_example_train=0
pred_example_heatmaps=model_4_heatmap.predict(X_train[Num_example_train:Num_example_train+1,:,:,:])
lms_pred_all=np.zeros((Num_landmarks,2),dtype=np.int)
lms_True_all=np.zeros((Num_landmarks,2),dtype=np.int)
for i in range(Num_landmarks):
# plt.figure()
# plt.imshow(example_segmentation[0,:,:,i], cmap='gray')
# plt.imshow(X_train[0,:,:,:],cmap='jet', alpha=0.5)
# plt.show()
Pred_chan=np.squeeze(pred_example_heatmaps[0,:,:,i])
lms_pred=np.unravel_index(np.argmax(Pred_chan, axis=None), Pred_chan.shape)
lms_pred_all[i,:]=lms_pred
True_chan=np.squeeze(Y_train_heatmap[Num_example_train,:,:,i])
lms_True=np.unravel_index(np.argmax(True_chan, axis=None), True_chan.shape)
lms_True_all[i,:]=lms_True
# fig, ax = plt.subplots(1, 2)
# ax[0].imshow(Y_train_heatmap[Num_example_train,:,:,i])
# ax[1].imshow(pred_example_heatmaps[0,:,:,i])
# plt.show()
fig = plt.figure()
plt.imshow(X_train[Num_example_train,:,:,:],cmap='jet', alpha=0.9)
plt.scatter(lms_True_all[:,1],lms_True_all[:,0], marker='+', color='red')
plt.scatter(lms_pred_all[:,1],lms_pred_all[:,0], marker='x', color='blue')
plt.grid(True)
# fig.savefig('scatter-result'+str(i)+'_pred.png')
plt.close(fig)
print('===========training done=================')
print('============================')
print(datetime.datetime.now())
print('============================')
print('============================')
print('Saving model ...')
model_4_heatmap.save(checkpoint_model_file+'_weights.h5')
| 37.83301 | 158 | 0.615325 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,424 | 0.278382 |
0f8a5d70a416813aa7903ebf63db4aff07a7359b | 5,479 | py | Python | pydsge/tests/export_getting_started_to_pkl.py | florabudianto/pydsge | 51ea4c206e481866f92398cb573852e48fea7335 | [
"MIT"
] | 2 | 2022-02-15T10:39:24.000Z | 2022-02-15T10:40:26.000Z | pydsge/tests/export_getting_started_to_pkl.py | florabudianto/pydsge | 51ea4c206e481866f92398cb573852e48fea7335 | [
"MIT"
] | 4 | 2021-12-31T16:27:48.000Z | 2022-01-27T17:16:19.000Z | pydsge/tests/export_getting_started_to_pkl.py | pcschreiber1/pydsge_OSE_Project_Fork | 4222dbe187e47958d2f5b732615c9ba97547f67a | [
"MIT"
] | 1 | 2022-02-15T10:40:32.000Z | 2022-02-15T10:40:32.000Z | """This file contains functions for converting and storing jupyter notebooks."""
import nbformat
import pickle
import numpy as np
import os
from nbconvert import PythonExporter
from pathlib import Path # for windows-Unix compatibility
def nbconvert_python(path):
    """Use nbconvert to convert jupyter notebook to python code.
    Return the string of python code. You can then execute it with `exec()`.
    Args:
        path (str): Path of jupyter notebook
    Returns:
        str: The string of python code converted from notebook
    """
    with open(path) as f:
        nb = nbformat.read(f, as_version=4)
    # PythonExporter returns (source, resources); only the source is needed.
    body, _ = PythonExporter().from_notebook_node(nb)
    return body
def is_picklable(obj):
    """Return True when `obj` survives pickle.dumps without raising.

    Args:
        obj : The object to test

    Returns:
        bool: Whether the object can be pickled
    """
    try:
        pickle.dumps(obj)
        return True
    except Exception:
        return False
def filter_pickable(global_vars):
    """Keep only the pickle-able entries of a name -> object mapping.

    Args:
        global_vars (Mapping): names of variables mapped to their values

    Returns:
        dict: subset of entries whose values can be pickled
    """
    pickable = {}
    for name in global_vars:
        value = global_vars[name]
        if not is_picklable(value):
            continue
        try:
            pickable[name] = value
        except TypeError:
            pass
    return pickable
def notebook_to_pickable_dict(path):
    """Execute a jupyter notebook and collect the variables it defines.

    Converts the notebook to python code, executes it in a fresh namespace,
    and returns the pickle-able public variables as a dictionary.

    Parameters
    ----------
    path : str
        Path of jupyter notebook
    Returns
    -------
    bk : dict
        Dictionary mapping variable names to the values defined in the notebook.
    """
    # Step 1: Convert notebook to script; neutralise IPython magic calls so
    # plain exec() can run the result.
    code = nbconvert_python(path)
    code = code.replace("get_ipython()", "# get_ipython()")
    # Step 2: Execute script and save variables in dictionary
    d = {}
    exec(code, d)
    d.pop("__builtins__")
    # Step 3: Filter for pickable variables
    bk = filter_pickable(d)
    return bk
def save_to_pkl(path, obj):
    """Pickle `obj` into the file at `path`.

    Args:
        path (str): Path to save pickle file
        obj : Object to be saved
    """
    with open(path, "wb") as fh:
        pickle.dump(obj, fh)
def basic_type_or_list(obj):
    """True when `obj` (or its elements) coerce to a non-object numpy dtype."""
    arr = np.asanyarray(obj)
    return not arr.dtype.hasobject
def flatten_to_dict(obj):
    """Flatten a nested dict/list/tuple/set into a single-level dict.

    Nested keys are joined with underscores (e.g. {"a": {"b": 1}} becomes
    {"a_b": 1}); list/tuple/set elements use their index as the key part.
    Containers whose elements are all basic-typed are kept whole.
    """
    def _flatten(value, key):
        """Recursively flatten `value`, prefixing produced keys with `key`.

        `key is None` only at the top level; in that case containers of
        basic types are returned as-is rather than wrapped in a dict.
        """
        if isinstance(value, (list, tuple, set)):
            if basic_type_or_list(value):
                return {key: value} if key is not None else value
            else:
                tile_d = {}
                for i, v in enumerate(value):
                    tile_d.update(_flatten(v, f"{key}_{i}" if key is not None else i))
                return tile_d
        elif isinstance(value, dict):
            tile_d = {}
            for k, v in value.items():
                tile_d.update(_flatten(v, f"{key}_{k}" if key is not None else k))
            return tile_d
        else:
            # Scalar / arbitrary object: emit as a single entry.
            return {key: value} if key is not None else value
    return _flatten(value=obj, key=None)
def to_ndarray(obj):
    """Convert a dict / sequence / scalar into numpy array(s)."""
    if isinstance(obj, dict):
        return {key: np.asanyarray(val) for key, val in obj.items()}
    if isinstance(obj, (list, tuple, set)) and not basic_type_or_list(obj):
        return [np.asanyarray(val) for val in obj]
    return np.asanyarray(obj)
def is_path(path):
    """True for os.PathLike objects, or for strings naming an existing path."""
    if isinstance(path, os.PathLike):
        return True
    return isinstance(path, str) and os.path.exists(path)
def contains_path(obj):
    """True when `obj` is a path, or an array/sequence containing one."""
    if isinstance(obj, (np.ndarray, list, tuple, set)):
        return any(is_path(item) for item in obj)
    return is_path(obj)
def notebook_exec_result_flattened(path):
    """Execute a notebook and return its savez-compatible variables.

    Converts the notebook to python source, executes it, flattens the
    resulting namespace, and keeps only basic-typed, non-path entries.

    Args:
        path (str): Path of jupyter notebook

    Returns:
        dict: flattened variable name -> basic-typed value
    """
    # Step 1: Convert notebook to script (IPython magics neutralised).
    code = nbconvert_python(path)
    code = code.replace("get_ipython()", "# get_ipython()")
    # Step 2: Execute script and save variables in dictionary
    d = {}
    exec(code, d)
    d.pop("__builtins__")
    # Step 3: Flatten all variables
    bk = flatten_to_dict(d)
    # Step 4: Filter for variables which is basic type or list of basic type
    bk_filted = {k: v for k, v in bk.items() if basic_type_or_list(v)}
    # Step 5: Remove environmental variables
    bk_filted = {k: v for k, v in bk_filted.items() if not contains_path(v)}
    # Fix: pop(key) raised KeyError when the executed notebook triggered no
    # warnings (key absent); tolerate its absence with a default.
    for key in {"__warningregistry___version"}:
        bk_filted.pop(key, None)
    return bk_filted
def main():
    """Execute the getting-started notebook and save its global variables."""
    notebook_path = Path("docs/getting_started.ipynb")
    bk = notebook_exec_result_flattened(notebook_path)
    # to save session: compressed archive consumed by the stability tests.
    save_path = Path("pydsge/tests/resources/getting_started_stable.npz")
    with open(save_path, "wb") as f:
        np.savez_compressed(f, **bk)
if __name__ == "__main__":
main()
| 26.726829 | 88 | 0.627669 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,335 | 0.426173 |
0f8b3458e256dcb554d3948811ed8426a35f7e37 | 2,067 | py | Python | tests/test_entrez.py | ckrusemd/meta-analysis-tool | 31685973f767c198952df4b87813a9c8345554b6 | [
"BSD-3-Clause"
] | null | null | null | tests/test_entrez.py | ckrusemd/meta-analysis-tool | 31685973f767c198952df4b87813a9c8345554b6 | [
"BSD-3-Clause"
] | null | null | null | tests/test_entrez.py | ckrusemd/meta-analysis-tool | 31685973f767c198952df4b87813a9c8345554b6 | [
"BSD-3-Clause"
] | null | null | null |
import requests
from loguru import logger
def _post_and_log(endpoint, payload):
    """POST `payload` to the API `endpoint`, assert success, log the body.

    Shared helper: the original repeated this request/assert/log sequence
    verbatim in every test below.
    """
    response = requests.post("http://api:8080" + endpoint, json=payload)
    assert response.status_code == 200
    assert response.json()
    logger.info(response.json())


def test_entrez_query():
    _post_and_log("/entrez/query", {"query": "kruse eiken vestergaard", "email": "XXX@YYY.com"})


def test_entrez_summary_single():
    _post_and_log("/entrez/summary/single", {"uid": "28197643", "email": "XXX@YYY.com"})


def test_entrez_summary_list():
    _post_and_log("/entrez/summary/list", {"uid_list": ["28197643", "29679305", "27848006"], "email": "XXX@YYY.com"})


def test_entrez_abstract_single():
    _post_and_log("/entrez/abstract/single", {"uid": "28197643", "email": "XXX@YYY.com"})


def test_entrez_abstract_list():
    _post_and_log("/entrez/abstract/list", {"uid_list": ["28197643", "29679305", "27848006"], "email": "XXX@YYY.com"})


def test_entrez_elink_single():
    _post_and_log("/entrez/elink/single", {"uid": "28197643", "email": "XXX@YYY.com"})


def test_entrez_elink_list():
    _post_and_log("/entrez/elink/list", {"uid_list": ["28197643", "29679305", "27848006"], "email": "XXX@YYY.com"})
0f8bf192c00ccdfe2c080948f76d42451737fa89 | 9,816 | py | Python | src/tools/cluster/cluster.py | uct-cbio/galaxy-tools | b9422088dc41099fdde1edaf9c014825c8ee1cbf | [
"MIT"
] | null | null | null | src/tools/cluster/cluster.py | uct-cbio/galaxy-tools | b9422088dc41099fdde1edaf9c014825c8ee1cbf | [
"MIT"
] | null | null | null | src/tools/cluster/cluster.py | uct-cbio/galaxy-tools | b9422088dc41099fdde1edaf9c014825c8ee1cbf | [
"MIT"
] | null | null | null | #!/usr/bin/python
# EST clustering
# Currently clustering are done using the wcd clustering algorithm. Other algorithms will later be supported.
# The wcd program does not use qual score for clustering. If seq qual scores are specified the
# wcd clustered information will be used to create cluster qual scores. This quality scores can then be used as
# input for further processing (e.g. assembly).
import sys, re, string, os, subprocess, commands
from optparse import OptionParser
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import UnknownSeq
def main():
    """Parse CLI options, run wcd clustering, optionally derive per-cluster
    quality files, write a summary report, and zip the results.

    Returns 0 on success, a distinct negative code per missing/invalid option.
    NOTE(review): file paths are interpolated unquoted into shell commands via
    os.system throughout -- paths containing spaces or shell metacharacters
    will break (or execute); consider subprocess with list arguments.
    """
    usage = "usage: %prog -s SEQ_FILE -S QUAL_FILE -c CLUSTER_SEQ_FILE -C CLUSTER_QUAL_FILE -i CLUSTER_ID -r CLUSTER_SUMMARY_FILE -p CLUSTER_ALGORITHM -w PROG_WORK_DIR -d"
    parser = OptionParser(usage=usage)
    parser.add_option("-s", "--seq", dest="seq_file", help="File with sequences to be clustered.")
    parser.add_option("-S", "--qual", dest="qual_file", help="Quality scores for the input sequences (optional).")
    parser.add_option("-c", "--cluster_seq", dest="cluster_seq_file", help="Zipped FASTA sequences file. Archive contains cluster file with corresponding clustered FASTA sequence.")
    parser.add_option("-C", "--cluster_qual", dest="cluster_qual_file", help="Zipped QUAL scores file. Archive contains cluster files with corresponding clustered QUAL scores (optional).")
    parser.add_option("-i", "--cluster_id", dest="cluster_id", help="Cluster id")
    parser.add_option("-r", "--cluster_summary", dest="cluster_summary_file", help="Cluster summary report.")
    parser.add_option("-p", "--cluster_algorithm", dest="cluster_algorithm", help="The cluster algorithm (currently wcd only).")
    parser.add_option("-w", "--prog_work", dest="prog_work_dir", help="Program working directory, contains all processed files.")
    parser.add_option("-d", "--delete_program_work", action="store_true", dest="delete_program_work", default=False, help="Delete program working directory after program has completed.")
    (options, args) = parser.parse_args()
    # Validate required options; each failure uses its own negative exit code.
    if not options.seq_file:
        print "Please specify the FASTA sequence file (-s SEQ_FILE)"
        return - 1
    if not options.cluster_seq_file:
        print "Please specify the zipped cluster FASTA sequence file (-c CLUSTER_SEQ_FILE)"
        return - 3
    if not options.cluster_id:
        print "Please specify the cluster id (-i CLUSTER_ID)"
        return - 5
    if not options.cluster_summary_file:
        print "Please specify the cluster summary report file (-r CLUSTER_SUMMARY_FILE)"
        return - 6
    if not options.cluster_algorithm:
        print "Please specify the cluster algorithm (-p CLUSTER_ALGORITHM)"
        return - 7
    if not options.prog_work_dir:
        print "Please specify the program working directory (-w PROG_WORK_DIR)"
        return - 8
    if (len(args) > 0):
        print "Too many input arguments"
        return - 9
    # Do some initialization
    print "Initialize..."
    root_dir = os.getcwd()
    cluster_id = options.cluster_id
    # Get full file paths
    seq_file = os.path.abspath(options.seq_file)
    if options.qual_file:
        qual_file = os.path.abspath(options.qual_file)
    cluster_seq_file = os.path.abspath(options.cluster_seq_file)
    if options.cluster_qual_file:
        cluster_qual_file = os.path.abspath(options.cluster_qual_file)
    cluster_summary_file = os.path.abspath(options.cluster_summary_file)
    prog_work_dir = os.path.abspath(options.prog_work_dir)
    # Each run gets its own timestamped directory under the program work dir.
    timestamp = commands.getoutput("date +%Y-%m-%d_%H_%M_%S_%N")
    base_dir = prog_work_dir + "/cluster_" + timestamp
    if not os.path.isdir(prog_work_dir):
        os.system("mkdir " + prog_work_dir)
    if os.path.isdir(prog_work_dir):
        if os.path.isdir(base_dir):
            os.system("rm -rf " + base_dir)
            os.system("mkdir " + base_dir)
        else:
            os.system("mkdir " + base_dir);
    else:
        print "Program working directory does not exist."
        return - 10
    # Create working directory
    # NOTE(review): this repeats the base_dir creation performed just above;
    # kept as-is since it is harmless (mkdir of an existing dir only warns).
    if os.path.isdir(base_dir):
        os.system("rm -rf " + base_dir)
        os.system("mkdir " + base_dir)
    else:
        os.system("mkdir " + base_dir)
    # Run wcd clustering
    if (options.cluster_algorithm == "wcd"):
        print "Run wcd..."
        cluster_seq_dir = run_wcd(base_dir, seq_file, cluster_id)
    else:
        print "Cluster algorithm not supported (currently wcd only)"
        return - 10
    # Prepare cluster qual files (only when both the input QUAL file and the
    # output archive destination were supplied).
    if(options.qual_file and options.cluster_qual_file):
        print "Prepare cluster quality scores..."
        cluster_qual_dir = prepare_cluster_qual_files(base_dir, qual_file, cluster_seq_dir)
    elif (not options.qual_file and options.cluster_qual_file):
        print "No cluster quality score will be prepared"
        print "Please specify the QUAL score file (-S QUAL_FILE)"
    elif (options.qual_file and not options.cluster_qual_file):
        print "No cluster quality score will be prepared"
        print "Please specify the zipped clustered QUAL scores file (-C CLUSTER_QUAL_FILE)"
    else:
        print "No cluster quality score will be prepared"
        print "Please specify the QUAL score file (-S QUAL_FILE)"
        print "Please specify the zipped clustered QUAL scores file(-C CLUSTER_QUAL_FILE)"
    # Write summary report
    print "Write summary report..."
    write_cluster_summary(cluster_seq_dir, cluster_summary_file)
    # Prepare output
    print "Prepare output..."
    tmp_zip = base_dir + "/tmp.zip" # Galaxy work around need to create a temporary zip archive and move to the output data set
    if(options.qual_file and options.cluster_qual_file):
        # os.system ("zip -j " + cluster_qual_file + " " + cluster_qual_dir + "/*")
        os.system ("zip -jqq " + tmp_zip + " " + cluster_qual_dir + "/*")
        os.system("mv " + tmp_zip + " " + cluster_qual_file)
    # os.system("zip -j " + cluster_seq_file + " " + cluster_seq_dir + "/*")
    os.system("zip -jqq " + tmp_zip + " " + cluster_seq_dir + "/*")
    os.system("mv " + tmp_zip + " " + cluster_seq_file)
    # Delete program working directory if indicated
    if(options.delete_program_work):
        print "Delete working directory"
        os.system("rm -rf " + base_dir)
    # Done
    print "Done."
    return 0
def run_wcd(work_dir, seq_file, cluster_id):
    """Run wcd clustering on *seq_file* inside *work_dir*.

    Produces ``cluster.cls`` in *work_dir* and one FASTA file per cluster in
    ``work_dir/cluster_fasta``; returns that FASTA directory.  Leaves the
    process cwd at *work_dir*.
    """
    fasta_dir = work_dir + "/cluster_fasta"
    os.mkdir(fasta_dir)
    os.chdir(work_dir)
    # First pass: compute the clusters themselves.
    os.system("wcd --show_clusters -o cluster.cls {0}".format(seq_file))
    # Second pass: split the input into one FASTA file per cluster.
    os.chdir(fasta_dir)
    os.system("wcd --init_cluster ../cluster.cls --split {0} {1}".format(cluster_id, seq_file))
    os.chdir(work_dir)
    return fasta_dir
def prepare_cluster_qual_files(work_dir, qual_file, cluster_seq_dir):
    """Write one QUAL file per clustered FASTA file.

    For every FASTA file in *cluster_seq_dir*, look up each member sequence's
    quality record in *qual_file* and write the matching scores to
    ``work_dir/cluster_qual/<cluster>.qual``; returns that directory.
    """
    qual_dir = work_dir + "/cluster_qual"
    os.mkdir(qual_dir)
    # Index every quality record once, keyed by sequence id.
    with open(qual_file, "rU") as qual_handle:
        quals_by_name = SeqIO.to_dict(SeqIO.parse(qual_handle, "qual"))
    for fasta_name in os.listdir(cluster_seq_dir):
        fasta_path = cluster_seq_dir + "/" + fasta_name
        # skip non-files; could do some more checking here, e.g. is FASTA
        if not os.path.isfile(fasta_path):
            continue
        with open(fasta_path, "rU") as fasta_handle:
            qual_records = []
            for seq_record in SeqIO.parse(fasta_handle, "fasta"):
                qual = quals_by_name[seq_record.name]
                scores = qual.letter_annotations["phred_quality"]
                qual_record = SeqRecord(seq=UnknownSeq(len(scores)), id="", description=qual.description)
                qual_record.letter_annotations["phred_quality"] = scores
                qual_records.append(qual_record)
        qual_path = qual_dir + "/" + fasta_name.split(".")[0] + ".qual"
        with open(qual_path, "w") as qual_out:
            SeqIO.write(qual_records, qual_out, "qual")
        # need to replace the space after the > in the header
        os.system("sed -i \"s/> />/g\" " + qual_path)
    return qual_dir
def write_cluster_summary(cluster_seq_dir, summary_file):
    """Write a tab-separated report: one line per cluster, column 1 the
    cluster/singleton id, remaining columns the member sequence names.

    NOTE(review): the grep commands below read "cluster.cls" relative to the
    current working directory -- this relies on main()/run_wcd() having left
    the cwd at the wcd work directory; verify before reusing elsewhere.
    """
    fd_summary = open(summary_file, 'w')
    summary = ""
    for cluster_seq_file in sorted(os.listdir(cluster_seq_dir)):
        if os.path.isfile(cluster_seq_dir + "/" + cluster_seq_file): # check if file, can do some more checking here e.g. is fasta file
            fd_cluster_seq = open(cluster_seq_dir + "/" + cluster_seq_file, "rU")
            cluster_seqs = SeqIO.parse(fd_cluster_seq, "fasta")
            summary = summary + cluster_seq_file.split(".")[0]
            for seq in cluster_seqs:
                summary = summary + "\t" + seq.name
            summary = summary + "\n"
            fd_cluster_seq.close()
    # NOTE(review): the regexes presumably distinguish singleton lines (a lone
    # id) from multi-member cluster lines in wcd's .cls output -- confirm
    # against the wcd cluster file format.
    process = subprocess.Popen("grep -v \"^[[:digit:]]*.$\" cluster.cls | wc -l", stdout=subprocess.PIPE, shell=True)
    nr_clusters = process.communicate()[0]
    process = subprocess.Popen("grep \"^[[:digit:]]*.$\" cluster.cls | wc -l", stdout=subprocess.PIPE, shell=True)
    nr_singletons = process.communicate()[0]
    # wc output already ends in a newline, so none is appended to these two.
    header = "# nr Clusters: " + nr_clusters
    header = header + "# nr Singletons: " + nr_singletons
    header = header + "# Column 1: Cluster_id/Singleton_id" + "\n"
    header = header + "# Columns 2 to n: Member Sequences" + "\n"
    summary = header + summary
    fd_summary.write(summary)
    fd_summary.close()
if __name__ == "__main__":
    sys.exit(main())  # propagate main()'s status code to the shell
| 47.650485 | 188 | 0.667787 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,832 | 0.390383 |
0f8dcf564e74ca0b9bc7fa9771086a7026224d90 | 12,676 | py | Python | source/sent_classif.py | blazejdolicki/LASER | 6a4d4677ecf8756a020300983a2147f012a8e850 | [
"BSD-3-Clause"
] | null | null | null | source/sent_classif.py | blazejdolicki/LASER | 6a4d4677ecf8756a020300983a2147f012a8e850 | [
"BSD-3-Clause"
] | null | null | null | source/sent_classif.py | blazejdolicki/LASER | 6a4d4677ecf8756a020300983a2147f012a8e850 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#
# LASER Language-Agnostic SEntence Representations
# is a toolkit to calculate multilingual sentence embeddings
# and to use them for document classification, bitext filtering
# and mining
#
# --------------------------------------------------------
#
# Simple MLP classifier for sentence embeddings
import argparse
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data_utils
################################################
def LoadData(bdir, dfn, lfn, dim=1024, bsize=32, shuffle=False, quiet=False):
    """Load raw float32 sentence embeddings and integer labels from disk.

    Args:
        bdir: directory prefix (expected to end with a path separator).
        dfn: embeddings file, a flat binary dump of float32 values.
        lfn: label file, one integer label per line.
        dim: embedding dimensionality, used to reshape the flat buffer.
        bsize: mini-batch size for the returned DataLoader.
        shuffle: whether the DataLoader reshuffles each epoch.
        quiet: suppress the informational prints.

    Returns:
        A DataLoader yielding (embeddings, labels) batches.
    """
    x = np.fromfile(bdir + dfn, dtype=np.float32, count=-1)
    # Reinterpret the flat buffer as one row per sentence (in-place).
    x.resize(x.shape[0] // dim, dim)
    lbl = np.loadtxt(bdir + lfn, dtype=np.int32)
    # BUG FIX: the original called lbl.reshape(lbl.shape[0], 1) and discarded
    # the result (reshape is not in-place), making it a no-op.  Labels stay
    # 1-D, which is what TensorDataset/CrossEntropyLoss expect, so the dead
    # statement was simply removed.
    if not quiet:
        print(' - read {:d}x{:d} elements in {:s}'.format(x.shape[0], x.shape[1], dfn))
        print(' - read {:d} labels [{:d},{:d}] in {:s}'
              .format(lbl.shape[0], lbl.min(), lbl.max(), lfn))
    D = data_utils.TensorDataset(torch.from_numpy(x), torch.from_numpy(lbl))
    loader = data_utils.DataLoader(D, batch_size=bsize, shuffle=shuffle)
    return loader
################################################
class Net(nn.Module):
    """Multi-layer perceptron classifier over fixed-size sentence embeddings.

    Architecture: [Dropout] -> (Linear -> activation -> [Dropout]) for each
    hidden size in ``nhid`` -> Linear to ``odim`` logits.  No softmax is
    applied here because nn.CrossEntropyLoss expects raw logits.
    """

    def __init__(self, idim=1024, odim=2, nhid=None,
                 dropout=0.0, gpu=0, activation='TANH'):
        """Build the MLP.

        Args:
            idim: input (embedding) dimension.
            odim: number of output classes.
            nhid: list of hidden-layer sizes; None/[] (or sizes <= 0) yields
                a single linear layer.
            dropout: dropout probability (0 disables dropout layers).
            gpu: CUDA device id; negative means CPU.
            activation: 'TANH' or 'RELU'.

        Raises:
            ValueError: if ``activation`` is not recognized.
        """
        super(Net, self).__init__()
        self.gpu = gpu
        if nhid is None:
            # BUG FIX: the old default (None) crashed on len(nhid) below.
            nhid = []
        modules = []  # (a duplicated "modules = []" statement was removed)
        print(' - mlp {:d}'.format(idim), end='')
        if len(nhid) > 0:
            if dropout > 0:
                modules.append(nn.Dropout(p=dropout))
            nprev = idim
            for nh in nhid:
                if nh > 0:
                    modules.append(nn.Linear(nprev, nh))
                    nprev = nh
                    if activation == 'TANH':
                        modules.append(nn.Tanh())
                        print('-{:d}t'.format(nh), end='')
                    elif activation == 'RELU':
                        modules.append(nn.ReLU())
                        print('-{:d}r'.format(nh), end='')
                    else:
                        # BUG FIX: the message previously lacked interpolation
                        # and printed the literal text "{activation}".
                        raise ValueError(
                            'Unrecognized activation {}'.format(activation))
                    if dropout > 0:
                        modules.append(nn.Dropout(p=dropout))
            modules.append(nn.Linear(nprev, odim))
            print('-{:d}, dropout={:.1f}'.format(odim, dropout))
        else:
            modules.append(nn.Linear(idim, odim))
            # BUG FIX: was "' - mlp %d-%d'.format(...)" which mixed %-style
            # placeholders with str.format and printed the literal "%d-%d".
            print(' - mlp {:d}-{:d}'.format(idim, odim))
        self.mlp = nn.Sequential(*modules)
        # Softmax is included in CrossEntropyLoss!
        if self.gpu >= 0:
            self.mlp = self.mlp.cuda()

    def forward(self, x):
        """Return raw class logits for a batch of embeddings."""
        return self.mlp(x)

    def TestCorpus(self, dset, name=' Dev', nlbl=4):
        """Evaluate accuracy on ``dset`` (a DataLoader of (X, Y) batches).

        Prints the accuracy (no trailing newline) and returns
        ``(correct, total, all_predicted)`` where ``all_predicted`` is the
        concatenated CPU tensor of predicted labels.
        """
        correct = 0
        total = 0
        all_predicted = torch.Tensor().long()
        self.mlp.train(mode=False)  # evaluation mode (disables dropout)
        corr = np.zeros(nlbl, dtype=np.int32)  # per-class prediction counts
        for data in dset:
            X, Y = data
            Y = Y.long()
            if self.gpu >= 0:
                X = X.cuda()
                Y = Y.cuda()
            outputs = self.mlp(X)
            _, predicted = torch.max(outputs.data, 1)
            total += Y.size(0)
            correct += (predicted == Y).int().sum()
            for i in range(nlbl):
                corr[i] += (predicted == i).int().sum()
            # accumulate this batch's predictions
            all_predicted = torch.cat((all_predicted, predicted.cpu()), 0)
        print(' | {:4s}: {:5.2f}%'
              .format(name, 100.0 * correct.float() / total), end='')
        # Per-class rates are available in `corr` should they be needed:
        #   100.0 * corr[i] / total
        return correct, total, all_predicted

    def get_labels(self, dset):
        """Return ``(all_targets, all_predicted)`` CPU tensors for ``dset``."""
        all_predicted = torch.Tensor().long()
        all_y = torch.Tensor().long()
        self.mlp.train(mode=False)  # evaluation mode (disables dropout)
        for data in dset:
            X, Y = data
            Y = Y.long()
            if self.gpu >= 0:
                X = X.cuda()
                Y = Y.cuda()
            outputs = self.mlp(X)
            _, predicted = torch.max(outputs.data, 1)
            # accumulate this batch's predictions and ground truth
            all_predicted = torch.cat((all_predicted, predicted.cpu()), 0)
            all_y = torch.cat((all_y, Y.cpu()), 0)
        return all_y, all_predicted
################################################
# Command-line interface: data locations, network hyper-parameters, and
# training settings for the sentence classifier script below.
parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description="Simple sentence classifier")

# Data
parser.add_argument(
    '--base-dir', '-b', type=str, required=True, metavar='PATH',
    help="Directory with all the data files)")
parser.add_argument(
    '--save', '-s', type=str, required=False, metavar='PATH', default="",
    help="File in which to save best network")
parser.add_argument(
    '--train', '-t', type=str, required=True, metavar='STR',
    help="Name of training corpus")
parser.add_argument(
    '--train-labels', '-T', type=str, required=True, metavar='STR',
    help="Name of training corpus (labels)")
parser.add_argument(
    '--dev', '-d', type=str, required=True, metavar='STR',
    help="Name of development corpus")
parser.add_argument(
    '--dev-labels', '-D', type=str, required=True, metavar='STR',
    help="Name of development corpus (labels)")
parser.add_argument(
    '--test', '-e', type=str, required=True, metavar='STR',
    help="Name of test corpus without language extension")
parser.add_argument(
    '--test-labels', '-E', type=str, required=True, metavar='STR',
    help="Name of test corpus without language extension (labels)")
parser.add_argument(
    '--lang', '-L', nargs='+', default=None,
    help="List of languages to test on")

# network definition
parser.add_argument(
    "--dim", "-m", type=int, default=1024,
    help="Dimension of sentence embeddings")
parser.add_argument(
    '--nhid', '-n', type=int, default=[0], nargs='+',
    help="List of hidden layer(s) dimensions")
parser.add_argument(
    "--nb-classes", "-c", type=int, default=2,
    help="Number of output classes")
parser.add_argument(
    '--dropout', '-o', type=float, default=0.0, metavar='FLOAT',
    help="Value of dropout")
parser.add_argument(
    '--nepoch', '-N', type=int, default=100, metavar='INT',
    help="Number of epochs")
parser.add_argument(
    '--bsize', '-B', type=int, default=128, metavar='INT',
    help="Batch size")
parser.add_argument(
    '--seed', '-S', type=int, default=123456789, metavar='INT',
    help="Initial random seed")
parser.add_argument(
    '--lr', type=float, default=0.001, metavar='FLOAT',
    help='Learning rate')
parser.add_argument(
    '--wdecay', type=float, default=0.0, metavar='FLOAT',
    help='Weight decay')
parser.add_argument(
    '--gpu', '-g', type=int, default=-1, metavar='INT',
    help="GPU id (-1 for CPU)")
parser.add_argument(
    '--create_labels', '-l', action='store_true',
    help="Whether to create labels (true) or just train and evaluate (false)")
args = parser.parse_args()
print(' - base directory: {}'.format(args.base_dir))
args.base_dir = args.base_dir + "/"
# Load train/dev embeddings + labels (train is shuffled every epoch).
train_loader = LoadData(args.base_dir, args.train, args.train_labels,
                        dim=args.dim, bsize=args.bsize, shuffle=True)
dev_loader = LoadData(args.base_dir, args.dev, args.dev_labels,
                      dim=args.dim, bsize=args.bsize, shuffle=False)

# set GPU and random seed
torch.cuda.set_device(args.gpu)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
print(" - setting seed to %d" % args.seed)

# create network
net = Net(idim=args.dim, odim=args.nb_classes,
          nhid=args.nhid, dropout=args.dropout, gpu=args.gpu)

if args.gpu >= 0:
    criterion = nn.CrossEntropyLoss().cuda()
else:
    criterion = nn.CrossEntropyLoss()

#optimizer = optim.Adam(net.parameters(), weight_decay=0.0)
# default: pytorch/optim/adam.py
# Py0.4: lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False):
# Py1.0: lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False):
optimizer = optim.Adam(net.parameters(),
                       lr=args.lr,
                       weight_decay=args.wdecay,
                       betas=(0.9, 0.999),
                       eps=1e-8,
                       amsgrad=False)

corr_best = 0
# loop multiple times over the dataset; keep a deep copy of the network with
# the best dev accuracy seen so far in `net_best`
for epoch in range(args.nepoch):
    loss_epoch = 0.0
    print('Ep {:4d}'.format(epoch), end='')
    # for inputs, labels in train_loader:
    for i, data in enumerate(train_loader, 0):
        # get the inputs
        inputs, labels = data
        labels = labels.long()
        if args.gpu >= 0:
            inputs = inputs.cuda()
            labels = labels.cuda()
        # zero the parameter gradients
        net.zero_grad()
        # forward + backward + optimize
        net.train(mode=True)
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        loss_epoch += loss.item()
    print(' | loss {:e}'.format(loss_epoch), end='')
    # corr_train, nbex_train, _ = net.TestCorpus(train_loader, 'Train')
    corr, nbex, _ = net.TestCorpus(dev_loader, 'Dev')
    if corr >= corr_best:
        print(' | saved')
        corr_best = corr
        # corr_train_best = corr_train
        net_best = copy.deepcopy(net)
    else:
        print('')

# Persist the best snapshot (moved to CPU so it loads anywhere).
if 'net_best' in globals():
    if args.save != '':
        torch.save(net_best.cpu(), args.save)

print("# epochs: {}, lr: {}, nhid: {}, drop: {}, bsize: {}"
      .format(args.nepoch,args.lr, args.nhid, args.dropout, args.bsize))
# print('Best dev - Dev {:5.2f}% Train {:5.2f}%'
#       .format(100.0 * corr_best.float() / nbex, 100.0 * corr_train_best.float() / nbex_train))
if args.gpu >= 0:
    net_best = net_best.cuda()

# TODO comment for finetuning because then we dont want to see the test set <- instead of uncommenting add a parameter and an if statement
# At first I thought we don't want to test when creating labels but later i decided that it's good to see the test metrics to make sure they are
# as good as expected. Maybe we should delete create_labels later.
# if not args.create_labels:
# else:
#     save ground truth and labels predicted on train and dev set
#     for l in args.lang:
#         print("Language", l)
#         for part in ['train', 'dev']:
#             data_loader = LoadData(args.base_dir, f"{part}.enc." + l,
#                                    f"{part}.lbl." + l,
#                                    dim=args.dim, bsize=args.bsize,
#                                    shuffle=False, quiet=True)
#             actuals, preds = net_best.get_labels(data_loader)
#             # unsqueeze(1) changes tensor shape from (n) to (n,1)
#             merge = torch.cat((actuals.unsqueeze(1), preds.unsqueeze(1)), 1)
#             # multifit uses "ja" instead of "jp"
#             if l == "jp":
#                 np.savetxt(f"preds-{part}-ja.csv", merge.numpy().astype(int), delimiter=",")
#             else:
#                 np.savetxt(f"preds-{part}-{l}.csv", merge.numpy().astype(int), delimiter=",")

# test on (several) languages
for l in args.lang:
    test_loader = LoadData(args.base_dir, args.test + '.' + l,
                           args.test_labels + '.' + l,
                           dim=args.dim, bsize=args.bsize,
                           shuffle=False, quiet=True)
    print('Ep best | Eval Test lang {:s}'.format(l), end='')
    _, _, preds = net_best.TestCorpus(test_loader, 'Test')
    print('')
0f8ddc53734f66db428cdaafd04e0f9951eeb50d | 2,852 | py | Python | pandas/tests/scalar/timestamp/test_arithmetic.py | BenRussert/pandas | 9179e633b1e54ac31c5ea42ec0ec24e9a1709aae | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2018-11-11T22:18:13.000Z | 2018-11-11T22:18:13.000Z | pandas/tests/scalar/timestamp/test_arithmetic.py | BenRussert/pandas | 9179e633b1e54ac31c5ea42ec0ec24e9a1709aae | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/scalar/timestamp/test_arithmetic.py | BenRussert/pandas | 9179e633b1e54ac31c5ea42ec0ec24e9a1709aae | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas.util.testing as tm
from pandas.compat import long
from pandas.tseries import offsets
from pandas import Timestamp, Timedelta
class TestTimestampArithmetic(object):
def test_overflow_offset(self):
# xref https://github.com/statsmodels/statsmodels/issues/3374
# ends up multiplying really large numbers which overflow
stamp = Timestamp('2017-01-13 00:00:00', freq='D')
offset = 20169940 * offsets.Day(1)
with pytest.raises(OverflowError):
stamp + offset
with pytest.raises(OverflowError):
offset + stamp
with pytest.raises(OverflowError):
stamp - offset
def test_delta_preserve_nanos(self):
val = Timestamp(long(1337299200000000123))
result = val + timedelta(1)
assert result.nanosecond == val.nanosecond
def test_timestamp_sub_datetime(self):
dt = datetime(2013, 10, 12)
ts = Timestamp(datetime(2013, 10, 13))
assert (ts - dt).days == 1
assert (dt - ts).days == -1
def test_addition_subtraction_types(self):
# Assert on the types resulting from Timestamp +/- various date/time
# objects
dt = datetime(2014, 3, 4)
td = timedelta(seconds=1)
# build a timestamp with a frequency, since then it supports
# addition/subtraction of integers
ts = Timestamp(dt, freq='D')
with tm.assert_produces_warning(FutureWarning):
# GH#22535 add/sub with integers is deprecated
assert type(ts + 1) == Timestamp
assert type(ts - 1) == Timestamp
# Timestamp + datetime not supported, though subtraction is supported
# and yields timedelta more tests in tseries/base/tests/test_base.py
assert type(ts - dt) == Timedelta
assert type(ts + td) == Timestamp
assert type(ts - td) == Timestamp
# Timestamp +/- datetime64 not supported, so not tested (could possibly
# assert error raised?)
td64 = np.timedelta64(1, 'D')
assert type(ts + td64) == Timestamp
assert type(ts - td64) == Timestamp
def test_addition_subtraction_preserve_frequency(self):
ts = Timestamp('2014-03-05', freq='D')
td = timedelta(days=1)
original_freq = ts.freq
with tm.assert_produces_warning(FutureWarning):
# GH#22535 add/sub with integers is deprecated
assert (ts + 1).freq == original_freq
assert (ts - 1).freq == original_freq
assert (ts + td).freq == original_freq
assert (ts - td).freq == original_freq
td64 = np.timedelta64(1, 'D')
assert (ts + td64).freq == original_freq
assert (ts - td64).freq == original_freq
| 34.361446 | 79 | 0.631837 | 2,610 | 0.915147 | 0 | 0 | 0 | 0 | 0 | 0 | 683 | 0.239481 |
0f8e473c098b6e6372a376f56a8b418162a63f4d | 1,224 | py | Python | utlts/uniprot_api.py | proteins247/proteomevis_scripts | 33d864f02787659a5d75103baad556fb48b3bd1e | [
"MIT"
] | 1 | 2020-11-11T06:14:10.000Z | 2020-11-11T06:14:10.000Z | utlts/uniprot_api.py | proteins247/proteomevis_scripts | 33d864f02787659a5d75103baad556fb48b3bd1e | [
"MIT"
] | null | null | null | utlts/uniprot_api.py | proteins247/proteomevis_scripts | 33d864f02787659a5d75103baad556fb48b3bd1e | [
"MIT"
] | 1 | 2019-05-28T19:13:24.000Z | 2019-05-28T19:13:24.000Z | import urllib, urllib2
from parse_data import taxid
#UniProt column names are found at
#https://www.uniprot.org/help/uniprotkb_column_names
class UniProtAPI():
    """Minimal batching client for the UniProt REST endpoint (Python 2 / urllib2).

    ``self.labels`` holds the tab-separated header of the last response and
    ``self.raw_data`` accumulates all result rows across calls.
    """

    def __init__(self, columns):
        self.columns = columns
        self.url = 'https://www.uniprot.org/uniprot/'
        self.batch_size = 350  # 491 is limit
        self.raw_data = []

    def info(self):
        """Issue one POST described by ``self.params``; record header + rows."""
        encoded = urllib.urlencode(self.params)
        response = urllib2.urlopen(urllib2.Request(self.url, encoded))
        header = next(response).split('\t')
        self.raw_data.extend(response)
        self.labels = [label.rstrip() for label in header]

    def uniprot_info(self, uniprots):
        """Fetch the configured columns for every accession, in batches."""
        # as of 5/29/18 cannot get multiple uniprots at once
        for batch_i in range(len(uniprots) / self.batch_size + 1):
            start = batch_i * self.batch_size
            chunk = uniprots[start:start + self.batch_size]
            self.params = {'query': ','.join(chunk),
                           'columns': ','.join(self.columns),
                           'format': 'tab'}
            self.info()
        return self.labels, self.raw_data

    def organism_info(self, organism=''):
        """Fetch all reviewed entries for an organism (default from parse_data)."""
        if not organism:
            # Rebinds the parameter with the project-wide default organism.
            from parse_data import organism
        self.params = {'query': 'organism:{0} AND reviewed:yes'.format(taxid()[organism]),
                       'columns': ','.join(self.columns),
                       'format': 'tab'}
        self.info()
        return self.labels, self.raw_data
0f90448ac74fa457d3c66dd70e9a31f1a0170da1 | 59 | py | Python | ditto/flickr/__init__.py | garrettc/django-ditto | fcf15beb8f9b4d61634efd4a88064df12ee16a6f | [
"MIT"
] | 54 | 2016-08-15T17:32:41.000Z | 2022-02-27T03:32:05.000Z | ditto/flickr/__init__.py | garrettc/django-ditto | fcf15beb8f9b4d61634efd4a88064df12ee16a6f | [
"MIT"
] | 229 | 2015-07-23T12:50:47.000Z | 2022-03-24T10:33:20.000Z | ditto/flickr/__init__.py | garrettc/django-ditto | fcf15beb8f9b4d61634efd4a88064df12ee16a6f | [
"MIT"
] | 8 | 2015-09-10T17:10:35.000Z | 2022-03-25T13:05:01.000Z | default_app_config = "ditto.flickr.apps.DittoFlickrConfig"
| 29.5 | 58 | 0.847458 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.627119 |
0f9355816927a8701eaeb991e776ae193c108d11 | 561 | py | Python | cartes/dataviz/markers/__init__.py | xoolive/cartes | a9e8144e8e40eec66e2a21e96c46d09ad4f63853 | [
"MIT"
] | 20 | 2021-01-04T21:23:48.000Z | 2021-12-14T15:28:15.000Z | cartes/dataviz/markers/__init__.py | xoolive/cartes | a9e8144e8e40eec66e2a21e96c46d09ad4f63853 | [
"MIT"
] | null | null | null | cartes/dataviz/markers/__init__.py | xoolive/cartes | a9e8144e8e40eec66e2a21e96c46d09ad4f63853 | [
"MIT"
] | null | null | null | import json
from pathlib import Path
import numpy as np
from matplotlib import path
current_dir = Path(__file__).parent  # package directory holding the bundled *.json marker files
# Advertise one attribute per bundled marker definition (resolved lazily by __getattr__ below).
__all__ = list(p.stem for p in current_dir.glob("*.json"))
def __getattr__(name: str) -> path.Path:
    """PEP 562 module hook: lazily build a matplotlib Path from ``<name>.json``."""
    marker_file = current_dir / (name + ".json")
    if not marker_file.exists():
        raise AttributeError(
            f"No {name}.json file found in {current_dir.absolute()}."
        )
    spec = json.loads(marker_file.read_text())
    return path.Path(
        vertices=spec["vertices"], codes=np.array(spec["codes"], np.uint8)
    )
| 25.5 | 78 | 0.654189 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 89 | 0.158645 |
0f935d49aac5857a6ea03a3d954a89f57e8ce440 | 3,169 | py | Python | flash/text/seq2seq/core/input.py | dudeperf3ct/lightning-flash | a855cd14cf1cd0301b4a2f82c0c95e4d8d986650 | [
"Apache-2.0"
] | 1 | 2022-03-09T22:40:05.000Z | 2022-03-09T22:40:05.000Z | flash/text/seq2seq/core/input.py | dudeperf3ct/lightning-flash | a855cd14cf1cd0301b4a2f82c0c95e4d8d986650 | [
"Apache-2.0"
] | null | null | null | flash/text/seq2seq/core/input.py | dudeperf3ct/lightning-flash | a855cd14cf1cd0301b4a2f82c0c95e4d8d986650 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
import flash
from flash.core.data.io.input import DataKeys, Input
from flash.core.data.utilities.paths import PATH_TYPE
from flash.core.utilities.imports import _TEXT_AVAILABLE, requires
if _TEXT_AVAILABLE:
from datasets import Dataset, load_dataset
else:
Dataset = object
class Seq2SeqInputBase(Input):
    """Base ``Input`` for seq2seq tasks.

    Normalizes a Hugging Face ``Dataset`` so the input column is named
    ``DataKeys.INPUT`` and the optional target column ``DataKeys.TARGET``,
    dropping every other column.
    """

    @requires("text")
    def load_data(
        self,
        hf_dataset: Dataset,
        input_key: str,
        target_key: Optional[str] = None,
    ) -> Dataset:
        # remove extra columns
        extra_columns = set(hf_dataset.column_names) - {input_key, target_key}
        hf_dataset = hf_dataset.remove_columns(extra_columns)
        if input_key != DataKeys.INPUT:
            hf_dataset = hf_dataset.rename_column(input_key, DataKeys.INPUT)
        if target_key in hf_dataset.column_names and target_key != DataKeys.TARGET:
            hf_dataset = hf_dataset.rename_column(target_key, DataKeys.TARGET)
        if flash._IS_TESTING:
            # NOTE: must subset in this way to return a Dataset
            # NOTE(review): under Flash test mode this actually returns a plain
            # list of samples despite the ``Dataset`` annotation -- kept as-is.
            hf_dataset = [sample for sample in hf_dataset.select(range(40), keep_in_memory=True)]
        return hf_dataset
class Seq2SeqCSVInput(Seq2SeqInputBase):
    """Loads seq2seq data from a CSV file via Hugging Face ``datasets``."""

    @requires("text")
    def load_data(
        self,
        csv_file: PATH_TYPE,
        input_key: str,
        target_key: Optional[str] = None,
    ) -> Dataset:
        loaded = load_dataset("csv", data_files={"data": str(csv_file)})
        return super().load_data(loaded["data"], input_key, target_key)
class Seq2SeqJSONInput(Seq2SeqInputBase):
    """Loads seq2seq data from a JSON file, reading records from ``field``."""

    @requires("text")
    def load_data(
        self,
        json_file: PATH_TYPE,
        field: str,
        input_key: str,
        target_key: Optional[str] = None,
    ) -> Dataset:
        loaded = load_dataset("json", data_files={"data": str(json_file)}, field=field)
        return super().load_data(loaded["data"], input_key, target_key)
class Seq2SeqListInput(Seq2SeqInputBase):
    """Builds a seq2seq dataset from in-memory lists of strings."""

    @requires("text")
    def load_data(
        self,
        inputs: List[str],
        targets: Optional[List[str]] = None,
    ) -> Dataset:
        columns = {DataKeys.INPUT: inputs}
        if targets is not None:
            columns[DataKeys.TARGET] = targets
        hf_dataset = Dataset.from_dict(columns)
        return super().load_data(hf_dataset, DataKeys.INPUT, DataKeys.TARGET)
| 31.376238 | 97 | 0.652887 | 2,254 | 0.711265 | 0 | 0 | 2,082 | 0.65699 | 0 | 0 | 705 | 0.222468 |
7e13e1f7cd0f8dfc86c33515f8eed65171e22242 | 612 | py | Python | pymitblod/gender.py | slimcdk/pymitblod | 8bae9401abec73e53a0687a5429b215722076b39 | [
"Apache-2.0"
] | null | null | null | pymitblod/gender.py | slimcdk/pymitblod | 8bae9401abec73e53a0687a5429b215722076b39 | [
"Apache-2.0"
] | null | null | null | pymitblod/gender.py | slimcdk/pymitblod | 8bae9401abec73e53a0687a5429b215722076b39 | [
"Apache-2.0"
] | null | null | null | '''
All model classes for pymitblod
'''
from __future__ import annotations
from typing import Callable
class Gender():
    """A gender option together with its blood-volume estimation formula."""

    def __init__(
        self,
        id: int,
        name: str,
        blood_volume_ml_lambda: Callable[[float, float, float], float],
    ):
        # Stash the constructor arguments; nothing is computed eagerly.
        self._id = id
        self._name = name
        self._blood_volume_ml_lambda = blood_volume_ml_lambda

    def __repr__(self) -> str:
        # Show the human-readable name instead of the default object repr.
        return str(self._name)

    def blood_volume_ml_lambda(self) -> Callable:
        """Return the stored three-argument blood-volume callable."""
        return self._blood_volume_ml_lambda
| 20.4 | 74 | 0.616013 | 505 | 0.825163 | 0 | 0 | 0 | 0 | 0 | 0 | 89 | 0.145425 |
7e13fe81701a2fe45cfed9171d56097965bceb41 | 56 | py | Python | findtime/utils.py | MattCCS/FindTime | 8c38965661444d342a28eadc0cdedaf70e9f9279 | [
"MIT"
] | null | null | null | findtime/utils.py | MattCCS/FindTime | 8c38965661444d342a28eadc0cdedaf70e9f9279 | [
"MIT"
] | null | null | null | findtime/utils.py | MattCCS/FindTime | 8c38965661444d342a28eadc0cdedaf70e9f9279 | [
"MIT"
] | null | null | null |
def day_of_year(dt):
    """Return the 1-based ordinal day of the year for *dt* (a date or datetime)."""
    calendar_tuple = dt.timetuple()
    return calendar_tuple.tm_yday
| 14 | 33 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7e1465fa84580938e15bf43f26376b15ecb19b23 | 1,408 | py | Python | tools/merge-inputs.py | dice-project/DICE-deployment-service | e209c6a061a78f170e81cfc03d2959af0283ed15 | [
"Apache-2.0"
] | 2 | 2018-04-03T20:45:26.000Z | 2022-02-07T19:53:42.000Z | tools/merge-inputs.py | dice-project/DICE-deployment-service | e209c6a061a78f170e81cfc03d2959af0283ed15 | [
"Apache-2.0"
] | 3 | 2016-11-15T10:41:43.000Z | 2020-03-16T07:49:03.000Z | tools/merge-inputs.py | dice-project/DICE-deployment-service | e209c6a061a78f170e81cfc03d2959af0283ed15 | [
"Apache-2.0"
] | 2 | 2018-07-04T11:37:12.000Z | 2022-02-07T19:53:43.000Z | #!/usr/bin/env python
import sys
import json
import argparse
class ArgParser(argparse.ArgumentParser):
    """Argument parser that prints the full help text whenever parsing fails."""

    def error(self, message):
        """Report *message* on stderr, show the help text, and exit with status 2."""
        print("error: {}".format(message), file=sys.stderr)
        self.print_help()
        sys.exit(2)
def make_dict(inputsfile):
    """Parse *inputsfile* as a JSON list and index its entries by their 'key' field.

    Exits the process with status 1 (after printing the error) when the file
    cannot be parsed.
    """
    try:
        inputs = json.load(inputsfile)
    except Exception as e:
        print("Error reading {}: {}".format(inputsfile.name, e))
        sys.exit(1)
    indexed = {}
    for entry in inputs:
        indexed[entry['key']] = entry
    return indexed
def make_outputs(inputdict):
    """Flatten the keyed mapping back into a plain list of entries."""
    return [*inputdict.values()]
def main(newinputs, oldinputs, output):
    """Overlay the entries from *newinputs* onto *oldinputs* (matching on 'key')
    and write the merged JSON list to *output*."""
    merged = make_dict(oldinputs)
    merged.update(make_dict(newinputs))
    json.dump(make_outputs(merged), output, indent=2)
if __name__ == "__main__":
    parser = ArgParser(description="Update and extend contents of JSON")
    # All three arguments are files; argparse opens them for us.
    parser.add_argument("newinputs", type=argparse.FileType("r"),
                        help="JSON file containing new values")
    parser.add_argument("oldinputs", type=argparse.FileType("r"),
                        help="JSON file containing existing values")
    parser.add_argument("output", type=argparse.FileType("w"),
                        help="Output JSON file")
    args = parser.parse_args()
    main(args.newinputs, args.oldinputs, args.output)
| 26.566038 | 81 | 0.649858 | 238 | 0.169034 | 0 | 0 | 0 | 0 | 0 | 0 | 294 | 0.208807 |
7e14ccb15609f75e18eb6393772f3e6bcda7ab38 | 947 | py | Python | carpyncho/migrations/versions/a169bf8b211d_lightcurve_to_npy.py | toros-astro/carpyncho3 | 27b00e7539e081c563f1a09c70fb255ac5e71583 | [
"BSD-3-Clause"
] | 1 | 2016-02-16T11:10:52.000Z | 2016-02-16T11:10:52.000Z | carpyncho/migrations/versions/a169bf8b211d_lightcurve_to_npy.py | toros-astro/carpyncho3 | 27b00e7539e081c563f1a09c70fb255ac5e71583 | [
"BSD-3-Clause"
] | null | null | null | carpyncho/migrations/versions/a169bf8b211d_lightcurve_to_npy.py | toros-astro/carpyncho3 | 27b00e7539e081c563f1a09c70fb255ac5e71583 | [
"BSD-3-Clause"
] | null | null | null | """lightcurve_to_npy
Revision ID: a169bf8b211d
Revises: 423507fdb18e
Create Date: 2017-09-09 22:32:17.558901
"""
# revision identifiers, used by Alembic.
revision = 'a169bf8b211d'
down_revision = '423507fdb18e'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply the migration: add .npy-based filename columns and drop the HDF one."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('LightCurves', sa.Column('features_filename', sa.Text(), nullable=True))
    op.add_column('LightCurves', sa.Column('lc_filename', sa.Text(), nullable=True))
    op.drop_column('LightCurves', 'hdf_filename')
    ### end Alembic commands ###
def downgrade():
    """Revert the migration: restore the HDF filename column, drop the .npy ones.

    Note: the data in the dropped columns is lost; only the schema is restored.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('LightCurves', sa.Column('hdf_filename', sa.TEXT(), nullable=True))
    op.drop_column('LightCurves', 'lc_filename')
    op.drop_column('LightCurves', 'features_filename')
    ### end Alembic commands ###
| 28.69697 | 90 | 0.713833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 526 | 0.555438 |
7e16eb8d9b19794eaeee0cf0580050bfbc0a9996 | 3,328 | py | Python | core/window_manager.py | kennethnym/Subliminal | ffa49eba5bcaf1a7cc566d77e15db28fc33cdafe | [
"MIT"
] | null | null | null | core/window_manager.py | kennethnym/Subliminal | ffa49eba5bcaf1a7cc566d77e15db28fc33cdafe | [
"MIT"
] | null | null | null | core/window_manager.py | kennethnym/Subliminal | ffa49eba5bcaf1a7cc566d77e15db28fc33cdafe | [
"MIT"
] | null | null | null | import asyncio
import os
import subprocess
from threading import Thread
from typing import Dict, Set
from .plugin_settings import PluginSettings
from .rpc.api.daemon import DaemonConnectedEvent
from .project import CurrentProject
from .rpc import FlutterRpcProcess, FlutterRpcClient
from .env import Env
import sublime
class WindowManager:
    """Per-Sublime-window state: plugin settings, Flutter environment, the
    ``flutter daemon`` RPC process, and the current project.

    One instance is cached per window by :func:`get_window_manager`.
    """

    def __init__(self, window: sublime.Window) -> None:
        super().__init__()
        # Environment comes from LSP-Dart's "env" setting, falling back to the
        # process environment; plugin settings come from Subliminal's own file.
        env_dict = sublime.load_settings("LSP-Dart.sublime-settings").get("env", dict(os.environ))
        settings = sublime.load_settings("Subliminal.sublime-settings").to_dict()
        self.__plugin_settings = PluginSettings(**settings)
        if "FLUTTER_ROOT" in env_dict:
            env = Env.from_dict(env_dict)
            loop = asyncio.new_event_loop()
            self.__env = env
            self.__window = window
            self.__is_daemon_started = False
            self.__event_loop = loop
            # The daemon process is created here but not started until
            # start_daemon() is called.
            self.__daemon = FlutterRpcProcess([env.flutter_path, "daemon"], loop)
            self.__daemon_client = FlutterRpcClient(self.__daemon)
            self.__project = CurrentProject(window, env, self.__daemon_client, loop)
        else:
            # NOTE(review): when FLUTTER_ROOT is missing, the attributes above are
            # never set, so any later use of this instance (properties,
            # start_daemon, unload) will raise AttributeError — confirm callers
            # guard against this.
            sublime.error_message('Unable to determine the path to the Flutter SDK. Please define "FLUTTER_ROOT" under the "env" key in LSP-Dart settings.')

    @property
    def env(self):
        """The parsed Flutter environment (paths to SDK binaries)."""
        return self.__env

    @property
    def project(self):
        """The CurrentProject bound to this window."""
        return self.__project

    @property
    def event_loop(self):
        """The asyncio loop driving the daemon RPC (runs on a background thread)."""
        return self.__event_loop

    @property
    def plugin_settings(self):
        """Parsed Subliminal plugin settings."""
        return self.__plugin_settings

    def start_daemon(self):
        """Start the event-loop thread and the ``flutter daemon`` process (idempotent)."""
        if self.__is_daemon_started:
            return
        Thread(target=self.__event_loop.run_forever).start()
        self.__daemon_client.add_event_listener(self.__daemon_event_listener)
        self.__daemon.start()
        self.__is_daemon_started = True

    def unload(self):
        """Terminate the daemon and remove this manager from the window registry."""
        self.__daemon.terminate()
        _unregister_window_manager(self.__window)

    def __daemon_event_listener(self, event):
        # Kick off async initialization once the daemon reports it is connected.
        if isinstance(event, DaemonConnectedEvent):
            asyncio.run_coroutine_threadsafe(self.__initialize(), self.__event_loop)

    async def __initialize(self):
        # Initialize the project first, then enable device discovery on the daemon.
        await self.__project.initialize()
        await self.__daemon_client.device.enable()
# Registry of one WindowManager per Sublime window id, plus the set of windows
# the plugin should ignore.
_window_managers: Dict[int, WindowManager] = {}
_ignored_window: Set[int] = set()


def _unregister_window_manager(window: sublime.Window):
    """Drop *window*'s manager from the registry (no-op if absent)."""
    _window_managers.pop(window.id(), None)


def ignore_window(window: sublime.Window):
    """Mark *window* so plugin features skip it."""
    _ignored_window.add(window.id())


def unignore_window(window: sublime.Window):
    """Remove *window* from the ignore set (raises KeyError if not ignored)."""
    _ignored_window.remove(window.id())


def is_window_ignored(window: sublime.Window):
    """Return True when *window* has been marked as ignored."""
    return window.id() in _ignored_window


def get_window_manager(window) -> WindowManager:
    """Return the manager for *window*, creating and caching one on first use."""
    win_id = window.id()
    manager = _window_managers.get(win_id)
    if manager is None:
        manager = WindowManager(window)
        _window_managers[win_id] = manager
    return manager


def unload_window_manager(window: sublime.Window):
    """Unload *window*'s manager if one exists; unknown windows are ignored."""
    try:
        _window_managers[window.id()].unload()
    except KeyError:
        pass


def unload_window_managers():
    """Unload every registered window manager."""
    for manager in _window_managers.values():
        manager.unload()
7e18044f5ac21085211914b4557e38ac75f7285e | 278 | py | Python | tests/context.py | eyal0/lcovparse | 3c29ef50870339d692ec116b68521904692a926a | [
"Apache-2.0"
] | 2 | 2021-09-12T23:07:54.000Z | 2021-11-20T02:57:30.000Z | tests/context.py | eyal0/lcovparse | 3c29ef50870339d692ec116b68521904692a926a | [
"Apache-2.0"
] | null | null | null | tests/context.py | eyal0/lcovparse | 3c29ef50870339d692ec116b68521904692a926a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
"""Context for all tests."""
from __future__ import absolute_import
import os
import sys
# Make the in-repo lcovparse importable without installing it.
# NOTE(review): the `+ "../lcovparse"` string concatenation produces a path like
# ".../tests../lcovparse" (no separator between "tests" and ".."); it looks like
# "../lcovparse" was meant to be a second argument to os.path.join — confirm the
# intended path before changing it.
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)) + "../lcovparse"))

import lcovparse  # pylint: disable=wrong-import-position,unused-import
| 25.272727 | 91 | 0.76259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.420863 |
7e18bcbc29a2c01e32c9c2370ce6613e24de3611 | 10,660 | py | Python | stellarisdashboard/game_info.py | Akuukis/stellaris-dashboard | baf0de5abfaf1ade719f06c5d0ce76e7ee9d17df | [
"MIT",
"Unlicense"
] | 97 | 2018-03-24T14:18:21.000Z | 2022-01-16T16:25:55.000Z | stellarisdashboard/game_info.py | Akuukis/stellaris-dashboard | baf0de5abfaf1ade719f06c5d0ce76e7ee9d17df | [
"MIT",
"Unlicense"
] | 30 | 2018-03-25T01:06:44.000Z | 2022-01-28T10:24:19.000Z | stellarisdashboard/game_info.py | Akuukis/stellaris-dashboard | baf0de5abfaf1ade719f06c5d0ce76e7ee9d17df | [
"MIT",
"Unlicense"
] | 17 | 2018-03-25T15:45:30.000Z | 2022-01-09T13:58:32.000Z | PHYSICS_TECHS = {
"tech_databank_uplinks",
"tech_basic_science_lab_1",
"tech_curator_lab",
"tech_archeology_lab",
"tech_physics_lab_1",
"tech_physics_lab_2",
"tech_physics_lab_3",
"tech_global_research_initiative",
"tech_administrative_ai",
"tech_cryostasis_1",
"tech_cryostasis_2",
"tech_self_aware_logic",
"tech_automated_exploration",
"tech_sapient_ai",
"tech_positronic_implants",
"tech_combat_computers_1",
"tech_combat_computers_2",
"tech_combat_computers_3",
"tech_combat_computers_autonomous",
"tech_auxiliary_fire_control",
"tech_synchronized_defences",
"tech_fission_power",
"tech_fusion_power",
"tech_cold_fusion_power",
"tech_antimatter_power",
"tech_zero_point_power",
"tech_reactor_boosters_1",
"tech_reactor_boosters_2",
"tech_reactor_boosters_3",
"tech_shields_1",
"tech_shields_2",
"tech_shields_3",
"tech_shields_4",
"tech_shields_5",
"tech_shield_rechargers_1",
"tech_planetary_shield_generator",
"tech_sensors_2",
"tech_sensors_3",
"tech_sensors_4",
"tech_power_plant_1",
"tech_power_plant_2",
"tech_power_plant_3",
"tech_power_plant_4",
"tech_power_hub_1",
"tech_power_hub_2",
"tech_hyper_drive_1",
"tech_hyper_drive_2",
"tech_hyper_drive_3",
"tech_wormhole_stabilization",
"tech_gateway_activation",
"tech_gateway_construction",
"tech_jump_drive_1",
"tech_ftl_inhibitor",
"tech_matter_generator",
}
SOCIETY_TECHS = {
"tech_planetary_defenses",
"tech_eco_simulation",
"tech_hydroponics",
"tech_gene_crops",
"tech_nano_vitality_crops",
"tech_nutrient_replication",
"tech_biolab_1",
"tech_biolab_2",
"tech_biolab_3",
"tech_alien_life_studies",
"tech_colonization_1",
"tech_colonization_2",
"tech_colonization_3",
"tech_colonization_4",
"tech_colonization_5",
"tech_tomb_world_adaption",
"tech_space_trading",
"tech_frontier_health",
"tech_frontier_hospital",
"tech_tb_mountain_range",
"tech_tb_volcano",
"tech_tb_dangerous_wildlife",
"tech_tb_dense_jungle",
"tech_tb_quicksand_basin",
"tech_tb_noxious_swamp",
"tech_tb_massive_glacier",
"tech_tb_toxic_kelp",
"tech_tb_deep_sinkhole",
"tech_terrestrial_sculpting",
"tech_ecological_adaptation",
"tech_climate_restoration",
"tech_genome_mapping",
"tech_vitality_boosters",
"tech_epigenetic_triggers",
"tech_cloning",
"tech_gene_banks",
"tech_gene_seed_purification",
"tech_morphogenetic_field_mastery",
"tech_gene_tailoring",
"tech_glandular_acclimation",
"tech_genetic_resequencing",
"tech_gene_expressions",
"tech_selected_lineages",
"tech_capacity_boosters",
"tech_regenerative_hull_tissue",
"tech_doctrine_fleet_size_1",
"tech_doctrine_fleet_size_2",
"tech_doctrine_fleet_size_3",
"tech_doctrine_fleet_size_4",
"tech_doctrine_fleet_size_5",
"tech_interstellar_fleet_traditions",
"tech_refit_standards",
"tech_command_matrix",
"tech_doctrine_navy_size_1",
"tech_doctrine_navy_size_2",
"tech_doctrine_navy_size_3",
"tech_doctrine_navy_size_4",
"tech_centralized_command",
"tech_combat_training",
"tech_ground_defense_planning",
"tech_global_defense_grid",
"tech_psionic_theory",
"tech_telepathy",
"tech_precognition_interface",
"tech_psi_jump_drive_1",
"tech_galactic_ambitions",
"tech_manifest_destiny",
"tech_interstellar_campaigns",
"tech_galactic_campaigns",
"tech_planetary_government",
"tech_planetary_unification",
"tech_colonial_centralization",
"tech_galactic_administration",
"tech_galactic_markets",
"tech_subdermal_stimulation",
"tech_galactic_benevolence",
"tech_adaptive_bureaucracy",
"tech_colonial_bureaucracy",
"tech_galactic_bureaucracy",
"tech_living_state",
"tech_collective_self",
"tech_autonomous_agents",
"tech_embodied_dynamism",
"tech_neural_implants",
"tech_artificial_moral_codes",
"tech_synthetic_thought_patterns",
"tech_collective_production_methods",
"tech_resource_processing_algorithms",
"tech_cultural_heritage",
"tech_heritage_site",
"tech_hypercomms_forum",
"tech_autocurating_vault",
"tech_holographic_rituals",
"tech_consecration_fields",
"tech_transcendent_faith",
"tech_ascension_theory",
"tech_ascension_theory_apoc",
"tech_psionic_shield",
}
ENGINEERING_TECHS = {
"tech_space_exploration",
"tech_corvettes",
"tech_destroyers",
"tech_cruisers",
"tech_battleships",
"tech_titans",
"tech_corvette_build_speed",
"tech_corvette_hull_1",
"tech_corvette_hull_2",
"tech_destroyer_build_speed",
"tech_destroyer_hull_1",
"tech_destroyer_hull_2",
"tech_cruiser_build_speed",
"tech_cruiser_hull_1",
"tech_cruiser_hull_2",
"tech_battleship_build_speed",
"tech_battleship_hull_1",
"tech_battleship_hull_2",
"tech_titan_hull_1",
"tech_titan_hull_2",
"tech_starbase_1",
"tech_starbase_2",
"tech_starbase_3",
"tech_starbase_4",
"tech_starbase_5",
"tech_modular_engineering",
"tech_space_defense_station_improvement",
"tech_strike_craft_1",
"tech_strike_craft_2",
"tech_strike_craft_3",
"tech_assault_armies",
"tech_ship_armor_1",
"tech_ship_armor_2",
"tech_ship_armor_3",
"tech_ship_armor_4",
"tech_ship_armor_5",
"tech_crystal_armor_1",
"tech_crystal_armor_2",
"tech_thrusters_1",
"tech_thrusters_2",
"tech_thrusters_3",
"tech_thrusters_4",
"tech_space_defense_station_1",
"tech_defense_platform_hull_1",
"tech_basic_industry",
"tech_powered_exoskeletons",
"tech_mining_network_2",
"tech_mining_network_3",
"tech_mining_network_4",
"tech_mineral_processing_1",
"tech_mineral_processing_2",
"tech_engineering_lab_1",
"tech_engineering_lab_2",
"tech_engineering_lab_3",
"tech_robotic_workers",
"tech_droid_workers",
"tech_synthetic_workers",
"tech_synthetic_leaders",
"tech_space_construction",
"tech_afterburners_1",
"tech_afterburners_2",
"tech_assembly_pattern",
"tech_construction_templates",
"tech_mega_engineering",
}
ALL_KNOWN_TECHS = set.union(PHYSICS_TECHS, ENGINEERING_TECHS, SOCIETY_TECHS)
ASCENSION_PERKS = {
"ap_enigmatic_engineering", #: "Enigmatic Engineering",
"ap_nihilistic_acquisition", #: "Nihilistic Acquisition",
"ap_colossus", #: "Colossus",
"ap_engineered_evolution", #: "Engineered Evolution",
"ap_evolutionary_mastery", #: "Evolutionary Mastery",
"ap_the_flesh_is_weak", #: "The Flesh is Weak",
"ap_synthetic_evolution", #: "Synthetic Evolution",
"ap_mind_over_matter", #: "Mind over Matter",
"ap_transcendence", #: "Transcendence",
"ap_world_shaper", #: "World Shaper",
"ap_galactic_force_projection", #: "Galactic Force Projection",
"ap_defender_of_the_galaxy", #: "Defender of the Galaxy",
"ap_interstellar_dominion", #: "Interstellar Dominion",
"ap_grasp_the_void", #: "Grasp the Void",
"ap_eternal_vigilance", #: "Eternal Vigilance",
"ap_galactic_contender", #: "Galactic Contender",
"ap_technological_ascendancy", #: "Technological Ascendancy",
"ap_one_vision", #: "One Vision",
"ap_consecrated_worlds", #: "Consecrate Worlds",
"ap_mastery_of_nature", #: "Mastery of Nature",
"ap_imperial_prerogative", #: "Imperial Prerogative",
"ap_executive_vigor", #: "Executive Vigor",
"ap_transcendent_learning", #: "Transcendent Learning",
"ap_shared_destiny", #: "Shared Destiny",
"ap_voidborn", #: "Voidborn",
"ap_master_builders", #: "Master Builders",
"ap_galactic_wonders", #: "Galactic Wonders",
"ap_synthetic_age", #: "Synthetic Age",
"ap_machine_worlds", #: "Machine Worlds",
}
COLONIZABLE_PLANET_CLASSES_PLANETS = {
"pc_desert",
"pc_arid",
"pc_savannah",
"pc_tropical",
"pc_continental",
"pc_ocean",
"pc_tundra",
"pc_arctic",
"pc_alpine",
"pc_gaia",
"pc_nuked",
"pc_machine",
}
COLONIZABLE_PLANET_CLASSES_MEGA_STRUCTURES = {
"pc_ringworld_habitable",
"pc_habitat",
}
# Planet classes for the planetary diversity mod
# (see https://steamcommunity.com/workshop/filedetails/discussion/1466534202/3397295779078104093/)
COLONIZABLE_PLANET_CLASSES_PD_PLANETS = {
"pc_antarctic",
"pc_deadcity",
"pc_retinal",
"pc_irradiated_terrestrial",
"pc_lush",
"pc_geocrystalline",
"pc_marginal",
"pc_irradiated_marginal",
"pc_marginal_cold",
"pc_crystal",
"pc_floating",
"pc_graveyard",
"pc_mushroom",
"pc_city",
"pc_archive",
"pc_biolumen",
"pc_technoorganic",
"pc_tidallylocked",
"pc_glacial",
"pc_frozen_desert",
"pc_steppe",
"pc_hadesert",
"pc_boreal",
"pc_sandsea",
"pc_subarctic",
"pc_geothermal",
"pc_cascadian",
"pc_swamp",
"pc_mangrove",
"pc_desertislands",
"pc_mesa",
"pc_oasis",
"pc_hajungle",
"pc_methane",
"pc_ammonia",
}
# Every planet class a population can colonize: base-game planets,
# megastructures, and the Planetary Diversity mod's worlds.
COLONIZABLE_PLANET_CLASSES = (
    COLONIZABLE_PLANET_CLASSES_PLANETS
    | COLONIZABLE_PLANET_CLASSES_MEGA_STRUCTURES
    | COLONIZABLE_PLANET_CLASSES_PD_PLANETS
)

# Planet classes left behind by planet-killer weapons (cracked / shielded worlds).
DESTROYED_BY_WEAPONS_PLANET_CLASSES = {
    "pc_shattered",
    "pc_shielded",
    "pc_ringworld_shielded",
    "pc_habitat_shielded",
    "pc_ringworld_habitable_damaged",
}

# Planet classes produced by story events and endgame crises.
DESTROYED_BY_EVENTS_AND_CRISES_PLANET_CLASSES = {
    "pc_egg_cracked",
    "pc_shrouded",
    "pc_ai",
    "pc_infested",
    "pc_gray_goo",
}

# Union of all "destroyed" planet classes, regardless of cause.
DESTROYED_PLANET_CLASSES = (
    DESTROYED_BY_WEAPONS_PLANET_CLASSES | DESTROYED_BY_EVENTS_AND_CRISES_PLANET_CLASSES
)
def is_destroyed_planet(planet_class):
    """Return True if *planet_class* is a destroyed world (by weapons, events, or crises)."""
    return planet_class in DESTROYED_PLANET_CLASSES


def is_colonizable_planet(planet_class):
    """Return True if *planet_class* can be colonized (planet, megastructure, or PD-mod world)."""
    return planet_class in COLONIZABLE_PLANET_CLASSES


def is_colonizable_megastructure(planet_class):
    """Return True if *planet_class* is a colonizable megastructure (ringworld section or habitat)."""
    return planet_class in COLONIZABLE_PLANET_CLASSES_MEGA_STRUCTURES
# Words kept lowercase in display names (articles/prepositions).
LOWERCASE_WORDS = {"the", "in", "of", "for", "is", "over", "under"}
# Capitalized words that need a different display form.
WORD_REPLACEMENT = {
    "Ai": "AI",
    "Ftl": "FTL",
    "Tb": "Tile Blocker",
}


def convert_id_to_name(object_id: str, remove_prefix="") -> str:
    """Turn a snake_case game id (e.g. ``tech_sapient_ai``) into a display name
    (``Sapient AI``), dropping any word equal to *remove_prefix*."""
    pieces = []
    for word in object_id.split("_"):
        if word == remove_prefix:
            continue
        if word not in LOWERCASE_WORDS:
            word = word.capitalize()
        pieces.append(WORD_REPLACEMENT.get(word, word))
    return " ".join(pieces)
| 28.810811 | 98 | 0.711069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,369 | 0.691276 |
7e19545cdf8ccda12d1556b38db20f32c4aa3c1c | 1,112 | py | Python | web/misc/webapi_client/__init__.py | procool/mygw | f35b72b5915d314e883dcde45c3c33ff26f173df | [
"BSD-2-Clause"
] | null | null | null | web/misc/webapi_client/__init__.py | procool/mygw | f35b72b5915d314e883dcde45c3c33ff26f173df | [
"BSD-2-Clause"
] | null | null | null | web/misc/webapi_client/__init__.py | procool/mygw | f35b72b5915d314e883dcde45c3c33ff26f173df | [
"BSD-2-Clause"
] | null | null | null | import re
from flaskcbv.conf import settings
from misc.httpclient import httpClient
re_session = re.compile(r"session=(.*?);")
class webapiClient(httpClient):
    """Thin client for the internal web API (host/port taken from settings)."""

    host = settings.WEBAPI_HOST
    port = settings.WEBAPI_PORT

    def __check_session(self, r):
        """Refresh the cached session id from *r*'s 'set-cookie' header.

        Clears the cached session when the header is missing or contains no
        ``session=...`` cookie.
        """
        # NOTE(review): self.__session name-mangles to _webapiClient__session;
        # confirm the base httpClient does not expect to read it under another name.
        try:
            self.set_session(re_session.findall(r.headers['set-cookie'])[0])
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            self.__session = None

    def isauthed(self, sessionid):
        """Return True when the API accepts *sessionid* as an authenticated session."""
        try:
            r, b = self.call_handler('tests/isauthed', session=sessionid)
        except Exception:
            # Any transport or HTTP error (e.g. 401/403) means "not authenticated".
            return False
        self.__check_session(r)
        return True

    def profile(self, **kwargs):
        """Fetch the current user's profile; returns the response body."""
        r, b = self.call_handler('contacts/profile/', **kwargs)
        self.__check_session(r)
        return b

    def orders_list(self, **kwargs):
        """Fetch the current user's order list; returns the response body."""
        r, b = self.call_handler('orders/my/list/', **kwargs)
        self.__check_session(r)
        return b

    def order_details(self, order_id, **kwargs):
        """Fetch details for the order with id *order_id*; returns the response body."""
        r, b = self.call_handler('orders/%s/' % order_id, **kwargs)
        self.__check_session(r)
        return b
| 25.272727 | 77 | 0.629496 | 975 | 0.876799 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.111511 |
7e1a810629ff4e92bba844d970fa5906ec4f3338 | 3,689 | py | Python | starfish/pipeline/filter/gaussian_high_pass.py | Xiaojieqiu/starfish | 426480fcfeda4b8b1eb9371a818ff20275ac898d | [
"MIT"
] | 1 | 2018-10-07T03:53:43.000Z | 2018-10-07T03:53:43.000Z | starfish/pipeline/filter/gaussian_high_pass.py | Xiaojieqiu/starfish | 426480fcfeda4b8b1eb9371a818ff20275ac898d | [
"MIT"
] | null | null | null | starfish/pipeline/filter/gaussian_high_pass.py | Xiaojieqiu/starfish | 426480fcfeda4b8b1eb9371a818ff20275ac898d | [
"MIT"
] | null | null | null | import argparse
from functools import partial
from numbers import Number
from typing import Callable, Union, Tuple, Optional
import numpy as np
from skimage import img_as_uint
from starfish.errors import DataFormatWarning
from starfish.image import ImageStack
from starfish.pipeline.filter.gaussian_low_pass import GaussianLowPass
from ._base import FilterAlgorithmBase
class GaussianHighPass(FilterAlgorithmBase):
    """Gaussian high pass filter: subtracts a Gaussian-blurred copy of the image
    from the original, clamping pixels that would underflow to zero."""

    def __init__(
        self, sigma: Union[Number, Tuple[Number]], is_volume: bool=False, verbose: bool=False, **kwargs
    ) -> None:
        """Gaussian high pass filter

        Parameters
        ----------
        sigma : Union[Number, Tuple[Number]]
            standard deviation of gaussian kernel
        is_volume : bool
            If True, 3d (z, y, x) volumes will be filtered, otherwise, filter 2d tiles independently.
        verbose : bool
            if True, report on filtering progress (default = False)

        Raises
        ------
        ValueError
            If an anisotropic (tuple) sigma's length does not match the
            dimensionality implied by ``is_volume`` (3 for volumes, 2 for tiles).
        """
        if isinstance(sigma, tuple):
            message = ("if passing an anisotropic kernel, the dimensionality must match the data shape ({shape}), not "
                       "{passed_shape}")
            if is_volume and len(sigma) != 3:
                raise ValueError(message.format(shape=3, passed_shape=len(sigma)))
            if not is_volume and len(sigma) != 2:
                raise ValueError(message.format(shape=2, passed_shape=len(sigma)))

        self.sigma = sigma
        self.is_volume = is_volume
        self.verbose = verbose

    @classmethod
    def add_arguments(cls, group_parser: argparse.ArgumentParser) -> None:
        """Register this filter's command line arguments on *group_parser*."""
        group_parser.add_argument(
            "--sigma", type=float, help="standard deviation of gaussian kernel")
        group_parser.add_argument(
            "--is-volume", action="store_true", help="indicates that the image stack should be filtered in 3d")

    @staticmethod
    def high_pass(image: np.ndarray, sigma: Union[Number, Tuple[Number]]) -> np.ndarray:
        """
        Applies a gaussian high pass filter to an image

        Parameters
        ----------
        image : numpy.ndarray[np.uint16]
            2-d or 3-d image data (non-uint16 input is converted, with a warning)
        sigma : Union[Number, Tuple[Number]]
            Standard deviation of the Gaussian kernel that will be applied. If a float, an
            isotropic kernel will be assumed, otherwise the dimensions of the kernel give (z, y, x)

        Returns
        -------
        np.ndarray :
            High-pass filtered image of the same shape as *image*; pixels where
            the blurred value exceeded the original are clamped to 0.
        """
        if image.dtype != np.uint16:
            import warnings

            # The original code constructed DataFormatWarning without warning or
            # raising, so the message was silently discarded; actually emit it.
            # NOTE(review): assumes DataFormatWarning subclasses Warning — confirm
            # in starfish.errors.
            warnings.warn(
                'gaussian filters currently only support uint16 images. Image data will be converted.',
                DataFormatWarning,
            )
            image = img_as_uint(image)

        blurred: np.ndarray = GaussianLowPass.low_pass(image, sigma)

        # Unsigned subtraction would wrap around wherever blurred > image; record
        # those positions first and zero them after the subtraction.
        over_flow_ind: np.ndarray = image < blurred
        filtered: np.ndarray = image - blurred
        filtered[over_flow_ind] = 0

        return filtered

    def filter(self, stack: ImageStack, in_place: bool=True) -> Optional[ImageStack]:
        """Perform filtering of an image stack

        Parameters
        ----------
        stack : ImageStack
            Stack to be filtered.
        in_place : bool
            if True, process ImageStack in-place, otherwise return a new stack

        Returns
        -------
        Optional[ImageStack] :
            if in-place is False, return the results of filter as a new stack
        """
        high_pass: Callable = partial(self.high_pass, sigma=self.sigma)
        result = stack.apply(high_pass, is_volume=self.is_volume, verbose=self.verbose, in_place=in_place)
        if not in_place:
            return result
        return None
| 35.815534 | 119 | 0.632421 | 3,314 | 0.898346 | 0 | 0 | 1,418 | 0.384386 | 0 | 0 | 1,643 | 0.445378 |
7e1b0a8590a411950b17f4ae15287aa43064930c | 2,040 | py | Python | WDJN/eval/svm_eval_acc.py | silverriver/Stylized_Dialog | 559dd97c4ec9c91e94deb048f789684ef3f1f9fa | [
"MIT"
] | 21 | 2020-12-16T08:53:38.000Z | 2022-01-21T09:08:55.000Z | WDJN/eval/svm_eval_acc.py | silverriver/Stylized_Dialog | 559dd97c4ec9c91e94deb048f789684ef3f1f9fa | [
"MIT"
] | 1 | 2020-12-27T07:56:01.000Z | 2020-12-30T05:13:11.000Z | WDJN/eval/svm_eval_acc.py | silverriver/Stylized_Dialog | 559dd97c4ec9c91e94deb048f789684ef3f1f9fa | [
"MIT"
] | 1 | 2022-02-28T12:19:19.000Z | 2022-02-28T12:19:19.000Z | import torch
import torch.nn as nn
import sklearn
from sklearn import svm
from sklearn import metrics
from sklearn.externals import joblib
import numpy as np
from text import Vocab
import random
import os
import json
from tqdm import tqdm, trange
from random import sample
import pickle
# (label, path) pairs of inference outputs to evaluate.
models = [
    ["bt_beam", "eval/infer_out_beam_model-10.ckpt-1.txt_lp2.0"],
]

vocab_path = 'chinese_gpt_original/dict.txt'
cgpt_model = 'chinese_gpt_original/Cgpt_model.bin'
cls_model = '../data/linear_model.bin'

vocab = Vocab(vocab_path)
# NOTE: `cgpt_model` is reused — first the checkpoint path, then the loaded state dict.
cgpt_model = torch.load(cgpt_model, map_location='cpu')
# embedding = cgpt_model['decoder.embeddings.weight']
# Embedding table rebuilt from the Chinese GPT checkpoint's decoder embeddings
# (13088 tokens x 768 dims; index 1 is padding).
embeddings = nn.Embedding(13088, 768, padding_idx=1)
embeddings.weight = nn.Parameter(cgpt_model['decoder.embeddings.weight'])
def sent_vec(texts):
    """Embed each text as the mean of its character-level GPT token embeddings.

    Spaces are stripped and the remaining characters are space-joined so the
    vocab tokenizes per character. Returns an (n_texts, 768) numpy array.
    """
    vectors = []
    for text in texts:
        token_ids = vocab.string2ids(' '.join(text.replace(' ', '')))
        mean_embedding = embeddings(torch.LongTensor(token_ids)).mean(dim=0)
        vectors.append(mean_embedding.detach().numpy())
    return np.asarray(vectors)
# Linear classifier trained offline on the mean-pooled embeddings above.
clf = joblib.load(cls_model)
def test_model(input):
    """Classify both predicted-style outputs in the JSON-lines file *input*.

    Each line is a JSON object with 'pred_style0'/'pred_style1' lists of
    candidate strings; empty/whitespace-only candidates are dropped.

    Returns (pred_s0, pred_s1, true_labels_s0, true_labels_s1), where style 0
    is labeled 0 and style 1 is labeled 1.
    """
    with open(input) as f:
        test = [json.loads(i) for i in f.readlines()]
    test_text_s0 = []
    test_text_s1 = []
    for i in test:
        # extend() instead of repeated `list = list + other` — the original
        # concatenation was O(n^2) over the whole file.
        test_text_s0.extend(t for t in i['pred_style0'] if len(t.strip()) != 0)
        test_text_s1.extend(t for t in i['pred_style1'] if len(t.strip()) != 0)
    test_label_s0 = np.asarray([0] * len(test_text_s0))
    test_label_s1 = np.asarray([1] * len(test_text_s1))
    pred_s0 = clf.predict(sent_vec(test_text_s0))
    pred_s1 = clf.predict(sent_vec(test_text_s1))
    return pred_s0, pred_s1, test_label_s0, test_label_s1
def main(file_path):
    """Print the per-style classification accuracy (and their mean) for *file_path*."""
    pred_s0, pred_s1, test_label_s0, test_label_s1 = test_model(file_path)
    # (removed an unused `r = []` local from the original)
    acc0 = metrics.accuracy_score(test_label_s0, pred_s0)
    acc1 = metrics.accuracy_score(test_label_s1, pred_s1)
    print('SVM:', 's0', acc0 * 100, 's1', acc1 * 100, 'mean', (acc0 + acc1) / 2 * 100)
| 30 | 86 | 0.701961 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 289 | 0.141667 |
7e1d6767408fd3f3df4bbb5588442b63585f4595 | 5,937 | py | Python | factom_core/blockchains/base.py | sourcery-ai-bot/factom-core | 186dca550d98d758e9f8dab878e6382153efeaf3 | [
"MIT"
] | null | null | null | factom_core/blockchains/base.py | sourcery-ai-bot/factom-core | 186dca550d98d758e9f8dab878e6382153efeaf3 | [
"MIT"
] | null | null | null | factom_core/blockchains/base.py | sourcery-ai-bot/factom-core | 186dca550d98d758e9f8dab878e6382153efeaf3 | [
"MIT"
] | null | null | null | from typing import Any, List
import factom_core.blocks as blocks
from factom_core.db import FactomdLevelDB
from .pending_block import PendingBlock
class BaseBlockchain:
    """Abstract base for all Blockchain objects.

    Subclasses must set a 4-byte ``network_id`` and implement the abstract
    operations declared below.
    """

    network_id: bytes = None
    vms: List[Any] = None
    data_path: str = None
    db: FactomdLevelDB = None
    current_block: PendingBlock = None

    def __init__(self, data_path: str = None) -> None:
        valid_network_id = isinstance(self.network_id, bytes) and len(self.network_id) == 4
        if not valid_network_id:
            raise ValueError("The Blockchain class must be instantiated with a `network_id` bytes object of length 4")
        self.data_path = data_path
        self.db = FactomdLevelDB(path=data_path, create_if_missing=True)

    def load_genesis_block(self) -> blocks.DirectoryBlock:
        """Subclass hook: produce the genesis directory block."""
        raise NotImplementedError("Blockchain classes must implement this method")

    def vm_for_hash(self, h: bytes) -> int:
        """Subclass hook: map a hash to the index of the VM responsible for it."""
        raise NotImplementedError("Blockchain classes must implement this method")

    def seal_minute(self) -> None:
        """Subclass hook: finalize the current block minute."""
        raise NotImplementedError("Blockchain classes must implement this method")

    def rotate_vms(self) -> None:
        """Subclass hook: rotate VM responsibilities between minutes."""
        raise NotImplementedError("Blockchain classes must implement this method")

    def seal_block(self) -> None:
        """Subclass hook: finalize the pending block into persisted blocks."""
        raise NotImplementedError("Blockchain classes must implement this method")
class Blockchain(BaseBlockchain):
    """
    A Blockchain is a combination of VM classes. Each VM is associated
    with a range of chains. The Blockchain class acts as a wrapper around these other
    VM classes, delegating operations to the appropriate VM depending on the
    current block / minute number.
    """

    def __init__(self, data_path: str = None):
        super().__init__(data_path)

    def load_genesis_block(self) -> blocks.DirectoryBlock:
        # TODO: not yet implemented (intentionally returns None for now).
        pass

    def vm_for_hash(self, h: bytes) -> int:
        """
        Compute the VM index responsible for hash h

        Taken from: factomd/state/processList.go/VMindexFor(hash []byte)
        """
        # Sum of the hash bytes, modulo the VM count; 0 when no VMs exist.
        if len(self.vms) == 0:
            return 0
        v = sum(h)
        return v % len(self.vms)

    def seal_minute(self) -> None:
        """Finalize the current block minute"""
        self.rotate_vms()
        # Minute 10 closes the block; otherwise just advance the minute counter.
        if self.current_block.current_minute == 10:
            self.seal_block()
        else:
            self.current_block.current_minute += 1

    def rotate_vms(self) -> None:
        """Rotate the responsibilities of the VM set (if necessary)"""
        # TODO: see processList.go/MakgeMap for formula per block height
        if len(self.vms) == 1:
            return
        # Simple left rotation by one position.
        self.vms = self.vms[1:] + self.vms[:1]

    def seal_block(self):
        """
        Bundles all added transactions, entries, and other elements into a set of finalized
        blocks with headers.

        NOTE(review): the height - 1 lookups below assume all previous-height
        blocks exist; confirm how the genesis block (height 0) is handled.
        """
        block = self.current_block

        # Build one entry block per chain touched in the pending block; a missing
        # chain head means this is the chain's first block (sequence 0).
        entry_blocks: List[blocks.EntryBlock] = []
        for chain_id, block_body in block.entry_blocks.items():
            prev = self.db.get_entry_block_head(chain_id)
            header = block_body.construct_header(
                chain_id=chain_id,
                prev_keymr=prev.keymr if prev is not None else bytes(32),
                prev_full_hash=prev.full_hash if prev is not None else bytes(32),
                sequence=prev.header.sequence + 1 if prev is not None else 0,
                height=block.height,
            )
            entry_blocks.append(blocks.EntryBlock(header, block_body))

        # Entry credit block, linked to its predecessor at height - 1.
        prev = self.db.get_entry_credit_block(height=block.height - 1)
        header = block.entry_credit_block.construct_header(
            prev_header_hash=prev.header_hash, prev_full_hash=prev.full_hash, height=block.height,
        )
        entry_credit_block = blocks.EntryCreditBlock(header, block.entry_credit_block)

        # Factoid block; the exchange rate is still hard-coded.
        prev = self.db.get_factoid_block(height=block.height - 1)
        header = block.factoid_block.construct_header(
            prev_keymr=block.previous.body.factoid_block_keymr,
            prev_ledger_keymr=prev.ledger_keymr,
            ec_exchange_rate=1000,  # TODO
            height=block.height,
        )
        factoid_block = blocks.FactoidBlock(header, block.factoid_block)

        # Admin block, linked by back-reference hash.
        prev = self.db.get_admin_block(height=block.height - 1)
        header = block.admin_block.construct_header(
            prev_back_reference_hash=prev.back_reference_hash, height=block.height
        )
        admin_block = blocks.AdminBlock(header, block.admin_block)

        # Compile all the above blocks and the previous directory block, into a new one
        directory_block_body = blocks.DirectoryBlockBody(
            admin_block_lookup_hash=admin_block.lookup_hash,
            entry_credit_block_header_hash=entry_credit_block.header_hash,
            factoid_block_keymr=factoid_block.keymr,
            entry_blocks=[
                {"chain_id": entry_block.header.chain_id, "keymr": entry_block.keymr} for entry_block in entry_blocks
            ],
        )
        header = directory_block_body.construct_header(
            network_id=self.network_id,
            prev_keymr=block.previous.keymr,
            prev_full_hash=block.previous.full_hash,
            timestamp=block.timestamp,
            height=block.height,
        )
        directory_block = blocks.DirectoryBlock(header, directory_block_body)

        # Persist the blocks as new chain heads
        self.db.put_directory_block_head(directory_block)
        self.db.put_admin_block_head(admin_block)
        self.db.put_entry_credit_block_head(entry_credit_block)
        self.db.put_factoid_block_head(factoid_block)
        for entry_block in entry_blocks:
            self.db.put_entry_block_head(entry_block)
| 39.58 | 118 | 0.662961 | 5,782 | 0.973893 | 0 | 0 | 0 | 0 | 0 | 0 | 1,406 | 0.23682 |
7e1f08195236c36e7e4adbc510902d90c5d6a72a | 492 | py | Python | bot/plugins/bongo.py | Preocts/twitch-chat-bot | 50341c30d8eada4b50634c8f25a9eb0eed681735 | [
"MIT"
] | 62 | 2019-11-16T22:07:42.000Z | 2022-03-08T20:50:01.000Z | bot/plugins/bongo.py | Preocts/twitch-chat-bot | 50341c30d8eada4b50634c8f25a9eb0eed681735 | [
"MIT"
] | 30 | 2019-03-19T15:05:55.000Z | 2022-03-24T05:00:53.000Z | bot/plugins/bongo.py | Preocts/twitch-chat-bot | 50341c30d8eada4b50634c8f25a9eb0eed681735 | [
"MIT"
] | 56 | 2019-06-08T20:34:31.000Z | 2022-02-21T20:10:38.000Z | from __future__ import annotations
from typing import Match
from bot.config import Config
from bot.data import command
from bot.data import esc
from bot.data import format_msg
@command('!bongo')
async def cmd_bongo(config: Config, match: Match[str]) -> str:
_, _, rest = match['msg'].partition(' ')
rest = rest.strip()
if rest:
rest = f'{rest} '
return format_msg(
match,
f'awcBongo awcBongo awcBongo {esc(rest)}awcBongo awcBongo awcBongo',
)
| 22.363636 | 76 | 0.676829 | 0 | 0 | 0 | 0 | 311 | 0.632114 | 292 | 0.593496 | 93 | 0.189024 |
7e1f4ae6822590bfe77246c78771d04e3114e141 | 532 | py | Python | sensorsproject/settings/dev_edwin.py | edwinsteele/sensorsproject | 21164b9bb437a3b1bbcc05f05c58cd5621bd4501 | [
"CC0-1.0"
] | null | null | null | sensorsproject/settings/dev_edwin.py | edwinsteele/sensorsproject | 21164b9bb437a3b1bbcc05f05c58cd5621bd4501 | [
"CC0-1.0"
] | 2 | 2020-02-12T01:18:25.000Z | 2020-06-05T18:40:51.000Z | sensorsproject/settings/dev_edwin.py | edwinsteele/sensorsproject | 21164b9bb437a3b1bbcc05f05c58cd5621bd4501 | [
"CC0-1.0"
] | null | null | null | __author__ = 'esteele'
# Common settings
from .base import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'sensors',
'USER': '',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Is it a problem that this is at the end rather than before sensors?
INSTALLED_APPS += ('debug_toolbar',)
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INTERNAL_IPS = ('127.0.0.1',)
| 22.166667 | 74 | 0.62218 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 287 | 0.539474 |
7e20dfa0bd8d4cf4dfc335988fbdb52ffa2d141f | 110 | py | Python | EduRec/meta/__init__.py | tswsxk/EduRec | b7514acc4dea805375fe512534d8ab6ad8916374 | [
"MIT"
] | 2 | 2021-03-03T11:04:53.000Z | 2021-09-03T09:12:32.000Z | EduRec/meta/__init__.py | tswsxk/EduRec | b7514acc4dea805375fe512534d8ab6ad8916374 | [
"MIT"
] | null | null | null | EduRec/meta/__init__.py | tswsxk/EduRec | b7514acc4dea805375fe512534d8ab6ad8916374 | [
"MIT"
] | 1 | 2021-03-22T02:55:53.000Z | 2021-03-22T02:55:53.000Z | # coding: utf-8
# 2021/2/10 @ tongshiwei
from .MeasurementModel import MeasurementModel
from .SLM import SLM
| 18.333333 | 46 | 0.772727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.354545 |
7e20e4b11c633a3d5298ade6bd642d38cd22c88d | 1,066 | py | Python | load_model.py | zhfeing/cifar-10-test | c0b8695db73888b1756e5b078cd03203ab233297 | [
"MIT"
] | null | null | null | load_model.py | zhfeing/cifar-10-test | c0b8695db73888b1756e5b078cd03203ab233297 | [
"MIT"
] | null | null | null | load_model.py | zhfeing/cifar-10-test | c0b8695db73888b1756e5b078cd03203ab233297 | [
"MIT"
] | null | null | null | import keras
import os
def load_model(version, new_model, retrain=False, *args):
"""
:param version: model version
:param new_model: method for call to get a new model e.g. my_ResNet.my_ResNet
:param retrain: True: load new model
:return:
"""
create_new_model = False
# load model
if not retrain:
try:
with open(os.path.join("./model", "model_structure_{}.json".format(version)), "r") as file:
model_json = file.read()
print("[info]: loading model...")
model = keras.models.model_from_json(model_json)
model.load_weights(os.path.join("./model", "model_weights_{}.h5".format(version)))
print("[info]: load model done.")
except OSError:
print("[info]: load model file failed, creating model")
model = new_model(*args)
create_new_model = True
else:
print("[info]: retrain, creating model")
model = new_model(*args)
create_new_model = True
return model, create_new_model
| 33.3125 | 103 | 0.604128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 393 | 0.368668 |
7e22285b67f177bfff44a11d4ab8750cb32facff | 953 | py | Python | src/bioplottemplates/cli_labeldots.py | joaomcteixeira/python-bioplottemplates | 17f617fba3ce29eb458ec701363027f11beaf7c5 | [
"MIT"
] | null | null | null | src/bioplottemplates/cli_labeldots.py | joaomcteixeira/python-bioplottemplates | 17f617fba3ce29eb458ec701363027f11beaf7c5 | [
"MIT"
] | null | null | null | src/bioplottemplates/cli_labeldots.py | joaomcteixeira/python-bioplottemplates | 17f617fba3ce29eb458ec701363027f11beaf7c5 | [
"MIT"
] | null | null | null | import argparse
from bioplottemplates.libs import libcli, libio
from bioplottemplates.plots import label_dots
ap = libcli.CustomParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
ap.add_argument(
'data_csv',
help='The CSVs files to plot',
nargs='+',
)
ap.add_argument(
'-v',
'--plotvars',
help=(
'Plot variables. '
'Example: -v xlabel=frames ylabel=RMSD color=red.'
),
nargs='*',
action=libcli.ParamsToDict,
)
def maincli():
cmd = load_args()
main(**vars(cmd))
def main(data_csv, plotvars, **kwargs):
data, labels = libio.extract_labels_data(*data_csv)
plotvars = plotvars or dict()
plotvars.setdefault('series_labels', data_csv)
print(plotvars['series_labels'])
label_dots.plot(
labels,
data,
**plotvars,
)
pass
if __name__ == '__main__':
maincli()
| 17.648148 | 59 | 0.623295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.172088 |
7e245908b2f4ad986cdcf99565e90fb90d1d0b11 | 94 | py | Python | tadataka/feature/__init__.py | IshitaTakeshi/Tadataka | 852c7afb904503005e51884408e1492ef0be836f | [
"Apache-2.0"
] | 54 | 2019-11-15T16:30:34.000Z | 2022-01-13T15:18:54.000Z | tadataka/feature/__init__.py | IshitaTakeshi/Tadataka | 852c7afb904503005e51884408e1492ef0be836f | [
"Apache-2.0"
] | 11 | 2019-02-28T08:28:24.000Z | 2020-04-07T04:47:12.000Z | tadataka/feature/__init__.py | IshitaTakeshi/Tadataka | 852c7afb904503005e51884408e1492ef0be836f | [
"Apache-2.0"
] | 1 | 2020-02-26T13:59:40.000Z | 2020-02-26T13:59:40.000Z | from tadataka.feature.feature import (
extract_features, empty_match, Features, Matcher
)
| 23.5 | 52 | 0.787234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7e249d769adff61f3ed7920cc7d27c1e95686e3c | 887 | py | Python | class/cls_006.py | rpoliselit/python-for-dummies | d6f45a966a5238058953f93d8660832fa692b3d4 | [
"MIT"
] | null | null | null | class/cls_006.py | rpoliselit/python-for-dummies | d6f45a966a5238058953f93d8660832fa692b3d4 | [
"MIT"
] | null | null | null | class/cls_006.py | rpoliselit/python-for-dummies | d6f45a966a5238058953f93d8660832fa692b3d4 | [
"MIT"
] | null | null | null | # Custom errors in classes.
class TooManyPagesReadError(ValueError):
pass
class Book:
def __init__(self, title, page_count):
self.title = title
self.page_count = page_count
self.pages_read = 0
def __repr__(self):
return (
f"<Book {self.title}, read {self.pages_read} pages out of {self.page_count}>"
)
def read(self, pages):
if self.pages_read + pages > self.page_count:
msg = f"You tried to read {self.pages_read + pages} but this book only has {self.page_count} pages."
raise TooManyPagesReadError(msg)
self.pages_read += pages
print(f"You have now read {self.pages_read} pages out of {self.page_count}")
book_1 = Book("Fluent Python", 800)
try:
book_1.read(450)
book_1.read(800)
except TooManyPagesReadError as e:
print(e)
finally:
print(book_1)
| 26.878788 | 112 | 0.641488 | 695 | 0.78354 | 0 | 0 | 0 | 0 | 0 | 0 | 282 | 0.317926 |
7e25064cbef2ae141cd04ce9086cb43f954ff0bf | 661 | py | Python | data/external/repositories/113677/KaggleBillionWordImputation-master/scripts/test_stanford_nltk.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories/113677/KaggleBillionWordImputation-master/scripts/test_stanford_nltk.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories/113677/KaggleBillionWordImputation-master/scripts/test_stanford_nltk.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | #!/usr/bin/env python
import sys, bz2
sys.path.insert(0, '/Users/timpalpant/Documents/Workspace/corenlp-python')
import nltk
from nltk.tree import Tree
from corenlp import StanfordCoreNLP
from remove_random_word import remove_random_word
print "Booting StanfordCoreNLP"
nlp = StanfordCoreNLP()
print "Initializing train file"
train = bz2.BZ2File('../data/train_v2.txt.bz2')
for line in train:
rline = remove_random_word(line)
lparse = nlp.raw_parse(line)
ltree = Tree.fromstring(lparse['sentences'][0]['parsetree'])
rparse = nlp.raw_parse(rline)
rtree = Tree.fromstring(rparse['sentences'][0]['parsetree'])
print ltree
print rtree | 30.045455 | 74 | 0.748865 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.295008 |
7e25a7271dfc02edc1e4d7ee8ec27394dc69dcb1 | 35,165 | py | Python | psyneulink/core/rpc/graph_pb2.py | JeshuaT/PsyNeuLink | 912f691028e848659055430f37b6c15273c762f1 | [
"Apache-2.0"
] | 67 | 2018-01-05T22:18:44.000Z | 2022-03-27T11:27:31.000Z | psyneulink/core/rpc/graph_pb2.py | JeshuaT/PsyNeuLink | 912f691028e848659055430f37b6c15273c762f1 | [
"Apache-2.0"
] | 1,064 | 2017-12-01T18:58:27.000Z | 2022-03-31T22:22:24.000Z | psyneulink/core/rpc/graph_pb2.py | JeshuaT/PsyNeuLink | 912f691028e848659055430f37b6c15273c762f1 | [
"Apache-2.0"
] | 25 | 2017-12-01T20:27:07.000Z | 2022-03-08T21:49:39.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: graph.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='graph.proto',
package='graph',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0bgraph.proto\x12\x05graph\"\x0e\n\x0cNullArgument\"\x1e\n\x0cHealthStatus\x12\x0e\n\x06status\x18\x01 \x01(\t\"\x17\n\x07PNLPath\x12\x0c\n\x04path\x18\x01 \x01(\t\"\x1a\n\nScriptPath\x12\x0c\n\x04path\x18\x01 \x01(\t\"*\n\x12ScriptCompositions\x12\x14\n\x0c\x63ompositions\x18\x01 \x03(\t\"&\n\x10ScriptComponents\x12\x12\n\ncomponents\x18\x01 \x03(\t\"\x19\n\tGraphName\x12\x0c\n\x04name\x18\x01 \x01(\t\"#\n\rParameterList\x12\x12\n\nparameters\x18\x01 \x03(\t\"\x1d\n\rComponentName\x12\x0c\n\x04name\x18\x01 \x01(\t\"3\n\tGraphJSON\x12\x13\n\x0bobjectsJSON\x18\x01 \x01(\t\x12\x11\n\tstyleJSON\x18\x02 \x01(\t\"\x1e\n\tStyleJSON\x12\x11\n\tstyleJSON\x18\x01 \x01(\t\"&\n\x07ndArray\x12\r\n\x05shape\x18\x01 \x03(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x03(\x01\"6\n\x06Matrix\x12\x0c\n\x04rows\x18\x01 \x01(\r\x12\x0c\n\x04\x63ols\x18\x02 \x01(\r\x12\x10\n\x04\x64\x61ta\x18\x03 \x03(\x01\x42\x02\x10\x01\"s\n\x05\x45ntry\x12\x15\n\rcomponentName\x18\x01 \x01(\t\x12\x15\n\rparameterName\x18\x02 \x01(\t\x12\x0c\n\x04time\x18\x03 \x01(\t\x12\x0f\n\x07\x63ontext\x18\x04 \x01(\t\x12\x1d\n\x05value\x18\x05 \x01(\x0b\x32\x0e.graph.ndArray\"c\n\tServePref\x12\x15\n\rcomponentName\x18\x01 \x01(\t\x12\x15\n\rparameterName\x18\x02 \x01(\t\x12(\n\tcondition\x18\x03 \x01(\x0e\x32\x15.graph.serveCondition\"4\n\nServePrefs\x12&\n\x0cservePrefSet\x18\x01 \x03(\x0b\x32\x10.graph.ServePref\"\xa6\x01\n\rRunTimeParams\x12\x30\n\x06inputs\x18\x01 \x03(\x0b\x32 .graph.RunTimeParams.InputsEntry\x12%\n\nservePrefs\x18\x02 \x01(\x0b\x32\x11.graph.ServePrefs\x1a<\n\x0bInputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 
\x01(\x0b\x32\r.graph.Matrix:\x02\x38\x01*\x92\x01\n\x0eserveCondition\x12\x12\n\x0eINITIALIZATION\x10\x00\x12\x0e\n\nVALIDATION\x10\x01\x12\r\n\tEXECUTION\x10\x02\x12\x0e\n\nPROCESSING\x10\x03\x12\x0c\n\x08LEARNING\x10\x04\x12\x0b\n\x07\x43ONTROL\x10\x05\x12\x0e\n\nSIMULATION\x10\x06\x12\t\n\x05TRIAL\x10\x07\x12\x07\n\x03RUN\x10\x08\x32\xe8\x04\n\nServeGraph\x12\x36\n\rLoadCustomPnl\x12\x0e.graph.PNLPath\x1a\x13.graph.NullArgument\"\x00\x12<\n\nLoadScript\x12\x11.graph.ScriptPath\x1a\x19.graph.ScriptCompositions\"\x00\x12\x35\n\x0cLoadGraphics\x12\x11.graph.ScriptPath\x1a\x10.graph.StyleJSON\"\x00\x12\x45\n\x15GetLoggableParameters\x12\x14.graph.ComponentName\x1a\x14.graph.ParameterList\"\x00\x12\x43\n\x0fGetCompositions\x12\x13.graph.NullArgument\x1a\x19.graph.ScriptCompositions\"\x00\x12<\n\rGetComponents\x12\x10.graph.GraphName\x1a\x17.graph.ScriptComponents\"\x00\x12/\n\x07GetJSON\x12\x10.graph.GraphName\x1a\x10.graph.GraphJSON\"\x00\x12\x39\n\x0bHealthCheck\x12\x13.graph.NullArgument\x1a\x13.graph.HealthStatus\"\x00\x12=\n\x10UpdateStylesheet\x12\x10.graph.StyleJSON\x1a\x13.graph.NullArgument\"\x00(\x01\x12\x38\n\x0eRunComposition\x12\x14.graph.RunTimeParams\x1a\x0c.graph.Entry\"\x00\x30\x01\x62\x06proto3'
)
_SERVECONDITION = _descriptor.EnumDescriptor(
name='serveCondition',
full_name='graph.serveCondition',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='INITIALIZATION', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VALIDATION', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='EXECUTION', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PROCESSING', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='LEARNING', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CONTROL', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='SIMULATION', index=6, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TRIAL', index=7, number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RUN', index=8, number=8,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=925,
serialized_end=1071,
)
_sym_db.RegisterEnumDescriptor(_SERVECONDITION)
serveCondition = enum_type_wrapper.EnumTypeWrapper(_SERVECONDITION)
INITIALIZATION = 0
VALIDATION = 1
EXECUTION = 2
PROCESSING = 3
LEARNING = 4
CONTROL = 5
SIMULATION = 6
TRIAL = 7
RUN = 8
_NULLARGUMENT = _descriptor.Descriptor(
name='NullArgument',
full_name='graph.NullArgument',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=22,
serialized_end=36,
)
_HEALTHSTATUS = _descriptor.Descriptor(
name='HealthStatus',
full_name='graph.HealthStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='graph.HealthStatus.status', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=38,
serialized_end=68,
)
_PNLPATH = _descriptor.Descriptor(
name='PNLPath',
full_name='graph.PNLPath',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='path', full_name='graph.PNLPath.path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=70,
serialized_end=93,
)
_SCRIPTPATH = _descriptor.Descriptor(
name='ScriptPath',
full_name='graph.ScriptPath',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='path', full_name='graph.ScriptPath.path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=95,
serialized_end=121,
)
_SCRIPTCOMPOSITIONS = _descriptor.Descriptor(
name='ScriptCompositions',
full_name='graph.ScriptCompositions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='compositions', full_name='graph.ScriptCompositions.compositions', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=123,
serialized_end=165,
)
_SCRIPTCOMPONENTS = _descriptor.Descriptor(
name='ScriptComponents',
full_name='graph.ScriptComponents',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='components', full_name='graph.ScriptComponents.components', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=167,
serialized_end=205,
)
_GRAPHNAME = _descriptor.Descriptor(
name='GraphName',
full_name='graph.GraphName',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='graph.GraphName.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=207,
serialized_end=232,
)
_PARAMETERLIST = _descriptor.Descriptor(
name='ParameterList',
full_name='graph.ParameterList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='parameters', full_name='graph.ParameterList.parameters', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=234,
serialized_end=269,
)
_COMPONENTNAME = _descriptor.Descriptor(
name='ComponentName',
full_name='graph.ComponentName',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='graph.ComponentName.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=271,
serialized_end=300,
)
_GRAPHJSON = _descriptor.Descriptor(
name='GraphJSON',
full_name='graph.GraphJSON',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='objectsJSON', full_name='graph.GraphJSON.objectsJSON', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='styleJSON', full_name='graph.GraphJSON.styleJSON', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=302,
serialized_end=353,
)
_STYLEJSON = _descriptor.Descriptor(
name='StyleJSON',
full_name='graph.StyleJSON',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='styleJSON', full_name='graph.StyleJSON.styleJSON', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=355,
serialized_end=385,
)
_NDARRAY = _descriptor.Descriptor(
name='ndArray',
full_name='graph.ndArray',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='shape', full_name='graph.ndArray.shape', index=0,
number=1, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='data', full_name='graph.ndArray.data', index=1,
number=2, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=387,
serialized_end=425,
)
_MATRIX = _descriptor.Descriptor(
name='Matrix',
full_name='graph.Matrix',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='rows', full_name='graph.Matrix.rows', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cols', full_name='graph.Matrix.cols', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='data', full_name='graph.Matrix.data', index=2,
number=3, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=427,
serialized_end=481,
)
_ENTRY = _descriptor.Descriptor(
name='Entry',
full_name='graph.Entry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='componentName', full_name='graph.Entry.componentName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='parameterName', full_name='graph.Entry.parameterName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='time', full_name='graph.Entry.time', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='context', full_name='graph.Entry.context', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='graph.Entry.value', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=483,
serialized_end=598,
)
_SERVEPREF = _descriptor.Descriptor(
name='ServePref',
full_name='graph.ServePref',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='componentName', full_name='graph.ServePref.componentName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='parameterName', full_name='graph.ServePref.parameterName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='condition', full_name='graph.ServePref.condition', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=600,
serialized_end=699,
)
_SERVEPREFS = _descriptor.Descriptor(
name='ServePrefs',
full_name='graph.ServePrefs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='servePrefSet', full_name='graph.ServePrefs.servePrefSet', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=701,
serialized_end=753,
)
_RUNTIMEPARAMS_INPUTSENTRY = _descriptor.Descriptor(
name='InputsEntry',
full_name='graph.RunTimeParams.InputsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='graph.RunTimeParams.InputsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='graph.RunTimeParams.InputsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=862,
serialized_end=922,
)
_RUNTIMEPARAMS = _descriptor.Descriptor(
name='RunTimeParams',
full_name='graph.RunTimeParams',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='inputs', full_name='graph.RunTimeParams.inputs', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='servePrefs', full_name='graph.RunTimeParams.servePrefs', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_RUNTIMEPARAMS_INPUTSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=756,
serialized_end=922,
)
_ENTRY.fields_by_name['value'].message_type = _NDARRAY
_SERVEPREF.fields_by_name['condition'].enum_type = _SERVECONDITION
_SERVEPREFS.fields_by_name['servePrefSet'].message_type = _SERVEPREF
_RUNTIMEPARAMS_INPUTSENTRY.fields_by_name['value'].message_type = _MATRIX
_RUNTIMEPARAMS_INPUTSENTRY.containing_type = _RUNTIMEPARAMS
_RUNTIMEPARAMS.fields_by_name['inputs'].message_type = _RUNTIMEPARAMS_INPUTSENTRY
_RUNTIMEPARAMS.fields_by_name['servePrefs'].message_type = _SERVEPREFS
DESCRIPTOR.message_types_by_name['NullArgument'] = _NULLARGUMENT
DESCRIPTOR.message_types_by_name['HealthStatus'] = _HEALTHSTATUS
DESCRIPTOR.message_types_by_name['PNLPath'] = _PNLPATH
DESCRIPTOR.message_types_by_name['ScriptPath'] = _SCRIPTPATH
DESCRIPTOR.message_types_by_name['ScriptCompositions'] = _SCRIPTCOMPOSITIONS
DESCRIPTOR.message_types_by_name['ScriptComponents'] = _SCRIPTCOMPONENTS
DESCRIPTOR.message_types_by_name['GraphName'] = _GRAPHNAME
DESCRIPTOR.message_types_by_name['ParameterList'] = _PARAMETERLIST
DESCRIPTOR.message_types_by_name['ComponentName'] = _COMPONENTNAME
DESCRIPTOR.message_types_by_name['GraphJSON'] = _GRAPHJSON
DESCRIPTOR.message_types_by_name['StyleJSON'] = _STYLEJSON
DESCRIPTOR.message_types_by_name['ndArray'] = _NDARRAY
DESCRIPTOR.message_types_by_name['Matrix'] = _MATRIX
DESCRIPTOR.message_types_by_name['Entry'] = _ENTRY
DESCRIPTOR.message_types_by_name['ServePref'] = _SERVEPREF
DESCRIPTOR.message_types_by_name['ServePrefs'] = _SERVEPREFS
DESCRIPTOR.message_types_by_name['RunTimeParams'] = _RUNTIMEPARAMS
DESCRIPTOR.enum_types_by_name['serveCondition'] = _SERVECONDITION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NullArgument = _reflection.GeneratedProtocolMessageType('NullArgument', (_message.Message,), {
'DESCRIPTOR' : _NULLARGUMENT,
'__module__' : 'graph_pb2'
# @@protoc_insertion_point(class_scope:graph.NullArgument)
})
_sym_db.RegisterMessage(NullArgument)
HealthStatus = _reflection.GeneratedProtocolMessageType('HealthStatus', (_message.Message,), {
'DESCRIPTOR' : _HEALTHSTATUS,
'__module__' : 'graph_pb2'
# @@protoc_insertion_point(class_scope:graph.HealthStatus)
})
_sym_db.RegisterMessage(HealthStatus)
PNLPath = _reflection.GeneratedProtocolMessageType('PNLPath', (_message.Message,), {
'DESCRIPTOR' : _PNLPATH,
'__module__' : 'graph_pb2'
# @@protoc_insertion_point(class_scope:graph.PNLPath)
})
_sym_db.RegisterMessage(PNLPath)
ScriptPath = _reflection.GeneratedProtocolMessageType('ScriptPath', (_message.Message,), {
'DESCRIPTOR' : _SCRIPTPATH,
'__module__' : 'graph_pb2'
# @@protoc_insertion_point(class_scope:graph.ScriptPath)
})
_sym_db.RegisterMessage(ScriptPath)
ScriptCompositions = _reflection.GeneratedProtocolMessageType('ScriptCompositions', (_message.Message,), {
'DESCRIPTOR' : _SCRIPTCOMPOSITIONS,
'__module__' : 'graph_pb2'
# @@protoc_insertion_point(class_scope:graph.ScriptCompositions)
})
_sym_db.RegisterMessage(ScriptCompositions)
ScriptComponents = _reflection.GeneratedProtocolMessageType('ScriptComponents', (_message.Message,), {
'DESCRIPTOR' : _SCRIPTCOMPONENTS,
'__module__' : 'graph_pb2'
# @@protoc_insertion_point(class_scope:graph.ScriptComponents)
})
_sym_db.RegisterMessage(ScriptComponents)
GraphName = _reflection.GeneratedProtocolMessageType('GraphName', (_message.Message,), {
'DESCRIPTOR' : _GRAPHNAME,
'__module__' : 'graph_pb2'
# @@protoc_insertion_point(class_scope:graph.GraphName)
})
_sym_db.RegisterMessage(GraphName)
ParameterList = _reflection.GeneratedProtocolMessageType('ParameterList', (_message.Message,), {
'DESCRIPTOR' : _PARAMETERLIST,
'__module__' : 'graph_pb2'
# @@protoc_insertion_point(class_scope:graph.ParameterList)
})
_sym_db.RegisterMessage(ParameterList)
ComponentName = _reflection.GeneratedProtocolMessageType('ComponentName', (_message.Message,), {
'DESCRIPTOR' : _COMPONENTNAME,
'__module__' : 'graph_pb2'
# @@protoc_insertion_point(class_scope:graph.ComponentName)
})
_sym_db.RegisterMessage(ComponentName)
GraphJSON = _reflection.GeneratedProtocolMessageType('GraphJSON', (_message.Message,), {
'DESCRIPTOR' : _GRAPHJSON,
'__module__' : 'graph_pb2'
# @@protoc_insertion_point(class_scope:graph.GraphJSON)
})
_sym_db.RegisterMessage(GraphJSON)
StyleJSON = _reflection.GeneratedProtocolMessageType('StyleJSON', (_message.Message,), {
'DESCRIPTOR' : _STYLEJSON,
'__module__' : 'graph_pb2'
# @@protoc_insertion_point(class_scope:graph.StyleJSON)
})
_sym_db.RegisterMessage(StyleJSON)
ndArray = _reflection.GeneratedProtocolMessageType('ndArray', (_message.Message,), {
'DESCRIPTOR' : _NDARRAY,
'__module__' : 'graph_pb2'
# @@protoc_insertion_point(class_scope:graph.ndArray)
})
_sym_db.RegisterMessage(ndArray)
Matrix = _reflection.GeneratedProtocolMessageType('Matrix', (_message.Message,), {
'DESCRIPTOR' : _MATRIX,
'__module__' : 'graph_pb2'
# @@protoc_insertion_point(class_scope:graph.Matrix)
})
_sym_db.RegisterMessage(Matrix)
Entry = _reflection.GeneratedProtocolMessageType('Entry', (_message.Message,), {
'DESCRIPTOR' : _ENTRY,
'__module__' : 'graph_pb2'
# @@protoc_insertion_point(class_scope:graph.Entry)
})
_sym_db.RegisterMessage(Entry)
ServePref = _reflection.GeneratedProtocolMessageType('ServePref', (_message.Message,), {
'DESCRIPTOR' : _SERVEPREF,
'__module__' : 'graph_pb2'
# @@protoc_insertion_point(class_scope:graph.ServePref)
})
_sym_db.RegisterMessage(ServePref)
ServePrefs = _reflection.GeneratedProtocolMessageType('ServePrefs', (_message.Message,), {
'DESCRIPTOR' : _SERVEPREFS,
'__module__' : 'graph_pb2'
# @@protoc_insertion_point(class_scope:graph.ServePrefs)
})
_sym_db.RegisterMessage(ServePrefs)
RunTimeParams = _reflection.GeneratedProtocolMessageType('RunTimeParams', (_message.Message,), {
'InputsEntry' : _reflection.GeneratedProtocolMessageType('InputsEntry', (_message.Message,), {
'DESCRIPTOR' : _RUNTIMEPARAMS_INPUTSENTRY,
'__module__' : 'graph_pb2'
# @@protoc_insertion_point(class_scope:graph.RunTimeParams.InputsEntry)
})
,
'DESCRIPTOR' : _RUNTIMEPARAMS,
'__module__' : 'graph_pb2'
# @@protoc_insertion_point(class_scope:graph.RunTimeParams)
})
_sym_db.RegisterMessage(RunTimeParams)
_sym_db.RegisterMessage(RunTimeParams.InputsEntry)
_MATRIX.fields_by_name['data']._options = None
_RUNTIMEPARAMS_INPUTSENTRY._options = None
_SERVEGRAPH = _descriptor.ServiceDescriptor(
name='ServeGraph',
full_name='graph.ServeGraph',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=1074,
serialized_end=1690,
methods=[
_descriptor.MethodDescriptor(
name='LoadCustomPnl',
full_name='graph.ServeGraph.LoadCustomPnl',
index=0,
containing_service=None,
input_type=_PNLPATH,
output_type=_NULLARGUMENT,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='LoadScript',
full_name='graph.ServeGraph.LoadScript',
index=1,
containing_service=None,
input_type=_SCRIPTPATH,
output_type=_SCRIPTCOMPOSITIONS,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='LoadGraphics',
full_name='graph.ServeGraph.LoadGraphics',
index=2,
containing_service=None,
input_type=_SCRIPTPATH,
output_type=_STYLEJSON,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetLoggableParameters',
full_name='graph.ServeGraph.GetLoggableParameters',
index=3,
containing_service=None,
input_type=_COMPONENTNAME,
output_type=_PARAMETERLIST,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetCompositions',
full_name='graph.ServeGraph.GetCompositions',
index=4,
containing_service=None,
input_type=_NULLARGUMENT,
output_type=_SCRIPTCOMPOSITIONS,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetComponents',
full_name='graph.ServeGraph.GetComponents',
index=5,
containing_service=None,
input_type=_GRAPHNAME,
output_type=_SCRIPTCOMPONENTS,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetJSON',
full_name='graph.ServeGraph.GetJSON',
index=6,
containing_service=None,
input_type=_GRAPHNAME,
output_type=_GRAPHJSON,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='HealthCheck',
full_name='graph.ServeGraph.HealthCheck',
index=7,
containing_service=None,
input_type=_NULLARGUMENT,
output_type=_HEALTHSTATUS,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='UpdateStylesheet',
full_name='graph.ServeGraph.UpdateStylesheet',
index=8,
containing_service=None,
input_type=_STYLEJSON,
output_type=_NULLARGUMENT,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='RunComposition',
full_name='graph.ServeGraph.RunComposition',
index=9,
containing_service=None,
input_type=_RUNTIMEPARAMS,
output_type=_ENTRY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_SERVEGRAPH)
DESCRIPTOR.services_by_name['ServeGraph'] = _SERVEGRAPH
# @@protoc_insertion_point(module_scope)
| 34.34082 | 2,882 | 0.746936 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,844 | 0.223063 |
7e264599a8cb2bf8b1fd89fbf7da642cc5d1526e | 7,549 | py | Python | parsifal/library/models.py | reeta1234/parsifal | dc2ea19916e7430634e2efd3e1bacc89ebbde4f1 | [
"MIT"
] | null | null | null | parsifal/library/models.py | reeta1234/parsifal | dc2ea19916e7430634e2efd3e1bacc89ebbde4f1 | [
"MIT"
] | 6 | 2020-06-06T00:35:23.000Z | 2022-03-12T00:15:47.000Z | parsifal/library/models.py | reeta1234/parsifal | dc2ea19916e7430634e2efd3e1bacc89ebbde4f1 | [
"MIT"
] | null | null | null | # coding: utf-8
from django.db import models
from django.contrib.auth.models import User
from django.utils.text import slugify
class SharedFolder(models.Model):
    """A folder of documents shared between several users.

    Membership and per-user access level are tracked through the
    ``Collaborator`` intermediate model (see ``users``).
    """

    name = models.CharField(max_length=50)
    slug = models.SlugField(max_length=255, null=True, blank=True)
    users = models.ManyToManyField(User, through='Collaborator', related_name='shared_folders')

    class Meta:
        verbose_name = 'Shared Folder'
        verbose_name_plural = 'Shared Folders'
        ordering = ('name',)

    def __str__(self):
        # Django 2+/Python 3 renders objects via __str__; a lone __unicode__
        # would be silently ignored and objects would display as
        # "SharedFolder object (pk)".
        return self.name

    __unicode__ = __str__  # kept for any legacy callers

    def save(self, *args, **kwargs):
        """Persist the folder, generating a unique slug on first save.

        The object is saved once first so a primary key exists, the slug is
        then derived from the name plus the pk (or just the pk when the name
        slugifies to an empty string), and a ``-1``, ``-2``, ... suffix is
        appended until the slug is unique.
        """
        if not self.pk:
            super(SharedFolder, self).save(*args, **kwargs)
            base_slug = slugify(self.name)
            if len(base_slug) > 0:
                base_slug = slugify(u'{0} {1}'.format(self.name, self.pk))
            else:
                base_slug = self.pk
            i = 0
            unique_slug = base_slug
            while SharedFolder.objects.filter(slug=unique_slug).exists():
                i += 1
                unique_slug = u'{0}-{1}'.format(base_slug, i)
            self.slug = unique_slug
        super(SharedFolder, self).save(*args, **kwargs)
class Collaborator(models.Model):
    """Through-model linking a ``User`` to a ``SharedFolder``.

    Records when the user joined, whether they own the folder, and their
    access level (read / write / admin).
    """

    # Single-character access-level codes stored in the ``access`` field.
    READ = 'R'
    WRITE = 'W'
    ADMIN = 'A'
    ACCESS_TYPES = (
        (READ, 'Read'),
        (WRITE, 'Write'),
        (ADMIN, 'Admin'),
    )
    user = models.ForeignKey(User,on_delete=models.CASCADE)
    shared_folder = models.ForeignKey(SharedFolder,on_delete=models.CASCADE)
    joined_at = models.DateTimeField(auto_now_add=True)
    is_owner = models.BooleanField(default=False)
    access = models.CharField(max_length=1, choices=ACCESS_TYPES, default=READ)
    class Meta:
        verbose_name = 'Collaborator'
        verbose_name_plural = 'Collaborators'
    def save(self, *args, **kwargs):
        # Owners always get full rights: force access to ADMIN before saving.
        if self.is_owner:
            self.access = Collaborator.ADMIN
        super(Collaborator, self).save(*args, **kwargs)
class Document(models.Model):
    """A bibliographic record modeled after the BibTeX entry format.

    Field groups mirror BibTeX: the entry type/key, the standard BibTeX
    fields (author, title, journal, ...), a few extra identifiers
    (DOI/ISBN/ISSN/...), and Parsifal-specific bookkeeping (owner, review,
    shared folder, timestamps).
    """

    # BibTeX entry-type codes (values stored in ``entry_type``).
    ARTICLE = 'article'
    BOOK = 'book'
    BOOKLET = 'booklet'
    CONFERENCE = 'conference'
    INBOOK = 'inbook'
    INCOLLECTION = 'incollection'
    INPROCEEDINGS = 'inproceedings'
    MANUAL = 'manual'
    MASTERSTHESIS = 'mastersthesis'
    MISC = 'misc'
    PHDTHESIS = 'phdthesis'
    PROCEEDINGS = 'proceedings'
    TECHREPORT = 'techreport'
    UNPUBLISHED = 'unpublished'
    ENTRY_TYPES = (
        (ARTICLE, 'Article'),
        (BOOK, 'Book'),
        (BOOKLET, 'Booklet'),
        (CONFERENCE, 'Conference'),
        (INBOOK, 'Inbook'),
        (INCOLLECTION, 'Incollection'),
        (INPROCEEDINGS, 'Inproceedings'),
        (MANUAL, 'Manual'),
        (MASTERSTHESIS, 'Master\'s Thesis'),
        (MISC, 'Misc'),
        (PHDTHESIS, 'Ph.D. Thesis'),
        (PROCEEDINGS, 'Proceedings'),
        (TECHREPORT, 'Tech Report'),
        (UNPUBLISHED, 'Unpublished'),
    )
    # Bibtex required fields
    bibtexkey = models.CharField('Bibtex key', max_length=255, null=True, blank=True)
    entry_type = models.CharField('Document type', max_length=13, choices=ENTRY_TYPES, null=True, blank=True)
    # Bibtex base fields
    address = models.CharField(max_length=2000, null=True, blank=True)
    author = models.TextField(max_length=1000, null=True, blank=True)
    booktitle = models.CharField(max_length=1000, null=True, blank=True)
    chapter = models.CharField(max_length=1000, null=True, blank=True)
    crossref = models.CharField('Cross-referenced', max_length=1000, null=True, blank=True)
    edition = models.CharField(max_length=1000, null=True, blank=True)
    editor = models.CharField(max_length=1000, null=True, blank=True)
    howpublished = models.CharField('How it was published', max_length=1000, null=True, blank=True)
    institution = models.CharField(max_length=1000, null=True, blank=True)
    journal = models.CharField(max_length=1000, null=True, blank=True)
    month = models.CharField(max_length=50, null=True, blank=True)
    note = models.CharField(max_length=2000, null=True, blank=True)
    number = models.CharField(max_length=1000, null=True, blank=True)
    organization = models.CharField(max_length=1000, null=True, blank=True)
    pages = models.CharField(max_length=255, null=True, blank=True)
    publisher = models.CharField(max_length=1000, null=True, blank=True)
    school = models.CharField(max_length=1000, null=True, blank=True)
    series = models.CharField(max_length=500, null=True, blank=True)
    title = models.CharField(max_length=1000, null=True, blank=True)
    publication_type = models.CharField(max_length=1000, null=True, blank=True)  # BibTeX "type" (name avoids the builtin)
    volume = models.CharField(max_length=1000, null=True, blank=True)
    year = models.CharField(max_length=50, null=True, blank=True)
    # Extra fields
    abstract = models.TextField(max_length=4000, null=True, blank=True)
    coden = models.CharField(max_length=1000, null=True, blank=True)
    doi = models.CharField('DOI', max_length=255, null=True, blank=True)
    isbn = models.CharField('ISBN', max_length=255, null=True, blank=True)
    issn = models.CharField('ISSN', max_length=255, null=True, blank=True)
    keywords = models.CharField(max_length=2000, null=True, blank=True)
    language = models.CharField(max_length=1000, null=True, blank=True)
    url = models.CharField('URL', max_length=1000, null=True, blank=True)
    # Parsifal management field
    user = models.ForeignKey(User, null=True, related_name='documents',on_delete=models.CASCADE)
    review = models.ForeignKey('reviews.Review', null=True, related_name='documents',on_delete=models.CASCADE)
    shared_folder = models.ForeignKey(SharedFolder, null=True, related_name='documents',on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = 'Document'
        verbose_name_plural = 'Documents'

    def __str__(self):
        # Django 2+/Python 3 renders objects via __str__; a lone __unicode__
        # would be silently ignored.
        return self.title

    __unicode__ = __str__  # kept for any legacy callers
def document_file_upload_to(instance, filename):
    """Return the upload directory for a document's files: one per owner.

    ``filename`` is unused but required by Django's ``upload_to`` callback
    signature.
    """
    owner_pk = instance.document.user.pk
    return u'library/{}/'.format(owner_pk)
class DocumentFile(models.Model):
    """A file (e.g. a PDF) attached to a library ``Document``."""

    document = models.ForeignKey(Document, related_name='files',on_delete=models.CASCADE)
    document_file = models.FileField(upload_to='library/')
    filename = models.CharField(max_length=255)
    size = models.IntegerField(default=0)  # file size; presumably bytes — TODO confirm against upload code
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = 'Document File'
        verbose_name_plural = 'Document Files'

    def __str__(self):
        # Django 2+/Python 3 renders objects via __str__; a lone __unicode__
        # would be silently ignored.
        return self.filename

    __unicode__ = __str__  # kept for any legacy callers
class Folder(models.Model):
    """A private, per-user folder used to organize library documents."""

    name = models.CharField(max_length=50)
    slug = models.SlugField(max_length=255, null=True, blank=True)
    user = models.ForeignKey(User, related_name='library_folders',on_delete=models.CASCADE)
    documents = models.ManyToManyField(Document)

    class Meta:
        verbose_name = 'Folder'
        verbose_name_plural = 'Folders'
        ordering = ('name',)
        unique_together = (('name', 'user'),)

    def __str__(self):
        # Django 2+/Python 3 renders objects via __str__; a lone __unicode__
        # would be silently ignored.
        return self.name

    __unicode__ = __str__  # kept for any legacy callers

    def save(self, *args, **kwargs):
        """Persist the folder, (re)generating a globally unique slug.

        Falls back to ``'untitled-folder'`` when the name slugifies to an
        empty string, then appends ``-1``, ``-2``, ... until unique.
        """
        base_slug = slugify(self.name)
        if len(base_slug) > 0:
            unique_slug = base_slug
        else:
            base_slug = unique_slug = 'untitled-folder'
        i = 0
        # Exclude this row itself so re-saving an existing folder does not
        # collide with its own slug and pick up a spurious '-N' suffix.
        # (exclude(pk=None) excludes nothing for unsaved instances.)
        while Folder.objects.filter(slug=unique_slug).exclude(pk=self.pk).exists():
            i += 1
            unique_slug = u'{0}-{1}'.format(base_slug, i)
        self.slug = unique_slug
        super(Folder, self).save(*args, **kwargs)
| 38.126263 | 113 | 0.672672 | 7,295 | 0.966353 | 0 | 0 | 0 | 0 | 0 | 0 | 861 | 0.114055 |
7e27a51c16ce0ebdda612a36ad33d8d6b8a40c5b | 2,668 | py | Python | getkills/mongoconn.py | namrak/pyzkillredisq | da0c1fe02945f9da13795609a2f2debd95a0e74a | [
"MIT"
] | null | null | null | getkills/mongoconn.py | namrak/pyzkillredisq | da0c1fe02945f9da13795609a2f2debd95a0e74a | [
"MIT"
] | null | null | null | getkills/mongoconn.py | namrak/pyzkillredisq | da0c1fe02945f9da13795609a2f2debd95a0e74a | [
"MIT"
] | null | null | null | from pymongo import MongoClient, errors
import tstp
from mdb import creds
def connect(logfile):
    """Connect to MongoDB and return an authenticated handle to ``fpLoss``.

    On server-selection timeout or connection failure, the error is logged
    to *logfile* (which is closed) and the process exits.
    """
    try:
        client = MongoClient(creds['ip'], int(creds['port']))
        db = client.fpLoss
        db.authenticate(creds['un'], creds['pw'])
        return db
    # ServerSelectionTimeoutError is a subclass of ConnectionFailure, so it
    # must be caught first.
    except errors.ServerSelectionTimeoutError as err:
        print(time.strftime('%m/%d %H:%M:%S'), 'Timeout Error - Aborting')
        log = (tstp.now() + ' Log - Server timeout - ' + '\n')
        logfile.write(log)
        # write() requires a str; passing the exception object raised TypeError.
        logfile.write(str(err))
        logfile.close()
        sys.exit()
    except errors.ConnectionFailure as err:
        print(time.strftime('%m/%d %H:%M:%S'), 'Connection Failure - Aborting')
        log = (tstp.now() + ' Log - Failed connection - ' + '\n')
        logfile.write(log)
        logfile.write(str(err))
        logfile.close()
        sys.exit()
def insert2mongo(mongohandle, logfile, killmail):
    """Insert a formatted killmail into the ``allkills`` collection.

    Returns 0 on success. On connection errors, the failure is logged to
    *logfile* (which is closed) and the process exits.
    """
    try:
        allkills = mongohandle.allkills
        allkills.insert_one(killmail)
        return 0
    # ServerSelectionTimeoutError subclasses ConnectionFailure; catch it first.
    except errors.ServerSelectionTimeoutError as err:
        print(time.strftime('%m/%d %H:%M:%S'), 'Timeout Error - Aborting')
        log = (tstp.now() + ' Log - Server timeout - ' + '\n')
        logfile.write(log)
        # write() requires a str; passing the exception object raised TypeError.
        logfile.write(str(err))
        logfile.close()
        sys.exit()
    except errors.ConnectionFailure as err:
        print(time.strftime('%m/%d %H:%M:%S'), 'Connection Failure - Aborting')
        log = (tstp.now() + ' Log - Failed connection - ' + '\n')
        logfile.write(log)
        logfile.write(str(err))
        logfile.close()
        sys.exit()
def get_groupid_from_typeid(mongohandle, logfile, typeid):
    """Look up the group ID for *typeid* in the ``typeIDs`` collection.

    Returns the ``groupID`` value, or 0 if no matching document is found.
    On connection errors, the failure is logged to *logfile* (which is
    closed) and the process exits.
    """
    try:
        typeids = mongohandle.typeIDs
        cursor = typeids.find_one({"typeID": typeid}, {"groupID": 1})
        if cursor is not None:
            return cursor['groupID']
        else:
            print(tstp.now() + '!!ERROR!! Group ID not found for Type ID: ' + str(typeid) + '\n')
            return 0
    # ServerSelectionTimeoutError subclasses ConnectionFailure; catch it first.
    except errors.ServerSelectionTimeoutError as err:
        print(time.strftime('%m/%d %H:%M:%S'), 'Timeout Error - Aborting')
        timeoutlog = (tstp.now() + ' Log - Server timeout - ' + '\n')
        logfile.write(timeoutlog)
        # write() requires a str; passing the exception object raised TypeError.
        logfile.write(str(err))
        logfile.close()
        sys.exit()
    except errors.ConnectionFailure as err:
        print(time.strftime('%m/%d %H:%M:%S'), 'Connection Failure - Aborting')
        failconnlog = (tstp.now() + ' Log - Failed connection - ' + '\n')
        logfile.write(failconnlog)
        logfile.write(str(err))
        logfile.close()
        sys.exit()
7e27b75092d500071f0de2b5c912b9202dacc53e | 4,785 | py | Python | test.py | richardfergie/ForecastGA | ce4ddc778d0be68f504e1bfc300be6b2fe870cb7 | [
"MIT"
] | 30 | 2021-01-02T16:14:55.000Z | 2022-02-11T23:17:18.000Z | test.py | richardfergie/ForecastGA | ce4ddc778d0be68f504e1bfc300be6b2fe870cb7 | [
"MIT"
] | 2 | 2021-01-11T16:56:23.000Z | 2021-02-04T00:37:56.000Z | test.py | richardfergie/ForecastGA | ce4ddc778d0be68f504e1bfc300be6b2fe870cb7 | [
"MIT"
] | 8 | 2021-01-10T19:04:22.000Z | 2021-11-14T18:47:22.000Z | # Libraries
import pandas as pd
import numpy as np
import re
from datetime import datetime
from dateutil.rrule import rrule, MONTHLY
import matplotlib.pyplot as plt
import json
import forecastga
import forecastga.googleanalytics as ga
# Logging
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
from types import SimpleNamespace
# Load API credentials from disk and expose them as attributes
# (identify_json.client_id, .client_secret, .identity).
with open("identity.json") as f:
    jf = json.load(f)
identify_json = SimpleNamespace(**jf)
# --- Colab form parameters -------------------------------------------------
# NOTE: the trailing "# @param" / "# @markdown" comments are parsed by Google
# Colab to render form widgets; keep them on the same line as each assignment.
# @title Google Analytics
ga_url = "https://analytics.google.com/analytics/web/?authuser=2#/report-home/a49839941w81675857p84563570" # @param {type:"string"}
ga_segment = "organic traffic" # @param ["all users", "organic traffic", "direct traffic", "referral traffic", "mobile traffic", "tablet traffic"] {type:"string"}
ga_metric = "sessions" # @param ["sessions", "pageviews", "unique pageviews", "transactions"] {type:"string"}
# @title Historical Data
# @markdown #### Date Range:
ga_start_date = "2018-01-01" # @param {type:"date"}
ga_end_date = "2019-12-31" # @param {type:"date",name:"GA Date"}
# @markdown ***
# @markdown <div align="center">OR</div>
# @markdown #### Prior Months:
# prior_months, when > 0, takes precedence over the explicit date range above
# (see get_ga_data).
prior_months = 0 # @param {type:"integer"}
# @title Prediction Data
future_months = 2 # @param {type:"slider", min:1, max:24, step:1}
# @markdown ---
# @markdown `max_available_volume` is the total possible daily volume for a niche/geo. This helps keep the algorithm honest by putting a max possible upper bound on prediction.
max_available_volume = 12222 # @param {type:"integer", hint:"this is a description"}
# @markdown ---
# @markdown `omit_values_over` is a way to clean your existing data to remove one-time spikes, caused by a rare event that is unlikely to happen again. This keeps the algorithm from using this data in its future predictions.
omit_values_over = 2000000 # @param {type:"integer"}
# @markdown ---
save_output = False # @param {type:"boolean"}
# Authenticate against Google Analytics using the credentials loaded from
# identity.json; errors are reported but do not stop the script here (a
# failure will surface later when `profile` is first used).
try:
    profile = ga.authenticate(
        client_id=identify_json.client_id,
        client_secret=identify_json.client_secret,
        identity=identify_json.identity,
        ga_url=ga_url,
        interactive=True,
    )
    print("Authenticated")
except Exception as e:
    print("An error occured", str(e))
class Struct:
    """Lightweight namespace: exposes keyword arguments as attributes."""

    def __init__(self, **entries):
        for attr_name, attr_value in entries.items():
            setattr(self, attr_name, attr_value)
def p_date(_dt):
    """Format a datetime/date as an ISO-style 'YYYY-MM-DD' string."""
    return _dt.strftime("%Y-%m-%d")
def get_months(start_date, end_date):
    """Count monthly rrule occurrences between two 'YYYY-MM-DD' strings.

    Both endpoints are inclusive; occurrences fall on the start date's
    day-of-month (dateutil semantics).
    """
    first_day = datetime.strptime(start_date, "%Y-%m-%d")
    last_day = datetime.strptime(end_date, "%Y-%m-%d")
    return len(list(rrule(MONTHLY, dtstart=first_day, until=last_day)))
def get_ga_data(profile, data):
    """Pull a daily Google Analytics metric series and return it as a
    cleaned, daily-frequency DataFrame indexed by date.

    profile: authenticated GA profile object (ga.authenticate result).
    data:    object with attributes ga_metric, ga_segment, ga_start_date,
             ga_end_date, prior_months and omit_values_over.

    Returns None if the GA query fails.
    """
    try:
        # prior_months > 0 takes precedence over the explicit date range.
        if data.prior_months and int(data.prior_months) > 0:
            print("Pulling {} prior months data.".format(data.prior_months))
            sessions = (
                profile.core.query.metrics(data.ga_metric)
                .segment(data.ga_segment)
                .daily(months=0 - int(data.prior_months))
                .report
            )
        else:
            print(
                "Pulling data from {} to {}.".format(
                    data.ga_start_date, data.ga_end_date
                )
            )
            sessions = (
                profile.core.query.metrics(data.ga_metric)
                .segment(data.ga_segment)
                .daily(data.ga_start_date, data.ga_end_date)
                .report
            )
    except Exception as e:
        print("Error. Error retreiving data from Google Analytics.", str(e))
        return None
    df = sessions.as_dataframe()
    df["date"] = pd.to_datetime(df["date"])
    # Clean data.
    # Drop one-time spikes above the configured ceiling and zero/negative
    # rows (both become NaN, then are removed by dropna).
    if data.omit_values_over and int(data.omit_values_over) > 0:
        df.loc[df[data.ga_metric] > data.omit_values_over, data.ga_metric] = np.nan
    df.loc[df[data.ga_metric] < 1, data.ga_metric] = np.nan
    df.dropna(inplace=True, axis=0)
    print(
        "Rows: {rows} Min Date: {min_date} Max Date: {max_date}".format(
            rows=len(df), min_date=p_date(df.date.min()), max_date=p_date(df.date.max())
        )
    )
    # Backfilling missing values
    # Reindex to a continuous daily frequency; days removed above are
    # backfilled from the next valid observation.
    df = df.set_index("date").asfreq("d", method="bfill")
    return df
# Bundle the Colab form parameters into a single config object for
# get_ga_data.
data = Struct(
    **{
        "ga_segment": ga_segment,
        "ga_metric": ga_metric,
        "ga_start_date": ga_start_date,
        "ga_end_date": ga_end_date,
        "prior_months": prior_months,
        "omit_values_over": omit_values_over,
    }
)
datafile = get_ga_data(profile, data)
print(datafile.head())
# Fit several candidate models on the sessions series and compare their
# in-sample forecasts. NOTE(review): "preformance" is a typo preserved from
# the original variable name; the final .head() result is discarded
# (meaningful only in a notebook cell).
model_list = ["TATS", "TBATS1", "TBATP1", "TBATS2", "ARIMA", "Gluonts"]
am = forecastga.AutomatedModel(
    df=datafile["sessions"], model_list=model_list, forecast_len=30
)
forecast_frame, preformance = am.forecast_insample()
forecast_frame.head()
7e27fd655e7f176518f5547ff5db4f5b22ff9992 | 574 | py | Python | Simulation/Simulation/intervention.py | anoppa/Proyecto-IA-Sim-Comp | 71132bd0c6cb5aeff812fd96e0017be71178a5f3 | [
"MIT"
] | 1 | 2022-03-11T14:24:10.000Z | 2022-03-11T14:24:10.000Z | Simulation/Simulation/intervention.py | anoppa/Proyecto-IA-Sim-Comp | 71132bd0c6cb5aeff812fd96e0017be71178a5f3 | [
"MIT"
] | null | null | null | Simulation/Simulation/intervention.py | anoppa/Proyecto-IA-Sim-Comp | 71132bd0c6cb5aeff812fd96e0017be71178a5f3 | [
"MIT"
] | 1 | 2022-01-19T04:29:19.000Z | 2022-01-19T04:29:19.000Z | from .agent import Agent
from typing import List
from .activation_rule import ActivationRule
class Intervention(Agent):
    """An :class:`Agent` that models an intervention backed by a finite
    supply.

    All behavioral arguments (activation rules, effect time, repetition,
    action) are forwarded unchanged to the ``Agent`` base class; this
    subclass only adds the read-only ``supply`` attribute.
    """
    def __init__(
        self,
        name: str,
        activation_rules: List[ActivationRule],
        efect_time: int,
        repetition: int,
        action,
        supply: int,
    ) -> None:
        super().__init__(name, activation_rules, efect_time, repetition, action)
        self._supply = supply
    @property
    def supply(self) -> int:
        """
        Returns the supply of the current intervention.
        """
        return self._supply
| 22.96 | 80 | 0.614983 | 478 | 0.832753 | 0 | 0 | 134 | 0.233449 | 0 | 0 | 66 | 0.114983 |
7e2848ed83aa47cbc6a16174581fc8a9c100f6b0 | 79,206 | py | Python | xanalysis_groundstate_paper_figs.py | kseetharam/genPolaron | b4eb05c595f1dc7151aa564f56fcfbdeded570c5 | [
"MIT"
] | null | null | null | xanalysis_groundstate_paper_figs.py | kseetharam/genPolaron | b4eb05c595f1dc7151aa564f56fcfbdeded570c5 | [
"MIT"
] | null | null | null | xanalysis_groundstate_paper_figs.py | kseetharam/genPolaron | b4eb05c595f1dc7151aa564f56fcfbdeded570c5 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import xarray as xr
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from matplotlib.lines import Line2D
import matplotlib.colors as colors
from matplotlib.animation import writers
from matplotlib.patches import ConnectionPatch
import matplotlib.image as mpimg
import os
import itertools
import pf_dynamic_cart as pfc
import pf_dynamic_sph as pfs
import Grid
from scipy import interpolate
from timeit import default_timer as timer
import scipy.stats as ss
import colors as col
if __name__ == "__main__":
# # Initialization
# matplotlib.rcParams.update({'font.size': 12, 'text.usetex': True})
mpegWriter = writers['ffmpeg'](fps=0.75, bitrate=1800)
matplotlib.rcParams.update({'font.size': 16, 'font.family': 'Times New Roman', 'text.usetex': True, 'mathtext.fontset': 'dejavuserif'})
axl = matplotlib.rcParams['axes.linewidth']
# ---- INITIALIZE GRIDS ----
(Lx, Ly, Lz) = (21, 21, 21)
(dx, dy, dz) = (0.375, 0.375, 0.375)
# (Lx, Ly, Lz) = (105, 105, 105)
# (dx, dy, dz) = (0.375, 0.375, 0.375)
NGridPoints_cart = (1 + 2 * Lx / dx) * (1 + 2 * Ly / dy) * (1 + 2 * Lz / dz)
# Toggle parameters
toggleDict = {'Dynamics': 'imaginary', 'Interaction': 'on', 'Grid': 'spherical', 'Coupling': 'twophonon', 'IRcuts': 'false', 'ReducedInterp': 'false', 'kGrid_ext': 'false'}
# ---- SET OUTPUT DATA FOLDER ----
datapath = '/Users/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}'.format(NGridPoints_cart, 1)
animpath = '/Users/kis/Dropbox/VariationalResearch/DataAnalysis/figs'
if toggleDict['Dynamics'] == 'real':
innerdatapath = datapath + '/redyn'
animpath = animpath + '/rdyn'
elif toggleDict['Dynamics'] == 'imaginary':
innerdatapath = datapath + '/imdyn'
animpath = animpath + '/idyn'
if toggleDict['Grid'] == 'cartesian':
innerdatapath = innerdatapath + '_cart'
elif toggleDict['Grid'] == 'spherical':
innerdatapath = innerdatapath + '_spherical'
if toggleDict['Coupling'] == 'frohlich':
innerdatapath = innerdatapath + '_froh'
animpath = animpath + '_frohlich'
elif toggleDict['Coupling'] == 'twophonon':
innerdatapath = innerdatapath
animpath = animpath + '_twophonon'
if toggleDict['IRcuts'] == 'true':
innerdatapath = innerdatapath + '_IRcuts'
elif toggleDict['IRcuts'] == 'false':
innerdatapath = innerdatapath
print(innerdatapath)
# figdatapath = '/Users/kis/Dropbox/Apps/Overleaf/Quantum Cherenkov Transition in Bose Polaron Systems/figures/figdump'
figdatapath = '/Users/kis/Dropbox/Apps/Overleaf/Cherenkov Polaron Paper pt1/figures/figdump'
innerdatapath_cart = innerdatapath[0:-10] + '_cart'
# # Analysis of Total Dataset
base02 = col.base02.ashexstring()
base2 = col.base2.ashexstring()
red = col.red.ashexstring()
green = col.green.ashexstring()
cyan = col.cyan.ashexstring()
blue = col.blue.ashexstring()
violet = col.violet.ashexstring()
aIBi = -2
# qds = xr.open_dataset(innerdatapath + '/quench_Dataset.nc')
# qds_aIBi = qds.sel(aIBi=aIBi)
qds = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
qds_aIBi = qds
PVals = qds['P'].values
tVals = qds['t'].values
n0 = qds.attrs['n0']
gBB = qds.attrs['gBB']
mI = qds.attrs['mI']
mB = qds.attrs['mB']
nu = np.sqrt(n0 * gBB / mB)
aBB = (mB / (4 * np.pi)) * gBB
xi = (8 * np.pi * n0 * aBB)**(-1 / 2)
print(qds.attrs['k_mag_cutoff'] * xi)
aIBi_Vals = np.array([-12.5, -10.0, -9.0, -8.0, -7.0, -5.0, -3.5, -2.0, -1.0, -0.75, -0.5, -0.1]) # used by many plots (spherical)
# # # # FIG SCHEMATIC - POLARON GRAPHIC + BOGO DISPERSION + POLARON DISPERSION
# matplotlib.rcParams.update({'font.size': 12})
# labelsize = 13
# legendsize = 12
# fig1 = plt.figure(constrained_layout=False)
# # gs1 = fig1.add_gridspec(nrows=1, ncols=1, bottom=0.1, top=0.95, left=0.05, right=0.2)
# # gs2 = fig1.add_gridspec(nrows=1, ncols=1, bottom=0.15, top=0.91, left=0.32, right=0.58)
# # gs3 = fig1.add_gridspec(nrows=1, ncols=1, bottom=0.15, top=0.91, left=0.7, right=0.97)
# gs2 = fig1.add_gridspec(nrows=1, ncols=1, bottom=0.15, top=0.91, left=0.08, right=0.45)
# gs3 = fig1.add_gridspec(nrows=1, ncols=1, bottom=0.15, top=0.91, left=0.57, right=0.97)
# # ax_pol = fig1.add_subplot(gs1[0], frame_on=False); ax_pol.get_xaxis().set_visible(False); ax_pol.get_yaxis().set_visible(False)
# ax_bogo = fig1.add_subplot(gs2[0])
# ax_gsE = fig1.add_subplot(gs3[0])
# # fig1.text(0.01, 0.95, '(a)', fontsize=labelsize)
# # fig1.text(0.24, 0.95, '(b)', fontsize=labelsize)
# # fig1.text(0.65, 0.95, '(c)', fontsize=labelsize)
# fig1.text(0.01, 0.95, '(a)', fontsize=labelsize)
# fig1.text(0.52, 0.95, '(b)', fontsize=labelsize)
# fig1.set_size_inches(7.8, 3.5)
# # # POLARON GRAPHIC
# # polimg = mpimg.imread('images/PolaronGraphic.png')
# # imgplot = ax_pol.imshow(polimg)
# # BOGOLIUBOV DISPERSION (SPHERICAL)
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', qds.coords['k'].values); kgrid.initArray_premade('th', qds.coords['th'].values)
# kVals = kgrid.getArray('k')
# wk_Vals = pfs.omegak(kVals, mB, n0, gBB)
# mask = (wk_Vals < 2) * (wk_Vals > 0)
# ax_bogo.plot(kVals[mask], wk_Vals[mask], 'k-', label='')
# ax_bogo.plot(kVals[mask], nu * kVals[mask], color=red, linestyle='--', label=r'$c|\mathbf{k}|$')
# ax_bogo.set_xlabel(r'$|\mathbf{k}|$', fontsize=labelsize)
# ax_bogo.set_ylabel(r'$\omega_{|\mathbf{k}|}$', fontsize=labelsize)
# ax_bogo.set_xlim([0 - 0.09, np.max(kVals[mask]) + 0.09])
# ax_bogo.xaxis.set_major_locator(plt.MaxNLocator(2))
# ax_bogo.set_ylim([0 - 0.09, 2 + 0.09])
# ax_bogo.yaxis.set_major_locator(plt.MaxNLocator(3))
# ax_bogo.legend(loc=2, fontsize=legendsize)
# # # GROUND STATE ENERGY (SPHERICAL)
# aIBi = -5
# print(aIBi * xi)
# qds_aIBi = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# PVals = qds_aIBi['P'].values
# CSAmp_ds = qds_aIBi['Real_CSAmp'] + 1j * qds_aIBi['Imag_CSAmp']
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', CSAmp_ds.coords['k'].values); kgrid.initArray_premade('th', CSAmp_ds.coords['th'].values)
# Energy_Vals = np.zeros((PVals.size, tVals.size))
# for Pind, P in enumerate(PVals):
# for tind, t in enumerate(tVals):
# CSAmp = CSAmp_ds.sel(P=P, t=t).values
# Energy_Vals[Pind, tind] = pfs.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# Energy_Vals_inf = Energy_Vals[:, -1]
# Einf_tck = interpolate.splrep(PVals, Energy_Vals_inf, s=0)
# # Pinf_Vals = np.linspace(np.min(PVals), np.max(PVals), 5 * PVals.size)
# Pinf_Vals = np.linspace(0, np.max(PVals), 5 * PVals.size)
# Einf_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=0)
# Einf_1stderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=1)
# Einf_2ndderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=2)
# sound_mask = np.abs(Einf_2ndderiv_Vals) <= 5e-3
# Einf_sound = Einf_Vals[sound_mask]
# Pinf_sound = Pinf_Vals[sound_mask]
# [vsound, vs_const] = np.polyfit(Pinf_sound, Einf_sound, deg=1)
# ms_mask = Pinf_Vals <= 0.5
# Einf_1stderiv_ms = Einf_1stderiv_Vals[ms_mask]
# Pinf_ms = Pinf_Vals[ms_mask]
# [ms, ms_const] = np.polyfit(Pinf_ms, Einf_1stderiv_ms, deg=1)
# mask = (Pinf_Vals / (mI * nu)) < 2.2
# Ecrit = Einf_Vals[np.argmin(np.gradient(Einf_2ndderiv_Vals)) - 0]
# ax_gsE.plot(Pinf_Vals[mask][1:-1] / (mI * nu), Einf_Vals[mask][1:-1] / np.abs(Ecrit), 'k-')
# ax_gsE.set_xlabel(r'$P/(m_{I}c)$', fontsize=labelsize)
# ax_gsE.set_ylabel(r'$E/E_{\rm crit}$', fontsize=labelsize)
# # ymin = -2.1 / np.abs(Ecrit); ymax = -1 / np.abs(Ecrit)
# ymin = -1.3; ymax = -0.6
# # ax_gsE.set_ylim([ymin, ymax]); ax_gsE.set_xlim([0, 2.2])
# ax_gsE.set_ylim([ymin - 0.02, ymax + 0.02]); ax_gsE.set_xlim([-0.05, 2.2 + 0.05])
# ax_gsE.yaxis.set_major_locator(plt.MaxNLocator(2))
# Pcrit = Pinf_Vals[np.argmin(np.gradient(Einf_2ndderiv_Vals)) - 0]
# ax_gsE.axvline(x=Pcrit / (mI * nu), ymin=0.03, ymax=0.975, linestyle=':', color=green, lw=2)
# ax_bogo.tick_params(direction='in', right=True, top=True)
# ax_gsE.tick_params(direction='in', right=True, top=True)
# ax_bogo.set_title('BEC without Impurity')
# ax_gsE.set_title('BEC with Impurity')
# subVals = np.linspace(0, Pcrit / (mI * nu), 100)
# supVals = np.linspace(Pcrit / (mI * nu), np.max(Pinf_Vals[mask] / (mI * nu)), 100)
# ax_gsE.fill_between(supVals, ymin, ymax, facecolor=base2, alpha=0.75)
# ax_gsE.fill_between(subVals, ymin, ymax, facecolor=base02, alpha=0.3)
# font = {'family': 'serif', 'color': 'black', 'size': legendsize}
# sfont = {'family': 'serif', 'color': 'black', 'size': legendsize - 1}
# ax_gsE.text(0.16, -1.3 / np.abs(Ecrit), 'Polaron', fontdict=font)
# ax_gsE.text(0.16, -1.4 / np.abs(Ecrit), '(quadratic)', fontdict=sfont)
# ax_gsE.text(1.3, -1.8 / np.abs(Ecrit), 'Cherenkov', fontdict=font)
# ax_gsE.text(1.3, -1.9 / np.abs(Ecrit), '(linear)', fontdict=sfont)
# # ax_gsE.margins(1.05, 1.05)
# fig1.savefig(figdatapath + '/FigSchematic.pdf')
# # # # FIG 1 - PHASE DIAGRAM + DISTRIBUTION PLOTS - LETTER
# matplotlib.rcParams['axes.linewidth'] = 0.5 * axl
# matplotlib.rcParams.update({'font.size': 12})
# labelsize = 13
# legendsize = 12
# fig1 = plt.figure(constrained_layout=False)
# gs1 = fig1.add_gridspec(nrows=1, ncols=1, bottom=0.13, top=0.94, left=0.08, right=0.55)
# # gs2 = fig1.add_gridspec(nrows=2, ncols=1, bottom=0.13, top=0.94, left=0.67, right=0.99, height_ratios=[1, 1], hspace=0.2) # for ground state impurity distributions
# gs2 = fig1.add_gridspec(nrows=2, ncols=1, bottom=0.13, top=0.94, left=0.63, right=0.915, height_ratios=[1, 1], hspace=0.2) # for dynamical real space density distributions
# gs3 = fig1.add_gridspec(nrows=1, ncols=1, bottom=0.13, top=0.94, left=0.93, right=0.945); ax_colorbar = fig1.add_subplot(gs3[0]) # for dynamical real space density distributions
# ax_PD = fig1.add_subplot(gs1[0])
# ax_supDist = fig1.add_subplot(gs2[0])
# ax_subDist = fig1.add_subplot(gs2[1])
# fig1.text(0.01, 0.95, '(a)', fontsize=labelsize)
# fig1.text(0.575, 0.95, '(b)', fontsize=labelsize)
# fig1.text(0.575, 0.52, '(c)', fontsize=labelsize)
# fig1.set_size_inches(7.8, 4.5)
# # PHASE DIAGRAM (SPHERICAL)
# Pcrit = np.zeros(aIBi_Vals.size)
# ms_Vals = np.zeros(aIBi_Vals.size)
# for aind, aIBi in enumerate(aIBi_Vals):
# qds_aIBi = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# CSAmp_ds = qds_aIBi['Real_CSAmp'] + 1j * qds_aIBi['Imag_CSAmp']
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', CSAmp_ds.coords['k'].values); kgrid.initArray_premade('th', CSAmp_ds.coords['th'].values)
# Energy_Vals_inf = np.zeros(PVals.size)
# for Pind, P in enumerate(PVals):
# CSAmp = CSAmp_ds.sel(P=P).isel(t=-1).values
# Energy_Vals_inf[Pind] = pfs.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# Einf_tck = interpolate.splrep(PVals, Energy_Vals_inf, s=0)
# Pinf_Vals = np.linspace(np.min(PVals), np.max(PVals), 2 * PVals.size)
# Einf_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=0)
# Einf_1stderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=1)
# Einf_2ndderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=2)
# # Pcrit[aind] = Pinf_Vals[np.argwhere(Einf_2ndderiv_Vals < 0)[-2][0] + 3]
# Pcrit[aind] = Pinf_Vals[np.argmin(np.gradient(Einf_2ndderiv_Vals)) - 0] # there is a little bit of fudging with the -3 here so that aIBi=-10 gives me Pcrit/(mI*c) = 1 -> I can also just generate data for weaker interactions and see if it's better
# ms_mask = Pinf_Vals < 0.3
# Einf_1stderiv_ms = Einf_1stderiv_Vals[ms_mask]
# Pinf_ms = Pinf_Vals[ms_mask]
# [ms_Vals[aind], ms_const] = np.polyfit(Pinf_ms, Einf_1stderiv_ms, deg=1)
# Pcrit_norm = Pcrit / (mI * nu)
# Pcrit_tck = interpolate.splrep(aIBi_Vals, Pcrit_norm, s=0, k=3)
# aIBi_interpVals = np.linspace(np.min(aIBi_Vals), np.max(aIBi_Vals), 5 * aIBi_Vals.size)
# Pcrit_interpVals = 1 * interpolate.splev(aIBi_interpVals, Pcrit_tck, der=0)
# print(Pcrit_norm)
# print(Pcrit_norm[1], Pcrit_norm[5], Pcrit_norm[-5])
# massEnhancement_Vals = (1 / ms_Vals) / mI
# mE_tck = interpolate.splrep(aIBi_Vals, massEnhancement_Vals, s=0)
# aIBi_interpVals = np.linspace(np.min(aIBi_Vals), np.max(aIBi_Vals), 5 * aIBi_Vals.size)
# mE_interpVals = 1 * interpolate.splev(aIBi_interpVals, mE_tck, der=0)
# # scalefac = 1.0
# scalefac = 0.95 # just to align weakly interacting case slightly to 1 (it's pretty much there, would just need higher resolution data)
# Pcrit_norm = scalefac * Pcrit_norm
# Pcrit_interpVals = scalefac * Pcrit_interpVals
# # xmin = np.min(aIBi_interpVals / xi); xmax = 1.01 * np.max(aIBi_interpVals / xi)
# # ymin = 0; ymax = 1.01 * np.max(Pcrit_interpVals)
# xmin = -11.45; xmax = 0.25
# ymin = -0.1; ymax = 4.0
# font = {'family': 'serif', 'color': 'black', 'size': legendsize}
# sfont = {'family': 'serif', 'color': 'black', 'size': legendsize - 1}
# ax_PD.plot(aIBi_Vals * xi, Pcrit_norm, marker='s', linestyle='None', mec='k', mfc='None', ms=5)
# ax_PD.plot(aIBi_interpVals * xi, Pcrit_interpVals, 'k-')
# # f1 = interpolate.interp1d(aIBi_Vals, Pcrit_norm, kind='cubic')
# # ax_PD.plot(aIBi_interpVals, f1(aIBi_interpVals), 'k-')
# ax_PD.set_xlabel(r'$a_{\rm IB}^{-1}/\xi^{-1}$', fontsize=labelsize)
# ax_PD.set_ylabel(r'Total Momentum $P/(m_{I}c)$', fontsize=labelsize)
# ax_PD.set_xlim([xmin, xmax]); ax_PD.set_ylim([ymin, ymax])
# ax_PD.fill_between(aIBi_interpVals * xi, Pcrit_interpVals, ymax - 0.1, facecolor=base2, alpha=0.75)
# ax_PD.fill_between(aIBi_interpVals * xi, ymin + 0.1, Pcrit_interpVals, facecolor=base02, alpha=0.3)
# # ax_PD.text(-3.2, ymin + 0.155 * (ymax - ymin), 'Polaron', fontdict=font)
# # ax_PD.text(-3.1, ymin + 0.08 * (ymax - ymin), '(' + r'$Z>0$' + ')', fontdict=sfont)
# ax_PD.text(-10.5, ymin + 0.155 * (ymax - ymin), 'Subsonic', fontdict=font)
# # ax_PD.text(-10.5, ymin + 0.155 * (ymax - ymin), 'Polaron', fontdict=font)
# ax_PD.text(-10.2, ymin + 0.08 * (ymax - ymin), r'$Z>0$', fontdict=sfont)
# ax_PD.text(-10.5, ymin + 0.86 * (ymax - ymin), 'Cherenkov', fontdict=font)
# ax_PD.text(-10.2, ymin + 0.785 * (ymax - ymin), r'$Z=0$', fontdict=sfont)
# # ax_PD.text(-5.7, ymin + 0.5 * (ymax - ymin), 'Dynamical', fontdict=font, color=red)
# # ax_PD.text(-5.6, ymin + 0.44 * (ymax - ymin), 'Transition', fontdict=font, color=red)
# # # POLARON EFFECTIVE MASS (SPHERICAL)
# # ax_PD.plot(aIBi_Vals * xi, massEnhancement_Vals, color='#ba9e88', marker='D', linestyle='None', markerfacecolor='None', mew=1, ms=5)
# ax_PD.plot(aIBi_interpVals * xi, mE_interpVals, color='k', linestyle='dashed')
# # CONNECTING LINES TO DISTRIBUTION FUNCTIONS
# supDist_coords = [-5.0 * xi, 3.0] # is [aIBi/xi, P/(mI*c)]
# subDist_coords = [-5.0 * xi, 0.5] # is [aIBi/xi, P/(mI*c)]
# ax_PD.plot(supDist_coords[0], supDist_coords[1], linestyle='', marker='8', mec='k', mfc='k', ms=10)
# ax_PD.plot(subDist_coords[0], subDist_coords[1], linestyle='', marker='8', mec='k', mfc='k', ms=10)
# # # For ground state impurity distributions
# # con_sup = ConnectionPatch(xyA=(supDist_coords[0], supDist_coords[1]), xyB=(0, 0.49), coordsA="data", coordsB="data", axesA=ax_PD, axesB=ax_supDist, color='k', linestyle='dotted', lw=0.5)
# # con_sub = ConnectionPatch(xyA=(subDist_coords[0], subDist_coords[1]), xyB=(0, 0.34), coordsA="data", coordsB="data", axesA=ax_PD, axesB=ax_subDist, color='k', linestyle='dotted', lw=0.5)
# # For dynamical real space density distributions
# con_sup = ConnectionPatch(xyA=(supDist_coords[0], supDist_coords[1]), xyB=(0, -7), coordsA="data", coordsB="data", axesA=ax_PD, axesB=ax_supDist, color='k', linestyle='dotted', lw=0.5)
# con_sub = ConnectionPatch(xyA=(subDist_coords[0], subDist_coords[1]), xyB=(0, -25), coordsA="data", coordsB="data", axesA=ax_PD, axesB=ax_subDist, color='k', linestyle='dotted', lw=0.5)
# ax_PD.add_artist(con_sup)
# ax_PD.add_artist(con_sub)
# # # GROUND STATE IMPURITY DISTRIBUTION (CARTESIAN)
# # # GaussianBroadening = True; sigma = 0.0168
# # GaussianBroadening = True; sigma = 0.02
# # incoh_color = green
# # delta_color = base02
# # def GPDF(xVals, mean, stdev):
# # return (1 / (stdev * np.sqrt(2 * np.pi))) * np.exp(-0.5 * ((xVals - mean) / stdev)**2)
# # # return (1 / (1 * np.sqrt(2 * np.pi))) * np.exp(-0.5 * ((xVals - mean) / stdev)**2)
# # aIBi = -5
# # qds_aIBi = xr.open_dataset(innerdatapath_cart + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# # PVals = qds_aIBi['P'].values
# # nPIm_FWHM_indices = []
# # nPIm_distPeak_index = np.zeros(PVals.size, dtype=int)
# # nPIm_FWHM_Vals = np.zeros(PVals.size)
# # nPIm_distPeak_Vals = np.zeros(PVals.size)
# # nPIm_deltaPeak_Vals = np.zeros(PVals.size)
# # nPIm_Tot_Vals = np.zeros(PVals.size)
# # nPIm_Vec = np.empty(PVals.size, dtype=np.object)
# # PIm_Vec = np.empty(PVals.size, dtype=np.object)
# # for ind, P in enumerate(PVals):
# # qds_nPIm_inf = qds_aIBi['nPI_mag'].sel(P=P).isel(t=-1).dropna('PI_mag')
# # PIm_Vals = qds_nPIm_inf.coords['PI_mag'].values
# # dPIm = PIm_Vals[1] - PIm_Vals[0]
# # nPIm_Vec[ind] = qds_nPIm_inf.values
# # PIm_Vec[ind] = PIm_Vals
# # # # Calculate nPIm(t=inf) normalization
# # nPIm_Tot_Vals[ind] = np.sum(qds_nPIm_inf.values * dPIm) + qds_aIBi.sel(P=P).isel(t=-1)['mom_deltapeak'].values
# # # Calculate FWHM, distribution peak, and delta peak
# # nPIm_FWHM_Vals[ind] = pfc.FWHM(PIm_Vals, qds_nPIm_inf.values)
# # nPIm_distPeak_Vals[ind] = np.max(qds_nPIm_inf.values)
# # nPIm_deltaPeak_Vals[ind] = qds_aIBi.sel(P=P).isel(t=-1)['mom_deltapeak'].values
# # D = qds_nPIm_inf.values - np.max(qds_nPIm_inf.values) / 2
# # indices = np.where(D > 0)[0]
# # nPIm_FWHM_indices.append((indices[0], indices[-1]))
# # nPIm_distPeak_index[ind] = np.argmax(qds_nPIm_inf.values)
# # Pnorm = PVals / (mI * nu)
# # Pratio_sup = 3.0; Pind_sup = np.abs(Pnorm - Pratio_sup).argmin()
# # Pratio_sub = 0.5; Pind_sub = np.abs(Pnorm - Pratio_sub).argmin()
# # print(Pnorm[Pind_sup], Pnorm[Pind_sub])
# # print(nPIm_deltaPeak_Vals[Pind_sup], nPIm_deltaPeak_Vals[Pind_sub])
# # ax_supDist.plot(PIm_Vec[Pind_sup] / (mI * nu), nPIm_Vec[Pind_sup], color=incoh_color, lw=1.0, label='Incoherent Part')
# # ax_supDist.set_xlim([-0.01, 5])
# # ax_supDist.set_ylim([0, 1.05])
# # ax_supDist.set_ylabel(r'$n_{|\mathbf{P}_{\rm imp}|}$', fontsize=labelsize)
# # # ax_supDist.set_xlabel(r'$|\vec{P_{I}}|/(m_{I}c)$', fontsize=labelsize)
# # ax_supDist.fill_between(PIm_Vec[Pind_sup] / (mI * nu), np.zeros(PIm_Vals.size), nPIm_Vec[Pind_sup], facecolor=incoh_color, alpha=0.25)
# # if GaussianBroadening:
# # Pnorm_sup = PVals[Pind_sup] / (mI * nu)
# # deltaPeak_sup = nPIm_deltaPeak_Vals[Pind_sup]
# # PIm_norm_sup = PIm_Vec[Pind_sup] / (mI * nu)
# # delta_GB_sup = deltaPeak_sup * GPDF(PIm_norm_sup, Pnorm_sup, sigma)
# # # ax_supDist.plot(PIm_norm_sup, delta_GB_sup, linestyle='-', color=delta_color, linewidth=1, label=r'$\delta$-Peak')
# # ax_supDist.plot(PIm_norm_sup, delta_GB_sup, linestyle='-', color=delta_color, linewidth=1.0, label='')
# # ax_supDist.fill_between(PIm_norm_sup, np.zeros(PIm_norm_sup.size), delta_GB_sup, facecolor=delta_color, alpha=0.25)
# # else:
# # ax_supDist.plot((PVals[Pind_sup] / (mI * nu)) * np.ones(PIm_Vals.size), np.linspace(0, nPIm_deltaPeak_Vals[Pind_sup], PIm_Vals.size), linestyle='-', color=delta_color, linewidth=1.5, label='Delta Peak (Z-factor)')
# # ax_supDist.legend(loc=1, fontsize=legendsize, frameon=False)
# # ax_subDist.plot(PIm_Vec[Pind_sub] / (mI * nu), nPIm_Vec[Pind_sub], color=incoh_color, lw=1.0, label='Incoherent Part')
# # # ax_subDist.set_xlim([-0.01, np.max(PIm_Vec[Pind_sub] / (mI*nu))])
# # ax_subDist.set_xlim([-0.01, 5])
# # ax_subDist.set_ylim([0, 1.05])
# # ax_subDist.set_ylabel(r'$n_{|\mathbf{P}_{\rm imp}|}$', fontsize=labelsize)
# # ax_subDist.set_xlabel(r'$|\mathbf{P}_{\rm imp}|/(m_{I}c)$', fontsize=labelsize)
# # ax_subDist.fill_between(PIm_Vec[Pind_sub] / (mI * nu), np.zeros(PIm_Vals.size), nPIm_Vec[Pind_sub], facecolor=incoh_color, alpha=0.25)
# # if GaussianBroadening:
# # Pnorm_sub = PVals[Pind_sub] / (mI * nu)
# # deltaPeak_sub = nPIm_deltaPeak_Vals[Pind_sub]
# # PIm_norm_sub = PIm_Vec[Pind_sub] / (mI * nu)
# # delta_GB_sub = deltaPeak_sub * GPDF(PIm_norm_sub, Pnorm_sub, sigma)
# # print(np.trapz(delta_GB_sub, PIm_norm_sub))
# # # ax_subDist.plot(PIm_norm_sub, delta_GB_sub, linestyle='-', color=delta_color, linewidth=1.0, label=r'$\delta$-Peak')
# # # ax_subDist.fill_between(PIm_norm_sub, np.zeros(PIm_norm_sub.size), delta_GB_sub, facecolor=delta_color, alpha=0.25)
# # ax_subDist.axvline(x=Pnorm_sub - 0.05, linestyle='-', color=delta_color, lw=1)
# # ax_subDist.axvline(x=Pnorm_sub + 0.05, linestyle='-', color=delta_color, lw=1)
# # else:
# # ax_subDist.plot((PVals[Pind_sub] / (mI * nu)) * np.ones(PIm_Vals.size), np.linspace(0, nPIm_deltaPeak_Vals[Pind_sub], PIm_Vals.size), linestyle='-', color=delta_color, linewidth=1, label='Delta Peak (Z-factor)')
# # ax_subDist.legend(loc=1, fontsize=legendsize, frameon=False)
# # print(deltaPeak_sub, deltaPeak_sup)
# # ax_PD.tick_params(direction='in', right=True, top=True)
# # ax_subDist.tick_params(direction='in', right=True, top=True)
# # ax_supDist.tick_params(direction='in', right=True, top=True)
# # ax_supDist.xaxis.set_ticklabels([])
# # GAS DENSITY REAL SPACE DISTRIBUTION (CARTESIAN INTERPOLATION)
# interpdatapath = '/Users/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_1.11E+08_resRat_0.50/massRatio=1.0/redyn_spherical/interp'
# cmap = 'afmhot'
# avmin = 1e-5; avmax = 1e-1
# aIBi = -5
# Pratio_sup = 3.0
# Pratio_sub = 0.52
# tratio = 39.99
# nu = 0.7926654595212022
# xi = 0.8920620580763856
# tscale = xi / nu
# linDimMajor, linDimMinor = (10, 10)
# interp_ds_sup = xr.open_dataset(interpdatapath + '/InterpDat_P_{:.2f}_aIBi_{:.2f}_t_{:.2f}_lDM_{:.2f}_lDm_{:.2f}.nc'.format(Pratio_sup * nu, aIBi, tratio * tscale, linDimMajor, linDimMinor))
# interp_ds_sub = xr.open_dataset(interpdatapath + '/InterpDat_P_{:.2f}_aIBi_{:.2f}_t_{:.2f}_lDM_{:.2f}_lDm_{:.2f}.nc'.format(Pratio_sub * nu, aIBi, tratio * tscale, linDimMajor, linDimMinor))
# n0 = interp_ds_sup.attrs['n0']; gBB = interp_ds_sup.attrs['gBB']; mI = interp_ds_sup.attrs['mI']; mB = interp_ds_sup.attrs['mB']
# nu = np.sqrt(n0 * gBB / mB)
# mc = mI * nu
# aBB = (mB / (4 * np.pi)) * gBB
# xi = (8 * np.pi * n0 * aBB)**(-1 / 2)
# tscale = xi / nu
# P_sup = interp_ds_sup.attrs['P']; Pratio_sup = P_sup / mc
# P_sub = interp_ds_sub.attrs['P']; Pratio_sub = P_sub / mc
# xL = interp_ds_sup['x'].values; yL = interp_ds_sup['y'].values; zL = interp_ds_sup['z'].values
# xLg, zLg = np.meshgrid(xL, zL, indexing='ij')
# dx = xL[1] - xL[0]; dy = yL[1] - yL[0]; dz = zL[1] - zL[0]
# na_xz_int_sup = interp_ds_sup['na_xz_int'].values; na_xz_int_norm_sup = na_xz_int_sup / (np.sum(na_xz_int_sup) * dx * dz)
# na_xz_int_sub = interp_ds_sub['na_xz_int'].values; na_xz_int_norm_sub = na_xz_int_sub / (np.sum(na_xz_int_sub) * dx * dz)
# quad_sup = ax_supDist.pcolormesh(zLg / xi, xLg / xi, na_xz_int_norm_sup, norm=colors.LogNorm(vmin=avmin, vmax=avmax), cmap=cmap, rasterized=True)
# # ax_supDist.text(0.57, 0.85, r'$t/(\xi c^{-1})$' + ': {:.1f}'.format(tratio), transform=ax_supDist.transAxes, color='w', fontsize=legendsize - 1)
# ax_supDist.set_ylabel(r'$x/\xi$', labelpad=-10, fontsize=labelsize)
# quad_sub = ax_subDist.pcolormesh(zLg / xi, xLg / xi, na_xz_int_norm_sub, norm=colors.LogNorm(vmin=avmin, vmax=avmax), cmap=cmap, rasterized=True)
# # ax_subDist.text(0.57, 0.85, r'$t/(\xi c^{-1})$' + ': {:.1f}'.format(tratio), transform=ax_subDist.transAxes, color='w', fontsize=legendsize - 1)
# ax_subDist.set_xlabel(r'$z/\xi$', fontsize=labelsize)
# ax_subDist.set_ylabel(r'$x/\xi$', labelpad=-10, fontsize=labelsize)
# fig1.colorbar(quad_sup, cax=ax_colorbar, extend='both')
# ax_PD.tick_params(direction='in', right=True, top=True)
# ax_subDist.tick_params(direction='in', right=True, top=True)
# ax_supDist.tick_params(direction='in', right=True, top=True)
# ax_supDist.xaxis.set_ticklabels([])
# # # # DPT
# qds = xr.open_dataset('/Users/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_1.11E+08_resRat_0.50/massRatio=1.0_noCSAmp/redyn_spherical' + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# tVals = qds['t'].values
# DynOvExp_NegMask = False
# DynOvExp_Cut = False
# cut = 1e-4
# consecDetection = True
# consecSamples = 10
# def powerfunc(t, a, b):
# return b * t**(-1 * a)
# tmin = 90
# tmax = 100
# tfVals = tVals[(tVals <= tmax) * (tVals >= tmin)]
# rollwin = 1
# colorList = ['red', '#7e1e9c', 'green', 'orange', '#60460f', 'blue', 'magenta']
# lineList = ['solid', 'dashed', 'dotted', '-.']
# aIBi_des = np.array([-10.0, -5.0, -3.5, -2.5, -2.0, -1.75])
# massRat_des = np.array([1.0])
# datapath = '/Users/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_1.11E+08_resRat_0.50/massRatio=1.0_noCSAmp'
# Pcrit_da = xr.DataArray(np.full(aIBi_des.size, np.nan, dtype=float), coords=[aIBi_des], dims=['aIBi'])
# for inda, aIBi in enumerate(aIBi_des):
# mds = xr.open_dataset(datapath + '/redyn_spherical/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# Plen = mds.coords['P'].values.size
# Pstart_ind = 0
# PVals = mds.coords['P'].values[Pstart_ind:Plen]
# n0 = mds.attrs['n0']
# gBB = mds.attrs['gBB']
# mI = mds.attrs['mI']
# mB = mds.attrs['mB']
# nu = np.sqrt(n0 * gBB / mB)
# vI0_Vals = (PVals - mds.isel(t=0, P=np.arange(Pstart_ind, Plen))['Pph'].values) / mI
# mds_ts = mds.sel(t=tfVals)
# DynOv_Exponents = np.zeros(PVals.size)
# DynOv_Constants = np.zeros(PVals.size)
# for indP, P in enumerate(PVals):
# DynOv_raw = np.abs(mds_ts.isel(P=indP)['Real_DynOv'].values + 1j * mds_ts.isel(P=indP)['Imag_DynOv'].values).real.astype(float)
# DynOv_ds = xr.DataArray(DynOv_raw, coords=[tfVals], dims=['t'])
# # DynOv_ds = DynOv_ds.rolling(t=rollwin, center=True).mean().dropna('t')
# DynOv_Vals = DynOv_ds.values
# tDynOvc_Vals = DynOv_ds['t'].values
# S_slope, S_intercept, S_rvalue, S_pvalue, S_stderr = ss.linregress(np.log(tDynOvc_Vals), np.log(DynOv_Vals))
# DynOv_Exponents[indP] = -1 * S_slope
# DynOv_Constants[indP] = np.exp(S_intercept)
# if DynOvExp_NegMask:
# DynOv_Exponents[DynOv_Exponents < 0] = 0
# if DynOvExp_Cut:
# DynOv_Exponents[np.abs(DynOv_Exponents) < cut] = 0
# if consecDetection:
# crit_ind = 0
# for indE, exp in enumerate(DynOv_Exponents):
# if indE > DynOv_Exponents.size - consecDetection:
# break
# expSlice = DynOv_Exponents[indE:(indE + consecSamples)]
# if np.all(expSlice > 0):
# crit_ind = indE
# break
# DynOv_Exponents[0:crit_ind] = 0
# Pcrit_da[inda] = PVals[crit_ind] / (mI * nu)
# DynOvf_Vals = powerfunc(1e1000, DynOv_Exponents, DynOv_Constants)
# ax_PD.plot(aIBi_des * xi, Pcrit_da.values, linestyle='None', marker='D', mec=red, mfc=red, mew=2, ms=5)
# print(aIBi_des)
# print(Pcrit_da.values)
# fig1.savefig(figdatapath + '/Fig1_Letter.pdf')
# # fig1.savefig(figdatapath + '/Fig1_Letter.jpg', quality=100)
# matplotlib.rcParams['axes.linewidth'] = axl
# # # # FIG 2 - LETTER
# matplotlib.rcParams.update({'font.size': 12})
# labelsize = 13
# legendsize = 12
# fig2 = plt.figure(constrained_layout=False)
# gs1 = fig2.add_gridspec(nrows=2, ncols=1, bottom=0.23, top=0.95, left=0.12, right=0.48, hspace=0.1)
# gs2 = fig2.add_gridspec(nrows=2, ncols=1, bottom=0.23, top=0.95, left=0.61, right=0.98, hspace=0.1)
# ax_gsZ = fig2.add_subplot(gs1[1])
# ax_gsVel = fig2.add_subplot(gs1[0])
# ax_dynS = fig2.add_subplot(gs2[1])
# ax_dynVel = fig2.add_subplot(gs2[0])
# fig2.text(0.02, 0.95, '(a)', fontsize=labelsize)
# fig2.text(0.02, 0.55, '(b)', fontsize=labelsize)
# fig2.text(0.52, 0.95, '(c)', fontsize=labelsize)
# fig2.text(0.52, 0.55, '(d)', fontsize=labelsize)
# # colorList = ['red', 'green', 'blue']
# colorList = [red, green, blue]
# # colorList = ['red', '#7e1e9c', 'green', 'orange', '#60460f', 'blue', 'magenta']
# lineList = ['solid', 'dashed', 'dotted', '-.']
# dyndatapath = '/Users/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_1.11E+08_resRat_0.50/massRatio=1.0_noCSAmp/redyn_spherical'
# # ax_GSE1.set_ylim([0, 1.2 * np.max(Einf_1stderiv_Vals / np.abs(Ecrit))])
# # aIBi_des = np.array([-10.0, -5.0, -3.5, -2.0, -1.0])
# # aIBi_Vals = np.array([-10.0, -5.0, -3.5, -2.0]) # used by many plots (spherical)
# aIBi_Vals = np.array([-10.0, -3.5, -2.0]) # used by many plots (spherical)
# # # POLARON SOUND VELOCITY (SPHERICAL)
# # Check to see if linear part of polaron (total system) energy spectrum has slope equal to sound velocity
# vsound_Vals = np.zeros(aIBi_Vals.size)
# vI_Vals = np.zeros(aIBi_Vals.size)
# for aind, aIBi in enumerate(aIBi_Vals):
# qds = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# qds_aIBi = qds.isel(t=-1)
# ZVals = np.exp(-1 * qds_aIBi['Nph'].values)
# CSAmp_ds = qds_aIBi['Real_CSAmp'] + 1j * qds_aIBi['Imag_CSAmp']
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', CSAmp_ds.coords['k'].values); kgrid.initArray_premade('th', CSAmp_ds.coords['th'].values)
# Energy_Vals_inf = np.zeros(PVals.size)
# PI_Vals = np.zeros(PVals.size)
# for Pind, P in enumerate(PVals):
# CSAmp = CSAmp_ds.sel(P=P).values
# Energy_Vals_inf[Pind] = pfs.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# PI_Vals[Pind] = P - qds_aIBi.sel(P=P)['Pph'].values
# Einf_tck = interpolate.splrep(PVals, Energy_Vals_inf, s=0)
# Pinf_Vals = np.linspace(np.min(PVals), np.max(PVals), 2 * PVals.size)
# Einf_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=0)
# Einf_1stderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=1)
# Einf_1stderiv_Vals_subsamp = 1 * interpolate.splev(PVals, Einf_tck, der=1)
# xmask = (PVals / (mI * nu)) <= 4
# ax_gsZ.plot(PVals[xmask] / (mI * nu), ZVals[xmask], color=colorList[aind], linestyle='solid', marker='D', ms=4)
# # ax_gsVel.plot(Pinf_Vals / (mI * nu), Einf_1stderiv_Vals / nu, color=colorList[aind], linestyle='solid', marker='D', ms=4)
# ax_gsVel.plot(PVals[xmask] / (mI * nu), Einf_1stderiv_Vals_subsamp[xmask] / nu, color=colorList[aind], linestyle='solid', marker='D', ms=4)
# ax_gsVel.plot(Pinf_Vals / (mI * nu), np.ones(Pinf_Vals.size), 'k:')
# ax_gsZ.set_xlabel(r'$P/(m_{I}c)$', fontsize=13)
# ax_gsVel.set_ylabel(r'$v_{\rm pol}/c$', fontsize=13)
# ax_gsZ.set_ylabel(r'$Z$', fontsize=13)
# # DYN S(t) AND VELOCITY
# qds = xr.open_dataset('/Users/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_1.11E+08_resRat_0.50/massRatio=1.0_noCSAmp/redyn_spherical' + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# tVals = qds['t'].values
# mc = mI * nu
# DynOvData_roll = False
# DynOvData_rollwin = 2
# PimpData_roll = False
# PimpData_rollwin = 2
# DynOvExp_roll = False
# DynOvExp_rollwin = 2
# DynOvExp_NegMask = False
# DynOvExp_Cut = False
# cut = 1e-4
# consecDetection = True
# consecSamples = 10
# flattenAboveC = True
# # aIBi_des = np.array([-10.0, -5.0, -3.5, -2.5, -2.0, -1.75])
# Pnorm = PVals / mc
# tmin = 90; tmax = 100
# tfVals = tVals[(tVals <= tmax) * (tVals >= tmin)]
# def powerfunc(t, a, b):
# return b * t**(-1 * a)
# Pcrit_da = xr.DataArray(np.full(aIBi_Vals.size, np.nan, dtype=float), coords=[aIBi_Vals], dims=['aIBi'])
# for inda, aIBi in enumerate(aIBi_Vals):
# qds_aIBi = xr.open_dataset(dyndatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# # print(qds_aIBi['t'].values)
# qds_aIBi_ts = qds_aIBi.sel(t=tfVals)
# PVals = qds_aIBi['P'].values
# Pnorm = PVals / mc
# DynOv_Exponents = np.zeros(PVals.size)
# DynOv_Cov = np.full(PVals.size, np.nan)
# vImp_Exponents = np.zeros(PVals.size)
# vImp_Cov = np.full(PVals.size, np.nan)
# Plen = PVals.size
# Pstart_ind = 0
# vI0_Vals = (PVals - qds_aIBi.isel(t=0, P=np.arange(Pstart_ind, Plen))['Pph'].values) / mI
# DynOv_Exponents = np.zeros(PVals.size)
# DynOv_Constants = np.zeros(PVals.size)
# vImp_Exponents = np.zeros(PVals.size)
# vImp_Constants = np.zeros(PVals.size)
# DynOv_Rvalues = np.zeros(PVals.size)
# DynOv_Pvalues = np.zeros(PVals.size)
# DynOv_stderr = np.zeros(PVals.size)
# DynOv_tstat = np.zeros(PVals.size)
# DynOv_logAve = np.zeros(PVals.size)
# for indP, P in enumerate(PVals):
# DynOv_raw = np.abs(qds_aIBi_ts.isel(P=indP)['Real_DynOv'].values + 1j * qds_aIBi_ts.isel(P=indP)['Imag_DynOv'].values).real.astype(float)
# DynOv_ds = xr.DataArray(DynOv_raw, coords=[tfVals], dims=['t'])
# Pph_ds = xr.DataArray(qds_aIBi_ts.isel(P=indP)['Pph'].values, coords=[tfVals], dims=['t'])
# if DynOvData_roll:
# DynOv_ds = DynOv_ds.rolling(t=DynOvData_rollwin, center=True).mean().dropna('t')
# if PimpData_roll:
# Pph_ds = Pph_ds.rolling(t=PimpData_rollwin, center=True).mean().dropna('t')
# DynOv_Vals = DynOv_ds.values
# tDynOv_Vals = DynOv_ds['t'].values
# vImpc_Vals = (P - Pph_ds.values) / mI - nu
# tvImpc_Vals = Pph_ds['t'].values
# S_slope, S_intercept, S_rvalue, S_pvalue, S_stderr = ss.linregress(np.log(tDynOv_Vals), np.log(DynOv_Vals))
# DynOv_Exponents[indP] = -1 * S_slope
# DynOv_Constants[indP] = np.exp(S_intercept)
# DynOv_Rvalues[indP] = S_rvalue
# DynOv_Pvalues[indP] = S_pvalue
# DynOv_stderr[indP] = S_stderr
# DynOv_tstat[indP] = S_slope / S_stderr
# DynOv_logAve[indP] = np.average(np.log(DynOv_Vals))
# # if (-1 * S_slope) < 0:
# # DynOv_Exponents[indP] = 0
# if vImpc_Vals[-1] < 0:
# vImp_Exponents[indP] = 0
# vImp_Constants[indP] = vImpc_Vals[-1]
# else:
# vI_slope, vI_intercept, vI_rvalue, vI_pvalue, vI_stderr = ss.linregress(np.log(tvImpc_Vals), np.log(vImpc_Vals))
# vImp_Exponents[indP] = -1 * vI_slope
# vImp_Constants[indP] = np.exp(vI_intercept)
# if (-1 * vI_slope) < 0:
# vImp_Exponents[indP] = 0
# DynOvExponents_da = xr.DataArray(DynOv_Exponents, coords=[PVals], dims=['P'])
# if DynOvExp_roll:
# DynOvExponents_da = DynOvExponents_da.rolling(P=DynOvExp_rollwin, center=True).mean().dropna('P')
# if DynOvExp_NegMask:
# ExpMask = DynOvExponents_da.values < 0
# DynOvExponents_da[ExpMask] = 0
# if DynOvExp_Cut:
# ExpMask = np.abs(DynOvExponents_da.values) < cut
# DynOvExponents_da[ExpMask] = 0
# DynOv_Exponents = DynOvExponents_da.values
# if consecDetection:
# crit_ind = 0
# for indE, exp in enumerate(DynOv_Exponents):
# if indE > DynOv_Exponents.size - consecDetection:
# break
#                 expSlice = DynOv_Exponents[indE:(indE + consecSamples)]  # NOTE(review): slice length uses consecSamples but the loop bound above uses consecDetection — confirm these two are meant to be equal
# if np.all(expSlice > 0):
# crit_ind = indE
# break
# DynOvExponents_da[0:crit_ind] = 0
# DynOv_Exponents = DynOvExponents_da.values
# Pnorm_dynov = DynOvExponents_da['P'].values / mc
# DynOvf_Vals = powerfunc(1e1000, DynOv_Exponents, DynOv_Constants)
# Pcrit_da[inda] = PVals[crit_ind] / (mI * nu)
# vIf_Vals = nu + powerfunc(1e1000, vImp_Exponents, vImp_Constants)
# if flattenAboveC:
# vIf_Vals[vIf_Vals > nu] = nu
# xmask = (vI0_Vals / nu) <= 4
# ax_dynS.plot(vI0_Vals[xmask] / nu, DynOvf_Vals[xmask], color=colorList[inda], linestyle='solid', marker='D', ms=4)
# ax_dynVel.plot(vI0_Vals[xmask] / nu, vIf_Vals[xmask] / nu, label='{:.2f}'.format(aIBi * xi), color=colorList[inda], linestyle='solid', marker='D', ms=4)
# ax_dynS.set_ylabel(r'$S(t_{\infty})$', fontsize=13)
# ax_dynVel.plot(vI0_Vals / nu, np.ones(vI0_Vals.size), 'k:')
# ax_dynS.set_xlabel(r'$v_{\rm imp}(t_{0})/c$', fontsize=13)
# ax_dynVel.set_ylabel(r'$v_{\rm imp}(t_{\infty})/c$', fontsize=13)
# ax_dynS.tick_params(which='both', direction='in', right=True, top=True)
# ax_dynVel.tick_params(which='both', direction='in', right=True, top=True)
# # GENERAL
# handles, labels = ax_dynVel.get_legend_handles_labels()
# plt.rcParams['legend.title_fontsize'] = 13
# # fig2.legend(handles, labels, title=r'$a_{\rm IB}^{-1}/\xi^{-1}$', ncol=aIBi_Vals.size, loc='lower center', bbox_to_anchor=(0.55, 0.001), fontsize=12)
# fig2.legend(handles, labels, title=r'$a_{\rm IB}^{-1}/\xi^{-1}$', ncol=aIBi_Vals.size, loc='lower center', bbox_to_anchor=(0.55, 0.001), fontsize=12)
# ax_gsVel.xaxis.set_ticklabels([])
# ax_dynVel.xaxis.set_ticklabels([])
# # ax_gsVel.set_xticks([0.0, 1.0, 2.0])
# ax_gsZ.tick_params(direction='in', right=True, top=True)
# ax_gsVel.tick_params(direction='in', right=True, top=True)
# ax_dynS.tick_params(direction='in', right=True, top=True)
# ax_dynVel.tick_params(direction='in', right=True, top=True)
# ax_gsZ.set_xlim([0, 4.14]); ax_gsZ.set_ylim([-0.05, 1.1])
# ax_gsVel.set_xlim([0, 4.14]); ax_gsVel.set_ylim([-0.05, 1.2])
# ax_dynS.set_xlim([-0.05, 4.14]); ax_dynS.set_ylim([-0.05, 1.1])
# ax_dynVel.set_xlim([-0.05, 4.14]); ax_dynVel.set_ylim([-0.05, 1.2])
# fig2.set_size_inches(7.8, 5.2)
# fig2.savefig(figdatapath + '/Fig2_Letter.pdf')
# # # # # # #############################################################################################################################
# # # # # # FIG SM1 - LETTER
# # # # # #############################################################################################################################
# matplotlib.rcParams.update({'font.size': 12})
# labelsize = 13
# legendsize = 12
# figSM1 = plt.figure(constrained_layout=False)
# gs1 = figSM1.add_gridspec(nrows=1, ncols=1, bottom=0.18, top=0.93, left=0.08, right=0.31)
# gs2 = figSM1.add_gridspec(nrows=1, ncols=1, bottom=0.18, top=0.93, left=0.37, right=0.60)
# gs3 = figSM1.add_gridspec(nrows=1, ncols=1, bottom=0.18, top=0.93, left=0.70, right=0.94)
# ax_subDist = figSM1.add_subplot(gs1[0])
# ax_supDist = figSM1.add_subplot(gs2[0])
# ax_distChar = figSM1.add_subplot(gs3[0])
# # figSM1.set_size_inches(7.8, 3.5)
# figSM1.set_size_inches(7.8, 2.5)
# figSM1.text(0.005, 0.93, '(a)', fontsize=labelsize)
# figSM1.text(0.325, 0.93, '(b)', fontsize=labelsize)
# figSM1.text(0.62, 0.93, '(c)', fontsize=labelsize)
# # GROUND STATE IMPURITY DISTRIBUTION (CARTESIAN)
# # GaussianBroadening = True; sigma = 0.0168
# GaussianBroadening = True; sigma = 0.02
# incoh_color = green
# delta_color = base02
# def GPDF(xVals, mean, stdev):
# return (1 / (stdev * np.sqrt(2 * np.pi))) * np.exp(-0.5 * ((xVals - mean) / stdev)**2)
# # return (1 / (1 * np.sqrt(2 * np.pi))) * np.exp(-0.5 * ((xVals - mean) / stdev)**2)
# aIBi = -5
# qds_aIBi = xr.open_dataset(innerdatapath_cart + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# PVals = qds_aIBi['P'].values
# print('Interaction: {0}'.format(aIBi * xi))
# nPIm_FWHM_indices = []
# nPIm_distPeak_index = np.zeros(PVals.size, dtype=int)
# nPIm_FWHM_Vals = np.zeros(PVals.size)
# nPIm_distPeak_Vals = np.zeros(PVals.size)
# nPIm_deltaPeak_Vals = np.zeros(PVals.size)
# nPIm_Tot_Vals = np.zeros(PVals.size)
# nPIm_Vec = np.empty(PVals.size, dtype=np.object)
# PIm_Vec = np.empty(PVals.size, dtype=np.object)
# for ind, P in enumerate(PVals):
# qds_nPIm_inf = qds_aIBi['nPI_mag'].sel(P=P).isel(t=-1).dropna('PI_mag')
# PIm_Vals = qds_nPIm_inf.coords['PI_mag'].values
# dPIm = PIm_Vals[1] - PIm_Vals[0]
# nPIm_Vec[ind] = qds_nPIm_inf.values
# PIm_Vec[ind] = PIm_Vals
# # # Calculate nPIm(t=inf) normalization
# nPIm_Tot_Vals[ind] = np.sum(qds_nPIm_inf.values * dPIm) + qds_aIBi.sel(P=P).isel(t=-1)['mom_deltapeak'].values
# # Calculate FWHM, distribution peak, and delta peak
# nPIm_FWHM_Vals[ind] = pfc.FWHM(PIm_Vals, qds_nPIm_inf.values)
# nPIm_distPeak_Vals[ind] = np.max(qds_nPIm_inf.values)
# nPIm_deltaPeak_Vals[ind] = qds_aIBi.sel(P=P).isel(t=-1)['mom_deltapeak'].values
# D = qds_nPIm_inf.values - np.max(qds_nPIm_inf.values) / 2
# indices = np.where(D > 0)[0]
# nPIm_FWHM_indices.append((indices[0], indices[-1]))
# nPIm_distPeak_index[ind] = np.argmax(qds_nPIm_inf.values)
# Pnorm = PVals / (mI * nu)
# Pratio_sup = 3.0; Pind_sup = np.abs(Pnorm - Pratio_sup).argmin()
# Pratio_sub = 0.5; Pind_sub = np.abs(Pnorm - Pratio_sub).argmin()
# print(Pnorm[Pind_sup], Pnorm[Pind_sub])
# print(nPIm_deltaPeak_Vals[Pind_sup], nPIm_deltaPeak_Vals[Pind_sub])
# ax_supDist.plot(PIm_Vec[Pind_sup] / (mI * nu), nPIm_Vec[Pind_sup], color=incoh_color, lw=1.0, label='Incoherent Part')
# ax_supDist.set_xlim([-0.01, 5])
# ax_supDist.set_ylim([0, 1.05])
# # ax_supDist.set_ylabel(r'$n_{|\mathbf{P}_{\rm imp}|}$', fontsize=labelsize)
# ax_supDist.set_xlabel(r'$|\mathbf{P}_{\rm imp}|/(m_{I}c)$', fontsize=labelsize)
# ax_supDist.fill_between(PIm_Vec[Pind_sup] / (mI * nu), np.zeros(PIm_Vals.size), nPIm_Vec[Pind_sup], facecolor=incoh_color, alpha=0.25)
# if GaussianBroadening:
# Pnorm_sup = PVals[Pind_sup] / (mI * nu)
# deltaPeak_sup = nPIm_deltaPeak_Vals[Pind_sup]
# PIm_norm_sup = PIm_Vec[Pind_sup] / (mI * nu)
# delta_GB_sup = deltaPeak_sup * GPDF(PIm_norm_sup, Pnorm_sup, sigma)
# # ax_supDist.plot(PIm_norm_sup, delta_GB_sup, linestyle='-', color=delta_color, linewidth=1, label=r'$\delta$-Peak')
# ax_supDist.plot(PIm_norm_sup, delta_GB_sup, linestyle='-', color=delta_color, linewidth=1.0, label='')
# ax_supDist.fill_between(PIm_norm_sup, np.zeros(PIm_norm_sup.size), delta_GB_sup, facecolor=delta_color, alpha=0.25)
# else:
# ax_supDist.plot((PVals[Pind_sup] / (mI * nu)) * np.ones(PIm_Vals.size), np.linspace(0, nPIm_deltaPeak_Vals[Pind_sup], PIm_Vals.size), linestyle='-', color=delta_color, linewidth=1.5, label='Delta Peak (Z-factor)')
# ax_supDist.legend(loc=1, fontsize=legendsize - 3, frameon=False)
# ax_subDist.plot(PIm_Vec[Pind_sub] / (mI * nu), nPIm_Vec[Pind_sub], color=incoh_color, lw=1.0, label='Incoherent Part')
# # ax_subDist.set_xlim([-0.01, np.max(PIm_Vec[Pind_sub] / (mI*nu))])
# ax_subDist.set_xlim([-0.01, 5])
# ax_subDist.set_ylim([0, 1.05])
# ax_subDist.set_ylabel(r'$n_{|\mathbf{P}_{\rm imp}|}$', fontsize=labelsize)
# ax_subDist.set_xlabel(r'$|\mathbf{P}_{\rm imp}|/(m_{I}c)$', fontsize=labelsize)
# ax_subDist.fill_between(PIm_Vec[Pind_sub] / (mI * nu), np.zeros(PIm_Vals.size), nPIm_Vec[Pind_sub], facecolor=incoh_color, alpha=0.25)
# if GaussianBroadening:
# Pnorm_sub = PVals[Pind_sub] / (mI * nu)
# deltaPeak_sub = nPIm_deltaPeak_Vals[Pind_sub]
# PIm_norm_sub = PIm_Vec[Pind_sub] / (mI * nu)
# delta_GB_sub = deltaPeak_sub * GPDF(PIm_norm_sub, Pnorm_sub, sigma)
# print(np.trapz(delta_GB_sub, PIm_norm_sub))
# ax_subDist.plot(PIm_norm_sub, delta_GB_sub, linestyle='-', color=delta_color, linewidth=1.0, label=r'$\delta$-Peak')
# ax_subDist.fill_between(PIm_norm_sub, np.zeros(PIm_norm_sub.size), delta_GB_sub, facecolor=delta_color, alpha=0.25)
# # ax_subDist.axvline(x=Pnorm_sub - 0.05, linestyle='-', color=delta_color, lw=1)
# # ax_subDist.axvline(x=Pnorm_sub + 0.05, linestyle='-', color=delta_color, lw=1)
# else:
# ax_subDist.plot((PVals[Pind_sub] / (mI * nu)) * np.ones(PIm_Vals.size), np.linspace(0, nPIm_deltaPeak_Vals[Pind_sub], PIm_Vals.size), linestyle='-', color=delta_color, linewidth=1, label='Delta Peak (Z-factor)')
# ax_subDist.legend(loc=1, fontsize=legendsize - 3, frameon=False)
# print(deltaPeak_sub, deltaPeak_sup)
# ax_subDist.set_yticks([0.0, 0.5, 1.0])
# ax_supDist.set_yticks([0.0, 0.5, 1.0])
# ax_subDist.tick_params(direction='in', right=True, top=True)
# ax_supDist.tick_params(direction='in', right=True, top=True)
# ax_supDist.yaxis.set_ticklabels([])
# ax_distChar2 = ax_distChar.twinx()
# ax_distChar.tick_params(axis='y', labelcolor=delta_color, direction='in')
# ax_distChar2.tick_params(axis='y', labelcolor=incoh_color, direction='in')
# ax_distChar.tick_params(direction='in', top=True)
# ax_distChar.plot(PVals / (mI * nu), nPIm_deltaPeak_Vals, linestyle='-', color=delta_color, alpha=0.75)
# ax_distChar2.plot(PVals / (mI * nu), nPIm_FWHM_Vals, linestyle='-', color=incoh_color)
# ax_distChar.set_xlim([-0.01, 5])
# ax_distChar.set_xlabel(r'$P/(m_{I}c)$', fontsize=labelsize)
# ax_distChar.set_ylim([-0.05, 1.05])
# ax_distChar.set_yticks([0.0, 0.5, 1.0])
# ax_distChar2.set_ylim([-0.05, 2.05])
# ax_distChar2.set_yticks([0.0, 1.0, 2.0])
# ax_distChar.set_ylabel(r'$\delta$-Peak Magnitude', fontsize=labelsize, color=delta_color, alpha=0.75)
# ax_distChar2.set_ylabel('Incoherent Part FWHM', rotation=270, labelpad=17, fontsize=labelsize, color=incoh_color)
# figSM1.savefig(figdatapath + '/FigSM1_Letter.pdf')
# # # # # # #############################################################################################################################
# # # # # # FIG SM2 - LETTER
# # # # # #############################################################################################################################
# matplotlib.rcParams.update({'font.size': 12})
# labelsize = 13
# legendsize = 12
# figSM2 = plt.figure(constrained_layout=False)
# gs1 = figSM2.add_gridspec(nrows=1, ncols=1, bottom=0.16, top=0.93, left=0.1, right=0.45)
# gs2 = figSM2.add_gridspec(nrows=1, ncols=1, bottom=0.16, top=0.93, left=0.6, right=0.98)
# ax_supDist = figSM2.add_subplot(gs2[0])
# ax_subDist = figSM2.add_subplot(gs1[0])
# figSM2.set_size_inches(7.8, 3.5)
# figSM2.text(0.01, 0.94, '(a)', fontsize=labelsize)
# figSM2.text(0.5, 0.94, '(b)', fontsize=labelsize)
# interpdatapath = '/Users/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_1.11E+08_resRat_0.50/massRatio=1.0/redyn_spherical/interp'
# zlim = 20
# aIBi = -5
# Pratio_sup = 3.0
# Pratio_sub = 0.52
# tratio = 39.99
# nu = 0.7926654595212022
# xi = 0.8920620580763856
# tscale = xi / nu
# linDimMajor, linDimMinor = (10, 10)
# interp_ds_sup = xr.open_dataset(interpdatapath + '/InterpDat_P_{:.2f}_aIBi_{:.2f}_t_{:.2f}_lDM_{:.2f}_lDm_{:.2f}.nc'.format(Pratio_sup * nu, aIBi, tratio * tscale, linDimMajor, linDimMinor))
# interp_ds_sub = xr.open_dataset(interpdatapath + '/InterpDat_P_{:.2f}_aIBi_{:.2f}_t_{:.2f}_lDM_{:.2f}_lDm_{:.2f}.nc'.format(Pratio_sub * nu, aIBi, tratio * tscale, linDimMajor, linDimMinor))
# n0 = interp_ds_sup.attrs['n0']; gBB = interp_ds_sup.attrs['gBB']; mI = interp_ds_sup.attrs['mI']; mB = interp_ds_sup.attrs['mB']
# nu = np.sqrt(n0 * gBB / mB)
# mc = mI * nu
# aBB = (mB / (4 * np.pi)) * gBB
# xi = (8 * np.pi * n0 * aBB)**(-1 / 2)
# tscale = xi / nu
# P_sup = interp_ds_sup.attrs['P']; Pratio_sup = P_sup / mc
# P_sub = interp_ds_sub.attrs['P']; Pratio_sub = P_sub / mc
# xL = interp_ds_sup['x'].values; yL = interp_ds_sup['y'].values; zL = interp_ds_sup['z'].values
# dx = xL[1] - xL[0]; dy = yL[1] - yL[0]; dz = zL[1] - zL[0]
# na_xz_int_sup = interp_ds_sup['na_xz_int'].values; na_xz_int_norm_sup = na_xz_int_sup / (np.sum(na_xz_int_sup) * dx * dz)
# na_xz_int_sub = interp_ds_sub['na_xz_int'].values; na_xz_int_norm_sub = na_xz_int_sub / (np.sum(na_xz_int_sub) * dx * dz)
# na_z_int_sup = np.sum(na_xz_int_norm_sup, axis=0) * dx
# na_z_int_sub = np.sum(na_xz_int_norm_sub, axis=0) * dx
# ax_supDist.plot(zL / xi, na_z_int_sup, color=red, linestyle='-')
# ax_supDist.set_xlim([-1 * zlim, zlim])
# ax_supDist.set_xlabel(r'$z/\xi$', fontsize=labelsize)
# ax_supDist.set_ylabel(r'$n_{a}(z)$', fontsize=labelsize)
# ax_supDist.set_ylim([0, 0.12])
# ax_subDist.plot(zL / xi, na_z_int_sub, color=red, linestyle='-')
# ax_subDist.set_xlim([-1 * zlim, zlim])
# ax_subDist.set_xlabel(r'$z/\xi$', fontsize=labelsize)
# ax_subDist.set_ylabel(r'$n_{a}(z)$', fontsize=labelsize)
# ax_subDist.set_ylim([0, 0.12])
# ax_supDist.tick_params(direction='in', right=True, top=True)
# ax_subDist.tick_params(direction='in', right=True, top=True)
# from matplotlib.patches import Rectangle
# rect = Rectangle((0.2, 0.0135), 8.3, 0.05, linestyle='dashed', facecolor='None', edgecolor='k')
# ax_supDist.add_patch(rect)
# figSM2.savefig(figdatapath + '/FigSM2_Letter.pdf')
# # # # # # #############################################################################################################################
# # # # # # FIG SM3 - LETTER
# # # # # #############################################################################################################################
# # # # # # #############################################################################################################################
# # # # # # OLD FIGS
# # # # # #############################################################################################################################
# # # # FIG 1 (OLD) - POLARON GRAPHIC + BOGO DISPERSION + PHASE DIAGRAM + DISTRIBUTION PLOTS
# matplotlib.rcParams.update({'font.size': 12})
# labelsize = 13
# legendsize = 12
# fig1 = plt.figure(constrained_layout=False)
# gs1 = fig1.add_gridspec(nrows=2, ncols=1, bottom=0.55, top=0.95, left=0.12, right=0.35, height_ratios=[1, 1])
# gs2 = fig1.add_gridspec(nrows=1, ncols=1, bottom=0.55, top=0.95, left=0.5, right=0.98)
# gs3 = fig1.add_gridspec(nrows=1, ncols=2, bottom=0.08, top=0.4, left=0.12, right=0.96, wspace=0.3)
# ax_pol = fig1.add_subplot(gs1[0], frame_on=False); ax_pol.get_xaxis().set_visible(False); ax_pol.get_yaxis().set_visible(False)
# ax_bogo = fig1.add_subplot(gs1[1])
# ax_PD = fig1.add_subplot(gs2[0])
# ax_supDist = fig1.add_subplot(gs3[0])
# ax_subDist = fig1.add_subplot(gs3[1])
# fig1.text(0.01, 0.97, '(a)', fontsize=labelsize)
# fig1.text(0.01, 0.75, '(b)', fontsize=labelsize)
# fig1.text(0.43, 0.97, '(c)', fontsize=labelsize)
# fig1.text(0.01, 0.42, '(d)', fontsize=labelsize)
# fig1.text(0.51, 0.42, '(e)', fontsize=labelsize)
# # POLARON GRAPHIC
# polimg = mpimg.imread('images/PolaronGraphic.png')
# imgplot = ax_pol.imshow(polimg)
# # BOGOLIUBOV DISPERSION (SPHERICAL)
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', qds.coords['k'].values); kgrid.initArray_premade('th', qds.coords['th'].values)
# kVals = kgrid.getArray('k')
# wk_Vals = pfs.omegak(kVals, mB, n0, gBB)
# ax_bogo.plot(kVals, wk_Vals, 'k-', label='')
# ax_bogo.plot(kVals, nu * kVals, 'b--', label=r'$c|k|$')
# ax_bogo.set_xlabel(r'$|k|$', fontsize=labelsize)
# ax_bogo.set_ylabel(r'$\omega_{|k|}$', fontsize=labelsize)
# ax_bogo.set_xlim([0, 2])
# ax_bogo.xaxis.set_major_locator(plt.MaxNLocator(2))
# ax_bogo.set_ylim([0, 3])
# ax_bogo.yaxis.set_major_locator(plt.MaxNLocator(3))
# ax_bogo.legend(loc=2, fontsize=legendsize)
# # PHASE DIAGRAM (SPHERICAL)
# Pcrit = np.zeros(aIBi_Vals.size)
# for aind, aIBi in enumerate(aIBi_Vals):
# qds_aIBi = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# CSAmp_ds = qds_aIBi['Real_CSAmp'] + 1j * qds_aIBi['Imag_CSAmp']
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', CSAmp_ds.coords['k'].values); kgrid.initArray_premade('th', CSAmp_ds.coords['th'].values)
# Energy_Vals_inf = np.zeros(PVals.size)
# for Pind, P in enumerate(PVals):
# CSAmp = CSAmp_ds.sel(P=P).isel(t=-1).values
# Energy_Vals_inf[Pind] = pfs.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# Einf_tck = interpolate.splrep(PVals, Energy_Vals_inf, s=0)
# Pinf_Vals = np.linspace(np.min(PVals), np.max(PVals), 2 * PVals.size)
# Einf_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=0)
# Einf_2ndderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=2)
# # Pcrit[aind] = Pinf_Vals[np.argwhere(Einf_2ndderiv_Vals < 0)[-2][0] + 3]
# Pcrit[aind] = Pinf_Vals[np.argmin(np.gradient(Einf_2ndderiv_Vals)) - 0]  # NOTE: index offset is currently 0; earlier versions subtracted 3 as a fudge so that aIBi=-10 gave Pcrit/(mI*c) = 1. Generating data for weaker interactions would be a cleaner check than tuning this offset.
# Pcrit_norm = Pcrit / (mI * nu)
# Pcrit_tck = interpolate.splrep(aIBi_Vals, Pcrit_norm, s=0, k=3)
# aIBi_interpVals = np.linspace(np.min(aIBi_Vals), np.max(aIBi_Vals), 5 * aIBi_Vals.size)
# Pcrit_interpVals = 1 * interpolate.splev(aIBi_interpVals, Pcrit_tck, der=0)
# print(Pcrit_norm)
# print(Pcrit_norm[1], Pcrit_norm[5], Pcrit_norm[-5])
# scalefac = 1.0
# # scalefac = 0.95 # just to align weakly interacting case slightly to 1 (it's pretty much there, would just need higher resolution data)
# Pcrit_norm = scalefac * Pcrit_norm
# Pcrit_interpVals = scalefac * Pcrit_interpVals
# xmin = np.min(aIBi_interpVals / xi)
# xmax = 1.01 * np.max(aIBi_interpVals / xi)
# ymin = 0
# ymax = 1.01 * np.max(Pcrit_interpVals)
# font = {'family': 'serif', 'color': 'black', 'size': legendsize}
# sfont = {'family': 'serif', 'color': 'black', 'size': legendsize - 1}
# ax_PD.plot(aIBi_Vals / xi, Pcrit_norm, 'kx')
# ax_PD.plot(aIBi_interpVals / xi, Pcrit_interpVals, 'k-')
# # f1 = interpolate.interp1d(aIBi_Vals, Pcrit_norm, kind='cubic')
# # ax_PD.plot(aIBi_interpVals, f1(aIBi_interpVals), 'k-')
# ax_PD.set_xlabel(r'$a_{IB}^{-1}$ [$\xi$]', fontsize=labelsize)
# ax_PD.set_ylabel(r'Total Momentum $P$ [$m_{I}c$]', fontsize=labelsize)
# ax_PD.set_xlim([xmin, xmax])
# ax_PD.set_ylim([ymin, ymax])
# ax_PD.fill_between(aIBi_interpVals / xi, Pcrit_interpVals, ymax, facecolor='b', alpha=0.25)
# ax_PD.fill_between(aIBi_interpVals / xi, ymin, Pcrit_interpVals, facecolor='g', alpha=0.25)
# ax_PD.text(-3.2, ymin + 0.175 * (ymax - ymin), 'Polaron', fontdict=font)
# ax_PD.text(-3.1, ymin + 0.1 * (ymax - ymin), '(' + r'$Z>0$' + ')', fontdict=sfont)
# # ax_PD.text(-6.5, ymin + 0.6 * (ymax - ymin), 'Cherenkov', fontdict=font)
# # ax_PD.text(-6.35, ymin + 0.525 * (ymax - ymin), '(' + r'$Z=0$' + ')', fontdict=sfont)
# ax_PD.text(-12.8, ymin + 0.86 * (ymax - ymin), 'Cherenkov', fontdict=font)
# ax_PD.text(-12.65, ymin + 0.785 * (ymax - ymin), '(' + r'$Z=0$' + ')', fontdict=sfont)
# supDist_coords = [-5.0 / xi, 3.0] # is [aIBi/xi, P/(mI*c)]
# subDist_coords = [-5.0 / xi, 0.5] # is [aIBi/xi, P/(mI*c)]
# ax_PD.plot(supDist_coords[0], supDist_coords[1], linestyle='', marker='8', mec='#8f1402', mfc='#8f1402', ms=10)
# ax_PD.plot(subDist_coords[0], subDist_coords[1], linestyle='', marker='8', mec='#8f1402', mfc='#8f1402', ms=10)
# # IMPURITY DISTRIBUTION (CARTESIAN)
# GaussianBroadening = True; sigma = 0.1
# incoh_color = '#8f1402'
# delta_color = '#bf9005'
# def GPDF(xVals, mean, stdev):
# return (1 / (stdev * np.sqrt(2 * np.pi))) * np.exp(-0.5 * ((xVals - mean) / stdev)**2)
# # return (1 / (1 * np.sqrt(2 * np.pi))) * np.exp(-0.5 * ((xVals - mean) / stdev)**2)
# aIBi = -5
# qds_aIBi = xr.open_dataset(innerdatapath_cart + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# PVals = qds_aIBi['P'].values
# nPIm_FWHM_indices = []
# nPIm_distPeak_index = np.zeros(PVals.size, dtype=int)
# nPIm_FWHM_Vals = np.zeros(PVals.size)
# nPIm_distPeak_Vals = np.zeros(PVals.size)
# nPIm_deltaPeak_Vals = np.zeros(PVals.size)
# nPIm_Tot_Vals = np.zeros(PVals.size)
# nPIm_Vec = np.empty(PVals.size, dtype=np.object)
# PIm_Vec = np.empty(PVals.size, dtype=np.object)
# for ind, P in enumerate(PVals):
# qds_nPIm_inf = qds_aIBi['nPI_mag'].sel(P=P).isel(t=-1).dropna('PI_mag')
# PIm_Vals = qds_nPIm_inf.coords['PI_mag'].values
# dPIm = PIm_Vals[1] - PIm_Vals[0]
# nPIm_Vec[ind] = qds_nPIm_inf.values
# PIm_Vec[ind] = PIm_Vals
# # # Calculate nPIm(t=inf) normalization
# nPIm_Tot_Vals[ind] = np.sum(qds_nPIm_inf.values * dPIm) + qds_aIBi.sel(P=P).isel(t=-1)['mom_deltapeak'].values
# # Calculate FWHM, distribution peak, and delta peak
# nPIm_FWHM_Vals[ind] = pfc.FWHM(PIm_Vals, qds_nPIm_inf.values)
# nPIm_distPeak_Vals[ind] = np.max(qds_nPIm_inf.values)
# nPIm_deltaPeak_Vals[ind] = qds_aIBi.sel(P=P).isel(t=-1)['mom_deltapeak'].values
# D = qds_nPIm_inf.values - np.max(qds_nPIm_inf.values) / 2
# indices = np.where(D > 0)[0]
# nPIm_FWHM_indices.append((indices[0], indices[-1]))
# nPIm_distPeak_index[ind] = np.argmax(qds_nPIm_inf.values)
# Pnorm = PVals / (mI * nu)
# Pratio_sup = 3.0; Pind_sup = np.abs(Pnorm - Pratio_sup).argmin()
# Pratio_sub = 0.5; Pind_sub = np.abs(Pnorm - Pratio_sub).argmin()
# print(Pnorm[Pind_sup], Pnorm[Pind_sub])
# print(nPIm_deltaPeak_Vals[Pind_sup], nPIm_deltaPeak_Vals[Pind_sub])
# ax_supDist.plot(PIm_Vec[Pind_sup] / (mI * nu), nPIm_Vec[Pind_sup], color=incoh_color, lw=0.5, label='Incoherent Part')
# ax_supDist.set_xlim([-0.01, 10])
# ax_supDist.set_ylim([0, 1.05])
# ax_supDist.set_ylabel(r'$n_{|\vec{P_{I}}|}$', fontsize=labelsize)
# ax_supDist.set_xlabel(r'$|\vec{P_{I}}|/(m_{I}c)$', fontsize=labelsize)
# ax_supDist.fill_between(PIm_Vec[Pind_sup] / (mI * nu), np.zeros(PIm_Vals.size), nPIm_Vec[Pind_sup], facecolor=incoh_color, alpha=0.25)
# if GaussianBroadening:
# Pnorm_sup = PVals[Pind_sup] / (mI * nu)
# deltaPeak_sup = nPIm_deltaPeak_Vals[Pind_sup]
# PIm_norm_sup = PIm_Vec[Pind_sup] / (mI * nu)
# delta_GB_sup = deltaPeak_sup * GPDF(PIm_norm_sup, Pnorm_sup, sigma)
# # ax_supDist.plot(PIm_norm_sup, delta_GB_sup, linestyle='-', color=delta_color, linewidth=1, label=r'$\delta$-Peak')
# ax_supDist.plot(PIm_norm_sup, delta_GB_sup, linestyle='-', color=delta_color, linewidth=1, label='')
# ax_supDist.fill_between(PIm_norm_sup, np.zeros(PIm_norm_sup.size), delta_GB_sup, facecolor=delta_color, alpha=0.25)
# else:
# ax_supDist.plot((PVals[Pind_sup] / (mI * nu)) * np.ones(PIm_Vals.size), np.linspace(0, nPIm_deltaPeak_Vals[Pind_sup], PIm_Vals.size), linestyle='-', color=delta_color, linewidth=1, label='Delta Peak (Z-factor)')
# ax_supDist.legend(loc=1, fontsize=legendsize)
# ax_subDist.plot(PIm_Vec[Pind_sub] / (mI * nu), nPIm_Vec[Pind_sub], color=incoh_color, lw=0.5, label='Incoherent Part')
# # ax_subDist.set_xlim([-0.01, np.max(PIm_Vec[Pind_sub] / (mI*nu))])
# ax_subDist.set_xlim([-0.01, 10])
# ax_subDist.set_ylim([0, 1.05])
# ax_subDist.set_ylabel(r'$n_{|\vec{P_{I}}|}$', fontsize=labelsize)
# ax_subDist.set_xlabel(r'$|\vec{P_{I}}|/(m_{I}c)$', fontsize=labelsize)
# ax_subDist.fill_between(PIm_Vec[Pind_sub] / (mI * nu), np.zeros(PIm_Vals.size), nPIm_Vec[Pind_sub], facecolor=incoh_color, alpha=0.25)
# if GaussianBroadening:
# Pnorm_sub = PVals[Pind_sub] / (mI * nu)
# deltaPeak_sub = nPIm_deltaPeak_Vals[Pind_sub]
# PIm_norm_sub = PIm_Vec[Pind_sub] / (mI * nu)
# delta_GB_sub = deltaPeak_sub * GPDF(PIm_norm_sub, Pnorm_sub, sigma)
# ax_subDist.plot(PIm_norm_sub, delta_GB_sub, linestyle='-', color=delta_color, linewidth=1, label=r'$\delta$-Peak')
# ax_subDist.fill_between(PIm_norm_sub, np.zeros(PIm_norm_sub.size), delta_GB_sub, facecolor=delta_color, alpha=0.25)
# else:
# ax_subDist.plot((PVals[Pind_sub] / (mI * nu)) * np.ones(PIm_Vals.size), np.linspace(0, nPIm_deltaPeak_Vals[Pind_sub], PIm_Vals.size), linestyle='-', color=delta_color, linewidth=1, label='Delta Peak (Z-factor)')
# ax_subDist.legend(loc=1, fontsize=legendsize)
# fig1.set_size_inches(7.8, 9)
# fig1.savefig(figdatapath + '/Fig1.pdf')
# # # # FIG 2 - ENERGY DERIVATIVES + SOUND VELOCITY + EFFECTIVE MASS
# matplotlib.rcParams.update({'font.size': 12})
# labelsize = 13
# legendsize = 12
# fig2 = plt.figure(constrained_layout=False)
# # gs1 = fig2.add_gridspec(nrows=3, ncols=1, bottom=0.12, top=0.925, left=0.12, right=0.40, hspace=1.0)
# # gs2 = fig2.add_gridspec(nrows=2, ncols=1, bottom=0.12, top=0.925, left=0.58, right=0.98, hspace=0.7)
# gs1 = fig2.add_gridspec(nrows=3, ncols=1, bottom=0.12, top=0.95, left=0.12, right=0.40, hspace=0.2)
# gs2 = fig2.add_gridspec(nrows=2, ncols=1, bottom=0.12, top=0.95, left=0.58, right=0.98, hspace=0.1)
# ax_GSE0 = fig2.add_subplot(gs1[0])
# ax_GSE1 = fig2.add_subplot(gs1[1])
# ax_GSE2 = fig2.add_subplot(gs1[2])
# ax_Vel = fig2.add_subplot(gs2[0])
# ax_Mass = fig2.add_subplot(gs2[1])
# # fig2.text(0.01, 0.95, '(a)', fontsize=labelsize)
# # fig2.text(0.01, 0.65, '(b)', fontsize=labelsize)
# # fig2.text(0.01, 0.32, '(c)', fontsize=labelsize)
# # fig2.text(0.47, 0.95, '(d)', fontsize=labelsize)
# # fig2.text(0.47, 0.47, '(e)', fontsize=labelsize)
# fig2.text(0.01, 0.95, '(a)', fontsize=labelsize)
# fig2.text(0.01, 0.65, '(b)', fontsize=labelsize)
# fig2.text(0.01, 0.37, '(c)', fontsize=labelsize)
# fig2.text(0.47, 0.95, '(d)', fontsize=labelsize)
# fig2.text(0.47, 0.52, '(e)', fontsize=labelsize)
# # # ENERGY DERIVATIVES (SPHERICAL)
# aIBi = -5
# qds_aIBi = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# PVals = qds_aIBi['P'].values
# print(aIBi * xi)
# CSAmp_ds = qds_aIBi['Real_CSAmp'] + 1j * qds_aIBi['Imag_CSAmp']
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', CSAmp_ds.coords['k'].values); kgrid.initArray_premade('th', CSAmp_ds.coords['th'].values)
# Energy_Vals = np.zeros((PVals.size, tVals.size))
# for Pind, P in enumerate(PVals):
# for tind, t in enumerate(tVals):
# CSAmp = CSAmp_ds.sel(P=P, t=t).values
# Energy_Vals[Pind, tind] = pfs.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# Energy_Vals_inf = Energy_Vals[:, -1]
# Einf_tck = interpolate.splrep(PVals, Energy_Vals_inf, s=0)
# Pinf_Vals = np.linspace(np.min(PVals), np.max(PVals), 5 * PVals.size)
# Einf_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=0)
# Einf_1stderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=1)
# Einf_2ndderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=2)
# sound_mask = np.abs(Einf_2ndderiv_Vals) <= 5e-3
# Einf_sound = Einf_Vals[sound_mask]
# Pinf_sound = Pinf_Vals[sound_mask]
# [vsound, vs_const] = np.polyfit(Pinf_sound, Einf_sound, deg=1)
# ms_mask = Pinf_Vals <= 0.5
# Einf_1stderiv_ms = Einf_1stderiv_Vals[ms_mask]
# Pinf_ms = Pinf_Vals[ms_mask]
# [ms, ms_const] = np.polyfit(Pinf_ms, Einf_1stderiv_ms, deg=1)
# Ecrit = Einf_Vals[np.argmin(np.gradient(Einf_2ndderiv_Vals))]
# # ax_GSE0.plot(Pinf_Vals / (mI * nu), Einf_Vals / np.abs(Ecrit), 'k-', lw=1.5)
# ax_GSE0.plot(Pinf_Vals[::2] / (mI * nu), Einf_Vals[::2] / np.abs(Ecrit), 'ko', ms=6)
# # ax_GSE0.set_title('Ground State Energy (' + r'$a_{IB}^{-1}=$' + '{0})'.format(aIBi))
# # ax_GSE0.set_xlabel(r'$P$ [$m_{I}c$]', fontsize=labelsize)
# ax_GSE0.set_ylabel(r'$E$', fontsize=labelsize)
# ax_GSE0.set_ylim([1.1 * np.min(Einf_Vals / np.abs(Ecrit)), -0.5 / np.abs(Ecrit)])
# ax_GSE0.set_xlim([0, 2.0])
# # ax_GSE1.plot(Pinf_Vals / (mI * nu), Einf_1stderiv_Vals / np.abs(Ecrit), 'k-', lw=1.5)
# ax_GSE1.plot(Pinf_Vals[::2] / (mI * nu), Einf_1stderiv_Vals[::2] / np.abs(Ecrit), 'ko', ms=6)
# # ax_GSE1.set_title('First Derivative of Energy')
# # ax_GSE1.set_xlabel(r'$P$ [$m_{I}c$]', fontsize=labelsize)
# ax_GSE1.set_ylabel(r'$dE/dP$', fontsize=labelsize)
# ax_GSE1.plot(Pinf_Vals / (mI * nu), vsound * np.ones(Pinf_Vals.size) / np.abs(Ecrit), color=red, linestyle='--', linewidth=2.0)
# ax_GSE1.set_ylim([0, 1.2 * np.max(Einf_1stderiv_Vals / np.abs(Ecrit))])
# ax_GSE1.set_xlim([0, 2.0])
# # ax_GSE2.plot(Pinf_Vals / (mI * nu), Einf_2ndderiv_Vals / np.abs(Ecrit), 'k-', lw=1.5)
# ax_GSE2.plot(Pinf_Vals[::2] / (mI * nu), Einf_2ndderiv_Vals[::2] / np.abs(Ecrit), 'ko', ms=6)
# # ax_GSE2.set_title('Second Derivative of Energy')
# ax_GSE2.set_xlabel(r'$P/(m_{I}c)$', fontsize=labelsize)
# ax_GSE2.set_ylabel(r'$d^{2}E/dP^{2}$', fontsize=labelsize)
# ax_GSE2.plot(Pinf_Vals / (mI * nu), ms * np.ones(Pinf_Vals.size) / np.abs(Ecrit), color=blue, linestyle='--', linewidth=2.0)
# ax_GSE2.set_ylim([-.12, 1.2 * np.max(Einf_2ndderiv_Vals / np.abs(Ecrit))])
# ax_GSE2.set_xlim([0, 2.0])
# # including a Pcrit line
# Pcrit = Pinf_Vals[np.argmin(np.gradient(Einf_2ndderiv_Vals)) - 0]
# # Pcrit_2 = Pinf_Vals[sound_mask][0]; print(Pcrit, Pcrit_2)
# ax_GSE0.axvline(x=Pcrit / (mI * nu), linestyle=':', color=green, lw=2)
# ax_GSE1.axvline(x=Pcrit / (mI * nu), linestyle=':', color=green, lw=2)
# ax_GSE2.axvline(x=Pcrit / (mI * nu), linestyle=':', color=green, lw=2)
# # # POLARON SOUND VELOCITY (SPHERICAL)
# # Check to see if linear part of polaron (total system) energy spectrum has slope equal to sound velocity
# vsound_Vals = np.zeros(aIBi_Vals.size)
# vI_Vals = np.zeros(aIBi_Vals.size)
# for aind, aIBi in enumerate(aIBi_Vals):
# qds = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# qds_aIBi = qds.isel(t=-1)
# CSAmp_ds = qds_aIBi['Real_CSAmp'] + 1j * qds_aIBi['Imag_CSAmp']
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', CSAmp_ds.coords['k'].values); kgrid.initArray_premade('th', CSAmp_ds.coords['th'].values)
# Energy_Vals_inf = np.zeros(PVals.size)
# PI_Vals = np.zeros(PVals.size)
# for Pind, P in enumerate(PVals):
# CSAmp = CSAmp_ds.sel(P=P).values
# Energy_Vals_inf[Pind] = pfs.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# PI_Vals[Pind] = P - qds_aIBi.sel(P=P)['Pph'].values
# Einf_tck = interpolate.splrep(PVals, Energy_Vals_inf, s=0)
# Pinf_Vals = np.linspace(np.min(PVals), np.max(PVals), 2 * PVals.size)
# Einf_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=0)
# Einf_2ndderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=2)
# sound_mask = np.abs(Einf_2ndderiv_Vals) <= 5e-3
# Einf_sound = Einf_Vals[sound_mask]
# Pinf_sound = Pinf_Vals[sound_mask]
# [vsound_Vals[aind], vs_const] = np.polyfit(Pinf_sound, Einf_sound, deg=1)
# vI_inf_tck = interpolate.splrep(PVals, PI_Vals / mI, s=0)
# vI_inf_Vals = 1 * interpolate.splev(Pinf_Vals, vI_inf_tck, der=0)
# vI_Vals[aind] = np.polyfit(Pinf_sound, vI_inf_Vals[sound_mask], deg=0)
# print(vsound_Vals)
# print(100 * (vsound_Vals - nu) / nu)
# ax_Vel.plot(aIBi_Vals * xi, vsound_Vals / nu, linestyle='None', mec=red, mfc=red, marker='x', mew=1, ms=10, label='Polaron')
# ax_Vel.plot(aIBi_Vals * xi, vI_Vals / nu, 'ko', mew=1, ms=10, markerfacecolor='none', label='Impurity')
# ax_Vel.plot(aIBi_Vals * xi, np.ones(aIBi_Vals.size), color='grey', linestyle='dashdot', linewidth=2.0, label='$c$')
# ax_Vel.set_ylim([0.5, 1.25])
# # ax_Vel.set_ylim([0.8, 1.25])
# ax_Vel.legend(loc=(0.25, 0.1), fontsize=legendsize)
# # ax_Vel.set_xlabel(r'$a_{IB}^{-1}$ [$\xi$]', fontsize=labelsize)
# ax_Vel.set_ylabel(r'Velocity', fontsize=labelsize)
# # # POLARON EFFECTIVE MASS (SPHERICAL)
# ms_Vals = np.zeros(aIBi_Vals.size)
# for aind, aIBi in enumerate(aIBi_Vals):
# qds = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# qds_aIBi = qds.isel(t=-1)
# CSAmp_ds = qds_aIBi['Real_CSAmp'] + 1j * qds_aIBi['Imag_CSAmp']
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', CSAmp_ds.coords['k'].values); kgrid.initArray_premade('th', CSAmp_ds.coords['th'].values)
# Energy_Vals_inf = np.zeros(PVals.size)
# PI_Vals = np.zeros(PVals.size)
# for Pind, P in enumerate(PVals):
# CSAmp = CSAmp_ds.sel(P=P).values
# Energy_Vals_inf[Pind] = pfs.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# PI_Vals[Pind] = P - qds_aIBi.sel(P=P)['Pph'].values
# Einf_tck = interpolate.splrep(PVals, Energy_Vals_inf, s=0)
# Pinf_Vals = np.linspace(np.min(PVals), np.max(PVals), 2 * PVals.size)
# Einf_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=0)
# Einf_1stderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=1)
# Einf_2ndderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=2)
# ms_mask = Pinf_Vals < 0.3
# Einf_1stderiv_ms = Einf_1stderiv_Vals[ms_mask]
# Pinf_ms = Pinf_Vals[ms_mask]
# [ms_Vals[aind], ms_const] = np.polyfit(Pinf_ms, Einf_1stderiv_ms, deg=1)
# massEnhancement_Vals = (1 / ms_Vals) / mI
# mE_tck = interpolate.splrep(aIBi_Vals, massEnhancement_Vals, s=0)
# aIBi_interpVals = np.linspace(np.min(aIBi_Vals), np.max(aIBi_Vals), 5 * aIBi_Vals.size)
# mE_interpVals = 1 * interpolate.splev(aIBi_interpVals, mE_tck, der=0)
# ax_Mass.plot(aIBi_Vals * xi, massEnhancement_Vals, linestyle='None', marker='D', mec=blue, mfc=blue, mew=1, ms=5)
# ax_Mass.plot(aIBi_interpVals * xi, mE_interpVals, color=blue, linestyle='-')
# ax_Mass.set_xlabel(r'$a_{\rm IB}^{-1}/\xi^{-1}$', fontsize=labelsize)
# # ax_Mass.set_ylabel(r'$\frac{m^{*}}{m_{I}} = \frac{1}{m_{I}}\frac{\partial^{2} E}{\partial P^{2}}$')
# ax_Mass.set_ylabel(r'Effective Mass', fontsize=labelsize)
# ax_GSE0.xaxis.set_ticklabels([])
# ax_GSE1.xaxis.set_ticklabels([])
# ax_Vel.xaxis.set_ticklabels([])
# ax_GSE0.set_xticks([0.0, 1.0, 2.0])
# ax_GSE1.set_xticks([0.0, 1.0, 2.0])
# ax_GSE2.set_xticks([0.0, 1.0, 2.0])
# ax_GSE0.tick_params(direction='in', right=True, top=True)
# ax_GSE1.tick_params(direction='in', right=True, top=True)
# ax_GSE2.tick_params(direction='in', right=True, top=True)
# ax_Vel.tick_params(direction='in', right=True, top=True)
# ax_Mass.tick_params(direction='in', right=True, top=True)
# vel_coords = [2, vsound / np.abs(Ecrit)]
# effM_coords = [2, ms / np.abs(Ecrit)]
# con_vel = ConnectionPatch(xyA=(vel_coords[0], vel_coords[1]), xyB=(-11, 1.0), coordsA="data", coordsB="data", axesA=ax_GSE1, axesB=ax_Vel, color=red, linestyle='dashed', lw=0.5)
# con_effM = ConnectionPatch(xyA=(effM_coords[0], effM_coords[1]), xyB=(-11, 1.92), coordsA="data", coordsB="data", axesA=ax_GSE2, axesB=ax_Mass, color=blue, linestyle='dashed', lw=0.5)
# ax_GSE1.add_artist(con_vel)
# ax_GSE2.add_artist(con_effM)
# fig2.set_size_inches(7.8, 5.0)
# fig2.savefig(figdatapath + '/Fig2.pdf')
# # FIG 3 - IMPURITY DISTRIBUTION WITH CHARACTERIZATION (CARTESIAN)
# matplotlib.rcParams.update({'font.size': 12})
# labelsize = 13
# legendsize = 12
# GaussianBroadening = True; sigma = 0.1
# incoh_color = green
# delta_color = base02
# fwhm_color = red
# def GPDF(xVals, mean, stdev):
# return (1 / (stdev * np.sqrt(2 * np.pi))) * np.exp(-0.5 * ((xVals - mean) / stdev)**2)
# # return (1 / (1 * np.sqrt(2 * np.pi))) * np.exp(-0.5 * ((xVals - mean) / stdev)**2)
# aIBi = -2
# print('int: {0}'.format(aIBi * xi))
# qds_aIBi = xr.open_dataset(innerdatapath_cart + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# PVals = qds_aIBi['P'].values
# nPIm_FWHM_indices = []
# nPIm_distPeak_index = np.zeros(PVals.size, dtype=int)
# nPIm_FWHM_Vals = np.zeros(PVals.size)
# nPIm_distPeak_Vals = np.zeros(PVals.size)
# nPIm_deltaPeak_Vals = np.zeros(PVals.size)
# nPIm_Tot_Vals = np.zeros(PVals.size)
# nPIm_Vec = np.empty(PVals.size, dtype=np.object)
# PIm_Vec = np.empty(PVals.size, dtype=np.object)
# for ind, P in enumerate(PVals):
# qds_nPIm_inf = qds_aIBi['nPI_mag'].sel(P=P).isel(t=-1).dropna('PI_mag')
# PIm_Vals = qds_nPIm_inf.coords['PI_mag'].values
# dPIm = PIm_Vals[1] - PIm_Vals[0]
# # # Plot nPIm(t=inf)
# # qds_nPIm_inf.plot(ax=ax, label='P: {:.1f}'.format(P))
# nPIm_Vec[ind] = qds_nPIm_inf.values
# PIm_Vec[ind] = PIm_Vals
# # # Calculate nPIm(t=inf) normalization
# nPIm_Tot_Vals[ind] = np.sum(qds_nPIm_inf.values * dPIm) + qds_aIBi.sel(P=P).isel(t=-1)['mom_deltapeak'].values
# # Calculate FWHM, distribution peak, and delta peak
# nPIm_FWHM_Vals[ind] = pfc.FWHM(PIm_Vals, qds_nPIm_inf.values)
# nPIm_distPeak_Vals[ind] = np.max(qds_nPIm_inf.values)
# nPIm_deltaPeak_Vals[ind] = qds_aIBi.sel(P=P).isel(t=-1)['mom_deltapeak'].values
# D = qds_nPIm_inf.values - np.max(qds_nPIm_inf.values) / 2
# indices = np.where(D > 0)[0]
# nPIm_FWHM_indices.append((indices[0], indices[-1]))
# nPIm_distPeak_index[ind] = np.argmax(qds_nPIm_inf.values)
# Pratio = 1.4
# Pnorm = PVals / (mI * nu)
# Pind = np.abs(Pnorm - Pratio).argmin()
# print(Pnorm[Pind], aIBi / xi)
# print(nPIm_deltaPeak_Vals[Pind])
# fig3, axes3 = plt.subplots(nrows=1, ncols=3)
# ind_s, ind_f = nPIm_FWHM_indices[Pind]
# ind_f = ind_f - 1 # this is just to make the FWHM marker on the plot look a little cleaner
# axes3[0].plot(PIm_Vec[Pind] / (mI * nu), nPIm_Vec[Pind], color=incoh_color, lw=1.0, label='Incoherent Part')
# axes3[0].set_xlim([-0.01, 10])
# axes3[0].set_ylim([0, 1.05])
# axes3[0].set_ylabel(r'$n_{|\mathbf{P}_{\rm imp}|}$', fontsize=labelsize)
# axes3[0].set_xlabel(r'$|\mathbf{P}_{\rm imp}|/(m_{I}c)$', fontsize=labelsize)
# axes3[0].fill_between(PIm_Vec[Pind] / (mI * nu), np.zeros(PIm_Vals.size), nPIm_Vec[Pind], facecolor=incoh_color, alpha=0.25)
# if GaussianBroadening:
# Pnorm = PVals[Pind] / (mI * nu)
# deltaPeak = nPIm_deltaPeak_Vals[Pind]
# PIm_norm = PIm_Vec[Pind] / (mI * nu)
# delta_GB = deltaPeak * GPDF(PIm_norm, Pnorm, sigma)
# axes3[0].plot(PIm_norm, delta_GB, linestyle='-', color=delta_color, linewidth=1, label='Delta Peak')
# axes3[0].fill_between(PIm_norm, np.zeros(PIm_norm.size), delta_GB, facecolor=delta_color, alpha=0.25)
# else:
# axes3[0].plot((PVals[Pind] / (mI * nu)) * np.ones(PIm_Vals.size), np.linspace(0, nPIm_deltaPeak_Vals[Pind], PIm_Vals.size), linestyle='-', color=delta_color, linewidth=1, label='Delta Peak Weight (Z-factor)')
# # axes3[0].legend(loc=1, fontsize=legendsize)
# axes3[0].plot(np.linspace(PIm_Vec[Pind][ind_s] / (mI * nu), PIm_Vec[Pind][ind_f] / (mI * nu), 100), nPIm_Vec[Pind][ind_s] * np.ones(100), linestyle='-', color=fwhm_color, linewidth=2.0, label='Incoherent Part FWHM')
# axes3[0].plot(np.linspace(PIm_Vec[Pind][ind_s] / (mI * nu), PIm_Vec[Pind][ind_f] / (mI * nu), 2), nPIm_Vec[Pind][ind_s] * np.ones(2), marker='D', color=fwhm_color, mew=0.5, ms=4, label='')
# axes3[1].plot(PVals / (mI * nu), nPIm_deltaPeak_Vals, linestyle='-', color=delta_color)
# axes3[1].set_xlabel(r'$P/(m_{I}c)$', fontsize=labelsize)
# axes3[1].set_ylabel(r'Quasiparticle Residue ($Z$)', fontsize=labelsize)
# axes3[2].plot(PVals / (mI * nu), nPIm_FWHM_Vals, linestyle='-', color=fwhm_color)
# axes3[2].set_xlabel(r'$P/(m_{I}c)$', fontsize=labelsize)
# axes3[2].set_ylabel('Incoherent Part FWHM', fontsize=labelsize)
# axes3[2].set_ylim([0.5, 2.5])
# axes3[2].yaxis.set_major_locator(plt.MaxNLocator(4))
# axes3[0].tick_params(direction='in', right=True, top=True)
# axes3[1].tick_params(direction='in', right=True, top=True)
# axes3[2].tick_params(direction='in', right=True, top=True)
# fig3.text(0.01, 0.95, '(a)', fontsize=labelsize)
# fig3.text(0.33, 0.95, '(b)', fontsize=labelsize)
# fig3.text(0.66, 0.95, '(c)', fontsize=labelsize)
# fig3.subplots_adjust(left=0.1, bottom=0.17, top=0.91, right=0.98, wspace=0.6)
# fig3.set_size_inches(7.8, 3.5)
# fig3.savefig(figdatapath + '/Fig3.pdf')
# # plt.show()
| 51.332469 | 257 | 0.626985 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 70,688 | 0.892458 |
7e28c7fed222b2d6fc00b0669d8343daf9331941 | 286 | py | Python | demo/onlyuserrole/demo/urls.py | tangdyy/onlyuserclient | d93b4e1077afda6b58bba002729f6bc89b988c7a | [
"MIT"
] | 2 | 2021-03-12T00:42:13.000Z | 2021-05-24T06:31:13.000Z | demo/onlyuserrole/demo/urls.py | tangdyy/onlyuserclient | d93b4e1077afda6b58bba002729f6bc89b988c7a | [
"MIT"
] | null | null | null | demo/onlyuserrole/demo/urls.py | tangdyy/onlyuserclient | d93b4e1077afda6b58bba002729f6bc89b988c7a | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.urls import path,include
from rest_framework import routers
from .views import RoleViewSet
# DRF router that auto-generates the standard list/detail routes for roles.
router = routers.DefaultRouter()
router.register(r'roles', RoleViewSet, basename='role')
# Mount all router-generated routes at the URL root.
urlpatterns = [
    url(r'^', include(router.urls)),
]
| 22 | 55 | 0.755245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.062937 |
7e28d10904cede451441c32ece55951a633841a9 | 547 | py | Python | data_structures/heap/heap_using_heapq.py | ruler30cm/python-ds | f84605c5b746ea1d46de3d00b86f5fba399445c7 | [
"MIT"
] | 1,723 | 2019-07-30T07:06:22.000Z | 2022-03-31T15:22:22.000Z | data_structures/heap/heap_using_heapq.py | ruler30cm/python-ds | f84605c5b746ea1d46de3d00b86f5fba399445c7 | [
"MIT"
] | 213 | 2019-10-06T08:07:47.000Z | 2021-10-04T15:38:36.000Z | data_structures/heap/heap_using_heapq.py | ruler30cm/python-ds | f84605c5b746ea1d46de3d00b86f5fba399445c7 | [
"MIT"
] | 628 | 2019-10-06T10:26:25.000Z | 2022-03-31T01:41:00.000Z | """
Min-heap demo using Python's heapq library.
Note: by default, heapq implements a min-heap. To simulate a
max-heap, push items multiplied by -1 (and negate them on pop).
"""
from heapq import heappop, heappush, heapify
# Start from an empty list; heapify() is a no-op here, but it makes the
# "this list is always a valid heap" invariant explicit.
heap = []
heapify(heap)
# Push the sample values one by one; heappush keeps the heap property.
for value in (10, 11, 2, 4, 14, 1):
    heappush(heap, value)
print('first element - ', heap[0])
print('popping min element - ', heappop(heap))
print('first element - ', heap[0])
# The heap is stored as a plain list, so it prints as an array and can be
# accessed using indexes.
print(heap)
print(heap[2])
| 20.259259 | 57 | 0.714808 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 276 | 0.50457 |
7e29ac378ea3d06f25904da8b4888983e79680b8 | 3,161 | py | Python | paracept.py | andrinethomas/Tensorflow-vehicle-detection-using-camera-and-db-accessing-mysql | 4d76cc24a9ea67bbf16409f8ce30ec373b55f969 | [
"FTL",
"CNRI-Python"
] | 1 | 2021-07-07T14:12:37.000Z | 2021-07-07T14:12:37.000Z | paracept.py | andrinethomas/Tensorflow-vehicle-detection-using-camera-and-db-accessing-mysql | 4d76cc24a9ea67bbf16409f8ce30ec373b55f969 | [
"FTL",
"CNRI-Python"
] | null | null | null | paracept.py | andrinethomas/Tensorflow-vehicle-detection-using-camera-and-db-accessing-mysql | 4d76cc24a9ea67bbf16409f8ce30ec373b55f969 | [
"FTL",
"CNRI-Python"
] | 1 | 2019-12-02T11:17:42.000Z | 2019-12-02T11:17:42.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 2 17:55:47 2018
@author: tensorflow-cuda
"""
import numpy as np
import os
import sys
import tensorflow as tf
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import pytesseract
from custom_plate import allow_needed_values as anv
from custom_plate import do_image_conversion as dic
from custom_plate import sql_inserter_fetcher as sif
# Make the parent directory importable (script expects the repo layout).
sys.path.append("..")
# Trained detector directory and its exported frozen inference graph.
MODEL_NAME = 'numplate'
PATH_TO_CKPT = MODEL_NAME + '/graph-200000/frozen_inference_graph.pb'
# Label map for the detector's single class.
PATH_TO_LABELS = os.path.join('training', 'object-detection.pbtxt')
NUM_CLASSES = 1
def load_image_into_numpy_array(image):
    """Convert a PIL-style image into an (height, width, 3) uint8 array."""
    (width, height) = image.size
    flat_pixels = np.array(image.getdata())
    return flat_pixels.reshape((height, width, 3)).astype(np.uint8)
# NOTE(review): pytesseract reads `pytesseract.pytesseract.tesseract_cmd`
# and expects the path of the tesseract *binary*; this assigns a different
# module attribute and the path looks like a tessdata directory — confirm
# this setting actually takes effect.
pytesseract.tesseract_cmd = '/home/tensorflow-cuda/dharun_custom/models/research/object_detection/tessdata/'
def accept_and_die(image_path, dandt):
    """Detect a number plate in *image_path*, OCR it, and archive the file.

    Loads the frozen detection graph, runs one inference to locate the
    top-scoring plate bounding box, crops it, runs Tesseract OCR on the
    crop, and then either moves the image into the processed folder
    (named by *dandt*) when characters were recognised, or deletes it.

    Args:
        image_path: path to the input image on disk.
        dandt: date-and-time string used to name the archived copy.

    Returns:
        The cleaned-up recognised plate string ('' when nothing was read).
    """
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            # Tensor handles of the frozen object-detection graph.
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            image = Image.open(image_path)
            image_np = load_image_into_numpy_array(image)
            image_np_expanded = np.expand_dims(image_np, axis=0)
            (boxes, scores, classes, num) = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
            # Boxes are normalised [ymin, xmin, ymax, xmax]; only the
            # top-ranked detection (index 0) is used.
            ymin = boxes[0, 0, 0]
            xmin = boxes[0, 0, 1]
            ymax = boxes[0, 0, 2]
            xmax = boxes[0, 0, 3]
            (im_width, im_height) = image.size
            # Scale normalised coordinates back to pixels.
            (xminn, xmaxx, yminn, ymaxx) = (xmin * im_width, xmax * im_width,
                                            ymin * im_height, ymax * im_height)
            cropped_image = tf.image.crop_to_bounding_box(
                image_np, int(yminn), int(xminn),
                int(ymaxx - yminn), int(xmaxx - xminn))
            img_data = sess.run(cropped_image)
            # Convert the crop to an OCR-friendly image file on disk.
            count = 0
            filename = dic.yo_make_the_conversion(img_data, count)
            count += 1
            text = pytesseract.image_to_string(Image.open(filename), lang=None)
            yo = anv.catch_rectify_plate_characters(text)
            print('CHARACTER RECOGNITION : ', yo)
            if yo != '':
                # sif.store_whatever(yo, dandt)
                # Recognised: move the original into the processed folder.
                os.rename(image_path,
                          "folder_processings/images/copy/{}.jpg".format(dandt))
            else:
                # Bug fix: the original called os.remove() even after the
                # rename above, which raised FileNotFoundError once the
                # file had been moved. Only delete when it was not moved.
                os.remove(image_path)
            return yo
| 39.024691 | 130 | 0.672572 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 495 | 0.156596 |
7e2bbd84e61191db51ca260c5490c7e1b2095162 | 2,245 | py | Python | test/test_words.py | dubmix3105/pysh | dbd14ee8445a983ee6c736e4041e47ddfa4edacc | [
"MIT"
] | 7 | 2019-07-15T14:32:23.000Z | 2021-08-08T01:17:05.000Z | test/test_words.py | dubmix3105/pysh | dbd14ee8445a983ee6c736e4041e47ddfa4edacc | [
"MIT"
] | null | null | null | test/test_words.py | dubmix3105/pysh | dbd14ee8445a983ee6c736e4041e47ddfa4edacc | [
"MIT"
] | 1 | 2021-01-02T04:13:43.000Z | 2021-01-02T04:13:43.000Z | import sys
import pytest
from pysh import shwords, shwords_f
def test_conversions():
    """Nested format specs are rejected by shwords."""
    with pytest.raises(ValueError):
        shwords('{:{}}', 1, 2)
    # Plain str.format, by contrast, treats the inner '{}' as a width.
    formatted = '{:{}}'.format(1, 2)
    assert formatted == ' 1'
def test_multiword():
    """'{!@}' splices a list in as several words, but must fill a whole word."""
    assert shwords('touch {!@}', ['a', 'b']) \
        == ['touch', 'a', 'b']
    # A multi-word substitution glued to any other text is rejected.
    rejected = [
        ('a b{!@}', (['x'],)),
        ('a {!@}c', (['x'],)),
        ('a {!@}{}', (['b'], 'c')),
    ]
    for template, args in rejected:
        with pytest.raises(ValueError):
            shwords(template, *args)
    assert shwords('touch {!@} c', ['a', 'b']) \
        == ['touch', 'a', 'b', 'c']
def test_splitting():
    """Template whitespace splits words; each argument stays one word."""
    positional_cases = [
        (('git grep {}', ('hello world',)), ['git', 'grep', 'hello world']),
        (('{} {} {}', ('a', 'b c', 'd')), ['a', 'b c', 'd']),
        (('  a  {}  c ', ('b',)), ['a', 'b', 'c']),
    ]
    for (template, args), expected in positional_cases:
        assert shwords(template, *args) == expected
    # Keyword substitutions also keep their embedded spaces intact.
    assert shwords('tar -C {outdir} -xzf {tarball}',
                   outdir='/path/with/spaces in it',
                   tarball='2019 Planning (final) (v2) (final final).tgz') \
        == ['tar', '-C', '/path/with/spaces in it', '-xzf', '2019 Planning (final) (v2) (final final).tgz']
def test_within_word():
    """A substitution glued to surrounding text stays inside that word."""
    format_word = shwords('git log --format={}', '%aN')
    assert format_word == ['git', 'log', '--format=%aN']
    start_script = shwords('{basedir}/deployments/{deploy_id}/bin/start',
                           basedir='/srv/app', deploy_id='0f1e2d3c')
    assert start_script == ['/srv/app/deployments/0f1e2d3c/bin/start']
def test_locals():
    """shwords_f resolves substitutions from the caller's local variables."""
    import pytest
    l = ['a', 'b']
    assert shwords_f('touch {l!@}') \
        == ['touch', 'a', 'b']
    assert shwords_f('touch {l[1]}') \
        == ['touch', 'b']
    assert shwords_f('echo {pytest.__name__}') \
        == ['echo', 'pytest']
    # Known limitation: locals only, no globals...
    with pytest.raises(KeyError, match='sys'):
        shwords_f('echo {sys}')
    # (unlike real, compiler-assisted f-strings)
    assert f'{sys}' \
        == "<module 'sys' (built-in)>"
    # ... and enclosing scopes' locals are complicated.
    # inner1 never references `l`, and shwords_f cannot see the enclosing
    # local — presumably because no closure cell is created; TODO confirm.
    def inner1():
        with pytest.raises(KeyError):
            return shwords_f('touch {l!@}')
    inner1()
    # A bare `l` reference is enough to make the enclosing local visible.
    def inner2():
        l
        assert shwords_f('touch {l!@}') \
            == ['touch', 'a', 'b']
    inner2()
    # `nonlocal` has the same visibility effect.
    def inner3():
        nonlocal l
        assert shwords_f('touch {l!@}') \
            == ['touch', 'a', 'b']
    inner3()
| 27.716049 | 117 | 0.535857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 941 | 0.419154 |
7e2c4a41a01548c788b1f56baad24c0343c82cf7 | 753 | py | Python | slender/tests/dictionary/test_contain.py | torokmark/slender | 3bf815e22f7802ba48706f31ba608cf609e23e68 | [
"Apache-2.0"
] | 1 | 2020-01-10T21:51:46.000Z | 2020-01-10T21:51:46.000Z | slender/tests/dictionary/test_contain.py | torokmark/slender | 3bf815e22f7802ba48706f31ba608cf609e23e68 | [
"Apache-2.0"
] | null | null | null | slender/tests/dictionary/test_contain.py | torokmark/slender | 3bf815e22f7802ba48706f31ba608cf609e23e68 | [
"Apache-2.0"
] | null | null | null | from unittest import TestCase, skip
from expects import *
from slender import Dictionary
class TestContain(TestCase):
    """Membership (`in` / `not in`) behaviour of slender's Dictionary."""
    def setUp(self):
        # Key probed by every test case below.
        self.key = 'a'
    def test_contain_if_dictionary_is_empty(self):
        d1 = Dictionary[str, int]({})
        expect(self.key in d1).to(be_false)
    def test_contain_if_dictionary_not_contains_key(self):
        d1 = Dictionary[str, int]({'b' : 2, 'c' : 3})
        expect(self.key in d1).to(be_false)
    def test_contain_if_dictionary_contains_key(self):
        d1 = Dictionary[str, int]({'a': 1, 'b' : 20, 'c' : 3})
        expect(self.key in d1).to(be_true)
    def test_contain_if_negate(self):
        # `not in` must be the exact negation of `in`.
        d1 = Dictionary[str, int]({'b' : 2})
        expect(self.key not in d1).to(be_true)
| 26.892857 | 62 | 0.636122 | 658 | 0.873838 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.027888 |
7e2cb45d24767d8a20fc314235185561540eae21 | 490 | py | Python | Python/0263_ugly_number.py | codingyen/CodeAlone | b16653957258ac09f74bb1a380f29dd93e055a44 | [
"MIT"
] | 2 | 2018-08-20T04:38:06.000Z | 2019-02-03T07:48:28.000Z | Python/0263_ugly_number.py | codingyen/CodeAlone | b16653957258ac09f74bb1a380f29dd93e055a44 | [
"MIT"
] | null | null | null | Python/0263_ugly_number.py | codingyen/CodeAlone | b16653957258ac09f74bb1a380f29dd93e055a44 | [
"MIT"
] | null | null | null | # Time: O(logn) = O(1)
# Space: O(1)
class Solution:
    def isUgly(self, num):
        """Return True when num's only prime factors are 2, 3 and 5.

        By convention 1 is ugly; zero and negative numbers are not.
        """
        # Fixes vs. the original: `return num == 1:` was a syntax error,
        # float division (`/`) is replaced with integer division, and
        # non-positive inputs are rejected explicitly.
        if num <= 0:
            return False
        for factor in (2, 3, 5):
            while num % factor == 0:
                num //= factor
        # Only the factors 2/3/5 were divided out; anything left means
        # num had another prime factor.
        return num == 1
# Quick manual check when run as a script: 14 = 2 * 7, so the expected
# printed result is False.
if __name__ == "__main__":
    num = 14
    s = Solution()
print(s.isUgly(num)) | 17.5 | 27 | 0.418367 | 366 | 0.746939 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.197959 |
7e2dc1431668b0a5582c7ad9343a0741d27a1ebf | 386 | py | Python | execute_sweep.py | kinoai/skyhacks2020 | 5615046d4ca6fbae0c9c37a43b27de57ecc95fe5 | [
"MIT"
] | null | null | null | execute_sweep.py | kinoai/skyhacks2020 | 5615046d4ca6fbae0c9c37a43b27de57ecc95fe5 | [
"MIT"
] | null | null | null | execute_sweep.py | kinoai/skyhacks2020 | 5615046d4ca6fbae0c9c37a43b27de57ecc95fe5 | [
"MIT"
] | null | null | null | import wandb
import main
# Load the project's base configuration (defaults for all hyperparameters).
config = main.load_config()
# Initialize wandb; inside a sweep run this populates `wandb.config` with
# the hyperparameter values assigned by the sweep server.
wandb.init()
# Replace project config hyperparameters with the ones loaded from wandb
# sweep server.
# NOTE(review): `wandb.Config._as_dict` is a private wandb API — confirm it
# still exists on the pinned wandb version (dict(wandb.config) is the
# public equivalent).
sweep_hparams = wandb.Config._as_dict(wandb.config)
for key, value in sweep_hparams.items():
    # "_wandb" is bookkeeping metadata, not a model hyperparameter.
    if key != "_wandb":
        config["hparams"][key] = value
# Execute run
main.main(config)
| 20.315789 | 85 | 0.738342 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.398964 |
7e2e521370325b840ed1d98818e4306df758c1b7 | 1,720 | py | Python | setup.py | paoloelena15/gutenberg | dd6da5c1d8768737d3403fd391101ad5a901400b | [
"Apache-2.0"
] | null | null | null | setup.py | paoloelena15/gutenberg | dd6da5c1d8768737d3403fd391101ad5a901400b | [
"Apache-2.0"
] | null | null | null | setup.py | paoloelena15/gutenberg | dd6da5c1d8768737d3403fd391101ad5a901400b | [
"Apache-2.0"
] | null | null | null | """Library installer."""
from __future__ import absolute_import, unicode_literals
from platform import system
from sys import version_info
import codecs
from setuptools import find_packages
from setuptools import setup
# Runtime dependencies common to every supported platform / Python version.
install_requires = [
    'future>=0.15.2',
    'rdflib>=4.2.0',
    'requests>=2.5.1',
    'six>=1.10.0',
    'setuptools>=18.5',
    'rdflib-sqlalchemy>=0.3.8',
    'SPARQLWrapper>=1.8.2',
]
# functools32 backport is only needed on Python 2.
if version_info.major == 2:
    install_requires.extend([
        'functools32>=3.2.3-2',
    ])
# bsddb3 is required on Python 3, and on macOS even under Python 2.
if version_info.major == 3 or system() == 'Darwin':
    install_requires.extend([
        'bsddb3>=6.1.0',
    ])
# Reuse the README as the PyPI long description (codecs.open keeps this
# UTF-8-safe on Python 2 as well).
with codecs.open('README.rst', encoding='utf-8') as fobj:
    long_description = fobj.read()
# Package metadata; install_requires is sorted for deterministic output.
setup(
    name='Gutenberg',
    version='0.8.0',
    author='Clemens Wolff',
    author_email='clemens.wolff+pypi@gmail.com',
    packages=find_packages(exclude=['tests']),
    url='https://github.com/c-w/Gutenberg',
    download_url='https://pypi.python.org/pypi/Gutenberg',
    license='Apache Software License',
    description='Library to interface with Project Gutenberg',
    long_description=long_description,
    install_requires=sorted(install_requires),
    python_requires='>=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Utilities'
    ])
| 29.152542 | 70 | 0.634884 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 825 | 0.479651 |
7e2f328c32199a0d821d457fa05c20e5d98cdc91 | 9,641 | py | Python | deep_sdf_prior.py | nicolaihaeni/shapenet-pyrender | 2d5054f33530257301193c506663e4dc4db8d85a | [
"Apache-2.0"
] | null | null | null | deep_sdf_prior.py | nicolaihaeni/shapenet-pyrender | 2d5054f33530257301193c506663e4dc4db8d85a | [
"Apache-2.0"
] | null | null | null | deep_sdf_prior.py | nicolaihaeni/shapenet-pyrender | 2d5054f33530257301193c506663e4dc4db8d85a | [
"Apache-2.0"
] | null | null | null | # Top of main python script
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"
import sys
import random
import argparse
import numpy as np
import trimesh
import imageio
import open3d as o3d
from mathutils import Matrix
import h5py
import json
from mesh_to_sdf import get_surface_point_cloud
import pyrender
import util
# Fixed seeds so camera placement and SDF sampling are reproducible.
np.random.seed(12433)
random.seed(12433)
# ShapeNet synset IDs used for the training split.
train_categories = [
    "04379243",
    "02958343",
    "03001627",
    "02691156",
    "04256520",
    "04090263",
    "03636649",
    "04530566",
    "02828884",
    "03691459",
    "02933112",
    "03211117",
    "04401088",
]
# ShapeNet synset IDs used for the validation split.
val_categories = [
    "02924116",
    "02808440",
    "03467517",
    "03325088",
    "03046257",
    "03991062",
    "03593526",
    "02876657",
    "02871439",
    "03642806",
    "03624134",
    "04468005",
    "02747177",
    "03790512",
    "03948459",
    "03337140",
    "02818832",
    "03928116",
    "04330267",
    "03797390",
    "02880940",
    "04554684",
    "04004475",
    "03513137",
    "03761084",
    "04225987",
    "04460130",
    "02942699",
    "02801938",
    "02946921",
    "03938244",
    "03710193",
    "03207941",
    "04099429",
    "02773838",
    "02843684",
    "03261776",
    "03759954",
    "04074963",
    "03085013",
    "02992529",
    "02954340",
]
# Command-line interface for the renderer.
p = argparse.ArgumentParser(
    description="Renders given obj file by rotation a camera around it."
)
p.add_argument(
    "--data_dir",
    type=str,
    default="/labdata/nicolai/data/ShapeNetCore.v2",
    help="Data directory containing meshes.",
)
p.add_argument(
    "--output_dir",
    type=str,
    default="./images",
    help="The path the output will be dumped to.",
)
p.add_argument(
    "--num_views",
    type=int,
    default=25,
    help="Number of images to render",
)
p.add_argument("--resolution", type=int, default=256, help="output image resolution.")
p.add_argument(
    "--sphere_radius",
    type=float,
    default=1.2,
    help="Radius of the viewing sphere",
)
# NOTE(review): --val is accepted but never referenced in this file.
p.add_argument("--val", action="store_true", help="Use to render validation split")
p.add_argument(
    "--save_png",
    action="store_true",
    help="Save output images for visualization",
)
# NOTE(review): the help text below duplicates --save_png's; --show_3d
# actually opens an interactive Open3D viewer (see main()).
p.add_argument(
    "--show_3d",
    action="store_true",
    help="Save output images for visualization",
)
def normalize_mesh(mesh):
    """Center *mesh* at the origin and scale its bounding-box diagonal to 1.

    Both steps are applied in place via `apply_transform`; the (modified)
    mesh is also returned for convenience.
    """
    # Translate so the axis-aligned bounding-box midpoint sits at the origin.
    lower, upper = mesh.bounds
    center = (lower + upper) / 2
    translation = np.eye(4)
    translation[:3, 3] = -center
    mesh.apply_transform(translation)
    # Uniformly scale by the inverse of the bounding-box diagonal length.
    ex, ey, ez = mesh.extents
    diagonal = np.sqrt(ex ** 2 + ey ** 2 + ez ** 2)
    scaling = np.eye(4)
    scaling[:3, :3] /= diagonal
    mesh.apply_transform(scaling)
    return mesh
def main():
    """Render multi-view RGB-D data and sample SDF supervision for ShapeNet.

    For each ShapeNet instance: load and normalize the mesh, render
    `args.num_views` randomly rolled views from a sphere of radius
    `args.sphere_radius`, sample signed-distance values near the surface,
    and write everything into `<output_dir>/<instance>.h5`. Failures are
    collected per category and dumped to `failures.json` at the end.
    """
    args = p.parse_args()
    # Gather "<category>/<instance>" ids for every category of both splits.
    instance_names = []
    shapenet_categories = train_categories + val_categories
    folders = sorted(os.listdir(args.data_dir))  # NOTE(review): unused
    for cat in shapenet_categories:
        path = os.path.join(args.data_dir, cat)
        new_instances = [
            os.path.join(cat, f)
            for f in sorted(os.listdir(path))
            if os.path.isdir(os.path.join(path, f))
        ]
        instance_names = instance_names + new_instances
    # Cap the workload to the first 10k instances.
    instance_names = instance_names[0:10000]
    if len(instance_names) == 0:
        print("Data dir does not contain any instances")
        raise NotImplementedError
    # instance_names = instance_names[32000:]
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    print(f"Number of files: {len(instance_names)}")
    # Load n meshes
    count = 0
    mesh_errors = {}
    for instance_name in instance_names:
        runtime_error = False
        category, instance_name = instance_name.split("/")
        # Skip instances that were already rendered by a previous run.
        if os.path.exists(os.path.join(args.output_dir, f"{instance_name}.h5")):
            continue
        try:
            mesh = trimesh.load(
                os.path.join(
                    args.data_dir,
                    category,
                    instance_name,
                    "models",
                    "model_normalized.obj",
                ),
                force="mesh",
            )
        except ValueError:
            # Record the broken mesh and move on.
            if category not in mesh_errors.keys():
                mesh_errors[category] = []
            mesh_errors[category].append(instance_name)
            print(f"ValueError with instance {instance_name}. Skipping....")
            continue
        # Normalize the mesh to unit diagonal
        mesh = normalize_mesh(mesh)
        # Sample camera centers on a sphere and convert the OpenCV-style
        # look-at poses to blender-style camera matrices.
        cam_locations = util.sample_spherical(args.num_views, args.sphere_radius)
        obj_location = np.zeros((1, 3))
        cv_poses = util.look_at(cam_locations, obj_location)
        cam_locations = [util.cv_cam2world_to_bcam2world(m) for m in cv_poses]
        image_size = (args.resolution, args.resolution)
        # Pinhole intrinsics: fx = fy = 262.5, principal point (128, 128).
        K = np.array([[262.5, 0.0, 128.0], [0.0, 262.5, 128.0], [0.0, 0.0, 1.0]])
        camera = pyrender.IntrinsicsCamera(
            fx=K[0, 0], fy=K[1, 1], cx=K[0, 2], cy=K[1, 2], znear=0.01, zfar=100
        )
        # Per-view buffers accumulated over all rendered views.
        rgbs = []
        depths = []
        masks = []
        c2ws = []
        normals = []
        scene = pyrender.Scene.from_trimesh_scene(
            trimesh.Scene(mesh), ambient_light=(1, 1, 1)
        )
        for ii, w2c in enumerate(cam_locations):
            # Add camera roll
            theta = random.random() * np.pi
            roll_matrix = Matrix(
                (
                    (np.cos(theta), -np.sin(theta), 0, 0),
                    (np.sin(theta), np.cos(theta), 0, 0),
                    (0, 0, 1, 0),
                    (0, 0, 0, 1),
                )
            )
            w2c = roll_matrix @ w2c
            # The camera node is added once, then only re-posed per view.
            if ii == 0:
                cam_node = scene.add(camera, pose=np.array(w2c))
            else:
                scene.set_pose(cam_node, pose=np.array(w2c))
            try:
                r = pyrender.OffscreenRenderer(*image_size)
                color, depth = r.render(
                    scene, flags=pyrender.constants.RenderFlags.FLAT
                )
                # An all-white frame means no texture was rendered.
                if np.all(color == 255):
                    raise RuntimeError("No texture rendered")
            except Exception as e:
                # Abandon this instance on any rendering failure.
                print(f"RuntimeError with instance: {instance_name}. Skipping...")
                runtime_error = True
                r.delete()
                if category not in mesh_errors.keys():
                    mesh_errors[category] = []
                mesh_errors[category].append(instance_name)
                break
            # Derive normals from depth; depth == 0 marks background.
            normals.append(util.depth_2_normal(depth, depth == 0.0, K))
            mask = depth != 0
            w2c = np.array(util.get_world2cam_from_blender_cam(w2c))
            rgbs.append(color)
            depths.append(depth)
            masks.append(mask)
            c2ws.append(np.linalg.inv(w2c))
            r.delete()
            if args.save_png:
                imageio.imwrite(
                    os.path.join(
                        args.output_dir, f"{instance_name}_{str(ii).zfill(3)}.png"
                    ),
                    color,
                )
        if runtime_error:
            runtime_error = False
            continue
        # Stack per-view data into (num_views, ...) arrays.
        rgbs = np.stack([r for r in rgbs])
        # Check if all images are white. If yes, continue without saving the model
        depths = np.stack([r for r in depths])
        masks = np.stack([r for r in masks])
        poses = np.stack([r for r in c2ws])
        normals = np.stack([r for r in normals])
        # Generate 3D supervision data for the prior
        number_of_points = 100000
        surface_pcd = get_surface_point_cloud(
            mesh, "scan", args.sphere_radius, 100, 400, 10000000, calculate_normals=True
        )
        pts, sdf = surface_pcd.sample_sdf_near_surface(
            number_of_points,
            1,
            sign_method="normal",
            normal_sample_count=11,
            min_size=0,
            return_gradients=False,
        )
        # Store points and their signed distances together as (N, 4).
        sdf_pts = np.concatenate([pts, sdf[:, None]], axis=-1)
        # Optional interactive sanity check of SDF points + camera frames.
        if args.show_3d:
            colors = np.zeros_like(pts)
            colors[:, 0] = 1
            pcd = o3d.geometry.PointCloud()
            pcd.points = o3d.utility.Vector3dVector(pts)
            pcd.colors = o3d.utility.Vector3dVector(colors)
            frames = []
            for c in c2ws:
                frames.append(
                    o3d.geometry.TriangleMesh.create_coordinate_frame().transform(c)
                )
            o3d.visualization.draw_geometries(frames + [pcd])
        # Write one HDF5 file per instance.
        hf = h5py.File(os.path.join(args.output_dir, f"{instance_name}.h5"), "w")
        hf.create_dataset("rgb", data=rgbs, compression="gzip", dtype="f")
        hf.create_dataset("depth", data=depths, compression="gzip", dtype="f")
        hf.create_dataset("mask", data=masks, compression="gzip", dtype="f")
        hf.create_dataset("normals", data=normals, compression="gzip", dtype="f")
        hf.create_dataset("pose", data=poses, compression="gzip", dtype="f")
        hf.create_dataset("K", data=K, dtype="f")
        hf.create_dataset("sphere_radius", data=args.sphere_radius, dtype="f")
        hf.create_dataset("sdf", data=sdf_pts, compression="gzip", dtype="f")
        hf.create_dataset("category", data=category)
        hf.close()
        count += 1
        if count % 100 == 0:
            print(f"Generated {count} new instances")
    # Persist the per-category failure log for later inspection.
    with open(os.path.join(args.output_dir, "failures.json"), "w") as outfile:
        json.dump(mesh_errors, outfile)
    print("Finished all data generation")
# Script entry point: parse CLI arguments and run the rendering pipeline.
if __name__ == "__main__":
    main()
| 28.523669 | 88 | 0.563842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,971 | 0.204439 |
7e2fade8c98465a167e99e3c11e9a3c85426d6ee | 2,913 | py | Python | wiki-preparation/dump_topn.py | bertrandlalo/piaf-code | 2c879590fca92d85bb6a51e13c9603dd2f7c573f | [
"MIT"
] | 8 | 2020-02-07T15:28:15.000Z | 2021-03-25T22:58:21.000Z | wiki-preparation/dump_topn.py | etalab-ia/piaf-code | 86948a73b26498948e70ad92e4b9b0231eac81f7 | [
"MIT"
] | null | null | null | wiki-preparation/dump_topn.py | etalab-ia/piaf-code | 86948a73b26498948e70ad92e4b9b0231eac81f7 | [
"MIT"
] | null | null | null | import struct
import pickle
import sys
class DataInputStream:
"""
Reading from Java DataInputStream format.
"""
def __init__(self, stream):
self.stream = stream
def read_boolean(self):
return struct.unpack('?', self.stream.read(1))[0]
def read_byte(self):
return struct.unpack('b', self.stream.read(1))[0]
def read_unsigned_byte(self):
return struct.unpack('B', self.stream.read(1))[0]
def read_char(self):
return chr(struct.unpack('>H', self.stream.read(2))[0])
def read_double(self):
return struct.unpack('>d', self.stream.read(8))[0]
def read_float(self):
return struct.unpack('>f', self.stream.read(4))[0]
def read_short(self):
return struct.unpack('>h', self.stream.read(2))[0]
def read_unsigned_short(self):
return struct.unpack('>H', self.stream.read(2))[0]
def read_long(self):
return struct.unpack('>q', self.stream.read(8))[0]
def read_utf(self):
utf_length = struct.unpack('>H', self.stream.read(2))[0]
return self.stream.read(utf_length)
def read_int(self):
return struct.unpack('>i', self.stream.read(4))[0]
def main(top_n, path_wiki_pageranks_raw, path_wiki_pagerank_id_title_raw, path_wiki_pagerank_title):
pageranks = []
with open(path_wiki_pageranks_raw, 'rb') as f:
stream = DataInputStream(f)
while True:
try:
val = stream.read_double()
pageranks.append(val)
except struct.error:
print("I am dead")
break
id_title = {}
with open(path_wiki_pagerank_id_title_raw) as f:
for title in f:
page_id = int(f.readline())
id_title[page_id] = title.rstrip()
with open(path_wiki_pagerank_title, 'w') as f:
for page_id, pagerank in enumerate(pageranks):
if pagerank > 0.0 and page_id in id_title:
title = id_title.get(page_id)
f.write('{} \t {} \n '.format(pagerank, title))
with open(path_wiki_pagerank_title) as f:
tuples = [(i.split('\t')[0], i.split('\t')[1])
for i in f.readlines() if i.strip()]
sorted_ = sorted(tuples, key=lambda tup: tup[0])
pickle.dump(sorted_[:top_n], open(f"top_{top_n}.pkl", 'wb'))
if __name__ == "__main__":
if len(sys.argv) < 3:
print(
"Usage: \n dump_topn.py top_N input_path_wikipedia-pageranks.raw"
" input_path_wikipedia-pagerank-id-title.raw"
" output_path_wikipedia-pagerank-title.txt")
exit(1)
top_n = int(sys.argv[1])
path_wiki_pageranks_raw = sys.argv[2]
path_wiki_pagerank_id_title_raw = sys.argv[3]
path_wiki_pagerank_title = sys.argv[4]
main(top_n, path_wiki_pageranks_raw,
path_wiki_pagerank_id_title_raw,
path_wiki_pagerank_title)
| 29.13 | 100 | 0.61174 | 1,157 | 0.397185 | 0 | 0 | 0 | 0 | 0 | 0 | 323 | 0.110882 |
7e30930c06c92e07eeb6ff1012f99ba08b74bc72 | 213 | py | Python | dzdp-server/app/source/job_creator.py | Onekki/dzdp | a4625b8ef998ed09845442837b8a7b6369011f5d | [
"MIT"
] | null | null | null | dzdp-server/app/source/job_creator.py | Onekki/dzdp | a4625b8ef998ed09845442837b8a7b6369011f5d | [
"MIT"
] | null | null | null | dzdp-server/app/source/job_creator.py | Onekki/dzdp | a4625b8ef998ed09845442837b8a7b6369011f5d | [
"MIT"
] | null | null | null | from fetcher.source.fetcher import Fetcher
from fetcher.source.managers.notification import FetcherException
def fetch(config_dict):
    """Run a Fetcher built from *config_dict* and report completion."""
    fetcher = Fetcher(config_dict)
    fetcher.start()
    return "Job has been finished"
| 23.666667 | 65 | 0.769953 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.107981 |
7e357df698ecec1e1881a2b4bd0ec295d7103a9f | 1,102 | py | Python | users/forms.py | shyam999/Django-blog | 8f59987f362be0edca4e1542f820eb0d82c7bf2c | [
"MIT"
] | 13 | 2020-04-16T14:34:07.000Z | 2021-06-22T04:27:02.000Z | users/forms.py | alexhan-sys/Django-blog | 563d1975d85b2c288d6d58b5af8d54f9ab16541e | [
"MIT"
] | 4 | 2021-03-30T13:00:21.000Z | 2022-01-13T02:30:26.000Z | users/forms.py | alexhan-sys/Django-blog | 563d1975d85b2c288d6d58b5af8d54f9ab16541e | [
"MIT"
] | 5 | 2020-04-09T18:01:45.000Z | 2021-09-06T12:47:17.000Z | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import UserProfile
class UserRegistrationForm(UserCreationForm):
    """Sign-up form: username, email and double password entry.

    Subclasses Django's ``UserCreationForm`` but blanks the verbose default
    help texts and persists the user via ``User.objects.create_user``.
    """

    def __init__(self, *args, **kwargs):
        super(UserRegistrationForm, self).__init__(*args, **kwargs)
        # Hide Django's default help texts on the registration page.
        self.fields['username'].help_text = ''
        self.fields['password1'].help_text = ''
        self.fields['password2'].help_text = ''

    class Meta:
        model = User
        fields = (
            'username',
            'email',
            'password1',
            'password2'
        )

    def save(self):
        """Create and persist the user with a properly hashed password.

        NOTE(review): this override drops the conventional ``commit=True``
        parameter of ``ModelForm.save``; any caller passing ``commit=``
        will raise TypeError — confirm no caller relies on it.
        """
        user = User.objects.create_user(username=self.cleaned_data['username'], password=self.cleaned_data['password1'])
        user.email = self.cleaned_data['email']
        user.save()
        return user
class UserProfileForm(forms.ModelForm):
    """Form for creating a user profile (picture and bio)."""

    class Meta:
        model = UserProfile
        fields = ('profile_pic', 'bio')
class ProfileEditForm(forms.ModelForm):
    """Form for editing an existing user profile (same fields as creation)."""

    class Meta:
        model = UserProfile
        fields = ['profile_pic', 'bio']
7e35b708c6a565669ac9a9afca16f8645635f9cc | 3,437 | py | Python | StatementExample/urls.py | linkhub-sdk/popbill.example.django | 85ea168fd1497b690652c3f8c46b560ed7a0e1b1 | [
"MIT"
] | null | null | null | StatementExample/urls.py | linkhub-sdk/popbill.example.django | 85ea168fd1497b690652c3f8c46b560ed7a0e1b1 | [
"MIT"
] | 5 | 2020-02-11T23:43:56.000Z | 2022-03-18T08:13:13.000Z | StatementExample/urls.py | linkhub-sdk/popbill.example.django | 85ea168fd1497b690652c3f8c46b560ed7a0e1b1 | [
"MIT"
] | 1 | 2018-03-29T09:20:06.000Z | 2018-03-29T09:20:06.000Z | # -*- coding: utf-8 -*-
from django.conf.urls import url
from . import views
# URL routes for the Popbill electronic-statement example app.
# (Section comments translated from Korean to English.)
urlpatterns = [
    # Index page
    url(r'^$', views.index, name='index'),

    # Issue electronic statements
    url(r'^CheckMgtKeyInUse$', views.checkMgtKeyInUse, name='CheckMgtKeyInUse'),
    url(r'^RegistIssue$', views.registIssue, name='RegistIssue'),
    url(r'^Register$', views.register, name='Register'),
    url(r'^Update$', views.update, name='Update'),
    url(r'^Issue$', views.issue, name='Issue'),
    url(r'^Cancel$', views.cancel, name='Cancel'),
    url(r'^Delete$', views.delete, name='Delete'),

    # Statement information lookup
    url(r'^GetInfo$', views.getInfo, name='GetInfo'),
    url(r'^GetInfos$', views.getInfos, name='GetInfos'),
    url(r'^GetDetailInfo$', views.getDetailInfo, name='GetDetailInfo'),
    url(r'^Search$', views.search, name='Search'),
    url(r'^GetLogs$', views.getLogs, name='GetLogs'),
    url(r'^GetURL$', views.getURL, name='GetURL'),

    # Statement view / print
    url(r'^GetPopUpURL$', views.getPopUpURL, name='getPopUpURL'),
    url(r'^GetViewURL$', views.getViewURL, name='getViewURL'),
    url(r'^GetPrintURL$', views.getPrintURL, name='GetPrintURL'),
    url(r'^GetEPrintURL$', views.getEPrintURL, name='GetEPrintURL'),
    url(r'^GetMassPrintURL$', views.getMassPrintURL, name='GetMassPrintURL'),
    url(r'^GetMailURL$', views.getMailURL, name='GetMailURL'),

    # Additional features
    url(r'^GetAccessURL', views.getAccessURL, name='GetAccessURL'),
    url(r'^GetSealURL', views.getSealURL, name='GetSealURL'),
    url(r'^AttachFile$', views.attachFile, name='AttachFile'),
    url(r'^DeleteFile$', views.deleteFile, name='DeleteFile'),
    url(r'^GetFiles$', views.getFiles, name='GetFiles'),
    url(r'^SendEmail$', views.sendEmail, name='SendEmail'),
    url(r'^SendSMS$', views.sendSMS, name='SendSMS'),
    url(r'^SendFAX$', views.sendFAX, name='SendFAX'),
    url(r'^FAXSend$', views.FAXSend, name='FAXSend'),
    url(r'^AttachStatement$', views.attachStatement, name='AttachStatement'),
    url(r'^DetachStatement$', views.detachStatement, name='DetachStatement'),
    url(r'^ListEmailConfig', views.listEmailConfig, name='ListEmailConfig'),
    url(r'^UpdateEmailConfig', views.updateEmailConfig, name='UpdateEmailConfig'),

    # Point (charge) management
    url(r'^GetBalance$', views.getBalance, name='GetBalance'),
    url(r'^GetChargeURL', views.getChargeURL, name='GetChargeURL'),
    url(r'^GetPaymentURL', views.getPaymentURL, name='GetPaymentURL'),
    url(r'^GetUseHistoryURL', views.getUseHistoryURL, name='GetUseHistoryURL'),
    url(r'^GetPartnerBalance$', views.getPartnerBalance, name='GetPartnerBalance'),
    url(r'^GetPartnerURL$', views.getPartnerURL, name='GetPartnerURL'),
    url(r'^GetUnitCost$', views.getUnitCost, name='GetUnitCost'),
    url(r'^GetChargeInfo$', views.getChargeInfo, name='GetChargeInfo'),

    # Member information
    url(r'^CheckIsMember$', views.checkIsMember, name='CheckIsMember'),
    url(r'^CheckID$', views.checkID, name='CheckID'),
    url(r'^JoinMember$', views.joinMember, name='JoinMember'),
    url(r'^GetCorpInfo$', views.getCorpInfo, name='GetCorpInfo'),
    url(r'^UpdateCorpInfo$', views.updateCorpInfo, name='UpdateCorpInfo'),
    url(r'^RegistContact$', views.registContact, name='RegistContact'),
    url(r'^GetContactInfo$', views.getContactInfo, name='GetContactInfo'),
    url(r'^ListContact$', views.listContact, name='ListContact'),
    url(r'^UpdateContact$', views.updateContact, name='UpdateContact'),
]
| 48.408451 | 83 | 0.683445 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,562 | 0.445141 |
7e35ba512bdc355a15686da4ebde9ef6630fcc86 | 564 | py | Python | setup.py | enricobacis/timeme | 6c906bf40daa547a8a11f034bd00b30f74519a72 | [
"MIT"
] | 1 | 2017-02-16T07:59:22.000Z | 2017-02-16T07:59:22.000Z | setup.py | enricobacis/timeme | 6c906bf40daa547a8a11f034bd00b30f74519a72 | [
"MIT"
] | null | null | null | setup.py | enricobacis/timeme | 6c906bf40daa547a8a11f034bd00b30f74519a72 | [
"MIT"
] | null | null | null | from setuptools import setup
# Use the README from its "Description" section onward as the long
# description (drops any badges/headers above that heading).
# NOTE: str.index raises ValueError if the heading is missing, failing
# the build loudly rather than shipping a wrong description.
with open('README.rst') as README:
    long_description = README.read()
    long_description = long_description[long_description.index('Description'):]

setup(name='timeme',
      version='0.1.1',
      description='Decorator that prints the running time of a function',
      long_description=long_description,
      url='http://github.com/enricobacis/timeme',
      author='Enrico Bacis',
      author_email='enrico.bacis@gmail.com',
      license='MIT',
      packages=['timeme'],
      keywords='time timing function decorator'
      )
| 31.333333 | 79 | 0.695035 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.381206 |
7e36100f24d4559426fcc2bf85da8105f8f8b446 | 7,385 | py | Python | tests/interface/test_cli.py | annakasprzik/qualle | 871f7fbce3d6d3da07fe7197cf21a5a68720645d | [
"Apache-2.0"
] | null | null | null | tests/interface/test_cli.py | annakasprzik/qualle | 871f7fbce3d6d3da07fe7197cf21a5a68720645d | [
"Apache-2.0"
] | null | null | null | tests/interface/test_cli.py | annakasprzik/qualle | 871f7fbce3d6d3da07fe7197cf21a5a68720645d | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 ZBW – Leibniz Information Centre for Economics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import Namespace
import pytest
import qualle.interface.cli as cli
from qualle.interface.config import FeaturesEnum, RegressorSettings, \
SubthesauriLabelCalibrationSettings, TrainSettings, EvalSettings, \
RESTSettings
from qualle.interface.cli import CliValidationError, handle_train, handle_eval
import tests.interface.common as c
# Model path reused by the eval/rest handler tests below.
DUMMY_MODEL_PATH = '/tmp/model'


@pytest.fixture
def train_args_dict():
    """Baseline CLI argument dict for `handle_train` (SLC disabled)."""
    return dict(
        train_data_path='/tmp/train',
        output='/tmp/output',
        slc=False,
        should_debug=False,
        features=[],
        label_calibrator_regressor=[
            '{"class": "sklearn.ensemble.GradientBoostingRegressor",'
            '"min_samples_leaf": 30, "max_depth": 5, "n_estimators": 10}'
        ],
        quality_estimator_regressor=[
            '{"class": "sklearn.ensemble.ExtraTreesRegressor"}'
        ]
    )
@pytest.fixture
def train_args_dict_with_slc(train_args_dict):
    """Extend the baseline args with subthesauri-label-calibration options."""
    train_args_dict['slc'] = True
    train_args_dict['thsys'] = [c.DUMMY_THESAURUS_FILE]
    train_args_dict['s_type'] = [c.DUMMY_SUBTHESAURUS_TYPE]
    train_args_dict['c_uri_prefix'] = [c.DUMMY_CONCEPT_TYPE_PREFIX]
    train_args_dict['c_type'] = [c.DUMMY_CONCEPT_TYPE]
    train_args_dict['subthesauri'] = []
    train_args_dict['use_sparse_count_matrix'] = False
    return train_args_dict
@pytest.fixture(autouse=True)
def mock_internal_interface(mocker):
    """Stub the real train/evaluate entry points for every test in this module."""
    mocker.patch('qualle.interface.cli.train')
    mocker.patch('qualle.interface.cli.evaluate')
def test_handle_train_slc_without_all_required_args_raises_exc(
        train_args_dict
):
    """SLC enabled but thsys/s_type/c_uri_prefix missing -> validation error."""
    train_args_dict['slc'] = True
    train_args_dict['thsys'] = train_args_dict['s_type'] = train_args_dict[
        'c_uri_prefix'] = None
    train_args_dict['c_type'] = 'http://test'
    with pytest.raises(CliValidationError):
        handle_train(Namespace(**train_args_dict))
def test_handle_train_slc_with_subthesauri(train_args_dict_with_slc):
    """A comma-joined subthesauri argument is split into a list in settings."""
    train_args_dict_with_slc['subthesauri'] = [
        ','.join((c.DUMMY_SUBTHESAURUS_A, c.DUMMY_SUBTHESAURUS_B))]
    handle_train(Namespace(**train_args_dict_with_slc))

    cli.train.assert_called_once()
    actual_settings = cli.train.call_args[0][0]
    assert isinstance(actual_settings, TrainSettings)
    assert actual_settings.subthesauri_label_calibration ==\
        SubthesauriLabelCalibrationSettings(
            thesaurus_file=c.DUMMY_THESAURUS_FILE,
            subthesaurus_type=c.DUMMY_SUBTHESAURUS_TYPE,
            concept_type=c.DUMMY_CONCEPT_TYPE,
            concept_type_prefix=c.DUMMY_CONCEPT_TYPE_PREFIX,
            subthesauri=[c.DUMMY_SUBTHESAURUS_A, c.DUMMY_SUBTHESAURUS_B]
        )
def test_handle_train_slc_without_subthesauri(train_args_dict_with_slc):
    """SLC without explicit subthesauri yields an empty subthesauri list."""
    handle_train(Namespace(**train_args_dict_with_slc))

    cli.train.assert_called_once()
    actual_settings = cli.train.call_args[0][0]
    assert isinstance(actual_settings, TrainSettings)
    assert actual_settings.subthesauri_label_calibration ==\
        SubthesauriLabelCalibrationSettings(
            thesaurus_file=c.DUMMY_THESAURUS_FILE,
            subthesaurus_type=c.DUMMY_SUBTHESAURUS_TYPE,
            concept_type=c.DUMMY_CONCEPT_TYPE,
            concept_type_prefix=c.DUMMY_CONCEPT_TYPE_PREFIX,
            subthesauri=[]
        )
def test_handle_train_slc_with_sparse_count_matrix(train_args_dict_with_slc):
    """The sparse-count-matrix flag is forwarded into the SLC settings."""
    train_args_dict_with_slc['use_sparse_count_matrix'] = True
    handle_train(Namespace(**train_args_dict_with_slc))

    cli.train.assert_called_once()
    actual_settings = cli.train.call_args[0][0]
    assert isinstance(actual_settings, TrainSettings)
    assert actual_settings.subthesauri_label_calibration ==\
        SubthesauriLabelCalibrationSettings(
            thesaurus_file=c.DUMMY_THESAURUS_FILE,
            subthesaurus_type=c.DUMMY_SUBTHESAURUS_TYPE,
            concept_type=c.DUMMY_CONCEPT_TYPE,
            concept_type_prefix=c.DUMMY_CONCEPT_TYPE_PREFIX,
            subthesauri=[],
            use_sparse_count_matrix=True
        )
def test_handle_train_without_slc(train_args_dict):
    """With slc=False the SLC settings stay None."""
    handle_train(Namespace(**train_args_dict))

    cli.train.assert_called_once()
    actual_settings = cli.train.call_args[0][0]
    assert isinstance(actual_settings, TrainSettings)
    assert actual_settings.subthesauri_label_calibration is None
def test_handle_train_all_features(train_args_dict):
    """'all' expands to every feature enum member."""
    train_args_dict['features'] = ['all']
    handle_train(Namespace(**train_args_dict))

    cli.train.assert_called_once()
    actual_settings = cli.train.call_args[0][0]
    assert isinstance(actual_settings, TrainSettings)
    assert actual_settings.features == [
        FeaturesEnum.CONFIDENCE, FeaturesEnum.TEXT
    ]
def test_handle_train_confidence_features(train_args_dict):
    """A single named feature maps to exactly that enum member."""
    train_args_dict['features'] = ['confidence']
    handle_train(Namespace(**train_args_dict))

    cli.train.assert_called_once()
    actual_settings = cli.train.call_args[0][0]
    assert isinstance(actual_settings, TrainSettings)
    assert actual_settings.features == [FeaturesEnum.CONFIDENCE]
def test_handle_train_no_features(train_args_dict):
    """No feature flags -> empty feature list in settings."""
    handle_train(Namespace(**train_args_dict))

    cli.train.assert_called_once()
    actual_settings = cli.train.call_args[0][0]
    assert isinstance(actual_settings, TrainSettings)
    assert actual_settings.features == []
def test_handle_train_creates_regressors(train_args_dict):
    """The JSON regressor specs are parsed into RegressorSettings objects."""
    handle_train(Namespace(**train_args_dict))

    cli.train.assert_called_once()
    actual_settings = cli.train.call_args[0][0]
    assert isinstance(actual_settings, TrainSettings)
    assert actual_settings.label_calibrator_regressor == RegressorSettings(
        regressor_class='sklearn.ensemble.GradientBoostingRegressor',
        params=dict(min_samples_leaf=30, max_depth=5, n_estimators=10)
    )
    assert actual_settings.quality_estimator_regressor == RegressorSettings(
        regressor_class='sklearn.ensemble.ExtraTreesRegressor',
        params=dict()
    )
def test_handle_eval():
    """handle_eval forwards path and model file into EvalSettings."""
    handle_eval(
        Namespace(**dict(test_data_path='/tmp/test', model=DUMMY_MODEL_PATH)))

    cli.evaluate.assert_called_once()
    actual_settings = cli.evaluate.call_args[0][0]
    assert actual_settings == EvalSettings(
        test_data_path='/tmp/test',
        model_file=DUMMY_MODEL_PATH
    )
def test_handle_rest(mocker):
    """handle_rest unwraps the single-element host/port lists into RESTSettings."""
    m_run = mocker.Mock()
    mocker.patch('qualle.interface.cli.run', m_run)
    cli.handle_rest(
        Namespace(**dict(model=DUMMY_MODEL_PATH, port=[9000], host=['x']))
    )
    m_run.assert_called_once_with(
        RESTSettings(model_file=DUMMY_MODEL_PATH, host='x', port=9000)
    )
| 33.568182 | 78 | 0.728097 | 0 | 0 | 0 | 0 | 1,116 | 0.151076 | 0 | 0 | 1,224 | 0.165696 |
7e36ca55713be33821350bb8d4032d7301720cee | 12,743 | py | Python | lale/lib/lale/smac.py | ksrinivs64/lale | e0ffc357c3711940078718717aebc5b06c9dc4ae | [
"Apache-2.0"
] | null | null | null | lale/lib/lale/smac.py | ksrinivs64/lale | e0ffc357c3711940078718717aebc5b06c9dc4ae | [
"Apache-2.0"
] | null | null | null | lale/lib/lale/smac.py | ksrinivs64/lale | e0ffc357c3711940078718717aebc5b06c9dc4ae | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import traceback
import numpy as np
from sklearn.metrics import check_scoring, log_loss
from sklearn.model_selection import train_test_split
from sklearn.model_selection._split import check_cv
import lale.docstrings
import lale.helpers
import lale.operators
import lale.sklearn_compat
from lale.helpers import cross_val_score_track_trials
from lale.lib._common_schemas import (
schema_best_score_single,
schema_cv,
schema_estimator,
schema_max_opt_time,
schema_scoring_single,
)
from lale.lib.sklearn import LogisticRegression
# SMAC is an optional dependency: probe for it at import time and record
# the outcome so the constructor can fail with installation instructions
# instead of a raw ImportError at module load.
try:
    # ConfigSpace parameter-space types
    from smac.configspace import ConfigurationSpace

    # SMAC facade and utilities
    from smac.facade.smac_facade import SMAC as orig_SMAC
    from smac.scenario.scenario import Scenario
    from smac.tae.execute_ta_run import BudgetExhaustedException

    from lale.search.lale_smac import (
        get_smac_space,
        lale_op_smac_tae,
        lale_trainable_op_from_config,
    )

    smac_installed = True
except ImportError:
    smac_installed = False

logger = logging.getLogger(__name__)
class _SMACImpl:
    """Implementation backing the lale `SMAC` operator.

    Wraps the SMAC3 optimizer: `fit` searches the estimator's hyperparameter
    grid via cross-validated trials, then refits the incumbent configuration
    on the full training data.
    """

    def __init__(
        self,
        *,
        estimator=None,
        scoring=None,
        best_score=0.0,
        cv=5,
        handle_cv_failure=False,
        max_evals=50,
        max_opt_time=None,
        lale_num_grids=None,
    ):
        assert smac_installed, """Your Python environment does not have smac installed. You can install it with
    pip install smac<=0.10.0
or with
    pip install 'lale[full]'"""
        self.max_evals = max_evals
        # Default to logistic regression when no estimator is supplied.
        if estimator is None:
            self.estimator = LogisticRegression()
        else:
            self.estimator = estimator
        self.scoring = scoring
        # Pick a task-appropriate default metric.
        if self.scoring is None:
            is_clf = self.estimator.is_classifier()
            if is_clf:
                self.scoring = "accuracy"
            else:
                self.scoring = "r2"
        self.best_score = best_score
        self.handle_cv_failure = handle_cv_failure
        self.cv = cv
        self.max_opt_time = max_opt_time
        self.lale_num_grids = lale_num_grids
        self.trials = None  # populated with the SMAC RunHistory by fit()

    def fit(self, X_train, y_train, **fit_params):
        """Run the SMAC search and train the best found pipeline.

        Each trial cross-validates one configuration; SMAC minimizes
        `best_score - cv_score`. On success `self._best_estimator` holds the
        trained incumbent; on failure it is None.
        """
        data_schema = lale.helpers.fold_schema(
            X_train, y_train, self.cv, self.estimator.is_classifier()
        )
        self.search_space: ConfigurationSpace = get_smac_space(
            self.estimator, lale_num_grids=self.lale_num_grids, data_schema=data_schema
        )

        # Scenario object
        scenario_options = {
            "run_obj": "quality",  # optimize quality (alternatively runtime)
            "runcount-limit": self.max_evals,  # maximum function evaluations
            "cs": self.search_space,  # configuration space
            "deterministic": "true",
            "abort_on_first_run_crash": False,
        }
        if self.max_opt_time is not None:
            scenario_options["wallclock_limit"] = self.max_opt_time
        self.scenario = Scenario(scenario_options)

        # Resolve the int/iterable `cv` argument into a concrete splitter.
        self.cv = check_cv(
            self.cv, y=y_train, classifier=self.estimator.is_classifier()
        )

        def smac_train_test(trainable, X_train, y_train):
            # Evaluate one candidate: (cv score, log loss, wall time).
            try:
                cv_score, logloss, execution_time = cross_val_score_track_trials(
                    trainable, X_train, y_train, cv=self.cv, scoring=self.scoring
                )
                logger.debug("Successful trial of SMAC")
            except BaseException as e:
                # If there is any error in cross validation, use the score based on a random train-test split as the evaluation criterion
                if self.handle_cv_failure:
                    (
                        X_train_part,
                        X_validation,
                        y_train_part,
                        y_validation,
                    ) = train_test_split(X_train, y_train, test_size=0.20)
                    start = time.time()
                    trained = trainable.fit(X_train_part, y_train_part, **fit_params)
                    scorer = check_scoring(trainable, scoring=self.scoring)
                    cv_score = scorer(trained, X_validation, y_validation)
                    execution_time = time.time() - start
                    y_pred_proba = trained.predict_proba(X_validation)
                    try:
                        logloss = log_loss(y_true=y_validation, y_pred=y_pred_proba)
                    except BaseException:
                        logloss = 0
                        logger.debug("Warning, log loss cannot be computed")
                else:
                    logger.debug(
                        "Error {} with pipeline:{}".format(e, trainable.to_json())
                    )
                    raise e
            return cv_score, logloss, execution_time

        def f(trainable):
            # Objective handed to SMAC: lower is better, so invert the score.
            return_dict = {}
            try:
                score, logloss, execution_time = smac_train_test(
                    trainable, X_train=X_train, y_train=y_train
                )
                return_dict = {
                    "loss": self.best_score - score,
                    "time": execution_time,
                    "log_loss": logloss,
                }
            except BaseException as e:
                logger.warning(
                    f"Exception caught in SMACCV:{type(e)}, {traceback.format_exc()}, SMAC will set a cost_for_crash to MAXINT."
                )
                raise e
            return return_dict["loss"]

        try:
            smac = orig_SMAC(
                scenario=self.scenario,
                rng=np.random.RandomState(42),  # fixed seed for reproducible searches
                tae_runner=lale_op_smac_tae(self.estimator, f),
            )
            incumbent = smac.optimize()
            self.trials = smac.get_runhistory()
            trainable = lale_trainable_op_from_config(self.estimator, incumbent)
            # get the trainable corresponding to the best params and train it on the entire training dataset.
            trained = trainable.fit(X_train, y_train, **fit_params)
            self._best_estimator = trained
        except BudgetExhaustedException:
            logger.warning(
                "Maximum alloted optimization time exceeded. Optimization exited prematurely"
            )
        except BaseException as e:
            logger.warning("Error during optimization: {}".format(e))
            self._best_estimator = None

        return self

    def predict(self, X_eval, **predict_params):
        """Predict with the best estimator found by fit; None if unavailable."""
        import warnings

        warnings.filterwarnings("ignore")
        trained = self._best_estimator
        if trained is None:
            # NOTE(review): the '{}' placeholder below is never filled
            # (no .format call) — the message prints a literal '{}'.
            logger.warning(
                "Could not get trained best estimator when predicting using SMACCV:{}, the error is"
            )
            return None
        try:
            predictions = trained.predict(X_eval, **predict_params)
        except ValueError as e:
            logger.warning(
                "ValueError in predicting using SMACCV:{}, the error is:{}".format(
                    trained, e
                )
            )
            predictions = None

        return predictions

    def get_trials(self):
        """Returns the trials i.e. RunHistory object.

        Returns
        -------
        smac.runhistory.runhistory.RunHistory
            RunHistory of all the trials executed during the optimization i.e. fit method of SMACCV.
        """
        return self.trials

    def get_pipeline(self, pipeline_name=None, astype="lale"):
        """Return the best trained pipeline (only the unnamed best is supported)."""
        if pipeline_name is not None:
            raise NotImplementedError("Cannot get pipeline by name yet.")
        result = getattr(self, "_best_estimator", None)
        if result is None or astype == "lale":
            return result
        assert astype == "sklearn", astype
        # TODO: should this try and return an actual sklearn pipeline?
        return result
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": [
"estimator",
"max_evals",
"cv",
"handle_cv_failure",
"max_opt_time",
"lale_num_grids",
],
"relevantToOptimizer": ["estimator"],
"additionalProperties": False,
"properties": {
"estimator": schema_estimator,
"scoring": schema_scoring_single,
"best_score": schema_best_score_single,
"cv": schema_cv,
"handle_cv_failure": {
"description": """How to deal with cross validation failure for a trial.
If True, continue the trial by doing a 80-20 percent train-validation
split of the dataset input to fit and report the score on the
validation part. If False, terminate the trial with FAIL status.""",
"type": "boolean",
"default": False,
},
"max_evals": {
"type": "integer",
"minimum": 1,
"default": 50,
"description": "Number of trials of SMAC search i.e. runcount_limit of SMAC.",
},
"max_opt_time": schema_max_opt_time,
"lale_num_grids": {
"anyOf": [
{"description": "If not set, keep all grids.", "enum": [None]},
{
"description": "Fraction of grids to keep.",
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"exclusiveMaximum": True,
},
{
"description": "Number of grids to keep.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"anyOf": [
{"type": "array", "items": {"type": ["number", "string"]}},
{"type": "string"},
]
},
},
"y": {"type": "array", "items": {"type": "number"}},
},
}
_input_predict_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"anyOf": [
{"type": "array", "items": {"type": ["number", "string"]}},
{"type": "string"},
]
},
}
},
}
_output_predict_schema = {"type": "array", "items": {"type": "number"}}
_combined_schemas = {
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.smac.html",
"import_from": "lale.lib.lale",
"description": """SMAC_, the optimizer used inside auto-weka and auto-sklearn.
.. _SMAC: https://github.com/automl/SMAC3
Examples
--------
>>> from sklearn.metrics import make_scorer, f1_score, accuracy_score
>>> lr = LogisticRegression()
>>> clf = SMAC(estimator=lr, scoring='accuracy', cv=5)
>>> from sklearn import datasets
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> trained = clf.fit(X, y)
>>> predictions = trained.predict(X)
Other scoring metrics:
>>> clf = SMAC(estimator=lr, scoring=make_scorer(f1_score, average='macro'), cv=3, max_evals=2)
""",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
SMAC = lale.operators.make_operator(_SMACImpl, _combined_schemas)
lale.docstrings.set_docstrings(SMAC)
| 34.627717 | 137 | 0.556541 | 6,818 | 0.535039 | 0 | 0 | 0 | 0 | 0 | 0 | 4,152 | 0.325826 |
7e3819a044cb8584161c27f50d05ce5285d87d92 | 84 | py | Python | org.beerware.empty.app/assets/axp.py | pmp-p/projects | e958ef2c6d89d0d818086d4c88668d46044ace14 | [
"MIT"
] | null | null | null | org.beerware.empty.app/assets/axp.py | pmp-p/projects | e958ef2c6d89d0d818086d4c88668d46044ace14 | [
"MIT"
] | 3 | 2020-11-01T18:54:24.000Z | 2020-11-15T03:59:34.000Z | org.beerware.empty.app/assets/axp.py | pmp-p/projects | e958ef2c6d89d0d818086d4c88668d46044ace14 | [
"MIT"
] | null | null | null |
# Android demo snippet: the star-import must run first so that `widget`
# is in scope for the print below.
from android import *
print(widget.Button)
from android.widget import TextView  # imported after use of `widget` above; order is deliberate here
| 9.333333 | 35 | 0.77381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7e383cf10dafc058011f30ba8f5f288676b64fb4 | 777 | py | Python | app/routes/student/subjects.py | roshnet/Electron-Server | f21ced024d69d7d96e89a2aa90c80f3d6757b3e1 | [
"MIT"
] | null | null | null | app/routes/student/subjects.py | roshnet/Electron-Server | f21ced024d69d7d96e89a2aa90c80f3d6757b3e1 | [
"MIT"
] | 5 | 2021-10-30T06:04:47.000Z | 2021-11-14T21:09:00.000Z | app/routes/student/subjects.py | roshnet/Electron-Server | f21ced024d69d7d96e89a2aa90c80f3d6757b3e1 | [
"MIT"
] | 1 | 2022-02-28T15:24:58.000Z | 2022-02-28T15:24:58.000Z | from app import app
from app.database import db
from app.database.models.student_subject_map import StudentSubjectMap
from app.database.models.subjects import Subjects
from fastapi import Depends, Response, status
from fastapi_jwt_auth import AuthJWT
@app.get("/students/{student_id}/subjects")
async def student_subjects(student_id, response: Response, Auth: AuthJWT = Depends()):
    """Return every subject mapped to the given student; 404 if none.

    Requires a valid JWT access token (jwt_required raises otherwise).
    """
    Auth.jwt_required()

    # Join through the student<->subject mapping table.
    # NOTE(review): the join condition only pins the student id; the actual
    # subject linkage happens in the filter below — confirm this produces
    # the intended SQL for this ORM version.
    subjects = (
        db.query(Subjects)
        .join(StudentSubjectMap, StudentSubjectMap.student_id == student_id)
        .filter(Subjects.id == StudentSubjectMap.subject_id)
        .all()
    )

    if not subjects:
        response.status_code = status.HTTP_404_NOT_FOUND
        return {"result": "fail", "reason": "No subjects found"}

    return subjects
| 31.08 | 86 | 0.728443 | 0 | 0 | 0 | 0 | 523 | 0.673102 | 479 | 0.616474 | 74 | 0.095238 |
7e385eaec934a61b23372d87a81690aa42330d15 | 3,425 | py | Python | examples/runner/parallel/pipedream.py | nox-410/Hetu | aa5065ac40a1225f26fe42c92b5077da77e16745 | [
"Apache-2.0"
] | null | null | null | examples/runner/parallel/pipedream.py | nox-410/Hetu | aa5065ac40a1225f26fe42c92b5077da77e16745 | [
"Apache-2.0"
] | null | null | null | examples/runner/parallel/pipedream.py | nox-410/Hetu | aa5065ac40a1225f26fe42c92b5077da77e16745 | [
"Apache-2.0"
] | null | null | null | import hetu as ht
import os
import sys
import time
import argparse
import numpy as np
def fc(x, shape, name, with_relu=True):
    """Fully-connected layer: out = x @ W + b, with an optional ReLU.

    Weight and bias are fresh random-normal variables named after *name*.
    """
    w = ht.init.random_normal(shape, stddev=0.1, name=name + '_weight')
    b = ht.init.random_normal(shape[-1:], stddev=0.1, name=name + '_bias')
    out = ht.matmul_op(x, w)
    out = out + ht.broadcastto_op(b, out)
    if not with_relu:
        return out
    return ht.relu_op(out)
def make_generator(bs, x, y):
    """Return a pair of generators yielding aligned mini-batches of x and y.

    Both generators produce ``x.shape[0] // bs`` full batches; a trailing
    partial batch is dropped, matching the original behavior.

    Refactor: the two copy-pasted generator bodies are collapsed into one
    helper (behavior unchanged).
    """
    total_batches = x.shape[0] // bs

    def batches(data):
        # Yield consecutive slices [i*bs, (i+1)*bs) of `data`.
        for i in range(total_batches):
            yield data[i * bs:(i + 1) * bs]

    return batches(x), batches(y)
if __name__ == "__main__":
    # argument parser
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type=int, default=5, help='training epochs')
    parser.add_argument('--warmup', type=int, default=2, help='warm up steps excluded from timing')
    parser.add_argument('--batch-size', type=int, default=2048, help='batch size')
    parser.add_argument('--learning-rate', type=float, default=0.0001, help='learning rate')
    args = parser.parse_args()

    datasets = ht.data.mnist()
    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # pipeline parallel: stage the 8-layer MLP across GPUs 0..7
    with ht.context(ht.gpu(0)):
        x = ht.Variable(name="dataloader_x", trainable=False)
        activation = fc(x, (784, 1024), 'mlp_fc1', with_relu=True)

    for i in range(1, 7):
        with ht.context(ht.gpu(i)):
            activation = fc(activation, (1024, 1024), 'mlp_fc%d' % (i + 1), with_relu=True)

    with ht.context(ht.gpu(7)):
        # NOTE(review): ReLU on the output layer before softmax-CE is
        # unusual — confirm it is intended.
        y_pred = fc(activation, (1024, 10), 'mlp_fc8', with_relu=True)
        y_ = ht.Variable(name="dataloader_y", trainable=False)
        loss = ht.softmaxcrossentropy_op(y_pred, y_)
        loss = ht.reduce_mean_op(loss, [0])
        opt = ht.optim.SGDOptimizer(learning_rate=args.learning_rate)
        train_op = opt.minimize(loss)

    executor = ht.Executor([loss, train_op], pipedream=True)

    """
    notice: in pipedream mode, we have multiple version of weights, to achieve the best throughput
    and train all the versions equally, we should combine the training data in advance and train them
    in one pass
    """
    # combine the feed dataset: shuffle each epoch's copy, then concatenate
    # everything so one executor.run covers all epochs.
    train_set_x_list = []
    train_set_y_list = []
    for epoch in range(args.epochs):
        rand_ind = np.random.randint(train_set_y.shape[0], size=(train_set_y.shape[0],))
        train_set_x_list.append(train_set_x[rand_ind])
        train_set_y_list.append(train_set_y[rand_ind])
    train_set_x = np.concatenate(train_set_x_list, axis=0)
    train_set_y = np.concatenate(train_set_y_list, axis=0)

    # training
    x_gen, y_gen = make_generator(args.batch_size, train_set_x, train_set_y)
    res = executor.run(feed_dict={x: x_gen, y_: y_gen})
    # Collect per-batch losses; entries may be empty in pipedream mode.
    reduced_res = []
    for elements in res:
        for e in elements:
            if e:
                reduced_res.append(e[0])
    if reduced_res:
        # NOTE: `epoch` here is simply the last value of the loop above
        # (args.epochs - 1); the stats cover the whole combined run.
        print("epoch {}, avg loss {}, max loss {}, min loss {}".format(epoch,
            np.mean(reduced_res), np.max(reduced_res), np.min(reduced_res)))
        #print(reduced_res)
| 35.677083 | 101 | 0.634453 | 0 | 0 | 443 | 0.129343 | 0 | 0 | 0 | 0 | 581 | 0.169635 |
7e38b53b5f4c4aae5647971334f2234e632edaa5 | 1,069 | py | Python | chapter100/mongodb_04.py | thiagola92/learning-databases-with-python | cf23c34d7fd1ecd36dd3e7b30dc5916eb23eaf1e | [
"MIT"
] | null | null | null | chapter100/mongodb_04.py | thiagola92/learning-databases-with-python | cf23c34d7fd1ecd36dd3e7b30dc5916eb23eaf1e | [
"MIT"
] | null | null | null | chapter100/mongodb_04.py | thiagola92/learning-databases-with-python | cf23c34d7fd1ecd36dd3e7b30dc5916eb23eaf1e | [
"MIT"
] | null | null | null | import time
from pymongo import MongoClient
from datetime import datetime
from threading import Thread, Lock
# Timing starts before any DB work so the final print covers the whole run.
start = datetime.now()

client = MongoClient("mongodb://username:password@127.0.0.1")
database = client["database_name"]
collection = database["collection_name"]

threads_count = 0  # number of insert threads currently in flight (guarded by `lock`)
lock = Lock()
package = []  # batch of documents pending insertion
def send(p):
    """Insert one batch of documents; maintain the in-flight thread counter.

    NOTE(review): if insert_many raises, the counter is never decremented
    and the main thread's wait loop will spin forever — confirm acceptable
    for this benchmark script.
    """
    global threads_count

    with lock:
        threads_count += 1

    collection.insert_many(p)

    with lock:
        threads_count -= 1
# Stream the CSV, batching 10k docs per background insert thread.
with open("utils/trash.csv") as file:
    for line in file.readlines():
        name, description = line.split(",")
        package.append({"name": name, "description": description})

        if len(package) >= 10000:
            # Throttle: allow at most 4 insert threads in flight.
            while threads_count >= 4:
                time.sleep(0)

            # package[:] snapshots the batch before it is cleared below.
            Thread(target=send, args=(package[:],), daemon=True).start()
            package.clear()

# Flush the trailing partial batch synchronously.
if package:
    collection.insert_many(package)

# Busy-wait until every insert thread has finished.
while threads_count != 0:
    pass

print(collection.count_documents({}))

collection.drop()
# BUG FIX: the script connects to "database_name" above, but the cleanup
# dropped "mongo" — drop the database that was actually used.
client.drop_database("database_name")

print(datetime.now() - start)
| 20.169811 | 72 | 0.656688 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.109448 |
7e3921c5fde4463df2c8911c8615d3a22260d5d9 | 1,071 | py | Python | Arrays/trapping_rain_water.py | lakshyarawal/pythonPractice | 4b400342198a8270c5ac0c6306afb555f927c6c1 | [
"MIT"
] | null | null | null | Arrays/trapping_rain_water.py | lakshyarawal/pythonPractice | 4b400342198a8270c5ac0c6306afb555f927c6c1 | [
"MIT"
] | null | null | null | Arrays/trapping_rain_water.py | lakshyarawal/pythonPractice | 4b400342198a8270c5ac0c6306afb555f927c6c1 | [
"MIT"
] | null | null | null | """ Trapping rain water: Given an array of non negative integers, they are height of bars.
Find how much water can you collect between these bars """
"""Solution: """
def rain_water(a) -> int:
    """O(n^2) reference implementation of trapping rain water.

    Water held above bar i equals min(tallest bar in a[0..i], tallest bar
    in a[i..n-1]) minus the bar's own height.
    """
    n = len(a)
    total = 0
    for i in range(1, n - 1):
        highest_left = max(a[:i + 1])
        highest_right = max(a[i:])
        total += min(highest_left, highest_right) - a[i]
    return total
def rain_water_eff(a) -> int:
    """O(n) trapping-rain-water using prefix/suffix maxima.

    Water above bar i is min(tallest bar to its left, tallest to its right)
    minus the bar's own height.
    """
    n = len(a)
    # BUG FIX: the original indexed a[0] and a[n-1] unconditionally and
    # raised IndexError on an empty list; fewer than 3 bars hold no water.
    if n < 3:
        return 0
    lmax = [0] * n  # lmax[i] = max(a[0..i])
    rmax = [0] * n  # rmax[i] = max(a[i..n-1])
    lmax[0] = a[0]
    for i in range(1, n):
        lmax[i] = max(lmax[i - 1], a[i])
    rmax[n - 1] = a[n - 1]
    for i in range(n - 2, 0, -1):
        rmax[i] = max(rmax[i + 1], a[i])
    res = 0
    for i in range(1, n - 1):
        res += min(lmax[i], rmax[i]) - a[i]
    return res
def main():
    """Tiny demo: run both implementations on one sample and print the O(n) result."""
    sample = [5, 0, 6, 2, 3]
    print(rain_water_eff(sample))
    slow_answer = rain_water(sample)  # quadratic version; result intentionally unused
    # print(slow_answer)
# Using the special variable
# __name__
if __name__ == "__main__":
main()
| 21.857143 | 90 | 0.513539 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 228 | 0.212885 |
7e3aaf1e8600f0c4dc3c6c5d88b0c978d139ca07 | 3,272 | py | Python | billing/models/pin_models.py | litchfield/merchant | e4fba8a88a326bbde39c26e937c17d5283817320 | [
"BSD-3-Clause"
] | null | null | null | billing/models/pin_models.py | litchfield/merchant | e4fba8a88a326bbde39c26e937c17d5283817320 | [
"BSD-3-Clause"
] | null | null | null | billing/models/pin_models.py | litchfield/merchant | e4fba8a88a326bbde39c26e937c17d5283817320 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
class PinCard(models.Model):
    """A tokenised card from the Pin Payments gateway.

    Only the gateway token and non-sensitive display data are stored;
    the raw card number never touches this database.
    """
    # Gateway-issued card token used for subsequent charges.
    token = models.CharField(max_length=32, db_index=True, editable=False)
    # Masked card number for display only (e.g. "XXXX-XXXX-XXXX-1234").
    display_number = models.CharField(max_length=20, editable=False)
    expiry_month = models.PositiveSmallIntegerField()
    expiry_year = models.PositiveSmallIntegerField()
    # Card scheme as reported by the gateway (e.g. "visa").
    scheme = models.CharField(max_length=20, editable=False)
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    address_line1 = models.CharField(max_length=255)
    address_line2 = models.CharField(max_length=255, blank=True)
    address_city = models.CharField(max_length=255)
    address_postcode = models.CharField(max_length=20)
    address_state = models.CharField(max_length=255)
    address_country = models.CharField(max_length=255)
    created_at = models.DateTimeField(auto_now_add=True)
    user = models.ForeignKey(User, related_name='pin_cards', blank=True, null=True)

    class Meta:
        app_label = 'billing'

    # NOTE(review): Python 2-style __unicode__; Python 3 Django expects
    # __str__ — confirm the supported Django/Python versions.
    def __unicode__(self):
        return 'Card %s' % self.display_number
class PinCustomer(models.Model):
    """A Pin Payments customer record linking a stored card to an email."""
    # Gateway-issued customer token.
    token = models.CharField(unique=True, max_length=32)
    card = models.ForeignKey(PinCard, related_name='customers')
    email = models.EmailField()
    created_at = models.DateTimeField()
    user = models.ForeignKey(User, related_name='pin_customers', blank=True, null=True)

    class Meta:
        app_label = 'billing'

    def __unicode__(self):
        return 'Customer %s' % self.email
class PinCharge(models.Model):
    """A single charge attempt against a stored card (successful or not)."""
    token = models.CharField(unique=True, max_length=32, editable=False)
    card = models.ForeignKey(PinCard, related_name='charges', editable=False)
    customer = models.ForeignKey(PinCustomer, related_name='customers', null=True, blank=True, editable=False)
    # Whether the gateway accepted the charge.
    success = models.BooleanField()
    amount = models.DecimalField(max_digits=16, decimal_places=2)
    # ISO 4217 currency code (3 letters).
    currency = models.CharField(max_length=3)
    description = models.CharField(max_length=255)
    email = models.EmailField()
    ip_address = models.GenericIPAddressField(blank=True, null=True)
    created_at = models.DateTimeField()
    # Human-readable status / error text returned by the gateway.
    status_message = models.CharField(max_length=255)
    error_message = models.CharField(max_length=255)
    user = models.ForeignKey(User, related_name='pin_charges', blank=True, null=True)

    class Meta:
        app_label = 'billing'

    def __unicode__(self):
        return 'Charge %s' % self.email
class PinRefund(models.Model):
    """A refund issued against a previous PinCharge."""
    token = models.CharField(unique=True, max_length=32)
    charge = models.ForeignKey(PinCharge, related_name='refunds')
    success = models.BooleanField()
    amount = models.DecimalField(max_digits=16, decimal_places=2)
    currency = models.CharField(max_length=3)
    created_at = models.DateTimeField()
    status_message = models.CharField(max_length=255)
    error_message = models.CharField(max_length=255)
    user = models.ForeignKey(User, related_name='pin_refunds', blank=True, null=True)

    class Meta:
        app_label = 'billing'

    def __unicode__(self):
        return 'Refund %s' % self.charge.email
| 39.421687 | 110 | 0.731357 | 3,061 | 0.935513 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.056846 |
7e3abcb443f673a56a0690f3906b8e71393e203a | 7,530 | py | Python | 4 - Late Fusion Networks/Perf_Hard_and_Soft_Voting_Prep.py | pcasabianca/Acoustic-UAV-Identification | 3ff08ff4163b42f244ba2ffa00a2c20bc3221382 | [
"MIT"
] | 1 | 2021-07-18T12:48:47.000Z | 2021-07-18T12:48:47.000Z | 4 - Late Fusion Networks/Perf_Hard_and_Soft_Voting_Prep.py | pcasabianca/Acoustic-UAV-Identification | 3ff08ff4163b42f244ba2ffa00a2c20bc3221382 | [
"MIT"
] | null | null | null | 4 - Late Fusion Networks/Perf_Hard_and_Soft_Voting_Prep.py | pcasabianca/Acoustic-UAV-Identification | 3ff08ff4163b42f244ba2ffa00a2c20bc3221382 | [
"MIT"
] | null | null | null | import os
import json
import librosa
import tensorflow as tf
import numpy as np
from termcolor import colored
# Read and save parameters.
DATASET_PATH = "Unseen Testing" # Path of testing dataset.
SAMPLE_RATE = 22050
DURATION = 1 # Measured in seconds (change depending on length of one audio file).
SAMPLES_PER_TRACK = SAMPLE_RATE * DURATION
# Model testing.
SAVED_MODEL = ".../model_1.h5" # Path of trained model (change for all models trained).
# Raw predictions: 1, 0 results (for hard voting) and certainty values (for soft voting).
RESULTS = ".../voted_1.json" # Path to save raw predictions.
# Performance scores (accuracy, precision, recall, f-score).
MODEL_SCORES = ".../scores_1.json"
# Prediction of fed audio.
class _Class_Predict_Service:
    """Singleton service that classifies audio clips with a trained model.

    The singleton instance and the loaded model are attached by the
    Keyword_Spotting_Service() factory below.
    """
    # Mapping so drone = 1 (index 0 in the model output -> class 1).
    model = None
    _mapping = [
        1,
        0
    ]
    _instance = None
    # Predict hard values (1 or 0).
    def predict(self, file_path):
        """Classify one audio file into a hard 0/1 label.

        :param file_path (str): Path to audio file to predict
        :return predicted_class (int): 1 (drone) or 0, per _mapping
        """
        # Extract mels from testing audio.
        mel = self.preprocess(file_path)
        # We need a 4-dim array to feed to the model for prediction: (# samples, # time steps, # coefficients, 1).
        mel = mel[np.newaxis, ..., np.newaxis]
        # Get the predicted label (argmax over the two class scores).
        predictions = self.model.predict(mel)
        predicted_index = np.argmax(predictions)
        predicted_class = self._mapping[predicted_index]
        return predicted_class
    # Outputs certainty values for soft voting (1-0).
    def predict_prob(self, file_path):
        """Return the soft certainty (first class column) for one audio file.

        :param file_path (str): Path to audio file to predict
        :return predict_prob: probability of the first output column
        """
        # Extract mels from testing audio.
        mel = self.preprocess(file_path)
        # We need a 4-dim array to feed to the model for prediction: (# samples, # time steps, # coefficients, 1).
        mel = mel[np.newaxis, ..., np.newaxis]
        # NOTE(review): predict_proba is deprecated/removed in newer Keras —
        # confirm the installed TF version supports it (else use predict()).
        predict_prob = self.model.predict_proba(mel)[:, 0]
        return predict_prob
    # Extract mel specs from raw audio.
    def preprocess(self, file_path, n_mels=90, n_fft=2048, hop_length=512, num_segments=1):
        """Extract a dB-scaled mel spectrogram from an audio file.

        :param file_path (str): Path of audio file
        :param n_mels (int): # of mel bands to extract
        :param n_fft (int): Interval we consider to apply STFT. Measured in # of samples
        :param hop_length (int): Sliding window for STFT. Measured in # of samples
        :param num_segments (int): # of equal segments to split the clip into
        :return: transposed spectrogram (time steps x mel bands)
        """
        num_samples_per_segment = int(SAMPLES_PER_TRACK / num_segments)
        # Load audio file.
        signal, sr = librosa.load(file_path, sr=SAMPLE_RATE)
        # Process segments extracting mels and storing data.
        # NOTE(review): with num_segments > 1 only the last segment's
        # spectrogram is returned — confirm this is intended.
        for s in range(num_segments):
            start_sample = num_samples_per_segment * s  # s=0 --> 0
            finish_sample = start_sample + num_samples_per_segment  # s=0 --> num_samples_per_segment
            # Extract mel specs.
            mel = librosa.feature.melspectrogram(signal[start_sample:finish_sample], sr=sr, n_mels=n_mels, n_fft=n_fft,
                                                 hop_length=hop_length)
            db_mel = librosa.power_to_db(mel)
        return db_mel.T
def Keyword_Spotting_Service():
    """Factory returning the singleton _Class_Predict_Service instance.

    On the first call the instance is created and the trained model is
    loaded from SAVED_MODEL; later calls reuse the same object.

    :return _Class_Predict_Service._instance (_Class_Predict_Service):
    """
    # Ensure an instance is created only the first time the factory function is called.
    if _Class_Predict_Service._instance is None:
        _Class_Predict_Service._instance = _Class_Predict_Service()
        _Class_Predict_Service.model = tf.keras.models.load_model(SAVED_MODEL)
    return _Class_Predict_Service._instance
# Saving results into a json file.
def save_prediction(dataset_path, json_path):
    """Run the trained model over every audio file under *dataset_path*
    and save raw predictions to a JSON file.

    Stores both hard 0/1 labels (for hard voting) and soft certainty
    values (for soft voting).

    :param dataset_path (str): Root folder; one sub-folder per class
    :param json_path (str): Output path for the predictions JSON
    """
    # Dictionary to store data.
    data = {
        "mapping": [],      # class (sub-folder) names
        "names": [],        # audio file names
        "results": [],      # hard results (0 or 1)
        "certainties": [],  # soft results (0-1)
    }
    # Loop through all the classes.
    for dirpath, dirnames, filenames in os.walk(dataset_path):
        # Skip the root folder itself; only class sub-folders hold audio.
        # (Compare by value: 'is not' on strings relies on object identity
        # and is fragile.)
        if dirpath != dataset_path:
            # Save the semantic label (last path component).
            dirpath_components = dirpath.split("/")  # uav/uav.wav => ["uav", "uav.wav"]
            semantic_label = dirpath_components[-1]
            data["mapping"].append(semantic_label)
            print("\nProcessing {}".format(semantic_label))
            # Process files for a specific class.
            for f in filenames:
                file_path = os.path.join(dirpath, f)
                # The factory returns a singleton; both handles must be
                # the same object.
                kss = Keyword_Spotting_Service()
                kss1 = Keyword_Spotting_Service()
                assert kss is kss1
                # Classify unseen audio: hard label and soft certainty.
                keyword = kss.predict(file_path)
                prob = kss.predict_prob(file_path)
                certainty = float(prob)
                data["names"].append(f)
                data["results"].append(keyword)
                data["certainties"].append(certainty)
                print("{}".format(file_path))
    with open(json_path, "w") as fp:
        json.dump(data, fp, indent=4)
# Calculating performance scores (accuracy, precision, recall, f-score).
def performance_calcs(performance_path):
    """Compute accuracy/precision/recall/F1 from the saved raw predictions
    (read from the module-level RESULTS file) and write them to JSON.

    Assumes the first half of the prediction list corresponds to positive
    (drone) samples and the second half to negatives — TODO confirm the
    test set is ordered and balanced this way.

    :param performance_path (str): Output path for the scores JSON
    """
    # Dictionary to store model performance results.
    performance = {
        "TP": [],
        "FN": [],
        "TN": [],
        "FP": [],
        "Accuracy": [],
        "Precision": [],
        "Recall": [],
        "F1 Score": [],
    }
    with open(RESULTS, "r") as fp:
        data = json.load(fp)
    # Convert lists to numpy arrays.
    y = np.array(data["results"])
    # a = positives predicted in first half; b = positives in second half.
    a = float(sum(y[0:int(len(y) / 2)]))
    b = float(sum(y[int(len(y) / 2):int(len(y))]))
    # Calculating TP, TN, FP, FN.
    TP = a
    FN = int(len(y) / 2) - a
    FP = b
    TN = int(len(y) / 2) - b
    # Performance result calcs.
    # NOTE(review): Precision/Recall/F1 divide by sums that can be zero
    # when one class is never predicted — would raise ZeroDivisionError.
    Accuracy = (TP + TN) / (TP + TN + FN + FP)
    Precision = TP / (TP + FP)
    Recall = TP / (TP + FN)
    F1 = (2 * Precision * Recall) / (Precision + Recall)
    performance["TP"].append(TP)
    performance["FN"].append(FN)
    performance["TN"].append(TN)
    performance["FP"].append(FP)
    performance["Accuracy"].append(Accuracy)
    performance["Precision"].append(Precision)
    performance["Recall"].append(Recall)
    performance["F1 Score"].append(F1)
    with open(performance_path, "w") as fp:
        json.dump(performance, fp, indent=4)
if __name__ == "__main__":
    # Run inference over the unseen test set, then compute metrics from
    # the saved raw predictions.
    save_prediction(DATASET_PATH, RESULTS)
    performance_calcs(MODEL_SCORES)
    print(
        colored("Model performance scores have been saved to {}.".format(MODEL_SCORES), "green"))
| 35.518868 | 120 | 0.605578 | 2,834 | 0.376361 | 0 | 0 | 0 | 0 | 0 | 0 | 3,136 | 0.416467 |
7e3b10150f30d30b54781201a09f35f781054e7c | 2,963 | py | Python | contact_test.py | Njihia413/contact-list | bf555aa3657db1f90274dfe768b649007d725285 | [
"Unlicense"
] | null | null | null | contact_test.py | Njihia413/contact-list | bf555aa3657db1f90274dfe768b649007d725285 | [
"Unlicense"
] | null | null | null | contact_test.py | Njihia413/contact-list | bf555aa3657db1f90274dfe768b649007d725285 | [
"Unlicense"
] | null | null | null | import unittest #Importing the unittest module
from contact import Contact #Importing the contact class
#import pyperclip #Pyperclip will allow us to copy and paste items to our clipboard
class TestContact(unittest.TestCase):
    """Unit tests for the Contact class: creation, save, delete, search,
    existence check, and display of contacts."""
    def setUp(self):
        # Fresh Contact object available to every test.
        self.new_contact = Contact("Lyn","Muthoni","0796654066","sonnie2154@gmail.com")
    def tearDown(self):
        # Reset the class-level contact list so tests stay independent.
        Contact.contact_list = []
    #First test to check if our contact objects are being instantiated correctly
    def test_instance(self):
        self.assertEqual(self.new_contact.first_name,"Lyn")
        self.assertEqual(self.new_contact.last_name,"Muthoni")
        self.assertEqual(self.new_contact.phone_number,"0796654066")
        self.assertEqual(self.new_contact.email,"sonnie2154@gmail.com")
    #Second test to check if we can save contacts into the contact list
    def test_save_contact(self):
        self.new_contact.save_contact() #saving the new contact
        self.assertEqual(len(Contact.contact_list),1)
    #Third test to test if we can save multiple contacts
    def test_save_multiple_contact(self):
        self.new_contact.save_contact()
        test_contact = Contact("Test","user","0712345678","test@user.com") #new contact
        test_contact.save_contact()
        self.assertEqual(len(Contact.contact_list),2)
    #Fourth test to test if we can remove a contact from our contact list
    def test_delete_contact(self):
        self.new_contact.save_contact()
        test_contact = Contact("Test","user","0712345678","test@user.com") #new contact
        test_contact.save_contact()
        self.new_contact.delete_contact() #Deleting a contact object
        self.assertEqual(len(Contact.contact_list),1)
    #Fifth test to check if we can find a contact by phone number and display information
    def test_find_contact_by_number(self):
        self.new_contact.save_contact()
        test_contact = Contact("Test","user","0711223344","test@user.com") #new contact
        test_contact.save_contact()
        found_contact = Contact.find_by_number("0711223344")
        self.assertEqual(found_contact.email,test_contact.email)
    #Sixth test to check if a contact object exists
    def test_contact_exists(self):
        self.new_contact.save_contact()
        test_contact = Contact("Test","user","0711223344","test@user.com") #new contact
        test_contact.save_contact()
        contact_exists = Contact.contact_exist("0711223344")
        self.assertTrue(contact_exists)
    #Seventh test to display all contacts
    def test_display_all_contacts(self):
        self.assertEqual(Contact.display_contacts(),Contact.contact_list)
    #Eighth test to allow us to copy items to the clipboard
    '''
    def test_copy_email(self):
        self.new_contact.save_contact()
        Contact.copy_email("0712345678")
        self.assertEqual(self.new_contact.email,pyperclip.paste())
    '''
if __name__ == '__main__':
    unittest.main()
| 42.942029 | 89 | 0.710429 | 2,712 | 0.915289 | 0 | 0 | 0 | 0 | 0 | 0 | 1,204 | 0.406345 |
7e3d308df34767b7c5b7bbfbef6d03d373ce1d9f | 319 | py | Python | back_end/celery_tasks/main.py | 22014471/malonghui_Django | c9c2a68882450f9327e141333f30fdd73e530c28 | [
"MIT"
] | 1 | 2021-01-31T16:57:35.000Z | 2021-01-31T16:57:35.000Z | back_end/celery_tasks/main.py | 22014471/malonghui_Django | c9c2a68882450f9327e141333f30fdd73e530c28 | [
"MIT"
] | null | null | null | back_end/celery_tasks/main.py | 22014471/malonghui_Django | c9c2a68882450f9327e141333f30fdd73e530c28 | [
"MIT"
] | null | null | null | from celery import Celery
import os
# 为celery设置django默认配置
if not os.getenv('DJANGO_SETTINGS_MODULE'):
os.environ['DJANGO_SETTINGS_MODULE'] = 'mlh.settings.dev'
# 创建对象,命名为meiduo,并指明broker
celery_app = Celery('mlh',broker='redis://127.0.0.1:6379/15')
# 自动注册任务
celery_app.autodiscover_tasks(['celery_tasks.sms',]) | 22.785714 | 61 | 0.758621 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.59673 |
7e3f28a79344a9233183603246e2d32fc400d428 | 141 | py | Python | holobot/extensions/crypto/__init__.py | rexor12/holobot | 89b7b416403d13ccfeee117ef942426b08d3651d | [
"MIT"
] | 1 | 2021-05-24T00:17:46.000Z | 2021-05-24T00:17:46.000Z | holobot/extensions/crypto/__init__.py | rexor12/holobot | 89b7b416403d13ccfeee117ef942426b08d3651d | [
"MIT"
] | 41 | 2021-03-24T22:50:09.000Z | 2021-12-17T12:15:13.000Z | holobot/extensions/crypto/__init__.py | rexor12/holobot | 89b7b416403d13ccfeee117ef942426b08d3651d | [
"MIT"
] | null | null | null | from .alert_manager_interface import AlertManagerInterface
from .alert_manager import AlertManager
from .crypto_updater import CryptoUpdater
| 35.25 | 58 | 0.893617 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7e3f3d1673e07847a0ccc4fa36154b7d2891f18d | 676 | py | Python | tests/unittest/parser/test_basic_parser.py | alessandrome/pywiktionary | b9378ca1e2dfe704eaa8a044bd82519b12f81226 | [
"MIT"
] | 4 | 2019-08-08T21:15:01.000Z | 2021-01-14T01:32:18.000Z | tests/unittest/parser/test_basic_parser.py | alessandrome/pywiktionary | b9378ca1e2dfe704eaa8a044bd82519b12f81226 | [
"MIT"
] | 1 | 2021-09-02T17:24:12.000Z | 2021-09-02T17:24:12.000Z | tests/unittest/parser/test_basic_parser.py | alessandrome/pywiktionary | b9378ca1e2dfe704eaa8a044bd82519b12f81226 | [
"MIT"
] | 1 | 2020-03-19T12:57:45.000Z | 2020-03-19T12:57:45.000Z | import unittest
from pywiktionary.parsers import basic_parser
def get_pizza_html_extract():
    """Load the cached 'pizza' page HTML used as a test fixture."""
    fixture_path = 'tests/file/html-responses/pizza-it.html'
    with open(fixture_path, 'r', encoding='utf-8') as fixture:
        return fixture.read()
class BasicParseTestCase(unittest.TestCase):
    """Tests for the abstract BasicParser: construction and the
    not-implemented parse() contract."""
    def test_init(self):
        # The parser must store the HTML it was constructed with.
        parser = basic_parser.BasicParser(get_pizza_html_extract())
        self.assertEqual(get_pizza_html_extract(), parser.html)
    def test_parse_method(self):
        # The base class leaves parse() to subclasses.
        parser = basic_parser.BasicParser(get_pizza_html_extract())
        self.assertRaises(NotImplementedError, parser.parse)
if __name__ == '__main__':
    unittest.main()
| 29.391304 | 99 | 0.739645 | 364 | 0.538462 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.090237 |
7e3f43b78bda7deaae5c966af639b6568ca237b5 | 1,347 | py | Python | snuggle/web/processing/events.py | halfak/snuggle | 384818aaf8a783013b076ada3c74226f10e5dc18 | [
"MIT"
] | 2 | 2021-04-26T20:34:25.000Z | 2021-11-12T11:26:57.000Z | snuggle/web/processing/events.py | halfak/snuggle | 384818aaf8a783013b076ada3c74226f10e5dc18 | [
"MIT"
] | null | null | null | snuggle/web/processing/events.py | halfak/snuggle | 384818aaf8a783013b076ada3c74226f10e5dc18 | [
"MIT"
] | null | null | null | import logging, traceback, time
from bottle import request
from snuggle import configuration
from snuggle import mediawiki
from snuggle import errors
from snuggle.data import types
from snuggle.web.util import responses, user_data
logger = logging.getLogger("snuggle.web.processing.users")
class Events:
	"""Request handlers for querying and recording snuggle events."""
	def __init__(self, model):
		self.model = model
	def action(self, session, doc):
		# NOTE(review): the serialized request is assigned but never used
		# and nothing is returned — confirm this handler is complete.
		request = types.ActionRequest.serialize(doc)
	def query(self, session, query):
		"""
		Queries for PUBLIC events and public event content only.
		"""
		try:
			start = time.time()
			event_docs = []
			for event in self.model.events.query(**query):
				if event.PUBLIC:
					doc = event.serialize()
					doc['id'] = None  # strip internal ids from public output
					event_docs.append(doc)
			end = time.time()
		except Exception:
			logger.error(traceback.format_exc())
			return responses.database_error("getting a set of events with query %s" % query)
		# Clamp 'after' so the recorded query never reaches further back
		# than the synchronizer's max age.
		query['after'] = max(
			query.get('after', 0),
			time.time() - configuration.snuggle['changes_synchronizer']['max_age']
		)
		# Record the query itself as an event; failures here are logged
		# but do not prevent returning the results.
		try:
			snuggler, data = user_data()
			event = types.EventsQueried(
				query,
				end-start,
				len(event_docs),
				snuggler,
				data
			)
			self.model.events.insert(event)
		except Exception as e:
			logger.error(traceback.format_exc())
		return responses.success(event_docs)
| 22.830508 | 83 | 0.690423 | 1,051 | 0.780252 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.138085 |
7e42bb796b8472a4cdf33d6dcb36d814d4383042 | 9,089 | py | Python | S2.Surface_Normal/regNormalNet/regNormalNet.py | leoshine/Spherical_Regression | d19bc2f6f52982d4d58f5ddabe4231381d7facd7 | [
"BSD-2-Clause-FreeBSD"
] | 133 | 2019-04-12T20:52:08.000Z | 2021-12-09T06:34:39.000Z | S2.Surface_Normal/regNormalNet/regNormalNet.py | QUVA-Lab/Spherical_Regression | 6a3b2a19d00f27f2417cb9f2fe21df5963def5b3 | [
"BSD-2-Clause-FreeBSD"
] | 11 | 2019-06-21T04:07:04.000Z | 2022-03-03T07:48:41.000Z | S2.Surface_Normal/regNormalNet/regNormalNet.py | QUVA-Lab/Spherical_Regression | 6a3b2a19d00f27f2417cb9f2fe21df5963def5b3 | [
"BSD-2-Clause-FreeBSD"
] | 16 | 2019-04-18T15:55:48.000Z | 2021-12-14T09:01:57.000Z | # coding: utf8
"""
@Author : Shuai Liao
"""
import os
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from torch.autograd import Variable
import torch
from basic.common import rdict
import numpy as np
from easydict import EasyDict as edict
from collections import OrderedDict as odict
from itertools import product
from basic.common import add_path, env
this_dir = os.path.dirname(os.path.abspath(__file__))
add_path(this_dir+'/../lib/')
from helper import *
from model import VGG16_Trunk
from modelSE import VGG16_Trunk as VGG16SE_Trunk
# net_arch2Trunk = dict(
# vgg16 = VGG16_Trunk,
# vgg16se = VGG16SE_Trunk,
# )
net_arch2Trunk = dict(
vgg16=dict(
Sflat = VGG16_Trunk,
Sexp = VGG16SE_Trunk,
),
)
from pytorch_util.libtrain import copy_weights, init_weights_by_filling
from pytorch_util.torch_v4_feature import LocalResponseNorm # *
from pytorch_util.torch_3rd_layers import Maskout
from pytorch_util.torch_3rd_funcs import norm2unit, exp_Normalization
def cls_pred(output, topk=(1,), dim=1):
    """Return the indices of the top-k scores along *dim*.

    :param output: score tensor (e.g. batch x classes x ...)
    :param topk: tuple of k values; max(topk) columns are returned
    :param dim: dimension along which scores are ranked
    :return: LongTensor of indices sorted by descending score
    """
    k = max(topk)
    _scores, indices = output.topk(k, dim=dim, largest=True, sorted=True)
    return indices
class _regNormalNet(nn.Module):
    """Base surface-normal regression network: selects the trunk CNN
    by (net_arch, method) from net_arch2Trunk."""
    def __init__(self, method, net_arch='vgg16', init_weights=True):
        super(_regNormalNet, self).__init__()
        _Trunk = net_arch2Trunk[net_arch][method]
        self.trunk = _Trunk(init_weights=init_weights)
    # NOTE(review): 'forword' is a typo for 'forward'; left unchanged
    # because subclasses define their own forward() and callers may
    # reference this name.
    def forword(self, x, label):
        raise NotImplementedError
#---------------------------------------------------------------------[regQuat]
class reg_Sflat_Net(_regNormalNet):
    """Surface-normal regression with flat (direct) normalization:
    the trunk output is L2-normalized to unit vectors and trained
    with a cosine-proximity loss."""
    def __init__(self, net_arch='vgg16', init_weights=True):
        _regNormalNet.__init__(self, 'Sflat', net_arch=net_arch, init_weights=init_weights)
        # loss module
        self.loss_handler = Cos_Proximity_Loss_Handler()
        self.targets = ['norm']
    def forward(self, x):
        """Predict per-pixel unit normals; returns edict with 'norm'
        in B,H,W,C order (double precision)."""
        x = self.trunk(x) # Forward Conv and Fc6,Fc7
        #
        batchsize = x.size(0) # x of shape (40, 3, 240, 320)
        #-- Normalize coordinate to a unit
        x_norm = norm2unit(x, dim=1)
        Prob = edict(norm=x_norm.permute(0,2,3,1).double()) # transpose prediction from BxCxHxW to BxHxWxC order.
        return Prob
    def compute_loss(self, Prob, GT):
        """Cosine-proximity loss and per-target angular errors."""
        Loss, Errs = self.loss_handler.compute_loss(self.targets, Prob, GT)
        _metric_ = edict(norm=Errs['norm'])
        return Loss, _metric_
    def compute_pred(self, Prob, encode_bit=8):
        """Encode predicted normals from [-1,1] into uint8/uint16 images."""
        x_norm = Prob['norm']
        # Get cpu data.
        norm = x_norm.data.cpu().numpy().copy() # B,H,W,C
        assert encode_bit in [8,16]
        if encode_bit==8:
            normImgs = ((norm+1)*(2**7)).astype(np.uint8) # map [-1,1] to [0,256)
        else:
            normImgs = ((norm+1)*(2**15)).astype(np.uint16) # map [-1,1] to [0,65535)
        Pred = edict(norm=normImgs)
        return Pred
#---------------------------------------------------------------------[regQuat]
class reg_Sexp_Net(_regNormalNet): # Spherical exponential Problem + sign classification
    """Surface-normal regression via spherical exponentiation: a softmax
    head predicts squared magnitudes (so sqrt gives absolute normals)
    and a separate classification head predicts the sign combination
    of the x and y components (z is fixed negative)."""
    def __init__(self, net_arch='vgg16', init_weights=True):
        _regNormalNet.__init__(self, 'Sexp', net_arch=net_arch, init_weights=init_weights)
        self.reg_n_D = 3
        # Note: for a surface normal (x,z,y) (Watch out the order)
        # z should always satisfy z<=0 (Surface normal should from visible surfaces)
        # Thus only x,y need sign prediction.
        dim_need_sign = 2
        _signs = list( product(*( [(-1,1)]*dim_need_sign )) ) # [(-1, -1), (-1, 1), (1, -1), (1, 1)], with len=4
        self.signs = [(x[0],-1,x[1]) for x in _signs] # y-z-x order: [(-1, -1, -1), (-1, -1, 1), (1, -1, -1), (1, -1, 1)], with len=4; z always -1
        self.signs2label = odict(zip(self.signs, range(len(self.signs))))
        self.label2signs = Variable( torch.DoubleTensor(self.signs) ).cuda() # make it as a Variable
        self.softmax = nn.Softmax(dim=1).cuda()
        # loss module
        self.loss_handler_abs_norm = Cos_Proximity_Loss_Handler()
        self.loss_handler_sgc_norm = Cross_Entropy_Loss_Handler()
        self.targets = ['sgc_norm','abs_norm']
        self.gt_targets = ['norm']
        # cos/sin of 45 degrees, used to undo the rot45 trick in compute_pred.
        self.cost, self.sint = torch.tensor(np.cos(np.pi/4)).double().cuda(), torch.tensor(np.sin(np.pi/4)).double().cuda()
    def forward(self, x):
        """Predict absolute normals (sqrt of softmax) and sign-class
        scores; returns edict(abs_norm=B,H,W,3, sgc_norm=B,H,W,4)."""
        x_abs, x_sgc = self.trunk(x) # Forward Conv and Fc6,Fc7
        #
        batchsize = x_abs.size(0)
        #-- Exp and Normalize coordinate to a unit
        x_sqr_norm = self.softmax(x_abs) #, nr_cate=self.nr_cate)
        # sign category head (totally 4 category)
        x_sgc_norm = x_sgc
        Prob = edict(abs_norm=torch.sqrt(x_sqr_norm).permute(0,2,3,1).double(), # B,H,W,3
                     sgc_norm=x_sgc_norm.permute(0,2,3,1) ) # B,H,W,4
        return Prob
    def compute_loss(self, Prob, GT):
        """Combine cosine loss on absolute normals with cross-entropy on
        the sign class; also reports the final (signed) normal error."""
        B,H,W,_3_ = GT.norm.size()
        assert _3_==3, "Wrong dim: %s,%s,%s,%s" % (B,H,W,_3_)
        # First get sign label from GT
        #== Formulate squared value of quaternion
        GT_abs_norm = torch.abs(GT.norm) # B,H,W,3
        #== Formulate signs label of quaternion
        GT_sign_norm = torch.sign(GT.norm) # B,H,W,3
        #-------------------------------------
        # hard coded: sign to label
        #-------------------------------------
        #   y   x      label
        # [-1  -1] -->   0
        # [-1   1] -->   1
        # [ 1  -1] -->   2
        # [ 1   1] -->   3
        # GT_sign_norm (B,H,W,3)  in y-z-x order
        GT_sign_norm[GT_sign_norm==0] = -1   # make sign of '0' as -1  (use -1 instead of 1 just because z<=0)
        y_sign, x_sign = GT_sign_norm[:,:,:,0], GT_sign_norm[:,:,:,2]
        # In-place remap: y in {-1,1} -> {0,2}, x in {-1,1} -> {0,1};
        # their sum is the class label in [0,3].
        y_sign += 1 # [y_sign==-1]
        x_sign[x_sign==-1] = 0
        GT_sgc_norm = (y_sign+x_sign).long()  # data with shape with (B,H,W)  index of [0,1,2,3]
        # here just because compute_loss need a same key from Prob and GT,
        # so we just give a fake name to GT.sqr_quat as '_GT.logsqr_norm'.
        _GT = edict(abs_norm=GT_abs_norm, sgc_norm=GT_sgc_norm, mask=GT.mask)  # abs_norm: (B,H,W,3)  sgc_norm: (B,H,W)
        Loss_abs_norm, abs_Errs = self.loss_handler_abs_norm.compute_loss(['abs_norm'], Prob, _GT)
        Loss_sgc_norm = self.loss_handler_sgc_norm.compute_loss(['sgc_norm'], Prob, _GT)
        # ----------------------------------------
        # Compute the metric.
        sign_ind = cls_pred(Prob['sgc_norm'], topk=(1,), dim=3).data.squeeze(dim=3)  # B,H,W
        pr_sign_norm = self.label2signs[sign_ind] # magic here: Indexing label2signs (4x3) by sign_ind (B,H,W) becomes (B,H,W,3) (10, 240, 320, 3)
        pr_abs_norm = Prob['abs_norm']
        _Prob = edict(norm=pr_abs_norm * pr_sign_norm)  # current predicted final norm (applied sign prediction)
        _Loss_norm, out_Errs = self.loss_handler_abs_norm.compute_loss(['norm'], _Prob, GT)  # just borrow loss_handler_abs_norm, nothing more.
        # Compute acc of classification: sign_ind vs GT_sgc_norm
        mask = GT['mask']
        acc = eval_cls(sign_ind[mask], GT_sgc_norm[mask])
        _metric_ = edict(abs_norm     = abs_Errs['abs_norm'],
                         norm         = out_Errs['norm']    ,
                         sgc_norm_acc = acc                 ,)
        # To add loss weights here.
        Loss = edict( abs_norm=Loss_abs_norm['abs_norm']*10, # / 5.
                      sgc_norm=Loss_sgc_norm['sgc_norm'], )
        return Loss, _metric_  # .update(abs_Errs)
    def compute_pred(self, Prob, encode_bit=8):
        """Combine absolute normals with the predicted signs, undo the
        rot45 trick, and encode the result as uint8/uint16 images."""
        x_abs_norm = Prob['abs_norm']  # B,H,W,3
        x_sgc_norm = Prob['sgc_norm']  # B,H,W,4
        batchsize = x_abs_norm.size(0)
        #
        sign_ind = cls_pred(x_sgc_norm, topk=(1,), dim=3).data.squeeze(dim=3)  # .view(-1,)  # B,H,W
        x_sign_norm = self.label2signs[sign_ind] # magic here: Indexing label2signs (4x3) by sign_ind (B,H,W) becomes (B,H,W,3)
        #
        x_norm = x_abs_norm * x_sign_norm  # B,H,W,3
        # --------------Recover rot45 trick --------------
        # Note: since we applied rot45 trick, here we recover it back
        _x_norm = x_norm.detach().clone()  # return a copy of x_norm without grad
        _y,_z,_x =  _x_norm[:,:,:,0],_x_norm[:,:,:,1],_x_norm[:,:,:,2]
        y, z, x  =  x_norm[:,:,:,0],x_norm[:,:,:,1],x_norm[:,:,:,2]
        x[:] = self.cost*_x - self.sint*_y
        y[:] = self.sint*_x + self.cost*_y
        # ------------------------------------------------
        # Get cpu data.
        norm = x_norm.data.cpu().numpy().copy() # B,H,W,C
        assert encode_bit in [8,16]
        if encode_bit==8:
            normImgs = ((norm+1)*(2**7)).astype(np.uint8) # map [-1,1] to [0,256)
        else:
            normImgs = ((norm+1)*(2**15)).astype(np.uint16) # map [-1,1] to [0,65535)
        Pred = edict(norm=normImgs)
        return Pred
| 40.941441 | 154 | 0.577071 | 7,707 | 0.847948 | 0 | 0 | 0 | 0 | 0 | 0 | 2,833 | 0.311695 |
7e4370188b6c792aa03cbe5832ef7f5025340731 | 2,317 | py | Python | oscar_support/forms/widgets.py | snowball-one/django-oscar-support | 57d82200f0905e17df683652327e9102b7b34129 | [
"BSD-3-Clause"
] | 14 | 2015-01-10T05:06:33.000Z | 2021-02-08T03:37:32.000Z | oscar_support/forms/widgets.py | snowball-one/django-oscar-support | 57d82200f0905e17df683652327e9102b7b34129 | [
"BSD-3-Clause"
] | 2 | 2017-08-25T20:14:41.000Z | 2019-02-25T22:08:09.000Z | oscar_support/forms/widgets.py | snowball-one/django-oscar-support | 57d82200f0905e17df683652327e9102b7b34129 | [
"BSD-3-Clause"
] | 8 | 2015-07-29T21:39:06.000Z | 2018-12-06T04:14:56.000Z | from django.forms.util import flatatt
from django.template import loader, Context
from django.utils.encoding import force_unicode
from django.utils.html import conditional_escape
from django.template.loader import render_to_string
from django.forms.widgets import Widget, RadioInput, RadioFieldRenderer
class AutoCompleteWiget(Widget):
    """Widget rendering an autocomplete text input.

    Suggestions are fetched from *url*; *user_field* optionally names
    the user attribute to complete against.
    """

    def __init__(self, url, user_field=None, *args, **kwargs):
        self.url = url
        self.user_field = user_field
        super(AutoCompleteWiget, self).__init__(*args, **kwargs)

    def render(self, name, value, attrs=None):
        # Render an empty input when no value is bound.
        rendered_value = u'' if value is None else value
        template = loader.get_template(
            'oscar_support/widgets/autocomplete_widget.html'
        )
        context = Context({
            'name': name,
            'url': self.url,
            'user_field': self.user_field,
            'value': rendered_value,
        })
        return template.render(context)


# Correctly-spelled alias for the (misspelled) class name above.
AutoCompleteWidget = AutoCompleteWiget
class CustomRadioInput(RadioInput):
    """RadioInput that renders through a project template instead of
    building the HTML string inline."""
    template_name = 'oscar_support/partials/custom_radio_select.html'
    def render(self, name=None, value=None, attrs=None, choices=()):
        # Fall back to the values captured at construction time.
        name = name or self.name
        value = value or self.value
        attrs = attrs or self.attrs
        # NOTE(review): this appends the index to self.attrs['id'] every
        # time render() is called — repeated renders keep extending the id.
        if 'id' in self.attrs:
            self.attrs['id'] = '%s_%s' % (self.attrs['id'], self.index)
        if 'id' in self.attrs:
            label_for = ' for="%s"' % self.attrs['id']
        else:
            label_for = ''
        choice_label = conditional_escape(force_unicode(self.choice_label))
        return render_to_string(self.template_name, Context({
            'attrs': flatatt(self.attrs),
            'checked': self.is_checked(),
            'name': self.name,
            'value': self.choice_value,
            'label_for': label_for,
            'choice_label': choice_label,
        }))
class CustomRadioFieldRenderer(RadioFieldRenderer):
    """RadioFieldRenderer that yields CustomRadioInput widgets so each
    choice is rendered through the custom template."""
    def __iter__(self):
        for i, choice in enumerate(self.choices):
            yield CustomRadioInput(self.name, self.value, self.attrs.copy(),
                                   choice, i)
    def __getitem__(self, idx):
        choice = self.choices[idx] # Let the IndexError propagate
        return CustomRadioInput(self.name, self.value, self.attrs.copy(),
                                choice, idx)
| 33.1 | 76 | 0.617609 | 1,965 | 0.848079 | 192 | 0.082866 | 0 | 0 | 0 | 0 | 254 | 0.109625 |
7e437dbe2c2eaddadfb12c0e71bcb7d0e968c5b2 | 136 | py | Python | projectname/tests/__init__.py | Casokaks/light-python-template | 1298b0283c6a7b3f2cfffabd278848f4fb8e21fa | [
"MIT"
] | null | null | null | projectname/tests/__init__.py | Casokaks/light-python-template | 1298b0283c6a7b3f2cfffabd278848f4fb8e21fa | [
"MIT"
] | null | null | null | projectname/tests/__init__.py | Casokaks/light-python-template | 1298b0283c6a7b3f2cfffabd278848f4fb8e21fa | [
"MIT"
] | null | null | null | """
Test init module
==================================
Author: Casokaks (https://github.com/Casokaks/)
Created on: Aug 15th 2021
"""
| 15.111111 | 47 | 0.514706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.992647 |
7e441d62eef151acf29604bef9891de9f98dfcc8 | 26,980 | py | Python | presenterserver/facial_recognition/src/facial_recognition_server.py | niuiic/face_recognition | 03d4aabb0ee72b3dee3d8798d344262460e71208 | [
"Apache-2.0"
] | 1 | 2021-12-16T02:03:40.000Z | 2021-12-16T02:03:40.000Z | presenterserver/facial_recognition/src/facial_recognition_server.py | niuiic/face_recognition | 03d4aabb0ee72b3dee3d8798d344262460e71208 | [
"Apache-2.0"
] | null | null | null | presenterserver/facial_recognition/src/facial_recognition_server.py | niuiic/face_recognition | 03d4aabb0ee72b3dee3d8798d344262460e71208 | [
"Apache-2.0"
] | null | null | null | # =======================================================================
#
# Copyright (C) 2018, Hisilicon Technologies Co., Ltd. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1 Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2 Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3 Neither the names of the copyright holders nor the names of the
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =======================================================================
#
"""presenter facial recognition server module"""
import os
import json
import threading
import random
import logging
from logging.config import fileConfig
import numpy as np
from json.decoder import JSONDecodeError
from google.protobuf.message import DecodeError
import common.presenter_message_pb2 as presenter_message_pb2
from common.channel_manager import ChannelManager
from common.presenter_socket_server import PresenterSocketServer
from common.app_manager import AppManager
import facial_recognition.src.facial_recognition_message_pb2 as pb2
from facial_recognition.src.config_parser import ConfigParser
from facial_recognition.src.facial_recognition_handler import FacialRecognitionHandler
# Face registration timeout in seconds: how long register_face() waits on the
# per-face Event before declaring the registration failed.
FACE_REGISTER_TIME_OUT = 10
# Presenter Server Type (must match the `type` field sent in RegisterApp)
SERVER_TYPE = "facial_recognition"
# max app name length
APP_ID_MAX_LENGTH = 20
# max support 2 app connect
MAX_APP_NUM = 2
# length of face feature vector (vectors of any other length are rejected)
FEATURE_VECTOR_LENGTH = 1024
# Face Registration Status codes stored in register_dict[face_id]["status"]
FACE_REGISTER_STATUS_WAITING = 1
FACE_REGISTER_STATUS_SUCCEED = 2
FACE_REGISTER_STATUS_FAILED = 3
class FacialRecognitionServer(PresenterSocketServer):
    '''A server for face recognition'''
    def __init__(self, config):
        """
        Description: class init func
        Input:
            config: config information (presenter_server_ip/port,
                    storage_dir, max_face_num, face_match_threshold)
        Returns: NA
        """
        server_address = (config.presenter_server_ip,
                          int(config.presenter_server_port))
        super(FacialRecognitionServer, self).__init__(server_address)
        self.storage_dir = config.storage_dir
        self.max_face_num = int(config.max_face_num)
        self.face_match_threshold = float(config.face_match_threshold)
        # face name -> {"status", "message", "event"} for in-flight registrations
        self.register_dict = {}
        self.app_manager = AppManager()
        self.channel_manager = ChannelManager()
        # JSON file persisting registered face features between runs
        self.face_register_file = os.path.join(self.storage_dir,
                                               "registered_faces.json")
        self._init_face_database()
def _init_face_database(self):
"""
Description: Init face recognition database,
read information from face_register_file
Input: NA
Returns: NA
"""
if not os.path.isfile(self.face_register_file):
with open(self.face_register_file, "w", encoding="utf-8") as f:
f.write("{}")
with open(self.face_register_file, "r") as f:
self.face_lock = threading.Lock()
self.registered_faces = json.load(f)
self._filter_registration_data()
def _filter_registration_data(self):
face_dict = self.registered_faces.copy()
for i in face_dict:
image_path = os.path.join(self.storage_dir, i + ".jpg")
if not os.path.isfile(image_path):
del self.registered_faces[i]
def get_all_face(self):
"""
Description: get registered face list.
Input: NA
Returns: NA
"""
with self.face_lock:
return [i for i in self.registered_faces]
def save_face_image(self, name, image):
"""
Description: save face image.
Input:
name face name
image: face image
Returns: True or False
"""
image_file = os.path.join(self.storage_dir, name + ".jpg")
try:
#image = image.decode("utf-8")
with open(image_file, "wb") as f:
f.write(image)
return True
except (OSError, TypeError) as exp:
logging.error(exp)
return False
    def get_app_socket(self, app_id):
        """
        Description: get a socket which is bound to the app.
        Input:
            app_id: id of the app
        Returns: socket
        """
        # thin delegation to AppManager
        return self.app_manager.get_socket_by_app_id(app_id)
    def list_registered_apps(self):
        """
        Description: get registered apps list.
        Input: NA
        Returns: app list
        """
        # thin delegation to AppManager
        return self.app_manager.list_app()
    def delete_faces(self, name_list):
        """
        Description: delete registered faces in name_list
        Input:
            name_list: a name list
        Returns: True or False (stops at the first failed deletion; that
                 entry is restored in memory before returning False)
        """
        with self.face_lock:
            for i in name_list:
                if self.registered_faces.get(i):
                    # keep a backup so the in-memory entry can be rolled
                    # back if persisting the change fails below
                    backup = self.registered_faces[i]
                    del self.registered_faces[i]
                    try:
                        with open(self.face_register_file, "w") as f:
                            json.dump(self.registered_faces, f)
                        image_file = os.path.join(
                            self.storage_dir, i + ".jpg")
                        os.remove(image_file)
                    except (OSError, JSONDecodeError) as exp:
                        logging.error(exp)
                        # roll back the in-memory deletion
                        self.registered_faces[i] = backup
                        return False
        return True
    def _clean_connect(self, sock_fileno, epoll, conns, msgs):
        """
        Description: close socket, and clean local variables
        Input:
            sock_fileno: a socket fileno, return value of socket.fileno()
            epoll: a set of select.epoll.
            conns: all socket connections registered in epoll
            msgs: msg read from a socket
        """
        logging.info("clean fd:%s, conns:%s", sock_fileno, conns)
        # order matters: unregister from the app manager and epoll before
        # closing and forgetting the socket object itself
        self.app_manager.unregister_app_by_fd(sock_fileno)
        epoll.unregister(sock_fileno)
        conns[sock_fileno].close()
        del conns[sock_fileno]
        del msgs[sock_fileno]
def _process_msg(self, conn, msg_name, msg_data):
"""
Total entrance to process protobuf msg
Input:
conn: a socket connection
msg_name: name of a msg.
msg_data: msg body, serialized by protobuf
Returns:
False:somme error occured
True:succeed
"""
# process open channel request
if msg_name == pb2._REGISTERAPP.full_name:
ret = self._process_register_app(conn, msg_data)
# process image request, receive an image data from presenter agent
elif msg_name == pb2._FACERESULT.full_name:
ret = self._process_face_result(msg_data)
elif msg_name == pb2._FRAMEINFO.full_name:
ret = self._process_frame_info(conn, msg_data)
elif msg_name == presenter_message_pb2._OPENCHANNELREQUEST.full_name:
ret = self._process_open_channel(conn, msg_data)
# process heartbeat request, it used to keepalive a channel path
elif msg_name == presenter_message_pb2._HEARTBEATMESSAGE.full_name:
ret = self._process_heartbeat(conn)
else:
logging.error("Not recognized msg type %s", msg_name)
ret = False
return ret
def _process_heartbeat(self, conn):
'''
set heartbeat
Input:
conn: a socket connection
Returns:
True: set heartbeat ok.
'''
sock_fileno = conn.fileno()
if self.app_manager.get_app_id_by_socket(sock_fileno):
self.app_manager.set_heartbeat(sock_fileno)
handler = self.channel_manager.get_channel_handler_by_fd(sock_fileno)
if handler is not None:
handler.set_heartbeat()
return True
def _parse_protobuf(self, protobuf, msg_data):
"""
Description: parse protobuf
Input:
protobuf: a struct defined by protobuf
msg_data: msg body, serialized by protobuf
Returns: True or False
"""
try:
protobuf.ParseFromString(msg_data)
return True
except DecodeError as exp:
logging.error(exp)
return False
    def _process_register_app(self, conn, msg_data):
        """
        Description: process register_app message
        Input:
            conn: a socket connection
            msg_data: msg body, serialized by protobuf
        Returns: True if the app was registered, False on any rejection
        """
        request = pb2.RegisterApp()
        response = pb2.CommonResponse()
        msg_name = pb2._COMMONRESPONSE.full_name
        if not self._parse_protobuf(request, msg_data):
            response.ret = pb2.kErrorOther
            response.message = "ParseFromString exception"
            self.send_message(conn, response, msg_name)
            return False
        app_id = request.id
        app_type = request.type
        # check app id if exist
        if self.app_manager.is_app_exist(app_id):
            logging.error("App %s is already exist.", app_id)
            response.ret = pb2.kErrorAppRegisterExist
            response.message = "App {} is already exist.".format(app_id)
            self.send_message(conn, response, msg_name)
        elif self.app_manager.get_app_num() >= MAX_APP_NUM:
            logging.error("App number reach the upper limit")
            response.ret = pb2.kErrorAppRegisterLimit
            response.message = "App number reach the upper limit"
            self.send_message(conn, response, msg_name)
        elif app_type != SERVER_TYPE:
            logging.error("App type %s error", app_type)
            response.ret = pb2.kErrorAppRegisterType
            response.message = "App type {} error".format(app_type)
            self.send_message(conn, response, msg_name)
        elif len(app_id) > APP_ID_MAX_LENGTH:
            logging.error("App id %s is too long", app_id)
            response.ret = pb2.kErrorOther
            response.message = "App id: {} is too long".format(app_id)
            self.send_message(conn, response, msg_name)
        else:
            # all checks passed: bind the app id to this connection
            self.app_manager.register_app(app_id, conn)
            response.ret = pb2.kErrorNone
            response.message = "Register app {} succeed".format(app_id)
            self.send_message(conn, response, msg_name)
            return True
        # every rejection branch above falls through to here
        return False
def _process_face_result(self, msg_data):
"""
Description: process face_result message
Input:
msg_data: msg body, serialized by protobuf
Returns: True or False
"""
face_result = pb2.FaceResult()
if not self._parse_protobuf(face_result, msg_data):
return False
face_id = face_result.id
if not self.register_dict.get(face_id):
logging.warning("face id %s is already deleted", face_id)
return True
ret = face_result.response.ret
if ret != pb2.kErrorNone:
err_msg = face_result.response.message
logging.error("get face feature error message: %s", err_msg)
status = FACE_REGISTER_STATUS_FAILED
message = "Get face feature failed"
self._update_register_dict(face_id, status, message)
return True
face_num = len(face_result.feature)
if face_num == 0:
status = FACE_REGISTER_STATUS_FAILED
message = "No face recognized"
self._update_register_dict(face_id, status, message)
elif face_num > 1:
status = FACE_REGISTER_STATUS_FAILED
message = "{} faces recognized".format(face_num)
self._update_register_dict(face_id, status, message)
else:
box = face_result.feature[0].box
face_coordinate = [box.lt_x, box.lt_y, box.rb_x, box.rb_x]
feature_vector = [i for i in face_result.feature[0].vector]
if len(feature_vector) != FEATURE_VECTOR_LENGTH:
logging.error("feature_vector length not equal 1024")
status = FACE_REGISTER_STATUS_FAILED
message = "Face feature vector length invalid"
self._update_register_dict(face_id, status, message)
return True
return self._save_face_feature(face_id, face_coordinate,
feature_vector)
return True
def _update_register_dict(self, face_id, status, message):
"""
Description: update register_dict
Input:
face_id: id of face
status: status of face register
message: message of status of face register
Returns: True or False
"""
if self.register_dict.get(face_id):
self.register_dict[face_id]["status"] = status
self.register_dict[face_id]["message"] = message
self.register_dict[face_id]["event"].set()
    def _save_face_feature(self, face_id, face_coordinate, feature_vector):
        """
        Description: save face_feature to the in-memory database and
                     persist it to the JSON file.
        Input:
            face_id: id of face
            face_coordinate: face coordinates
            feature_vector: face feature vector
        Returns: True or False (on False the in-memory entry is rolled back)
        """
        with self.face_lock:
            self.registered_faces[face_id] = {
                "coordinate": face_coordinate,
                "feature": feature_vector
            }
            try:
                with open(self.face_register_file, "w") as f:
                    json.dump(self.registered_faces, f)
                status = FACE_REGISTER_STATUS_SUCCEED
                message = "Successful registration"
                self._update_register_dict(face_id, status, message)
                return True
            except (OSError, JSONDecodeError) as exp:
                logging.error(exp)
                # persisting failed: undo the in-memory insert
                del self.registered_faces[face_id]
                status = FACE_REGISTER_STATUS_FAILED
                message = "save face feature to json file failed"
                self._update_register_dict(face_id, status, message)
                return False
    def _process_open_channel(self, conn, msg_data):
        """
        Description: process open channel message
        Input:
            conn: a socket connection
            msg_data: msg body, serialized by protobuf
        Returns: result of _response_open_channel (sends the response and
                 reports success/failure)
        """
        request = presenter_message_pb2.OpenChannelRequest()
        response = presenter_message_pb2.OpenChannelResponse()
        if not self._parse_protobuf(request, msg_data):
            channel_name = "unknown channel"
            err_code = presenter_message_pb2.kOpenChannelErrorOther
            return self._response_open_channel(conn, channel_name,
                                               response, err_code)
        channel_name = request.channel_name
        # check channel name if exist
        if not self.channel_manager.is_channel_exist(channel_name):
            logging.error("channel name %s is not exist.", channel_name)
            err_code = presenter_message_pb2.kOpenChannelErrorNoSuchChannel
            return self._response_open_channel(conn, channel_name,
                                               response, err_code)
        #ret = self.channel_manager.register_one_channel(channel_name)
        # if ret != ChannelManager.err_code_ok:
        #     logging.error("Create the channel %s failed!, and ret is %d", channel_name, ret)
        #     err_code = pb2.kOpenChannelErrorOther
        #     self._response_open_channel(conn, channel_name, response, err_code)
        # check channel path if busy
        if self.channel_manager.is_channel_busy(channel_name):
            logging.error("channel path %s is busy.", channel_name)
            err = presenter_message_pb2.kOpenChannelErrorChannelAlreadyOpened
            return self._response_open_channel(conn, channel_name,
                                               response, err)
        # only video content is supported by this server
        content_type = presenter_message_pb2.kChannelContentTypeVideo
        if request.content_type == content_type:
            media_type = "video"
        else:
            logging.error("media type %s is not recognized.",
                          request.content_type)
            err_code = presenter_message_pb2.kOpenChannelErrorOther
            return self._response_open_channel(conn, channel_name,
                                               response, err_code)
        handler = FacialRecognitionHandler(channel_name, media_type)
        sock = conn.fileno()
        self.channel_manager.create_channel_resource(channel_name, sock,
                                                     media_type, handler)
        err_code = presenter_message_pb2.kOpenChannelErrorNone
        return self._response_open_channel(conn, channel_name,
                                           response, err_code)
    def _process_frame_info(self, conn, msg_data):
        """
        Description: process frame info message: recognize the faces in
                     the frame and hand the frame to the channel handler.
        Input:
            conn: a socket connection
            msg_data: msg body, serialized by protobuf
        Returns: True or False
        """
        request = pb2.FrameInfo()
        response = pb2.CommonResponse()
        msg_name = pb2._COMMONRESPONSE.full_name
        if not self._parse_protobuf(request, msg_data):
            return False
        sock_fileno = conn.fileno()
        handler = self.channel_manager.get_channel_handler_by_fd(sock_fileno)
        if handler is None:
            # frame received on a socket with no open channel
            logging.error("get channel handler failed")
            response.ret = pb2.kErrorOther
            response.message = "channel error."
            self.send_message(conn, response, msg_name)
            return False
        face_list = self._recognize_face(request.feature)
        handler.save_frame(request.image, face_list)
        response.ret = pb2.kErrorNone
        response.message = "process frame info suceed."
        self.send_message(conn, response, msg_name)
        return True
def _recognize_face(self, face_feature):
"""
Description: recognize which face it is.
Input:
face_feature: face feature
Returns: face list
"""
face_list = []
for i in face_feature:
face_info = {}
box = i.box
coordinate = [box.lt_x, box.lt_y, box.rb_x, box.rb_y]
feature_vector = i.vector
if len(feature_vector) != FEATURE_VECTOR_LENGTH:
logging.error("feature_vector length not equal 1024")
continue
(name, score) = self._compute_face_feature(feature_vector)
face_info["coordinate"] = coordinate
face_info["name"] = name
face_info["confidence"] = score
face_list.append(face_info)
return face_list
def _compute_face_feature(self, feture_vector):
"""
Description: compute score of the feture_vector
Input:
feture_vector: face feature vector
Returns: face name and score
"""
highest_score_face = "Unknown"
highest_score = 0
with self.face_lock:
for i in self.registered_faces:
feature = self.registered_faces[i]["feature"]
score = self._compute_similar_degree(feature, feture_vector)
if score < self.face_match_threshold:
continue
if score > highest_score:
highest_score = score
highest_score_face = i
return (highest_score_face, highest_score)
def _compute_similar_degree(self, feture_vector1, feture_vector2):
"""
Description: compute cosine similarity of two vectors
Input:
feture_vector1: face feature vector
feture_vector2: face feature vector
Returns: score
"""
vector1 = np.array(feture_vector1)
vector2 = np.array(feture_vector2)
square_diff = ((np.linalg.norm(vector1)) * (np.linalg.norm(vector2)))
score = np.dot(vector1, vector2) / square_diff
return score
    def stop_thread(self):
        """
        Description: clean thread when process exit.
        Input: NA
        Returns: NA
        """
        # NOTE(review): ChannelManager([]) presumably returns the shared
        # manager instance -- confirm it is a singleton.
        channel_manager = ChannelManager([])
        channel_manager.close_all_thread()
        self.set_exit_switch()
        self.app_manager.set_thread_switch()
class FacialRecognitionManager():
    '''Singleton manager of Face Recognition, a class providing APIs'''
    # singleton instance and the server it wraps (stored on the class)
    __instance = None
    server = None
    def __init__(self, server=None):
        '''init func; all state is set up in __new__, nothing to do here'''
    def __new__(cls, server=None):
        """ensure only a single instance created. """
        if cls.__instance is None:
            cls.__instance = object.__new__(cls)
            # the server passed on first construction wins
            cls.server = server
        return cls.__instance
def _choose_random_app(self):
"""
Description: choose a random app online.
Input: NA
Returns: a app name
"""
app_list = self.server.list_registered_apps()
if app_list:
index = random.randint(0, len(app_list) - 1)
return app_list[index]
return None
    def get_app_list(self):
        """
        Description: API for getting online app list
        Input: NA
        Returns: app list
        """
        # thin delegation to the server
        return self.server.list_registered_apps()
    def register_face(self, name, image):
        """
        Description: API for registering face
        Input:
            name: a face name
            image: a face picture (bytes)
        Returns: (ret, msg) -- ret is True on success, msg explains failure
        """
        # Input para check
        if not isinstance(name, str):
            return (False, "Name is not string")
        if not isinstance(image, bytes):
            return (False, "Image is not bytes")
        if self._get_face_number() >= self.server.max_face_num:
            return (False, "Face number limit")
        app_id = self._choose_random_app()
        if app_id is None:
            return (False, "No app is online")
        conn = self.server.get_app_socket(app_id)
        if conn is None:
            return (False, "Internal Error, app lost socket")
        # Prepare sending face register message to agent
        request = pb2.FaceInfo()
        request.id = name
        request.image = image
        # register a pending entry; the socket-server thread fills in
        # status/message and sets the event when the agent answers
        register_dict = self.server.register_dict
        register_dict[name] = {
            "status": FACE_REGISTER_STATUS_WAITING,
            "message": "",
            "event": threading.Event()
        }
        msg_name = pb2._FACEINFO.full_name
        self.server.send_message(conn, request, msg_name)
        # block until the agent replies or the timeout expires
        register_dict[name]["event"].wait(FACE_REGISTER_TIME_OUT)
        if register_dict[name]["status"] == FACE_REGISTER_STATUS_WAITING:
            logging.warning("Register face %s timeout", name)
            del register_dict[name]
            return (False, "10 sec Timeout")
        if register_dict[name]["status"] == FACE_REGISTER_STATUS_FAILED:
            err_msg = register_dict[name]["message"]
            logging.error("Register face %s failed, reason:%s",
                          name, register_dict[name]["message"])
            del register_dict[name]
            return (False, err_msg)
        # feature extraction succeeded: persist the original picture too
        ret = self.server.save_face_image(name, image)
        del register_dict[name]
        if ret:
            logging.info("Register face %s succeed", name)
            return (True, "Successful Registration")
        logging.error("Save face %s to database failed", name)
        return (False, "Save database error")
def unregister_face(self, name_list):
"""
Description: API for unregistering faces
Input:
name_list: a name list which will be deleted.
Returns: True or False
"""
if isinstance(name_list, list):
return self.server.delete_faces(name_list)
logging.error("unregister face fail")
return False
    def get_all_face_name(self):
        """
        Description: API for geting all registered face names
        Input: NA
        Returns: a name list
        """
        return self.server.get_all_face()
    def _get_face_number(self):
        """
        Description: geting total face number
        Input: NA
        Returns: total face number
        """
        return len(self.get_all_face_name())
def get_faces(self, name_list):
"""
Description: API for geting specified face info.
Input: a name list.
Returns: a list include face name and image.
"""
if not isinstance(name_list, list):
return []
face_list = []
for i in name_list:
face_info = {}
face_info["name"] = i
try:
image_file = os.path.join(self.server.storage_dir, i + ".jpg")
face_info["image"] = open(image_file, 'rb').read()
except OSError as exp:
logging.error(exp)
continue
face_list.append(face_info)
return face_list
def run():
    '''Face Recognition server startup function.
    Returns the FacialRecognitionServer instance, or None when the
    configuration does not verify.
    '''
    # read config file
    config = ConfigParser()
    # config log
    log_file_path = os.path.join(ConfigParser.root_path, "config/logging.conf")
    fileConfig(log_file_path)
    # NOTE(review): the return value is discarded -- this only forces the
    # 'facial_recognition' logger to be created.
    logging.getLogger('facial_recognition')
    if not config.config_verify():
        return None
    server = FacialRecognitionServer(config)
    # construct the singleton manager bound to this server
    FacialRecognitionManager(server)
    return server
| 36.80764 | 97 | 0.607932 | 23,702 | 0.877982 | 0 | 0 | 0 | 0 | 0 | 0 | 8,968 | 0.332197 |
7e44cb761dc01281c2078c72315a08116eff6a63 | 3,650 | py | Python | e_vae_proj/qualitative/gen_jobs.py | kuangdai/disentangling-vae | 9a5f9da44a82a2c643b7289c4945320621b86247 | [
"MIT"
] | 1 | 2021-06-30T08:58:49.000Z | 2021-06-30T08:58:49.000Z | e_vae_proj/qualitative/gen_jobs.py | kuangdai/disentangling-vae | 9a5f9da44a82a2c643b7289c4945320621b86247 | [
"MIT"
] | null | null | null | e_vae_proj/qualitative/gen_jobs.py | kuangdai/disentangling-vae | 9a5f9da44a82a2c643b7289c4945320621b86247 | [
"MIT"
] | null | null | null | import numpy as np
from pathlib import Path
import sys, os
if __name__ == "__main__":
    """
    Jobs:
    1) VAE (VAE loss) for data=[dsprites, celeba, chairs]
    2) VAE (beta-TC loss with alpha=beta=gamma=1) for data=[dsprites, celeba, chairs]
    3) beta-TCVAE for alpha=gamma=[0.5, 1, 2], for beta=[3,6], for data=[dsprites, celeba, chairs]
    """
    # absolute path
    my_path = Path(__file__).parent.resolve().expanduser()
    main_path = my_path.parent.parent.parent
    # hypars
    # NOTE(review): cons_list, n_stddevs and main_path are defined but not
    # used below -- presumably kept for parity with sibling scripts.
    cons_list = ["kl", "rec"]
    epochs_list = [120, 800, 1200]
    seed = 1234
    nlat = 64
    batchs = 64
    lr = 1e-5
    n_stddevs = 3
    datasets = ["dsprites", "celeba", "chairs"]
    alpha_gammas = [0.5, 1, 2]
    betas = [3, 6]
    # cherry-pick data samples as done in repo
    cherry_picked = ["92595 339150 656090",
                     "88413 176606 179144 32260 191281 143307 101535 70059 87889 131612",
                     "40919 5172 22330", ]
    # .sh filename
    fname = my_path / f'run_jobs_1.sh'
    # clear .sh file (NOTE(review): shell `rm` -- not portable to Windows)
    os.system(f'rm {fname}')
    # VAE: plain VAE plus beta-TC loss with alpha=beta=gamma=1, per dataset.
    # The commented-out f-strings are the training commands; only the
    # visualization commands are currently emitted.
    for data, epochs, cherries in zip(datasets, epochs_list, cherry_picked):
        VAE_cmd = (
            # f"python main.py qualitative/VAE_{data}_z{nlat} -s {seed} "
            # f"--checkpoint-every 50 -d {data} -e {epochs} -b {batchs} "
            # f"-z {nlat} -l VAE --lr {lr} "
            # f'--no-progress-bar -F {str(my_path / f"VAE_{data}_z{nlat}.out")} '
            # f"--record-loss-every=50 --pin-dataset-gpu \n"
            f"python main_viz.py qualitative/VAE_{data}_z{nlat} all -i {cherries} "
            f"-s {seed} -c 10 -r 10 -t 2 --is-show-loss --is-posterior \n"
        )
        alpha_gamma = 1
        beta = 1
        BTC_cmd = (
            # f"python main.py qualitative/btcvae_{data}_z{nlat}_A{alpha_gamma}_B{beta}_G{alpha_gamma} -s {seed} "
            # f"--checkpoint-every 50 -d {data} -e {epochs} -b {batchs} "
            # f"-z {nlat} -l btcvae --lr {lr} --btcvae-A {alpha_gamma} --btcvae-B {beta} --btcvae-G {alpha_gamma} "
            # f'--no-progress-bar -F {str(my_path / f"btcvae_{data}_z{nlat}_A{alpha_gamma}_B{beta}_G{alpha_gamma}.out")} '
            # f"--record-loss-every=50 --pin-dataset-gpu \n"
            f"python main_viz.py qualitative/btcvae_{data}_z{nlat}_A{alpha_gamma}_B{beta}_G{alpha_gamma} all -i {cherries} "
            f"-s {seed} -c 10 -r 10 -t 2 --is-show-loss --is-posterior \n"
        )
        with open(fname, 'a') as f:
            f.write(VAE_cmd + BTC_cmd)
    # beta-TCVAE: grid over alpha=gamma and beta, per dataset
    for data, epochs, cherries in zip(datasets, epochs_list, cherry_picked):
        for alpha_gamma in alpha_gammas:
            for beta in betas:
                BTC_cmd = (
                    # f"python main.py qualitative/btcvae_{data}_z{nlat}_A{alpha_gamma}_B{beta}_G{alpha_gamma} -s {seed} "
                    # f"--checkpoint-every 50 -d {data} -e {epochs} -b {batchs} "
                    # f"-z {nlat} -l btcvae --lr {lr} --btcvae-A {alpha_gamma} --btcvae-B {beta} --btcvae-G {alpha_gamma} "
                    # f'--no-progress-bar -F {str(my_path / f"btcvae_{data}_z{nlat}_A{alpha_gamma}_B{beta}_G{alpha_gamma}.out")} '
                    # f"--record-loss-every=50 --pin-dataset-gpu \n"
                    f"python main_viz.py qualitative/btcvae_{data}_z{nlat}_A{alpha_gamma}_B{beta}_G{alpha_gamma} all -i {cherries} "
                    f"-s {seed} -c 10 -r 10 -t 2 --is-show-loss --is-posterior \n"
                )
                with open(fname, 'a') as f:
                    f.write(BTC_cmd)
7e46b7c1bdfe713325687e0706d7633667a0c0ae | 5,818 | py | Python | iktomi/forms/widgets.py | boltnev/iktomi | bc92006c026f9b42e56f1af5ced2fe577673a486 | [
"MIT"
] | 14 | 2015-02-15T05:24:22.000Z | 2020-03-19T10:07:28.000Z | iktomi/forms/widgets.py | boltnev/iktomi | bc92006c026f9b42e56f1af5ced2fe577673a486 | [
"MIT"
] | 10 | 2015-04-04T10:10:41.000Z | 2016-06-01T13:17:58.000Z | iktomi/forms/widgets.py | boltnev/iktomi | bc92006c026f9b42e56f1af5ced2fe577673a486 | [
"MIT"
] | 5 | 2015-02-20T11:18:58.000Z | 2016-10-18T15:30:13.000Z | # -*- coding: utf-8 -*-
from ..utils import weakproxy
from . import convs
class Widget(object):
    """Base class for all form widgets: holds rendering configuration and
    delegates field-specific data to the bound field."""
    # obsolete parameters from previous versions; passing any of them
    # raises TypeError in __init__
    _obsolete = frozenset(['multiple'])
    #: Template to render widget
    template = None
    #: Value of HTML element's *class* attribute
    classname = ''
    #: describes how the widget is rendered.
    #: the following values are supported by default:
    #: 'default': label is rendered in usual place
    #: 'checkbox': label and widget are rendered close to each other
    #: 'full-width': for table-like templates, otherwise should be rendered as default
    #: 'hidden': label is not rendered
    render_type = 'default'
    #: True if widget renders hint itself.
    #: Otherwise parent field should render the hint
    renders_hint = False
    def __init__(self, field=None, **kwargs):
        """Bind the widget to a field (via weak proxy) and apply kwargs as
        instance attributes; kwargs are kept for later cloning in __call__."""
        if self._obsolete & set(kwargs):
            raise TypeError(
                'Obsolete parameters are used: {}'.format(
                    list(self._obsolete & set(kwargs))))
        self.field = weakproxy(field)
        self._init_kwargs = kwargs
        self.__dict__.update(kwargs)
    @property
    def multiple(self):
        """Whether the bound field accepts multiple values."""
        return self.field.multiple
    @property
    def input_name(self):
        """HTML input name, taken from the bound field."""
        return self.field.input_name
    @property
    def id(self):
        """HTML element id, taken from the bound field."""
        return self.field.id
    @property
    def env(self):
        """Environment (templates etc.) of the bound field."""
        return self.field.env
    def prepare_data(self):
        '''
        Method returning data passed to template.
        Subclasses can override it.
        '''
        value = self.get_raw_value()
        return dict(widget=self,
                    field=self.field,
                    value=value,
                    readonly=not self.field.writable)
    def get_raw_value(self):
        """Raw (unconverted) value of the bound field."""
        return self.field.raw_value
    def render(self):
        '''
        Renders widget to template; returns '' for unreadable fields.
        '''
        data = self.prepare_data()
        if self.field.readable:
            return self.env.template.render(self.template, **data)
        return ''
    def __call__(self, **kwargs):
        '''
        Creates current object's copy with extra constructor arguments passed.
        '''
        kwargs = dict(self._init_kwargs, **kwargs)
        kwargs.setdefault('field', self.field)
        return self.__class__(**kwargs)
class TextInput(Widget):
    """Single-line text input."""
    template = 'widgets/textinput'
    classname = 'textinput'
class Textarea(Widget):
    """Multi-line text input."""
    template = 'widgets/textarea'
class HiddenInput(Widget):
    """Hidden input: rendered without a label (render_type 'hidden')."""
    render_type = 'hidden'
    template = 'widgets/hiddeninput'
class PasswordInput(Widget):
    """Password input; shares the text input CSS class."""
    template = 'widgets/passwordinput'
    classname = 'textinput'
class Select(Widget):
    '''
    Takes options from :class:`EnumChoice<EnumChoice>` converter,
    looks up if converter allows null and passed this value as template
    :obj:`required` variable.
    '''
    template = 'widgets/select'
    classname = None
    #: HTML select element's select attribute value.
    size = None
    #: Label assigned to None value if field is not required
    null_label = '--------'
    def get_options(self, value):
        """Build the option dicts (value/title/selected) for the template.

        For a multiple-value field `value` is a list of selected choices;
        otherwise it is a single choice. A null option is prepended for
        non-required single-value fields unless the converter already
        provides an empty-string choice or null_label is None.
        """
        options = []
        # XXX ugly: unwrap ListOf to reach the underlying EnumChoice
        choice_conv = self.field.conv
        if isinstance(choice_conv, convs.ListOf):
            choice_conv = choice_conv.conv
        assert isinstance(choice_conv, convs.EnumChoice)
        has_null_value = False
        values = value if self.multiple else [value]
        for choice, label in choice_conv.options():
            has_null_value = has_null_value or choice == ''
            options.append(dict(value=choice,
                                title=label,
                                selected=(choice in values)))
        if not self.multiple and not has_null_value and \
                (value == '' or not self.field.conv.required) and \
                self.null_label is not None:
            options.insert(0, {'value': '',
                               'title': self.null_label,
                               'selected': value in (None, '')})
        return options
    def prepare_data(self):
        """Extend base template data with options and a 'required' flag
        (passed as the strings 'true'/'false' for the template)."""
        data = Widget.prepare_data(self)
        return dict(data,
                    options=self.get_options(data['value']),
                    required=('true' if self.field.conv.required else 'false'))
class CheckBoxSelect(Select):
    """Select rendered as a group of checkboxes."""
    classname = 'select-checkbox'
    template = 'widgets/select-checkbox'
class CheckBox(Widget):
    """Single checkbox; label rendered next to the control."""
    render_type = 'checkbox'
    template = 'widgets/checkbox'
class CharDisplay(Widget):
    """Read-only widget displaying the value as text in a <span>."""
    template = 'widgets/span'
    classname = 'chardisplay'
    #: If is True, value is escaped while rendering.
    #: Passed to template as :obj:`should_escape` variable.
    escape = True
    #: Function converting the value to string (identity by default).
    getter = staticmethod(lambda v: v)
    def prepare_data(self):
        """Apply `getter` to the value and pass the escape flag on."""
        data = Widget.prepare_data(self)
        return dict(data,
                    value=self.getter(data['value']),
                    should_escape=self.escape)
class AggregateWidget(Widget):
    """Base for widgets composed of child fields: no scalar raw value."""
    def get_raw_value(self):
        return None
class FieldListWidget(AggregateWidget):
    """Widget for a repeatable list of sub-fields."""
    allow_create = True
    allow_delete = True
    template = 'widgets/fieldlist'
    def render_template_field(self):
        """Render a template row with a placeholder index, used by JS to
        clone new rows on the client side."""
        # used in iktomi.cms: templates/widgets/fieldlist.html
        field = self.field.field(name='%'+self.field.input_name+'-index%')
        # XXX looks like a HACK
        field.set_raw_value(self.field.form.raw_data,
                            field.from_python(field.get_initial()))
        return field.widget.render()
class FieldSetWidget(AggregateWidget):
    """Widget for a fixed group of named sub-fields."""
    template = 'widgets/fieldset'
class FieldBlockWidget(FieldSetWidget):
    """Field set rendered full-width in table-like templates."""
    render_type = 'full-width'
class FileInput(Widget):
    """File upload input."""
    template = 'widgets/file'
| 26.688073 | 86 | 0.611894 | 5,700 | 0.979718 | 0 | 0 | 254 | 0.043658 | 0 | 0 | 1,733 | 0.297869 |
7e4795cb214be38d91b14447a6cab578ba864108 | 1,198 | py | Python | Codes/python/py1/exs (4)/ex10.py | Gaazedo/portfolio | 4b09b6fffc6947375a20fee1c5523a12cbcbe970 | [
"MIT"
] | null | null | null | Codes/python/py1/exs (4)/ex10.py | Gaazedo/portfolio | 4b09b6fffc6947375a20fee1c5523a12cbcbe970 | [
"MIT"
] | null | null | null | Codes/python/py1/exs (4)/ex10.py | Gaazedo/portfolio | 4b09b6fffc6947375a20fee1c5523a12cbcbe970 | [
"MIT"
] | null | null | null | from ex3 import cont, intersec
from ex4 import iguais
from ex5 import iguais1
from ex6 import conta, lervetor
from ex7 import achar, ler_vetorx
from utility import ler_vetor
def menuprinc():
opcao = int(
input('''
Escolha um programa:
1 - ex3
2 - ex4
3 - ex5
4 - ex6
5 - ex7
0 - exit
Escolha: '''))
if opcao == 1:
print('Conjunto A:')
A = ler_vetor()
print('Conjunto B:')
B = ler_vetor()
print('Intersecção:',intersec(A,B))
elif opcao == 2:
vetor = ler_vetor()
vetor2 = ler_vetor()
print("Os elementos apenas do vetor um são: ", iguais(vetor, vetor2))
elif opcao == 3:
vetor = ler_vetor()
vetor2 = ler_vetor()
print("A união dos dois vetores é ", iguais(vetor, vetor2))
elif opcao == 4:
vetor=[0]*5
lervetor(vetor)
print("A quantidade de valores diferentes que existem no vetor é de: ",conta(vetor))
elif opcao == 5:
v = [0] * 5
ler_vetorx(v)
print("O X está na posição", achar(v))
elif opcao == 0:
return print("fim")
else:
print("\nEste número não está nas alternativas, tente novamente :D.\n")
menuprinc()
| 21.017544 | 90 | 0.590985 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 363 | 0.3 |
7e47a45aa4626c8612e5e79d872162e8912b39d8 | 293 | py | Python | warehouse.py | Arthraim/warehouse | 3905635217f679eb9c3ace00b016668588f6bb98 | [
"MIT"
] | 1 | 2015-01-14T08:06:07.000Z | 2015-01-14T08:06:07.000Z | warehouse.py | Arthraim/warehouse | 3905635217f679eb9c3ace00b016668588f6bb98 | [
"MIT"
] | null | null | null | warehouse.py | Arthraim/warehouse | 3905635217f679eb9c3ace00b016668588f6bb98 | [
"MIT"
] | null | null | null | __author__ = 'arthur'
from PyQt5.QtCore import *
from models import *
class WareHouse(QObject):
def __init__(self):
super(QObject, self).__init__()
@pyqtSlot(result=QVariant)
def all_cargo(self):
return QVariant(map(lambda x: x.javascriptify(), Cargo.select())) | 22.538462 | 73 | 0.682594 | 220 | 0.750853 | 0 | 0 | 125 | 0.426621 | 0 | 0 | 8 | 0.027304 |
7e48637038313d81e9c4efaa9dd4ea2b3a9e9145 | 391 | py | Python | yadil/image/face_model.py | neolaw84/yadil | 9f56a1beec5359e0233ae342f01263b29f752bc7 | [
"Apache-2.0"
] | null | null | null | yadil/image/face_model.py | neolaw84/yadil | 9f56a1beec5359e0233ae342f01263b29f752bc7 | [
"Apache-2.0"
] | null | null | null | yadil/image/face_model.py | neolaw84/yadil | 9f56a1beec5359e0233ae342f01263b29f752bc7 | [
"Apache-2.0"
] | null | null | null | # downloaded from https://raw.githubusercontent.com/TadasBaltrusaitis/OpenFace/master/lib/local/LandmarkDetector/model/pdms/In-the-wild_aligned_PDM_68.txt
import pickle
import pathlib
THIS_FILE_PATH = pathlib.Path(__file__)
MODEL_FILE_PATH = pathlib.Path.joinpath(THIS_FILE_PATH.parent, "face_model.bin")
with open(MODEL_FILE_PATH, "rb") as f:
model_points = pickle.load(f)
| 35.545455 | 155 | 0.790281 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 175 | 0.44757 |
7e4a01f31bce92467bfbd67774fb60815a83be86 | 567 | py | Python | Backend/scripts/average_block_rate.py | zarif007/Block-Chain-Web-App | 40bd4d8d8ce1f6de2840792290bf022d7dfacbb4 | [
"MIT"
] | 1 | 2020-12-30T09:30:23.000Z | 2020-12-30T09:30:23.000Z | Backend/scripts/average_block_rate.py | zarif007/Block-Chain-Web-App | 40bd4d8d8ce1f6de2840792290bf022d7dfacbb4 | [
"MIT"
] | null | null | null | Backend/scripts/average_block_rate.py | zarif007/Block-Chain-Web-App | 40bd4d8d8ce1f6de2840792290bf022d7dfacbb4 | [
"MIT"
] | null | null | null | import time
from backend.blockchain.Blockchain import Blockchain
from backend.config import SECONDS
# Benchmark script: mine 1000 blocks and report per-block mining time.
blockchain = Blockchain()
times = []
for i in range(1000):
    start_time = time.time_ns()
    blockchain.add_block(i)
    end_time = time.time_ns()
    # Convert the nanosecond delta into seconds using the project's SECONDS constant.
    time_to_mine = (end_time - start_time) / SECONDS
    times.append(time_to_mine)
average_time = sum(times) / len(times)
print(f'New block difficulty : {blockchain.chain[-1].difficulty}')
# NOTE: time_to_mine here is the duration of the *last* block only.
print(f'Time to mine a block: {time_to_mine}s')
print(f'Average time to add a block: {average_time}s\n')
| 24.652174 | 70 | 0.708995 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 148 | 0.261023 |
7e4b1888606ceb34b0d7457d5881f8e99340c1b5 | 12,193 | py | Python | python/tdapi/td_oauth.py | chaelim/ExcelRTD | 2a8e5860d1f3f091261f08d49bc1f426038a9aa7 | [
"MIT"
] | 5 | 2020-07-18T20:39:06.000Z | 2021-12-29T13:15:18.000Z | python/tdapi/td_oauth.py | chaelim/ExcelRTD | 2a8e5860d1f3f091261f08d49bc1f426038a9aa7 | [
"MIT"
] | 2 | 2020-08-01T07:31:41.000Z | 2020-08-05T01:38:29.000Z | python/tdapi/td_oauth.py | chaelim/ExcelRTD | 2a8e5860d1f3f091261f08d49bc1f426038a9aa7 | [
"MIT"
] | 3 | 2020-09-08T17:28:31.000Z | 2021-09-29T20:42:13.000Z | from requests_oauthlib import OAuth2Session
from flask import Flask, request, redirect, session, url_for
from flask.json import jsonify
import logging
import datetime
import json
import os
import pickle
import requests
import time
import win32crypt
from typing import Dict
from typing import List
# Support both package-relative and script-style execution: when this module
# is run directly (no package), fall back to a plain top-level import.
if __package__:
    from .td_config import APPDATA_PATH, CLIENT_ID, CLIENT_ID_AUTH, REDIRECT_URI, AUTHORIZATION_BASE_URL, TOKEN_URL, TOKEN_FILE_NAME
    from .td_config import USERPRINCIPALS_FILE_NAME, CREDENTIALS_FILE_NAME, API_ENDPOINT, API_VERSION, TOKEN_ENDPOINT
else:
    from td_config import APPDATA_PATH, CLIENT_ID, CLIENT_ID_AUTH, REDIRECT_URI, AUTHORIZATION_BASE_URL, TOKEN_URL, TOKEN_FILE_NAME
    from td_config import USERPRINCIPALS_FILE_NAME, CREDENTIALS_FILE_NAME, API_ENDPOINT, API_VERSION, TOKEN_ENDPOINT

# Module-level logger; only errors are emitted, to stderr.
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
logger.addHandler(logging.StreamHandler())

# Local Flask app used only to host the OAuth redirect callback.
app = Flask(__name__)

# Ensure the per-user data directory for the encrypted token file exists.
if not os.path.exists(APPDATA_PATH):
    os.makedirs(APPDATA_PATH)
@app.route("/")
def demo():
"""Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
using an URL with a few key OAuth parameters.
"""
td_session = OAuth2Session(
client_id=CLIENT_ID_AUTH,
redirect_uri=REDIRECT_URI
)
authorization_url, state = td_session.authorization_url(AUTHORIZATION_BASE_URL)
# State is used to prevent CSRF, keep this for later.
session['oauth_state'] = state
return redirect(authorization_url)
# Step 2: User authorization, this happens on the provider.
@app.route("/callback", methods=["GET"])
def callback():
    """ Step 3: Retrieving an access token.

    The user has been redirected back from the provider to your registered
    callback URL. With this redirection comes an authorization code included
    in the redirect URL. We will use that to obtain an access token, persist
    it via save_token(), and then redirect to /shutdown to stop the helper
    server.
    """
    # Recreate the session with the CSRF state saved by demo().
    td_session = OAuth2Session(
        client_id=CLIENT_ID_AUTH,
        redirect_uri=REDIRECT_URI,
        state=session['oauth_state']
    )

    # Exchange the authorization code (embedded in request.url) for tokens.
    # access_type='offline' asks the provider for a refresh token as well.
    token = td_session.fetch_token(
        TOKEN_URL,
        access_type='offline',
        authorization_response=request.url,
        include_client_id=True
    )

    # At this point you can fetch protected resources but lets save
    # the token and show how this is done from a persisted token
    # in /profile.
    session['oauth_token'] = token
    save_token(token)

    # Grab the Streamer Info.
    #userPrincipalsResponse = get_user_principals(
    #    token,
    #    fields=['streamerConnectionInfo', 'streamerSubscriptionKeys', 'preferences', 'surrogateIds'])

    #if userPrincipalsResponse:
    #    save_credentials(userPrincipalsResponse)

    return redirect(url_for('shutdown'))
def shutdown_server():
    """Ask the embedded Werkzeug development server to stop serving."""
    stop = request.environ.get('werkzeug.server.shutdown')
    if stop is None:
        raise RuntimeError('Not running with the Werkzeug Server')
    stop()
@app.route('/shutdown', methods=['GET'])
def shutdown():
    """Stop the local helper server and tell the user to return to Excel."""
    shutdown_server()
    goodbye_page = '<html><head>Server shutting down...</head><body>Now you can close this and go back to Excel</body></html>'
    return goodbye_page
@app.route("/profile", methods=["GET"])
def profile():
"""Fetching a protected resource using an OAuth 2 token.
"""
td_session = OAuth2Session(CLIENT_ID, token=session['oauth_token'])
return jsonify(td_session.get('https://api.td_session.com/user').json())
def get_token():
    # TODO: unimplemented stub -- currently does nothing and returns None.
    pass
def get_user_principals(token, fields: List[str]) -> Dict:
    """Returns User Principal details.

    Documentation:
    ----
    https://developer.tdameritrade.com/user-principal/apis/get/userprincipals-0

    Arguments:
    ----
    token: OAuth token dict; ``token['access_token']`` is sent as a Bearer token.

    fields: A comma separated String which allows one to specify additional fields to return. None of
        these fields are returned by default. Possible values in this String can be:

            1. streamerSubscriptionKeys
            2. streamerConnectionInfo
            3. preferences
            4. surrogateIds

    Returns:
    ----
    The decoded JSON response dict on a 2xx response, otherwise None.

    Usage:
    ----
        >>> get_user_principals(token, fields=['preferences'])
        >>> get_user_principals(token, fields=['preferences','streamerConnectionInfo'])
    """

    # define the endpoint
    endpoint = 'userprincipals'

    # build the params dictionary
    params = {
        'fields': ','.join(fields)
    }

    # BUG FIX: this is a module-level function, so the previous
    # `self.API_ENDPOINT` / `self.API_VERSION` raised NameError. Use the
    # module-level constants imported from td_config instead.
    url = '/'.join([API_ENDPOINT, API_VERSION, endpoint])

    headers = {
        'Authorization': 'Bearer {token}'.format(token=token['access_token'])
    }

    # Define a new session.
    request_session = requests.Session()
    request_session.verify = True

    # Define a new request.
    request_request = requests.Request(
        method='GET',
        headers=headers,
        url=url,
        params=params,
    ).prepare()

    # Send the request and release the session.
    response: requests.Response = request_session.send(request=request_request)
    request_session.close()

    if response.ok:
        return response.json()
    # Non-2xx responses are treated as "no data"; callers check for None.
    return None
def load_token():
    """Load and DPAPI-decrypt the cached token file.

    Returns the token dict on success, or None when the file is missing,
    unreadable, or cannot be decrypted/parsed.
    """
    try:
        with open(TOKEN_FILE_NAME, 'rb') as fh:
            encrypted = fh.read()
        decrypted = win32crypt.CryptUnprotectData(encrypted)[1]
        return json.loads(decrypted.decode())
    except Exception:
        return None
def save_token(token_dict: dict) -> bool:
    """Persist an OAuth token response, DPAPI-encrypted, to TOKEN_FILE_NAME.

    Records both tokens plus absolute expiry timestamps (epoch seconds and
    ISO dates) so later calls can decide whether a refresh is needed.

    Returns:
        bool -- True on success; False when the payload has no access token
        or the encrypted file cannot be written.
    """
    # make sure there is an access token before proceeding.
    if 'access_token' not in token_dict:
        return False

    # Convert the relative lifetimes into absolute expiry timestamps.
    access_expires = time.time() + int(token_dict['expires_in'])
    refresh_expires = time.time() + int(token_dict['refresh_token_expires_in'])

    token_data = {
        'access_token': token_dict['access_token'],
        'refresh_token': token_dict['refresh_token'],
        'access_token_expires_at': access_expires,
        'refresh_token_expires_at': refresh_expires,
        'access_token_expires_at_date': datetime.datetime.fromtimestamp(access_expires).isoformat(),
        'refresh_token_expires_at_date': datetime.datetime.fromtimestamp(refresh_expires).isoformat(),
        'logged_in': True,
    }

    try:
        with open(TOKEN_FILE_NAME, 'wb') as encoded_file:
            encoded_file.write(win32crypt.CryptProtectData(json.dumps(token_data).encode()))
    except Exception:
        return False

    return True
def save_credentials(userPrincipalsResponse):
    """Write the raw userprincipals response and a derived streamer
    credentials dict to their respective JSON files."""
    # Grab the token timestamp.
    tokenTimeStamp = userPrincipalsResponse['streamerInfo']['tokenTimestamp']

    # Grab socket
    # NOTE(review): socket_url is extracted but never used here -- presumably
    # intended for the streamer connection; confirm before removing.
    socket_url = userPrincipalsResponse['streamerInfo']['streamerSocketUrl']

    # Parse the token timestamp and convert it to epoch milliseconds.
    token_timestamp = datetime.datetime.strptime(tokenTimeStamp, "%Y-%m-%dT%H:%M:%S%z")
    tokenTimeStampAsMs = int(token_timestamp.timestamp()) * 1000

    # Define our Credentials Dictionary used for authentication.
    credentials = {
        "userid": userPrincipalsResponse['accounts'][0]['accountId'],
        "token": userPrincipalsResponse['streamerInfo']['token'],
        "company": userPrincipalsResponse['accounts'][0]['company'],
        "segment": userPrincipalsResponse['accounts'][0]['segment'],
        "cddomain": userPrincipalsResponse['accounts'][0]['accountCdDomainId'],
        "usergroup": userPrincipalsResponse['streamerInfo']['userGroup'],
        "accesslevel": userPrincipalsResponse['streamerInfo']['accessLevel'],
        "authorized": "Y",
        "timestamp": tokenTimeStampAsMs,
        "appid": userPrincipalsResponse['streamerInfo']['appId'],
        "acl": userPrincipalsResponse['streamerInfo']['acl']
    }

    # Persist both the full response and the derived credentials as JSON.
    with open(file=USERPRINCIPALS_FILE_NAME, mode='w+') as json_file:
        json.dump(obj=userPrincipalsResponse, fp=json_file, indent=4)

    with open(file=CREDENTIALS_FILE_NAME, mode='w+') as json_file:
        json.dump(obj=credentials, fp=json_file, indent=4)
def _token_seconds(token_data, token_type: str = 'access_token') -> int:
    """Determines time till expiration for a token.

    Return the number of seconds until the current access token or refresh token
    will expire, minus a 60-second safety buffer. The default is the access
    token because it is the most commonly used token during requests.

    Arguments:
    ----
    token_data {dict} -- Token record as produced by save_token(); must hold
        '<token_type>' and '<token_type>_expires_at' keys.

    token_type {str} -- The type of token you would like to determine lifespan for.
        Possible values are ['access_token', 'refresh_token'] (default: {access_token})

    Returns:
    ----
    {int} -- The number of seconds till expiration (0 when missing or expired).

    Raises:
    ----
    ValueError -- For an unrecognized token_type. (Previously the function
        fell through and raised UnboundLocalError on `token_exp`.)
    """
    if token_type not in ('access_token', 'refresh_token'):
        raise ValueError(
            "token_type must be 'access_token' or 'refresh_token', got %r" % token_type)

    expires_key = token_type + '_expires_at'

    # Missing token, or less than 60 seconds of life left -> treat as expired.
    if not token_data.get(token_type) or time.time() + 60 >= token_data[expires_key]:
        return 0

    # Seconds remaining, keeping the 60-second buffer.
    return int(token_data[expires_key] - time.time() - 60)
def grab_refresh_token(access_token, refresh_token) -> bool:
    """Refreshes the current access token.

    Posts the refresh grant to the token endpoint and, on success,
    persists the new token payload via save_token().

    Returns:
    ----
    {bool} -- `True` if successful, `False` otherwise.
    """
    # Parameters of the refresh-token grant.
    payload = {
        'client_id': CLIENT_ID_AUTH,
        'grant_type': 'refresh_token',
        'access_type': 'offline',
        'refresh_token': refresh_token
    }

    # e.g. https://api.tdameritrade.com/v1/oauth2/token
    token_url = '/'.join([API_ENDPOINT, API_VERSION, TOKEN_ENDPOINT])

    http_session = requests.Session()
    http_session.verify = True

    prepared = requests.Request(
        method='POST',
        headers={'Content-Type': 'application/x-www-form-urlencoded'},
        url=token_url,
        data=payload
    ).prepare()

    response = http_session.send(request=prepared)
    http_session.close()

    if response.ok:
        save_token(response.json())
        return True
    return False
def silent_sso() -> bool:
    """Try to authenticate without user interaction.

    Returns True when a valid access token exists or could be refreshed;
    False when a full interactive login is required.
    """
    try:
        token_data = load_token()
        # if the current access token is not expired then we are still authenticated.
        if _token_seconds(token_data, token_type='access_token') > 0:
            return True

        # if the refresh token is expired then you have to do a full login.
        elif _token_seconds(token_data, token_type='refresh_token') <= 0:
            return False

        # if the current access token is expired then try and refresh access token.
        elif token_data['refresh_token'] and grab_refresh_token(token_data['access_token'], token_data['refresh_token']):
            return True
    except Exception as e:
        print(repr(e))
        return False

    # NOTE(review): this fall-through returns True even when the refresh
    # attempt above failed (grab_refresh_token returned False) -- confirm
    # whether that is intended.
    return True
def _run_full_oauth() -> None:
    """Serve the interactive OAuth flow locally and open it in a browser."""
    import webbrowser
    # Fresh random secret for the Flask session on every run.
    app.secret_key = os.urandom(24)
    webbrowser.open_new_tab('https://localhost:8080/')
    # Blocks until the /shutdown route stops the server.
    app.run(ssl_context='adhoc', host="localhost", port=8080, debug=False)
def run_full_oauth_subprocess() -> None:
    """Run the interactive OAuth flow in a child Python process."""
    from subprocess import run
    script_path = os.path.realpath(__file__)
    run(["python", script_path], cwd=os.path.dirname(script_path))
if __name__ == "__main__":
import sys
# Check if current token is valid
if silent_sso():
sys.exit(0)
else:
_run_full_oauth() | 32.342175 | 132 | 0.686623 | 0 | 0 | 0 | 0 | 2,165 | 0.177561 | 0 | 0 | 5,035 | 0.412942 |
7e4b3dfe702da16b414ccbfbae1fff18ac2129ef | 5,083 | py | Python | visitpy/visit_utils/tests/test_qannote_basic.py | ahota/visit_ospray | d80b2e18ff5654d04bfb56ae4d6f42e45f87c9b9 | [
"BSD-3-Clause"
] | null | null | null | visitpy/visit_utils/tests/test_qannote_basic.py | ahota/visit_ospray | d80b2e18ff5654d04bfb56ae4d6f42e45f87c9b9 | [
"BSD-3-Clause"
] | null | null | null | visitpy/visit_utils/tests/test_qannote_basic.py | ahota/visit_ospray | d80b2e18ff5654d04bfb56ae4d6f42e45f87c9b9 | [
"BSD-3-Clause"
] | null | null | null | #*****************************************************************************
#
# Copyright (c) 2000 - 2014, Lawrence Livermore National Security, LLC
# Produced at the Lawrence Livermore National Laboratory
# LLNL-CODE-442911
# All rights reserved.
#
# This file is part of VisIt. For details, see https://visit.llnl.gov/. The
# full copyright notice is contained in the file COPYRIGHT located at the root
# of the VisIt distribution or at http://www.llnl.gov/visit/copyright.html.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the disclaimer below.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the disclaimer (as noted below) in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of the LLNS/LLNL nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY,
# LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#*****************************************************************************
"""
author: Cyrus Harrison (cyrush@llnl.gov)
description:
Tests for qannote module.
"""
import unittest
import os
from os.path import join as pjoin
from visit_test import *
from visit_utils.qannote import *
# PySide is optional here; the @pyside_test decorator is expected to skip
# tests when it is unavailable, so the import failure is deliberately ignored.
try:
    import PySide.QtCore
except:
    pass

# Output and fixture directories live next to this test file.
output_dir = pjoin(os.path.split(__file__)[0],"_output")
data_dir  = pjoin(os.path.split(__file__)[0],"_data")
def out_path(fname):
    """Return the path for *fname* under _output/qannote, creating the dir."""
    target_dir = pjoin(output_dir, "qannote")
    if not os.path.isdir(target_dir):
        os.mkdir(target_dir)
    return pjoin(target_dir, fname)
class TestBasic(unittest.TestCase):
    """Visual smoke tests for the qannote Canvas: each test renders a fixed
    set of annotation items to a PNG under _output/qannote."""
    def setUp(self):
        # Shared fixture items; exact coordinates/colors are the test's substance.
        txt = Text( {"txt": "Text Overlay!",
                     "x": 100,
                     "y": 200,
                     "color": (255,255,255,255),
                     "vz":"center",
                     "hz":"center",
                     "font/name": "Times New Roman",
                     "font/size": 22})
        img = Image( {"image":pjoin(data_dir,"blue.box.png"),
                      "x": 130, "y": 180})
        arr = Arrow( {"x0": 10, "y0":10,
                      "x1":100,"y1":175,"tip_len":20})
        rect = Rect( {"x":400,"y":400,
                       "w":100,"h":200,
                       "color":(0,255,0,255)})
        box = Rect( {"x":200,"y":200,
                       "w":100,"h":100,
                       "color":(0,255,0,255),"outline":True})
        self.items = [img,txt,arr,rect,box]
    @pyside_test
    def test_00_basic(self):
        # Render the fixture items on a blank 600x600 canvas.
        test_output = out_path("test.basic.00.png")
        Canvas.render(self.items,(600,600),test_output)
    @pyside_test
    def test_01_basic(self):
        # Same items composited over a background image.
        test_output = out_path("test.basic.01.png")
        bg = Image( {"image":pjoin(data_dir,"black.bg.png")})
        items = [bg]
        items.extend(self.items)
        Canvas.render(items,bg.size(),test_output)
    @pyside_test
    def test_02_view(self):
        # Render with an offset view rectangle (background shifted by -10,-10).
        test_output = out_path("test.basic.02.png")
        bg = Image( {"image":pjoin(data_dir,"black.bg.png"),
                     "x":-10,"y":-10})
        items = [bg]
        items.extend(self.items)
        sz = bg.size()
        Canvas.render(items,sz,test_output,(-10,-10,sz[0],sz[1]))
    @pyside_test
    def test_03_textbox(self):
        # TextBox wrapping: long text with embedded newlines in a 300x200 box.
        test_output = out_path("test.basic.03.png")
        bg = Image( {"image":pjoin(data_dir,"black.bg.png"),
                     "x":-10,"y":-10})
        items = [bg]
        txt = "Testing text box with wrap capability with a long sentence.\nWith some new lines for good measure.\nFinal."
        items.append(TextBox({"x":200,"y":200,
                              "w":300,"h":200,
                              "font/size":20,
                              "txt":txt}))
        sz = bg.size()
        Canvas.render(items,sz,test_output,(-10,-10,sz[0],sz[1]))
    def tearDown(self):
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 40.664 | 122 | 0.587842 | 2,334 | 0.459178 | 0 | 0 | 1,363 | 0.268149 | 0 | 0 | 2,749 | 0.540822 |
7e4b68c4767ac112003a850f22261b677c1669da | 6,961 | py | Python | searchengine.py | ayser259/searchengine | c8cd07eb90dae876fc738e7ebc3609daa0ee9ef7 | [
"MIT"
] | null | null | null | searchengine.py | ayser259/searchengine | c8cd07eb90dae876fc738e7ebc3609daa0ee9ef7 | [
"MIT"
] | null | null | null | searchengine.py | ayser259/searchengine | c8cd07eb90dae876fc738e7ebc3609daa0ee9ef7 | [
"MIT"
] | null | null | null | # This program runs the search engine program
import ast,sys,os,json, time, re,math
from objects import *
import lexicon_engine as lexicon_engine
from snippet_engine import *
def bm25_top_10(doc_no_to_internal_id,internal_id_to_metadata,inverted_index,tokens_to_id):
    """Prompt for a query, score the collection with BM25, and print the
    top-10 results with headlines and snippets.

    Returns a {rank: DOCNO} dict for the printed results.
    Relies on module globals: average_word_count, collection_size,
    internal_id_to_docno, directory.
    """
    # Retrieving Query from user
    print()
    query = input("Search: ")
    c = time.time()
    # Normalize: lowercase and strip non-word characters before tokenizing.
    query = query.lower()
    query = re.sub(r'\W+', ' ', query)
    query_list = query.split()
    # Standard BM25 constants.
    k1 = 1.2
    k2 = 7
    b = 0.75
    doc_id_to_bm25_score = {}
    bm25_score = 0
    sorted_doc_id_to_bm25_score_keys = []
    for term in query_list:
        try:
            term_id = int(tokens_to_id[term])
            postings_list = inverted_index[term_id] # [{doc_id:count}]
            # NOTE(review): each posting appears to be one {doc_id: count}
            # dict, so dividing len() by 2 for document frequency looks
            # suspicious -- confirm the postings-list layout.
            no_of_rel_documents = len(postings_list)/2
            for i in range(0,len(postings_list)):
                posting = postings_list[i] # {doc_id:count}
                doc_id = list(postings_list[i].keys())
                doc_id = doc_id[0]
                term_count_in_doc = int(posting[doc_id])
                current_file_meta_data = metadata()
                qf = 1
                current_file_meta_data = current_file_meta_data.create_meta_data(internal_id_to_metadata.get(int(doc_id)))
                # Length-normalization factor K of BM25.
                k = k1*((1-b)+b*(float(current_file_meta_data.doc_length)/float(average_word_count)))
                # Accumulate onto any score this doc earned from earlier terms.
                try:
                    bm25_score = doc_id_to_bm25_score[int(doc_id)]
                except:
                    bm25_score = bm25_score
                bm25_score = bm25_score + ((((k1+1)*term_count_in_doc)/float((k+term_count_in_doc))*(((k2+1)*qf)/(k2+qf)))*(math.log((collection_size-no_of_rel_documents+0.5)/(no_of_rel_documents+0.5))))
                doc_id_to_bm25_score[int(doc_id)] = bm25_score
                bm25_score = 0
        # NOTE(review): bare except silently drops unknown terms AND hides
        # any other error raised while scoring -- consider narrowing.
        except:
            bm25_score = 0
    # Top 10 documents by descending BM25 score.
    sorted_doc_id_to_bm25_score_keys = sorted(doc_id_to_bm25_score,key=doc_id_to_bm25_score.get,reverse=True)
    sorted_doc_id_to_bm25_score_keys = list(sorted_doc_id_to_bm25_score_keys[:10])
    rank_to_docno = {} # {rank:docno}
    rank_counter = 0
    for doc_id in sorted_doc_id_to_bm25_score_keys:
        rank_counter +=1
        docno = internal_id_to_docno[int(doc_id)]
        rank_to_docno[rank_counter] = docno
        # NOTE(review): reuses current_file_meta_data from the scoring loop;
        # it is unbound if no term matched -- confirm this path is reachable
        # only with a non-empty result set.
        current_file_meta_data = current_file_meta_data.create_meta_data(internal_id_to_metadata.get(int(doc_id)))
        # Print results
        headline = str(current_file_meta_data.headline).strip()
        if len(headline)<1:
            headline = first_x_characters(directory,current_file_meta_data,50)
        print_string = str(rank_counter)+". "+headline+" ("+str(current_file_meta_data.date)+")"
        print(print_string)
        print()
        snippet = top_3_lines(directory,current_file_meta_data,query_list)
        print(snippet)
        print()
    t = time.time()
    total = t - c
    print("Retrieval performed in "+str(total)+" seconds")
    return rank_to_docno
def read_doc(rank_to_docno, rank, doc_no_to_internal_id, internal_id_to_metadata, directory):
    """Print the full text of the document at a given result rank.

    Arguments:
        rank_to_docno: {rank: DOCNO} mapping from the last search.
        rank: 1-based rank of the document to display.
        doc_no_to_internal_id: DOCNO -> internal document id mapping.
        internal_id_to_metadata: internal id -> serialized metadata mapping.
        directory: root directory of the document collection.
    """
    # Print document to screen given rank
    docno = rank_to_docno[rank]
    print("Docno")
    print(docno)
    file_internal_id = int(doc_no_to_internal_id.get(docno))

    # Resolve the document's metadata, which encodes its on-disk location.
    current_file_meta_data = metadata().create_meta_data(
        internal_id_to_metadata.get(file_internal_id))
    current_file_path = "{}/{}/{}.txt".format(
        directory, current_file_meta_data.date, current_file_meta_data.internal_id)

    # BUG FIX: the file handle was previously opened and never closed;
    # use a context manager so it is released after printing.
    with open(current_file_path) as current_file:
        print("Requested File:")
        for line in current_file:
            print(line)
print("Loading Search Engine...")
try:
c = time.time()
directory = "/Users/ayser/Dropbox/Waterloo/3A/Courses/Course_Projects/msci_541/la_times_files"
# Loading all relevant information
# Getting docno_to_internal_id
docno_to_internal_id_file_path = directory+"/"+"doc_no_to_internal_id.txt"
docno_to_internal_id_file = open(docno_to_internal_id_file_path, "r")
doc_no_to_internal_id_string = ""
for line in docno_to_internal_id_file:
doc_no_to_internal_id_string = doc_no_to_internal_id_string +line
json_as_string = doc_no_to_internal_id_string.replace("'", "\"")
doc_no_to_internal_id = json.loads(json_as_string)
# Getting internal_id_to_docno
internal_id_to_docno = {}
doc_keys = list(doc_no_to_internal_id.keys())
for key in doc_keys:
internal_id_to_docno[doc_no_to_internal_id[key]] = key
# Loading internal id to metadata mapping
internal_id_to_meta_data_file_path = directory+"/"+"internal_id_to_meta_data.txt"
internal_id_to_meta_data_file = open(internal_id_to_meta_data_file_path, "r")
internal_id_to_metadata_string = ""
for line in internal_id_to_meta_data_file:
internal_id_to_metadata_string = internal_id_to_metadata_string + line
internal_id_to_metadata = ast.literal_eval(internal_id_to_metadata_string)
# Loading inverted_index
inverted_index = lexicon_engine.read_inverted_index(directory)
# Loading tokens_to_id
tokens_to_id = lexicon_engine.read_tokens_to_id(directory)
#Loading collection data
collection_data_file = directory+"/"+"collection_info.txt"
collection_data_file = open(collection_data_file,"r")
average_word_count = 0
collection_size = 0
for line in collection_data_file:
average_word_count = int(float((line[0:line.index("_")])))
collection_size = int(float((line[line.index("_")+1:])))
t = time.time()
total = t - c
print("All data loaded in "+str(total)+" seconds")
print("Program Ready..")
exit = False
rank_to_docno = bm25_top_10(doc_no_to_internal_id,internal_id_to_metadata,inverted_index,tokens_to_id)
while exit == False:
try:
print("To read one of the above documents, enter the rank of the document (1-10)")
print("To perform another search, Enter 'N' ")
print("To exit the program, Enter 'Q' ")
query_list = [1,2,3,4,5,6,7,8,9,10]
command = input("Enter Command: ")
if command == "N":
print("Enter New Query")
# Now retrieving documents, displaying results and creating snippets
rank_to_docno = bm25_top_10(doc_no_to_internal_id,internal_id_to_metadata,inverted_index,tokens_to_id)
elif command =="Q":
exit = True
elif int(command) in query_list:
read_doc(rank_to_docno,int(command),doc_no_to_internal_id,internal_id_to_metadata,directory)
print()
else:
print("Error 2: Input incorrectly formatted, try again")
except:
print("Error 3: Input incorrectly formatted, try again")
except:
print("Error 1")
| 41.933735 | 203 | 0.679931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,178 | 0.169229 |
7e4d650deef322e53bf8afec40c9a2d1620153ca | 4,049 | py | Python | dedup_parellel.py | darrenkoh/File-Dedupe | 014eb42dd36426da641aa62376425a12ddd6e2fc | [
"MIT"
] | null | null | null | dedup_parellel.py | darrenkoh/File-Dedupe | 014eb42dd36426da641aa62376425a12ddd6e2fc | [
"MIT"
] | null | null | null | dedup_parellel.py | darrenkoh/File-Dedupe | 014eb42dd36426da641aa62376425a12ddd6e2fc | [
"MIT"
] | null | null | null | # Given a folder, scan all files and group file with same content
import sys
import os
import shutil
import hashlib
import time
from collections import defaultdict
import multiprocessing
from functools import partial
import time
def ExtractFingerprint(filename, bit):
    """Compute a sampled SHA-1 fingerprint of *filename*.

    For files larger than *bit* bytes, one byte is hashed every
    ``size // bit`` bytes instead of the whole content; smaller non-empty
    files are hashed in full. Returns the hex digest, or None for an
    empty file.
    """
    total_size = os.path.getsize(filename)
    digest = hashlib.sha1()
    step = total_size // bit
    with open(filename, "rb") as handle:
        if step > 0:
            # Sample one byte at each multiple of `step`.
            offset = step
            while offset < total_size:
                handle.seek(offset)
                digest.update(handle.read(1))
                offset += step
        elif total_size > 0:
            digest.update(handle.read())
    if total_size > 0:
        return digest.hexdigest()
    return None
def CreateDir(path):
    """Create *path* (including parents) unless something already exists there."""
    if os.path.exists(path):
        return
    os.makedirs(path)
def Process(filename, dirpath, dic, autoDelete):
    """Fingerprint one file and record/handle it as a duplicate if its
    signature was seen before. Returns elapsed time in milliseconds.

    NOTE(review): `dic` is a plain defaultdict passed via functools.partial;
    under multiprocessing.Pool each worker mutates its *own copy*, so
    duplicates are only detected within a single worker's batch -- confirm
    whether a Manager dict was intended (see the stray DictProxy reference
    in __main__).
    """
    start = time.time()
    filename = os.path.join(dirpath, filename)
    signature = ExtractFingerprint(filename, 512)
    if signature:
        dic[signature].append(filename)
        # More than one file with this signature -> duplicate group.
        if len(dic[signature]) > 1:
            CreateDir(os.path.join("output", signature))
            # Append the group's file list to output/<sig>/<sig>.txt.
            with open(os.path.join("output", signature ,signature +".txt"), "a+") as f:
                print("\n",signature)
                print("\n".join(dic[signature]))
                if autoDelete:
                    CreateDir(os.path.join("deleted",signature))
                    moveDeletedFileTo = os.path.join("deleted",signature)
                # First duplicate found: also record/copy the original file.
                if len(dic[signature]) == 2:
                    if autoDelete:
                        shutil.move(filename,moveDeletedFileTo)
                        f.write(dic[signature][0] + "\n")
                        f.write("[DELETED] " + dic[signature][1] + "\n")
                    else:
                        shutil.copy(filename,os.path.join("output", signature))
                        shutil.copy(dic[signature][0],os.path.join("output", signature))
                        f.write("\n".join(dic[signature]) + "\n")
                else:
                    if autoDelete:
                        shutil.move(filename,moveDeletedFileTo)
                        f.write("[DELETED] " + filename + "\n")
                    else:
                        shutil.copy(filename,os.path.join("output", signature))
                        f.write(filename + "\n")
            # Redundant: the with-block above already closed the file.
            f.close()
    return (time.time() - start) * 1000
if __name__ == '__main__':
    # Signature -> list of file paths sharing that signature.
    dics = defaultdict(list)
    # NOTE(review): this bare attribute reference is a no-op -- presumably a
    # leftover from an intended Manager().dict(); see the note in Process().
    multiprocessing.managers.DictProxy
    # Parse the minimal CLI: -p <path> [-d] [-b <bits>]
    folderPath = sys.argv[sys.argv.index("-p")+1] if "-p" in sys.argv else ""
    isAutoDelete = "-d" in sys.argv
    signature_size = 512
    if "-b" in sys.argv:
        signature_size = int(sys.argv[sys.argv.index("-b")+1])
    cpuCount = multiprocessing.cpu_count()
    print("Using Signature Size of ", signature_size)
    print("Using CPU Cores of ", cpuCount)
    if len(sys.argv) == 1 or not os.path.isdir(folderPath):
        print("Run with python \"dedup.py [parameters]\"")
        print("[Param] -p = \"Path to scan for duplicates\"")
        print("[Optional Param] -d = \"Delete Duplicate\"")
        print("[Optional Param] -b number = \"Change the default signature size of 512 bits\"")
        exit(0)
    CreateDir("output")
    if isAutoDelete:
        CreateDir("deleted")
    processedCount = 0
    average = 0
    # Walk the tree, dispatching each directory's files to the worker pool.
    # NOTE(review): Process always hashes with 512; the parsed signature_size
    # is printed but never passed through -- confirm intent.
    with multiprocessing.Pool(processes=cpuCount) as pool:
        for (dirpath, dirnames, filenames) in os.walk(folderPath):
            if len(filenames) > 0:
                sys.stdout.write("Scanning: %d Total: %d Dic-Size: %d Avg-Duration: %d \r" % (len(filenames), processedCount, len(dics), average))
                process_local = partial(Process, dirpath=dirpath, dic=dics, autoDelete=isAutoDelete)
                result = pool.map(process_local, filenames)
                totalResult = len(result)
                processedCount += totalResult
                average = sum(result) / totalResult
| 37.490741 | 146 | 0.566313 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 559 | 0.138059 |
7e4d7f3b009cccdbe2d7489cc9b3f8c5c3da1723 | 10,226 | py | Python | snorkel/learning/structure/synthetic.py | Conengmo/snorkel | 36868e8a84de19b94e1c4b8eceaa64969a61a46b | [
"Apache-2.0"
] | 30 | 2019-08-22T19:27:59.000Z | 2022-03-13T22:03:15.000Z | snorkel/learning/structure/synthetic.py | Conengmo/snorkel | 36868e8a84de19b94e1c4b8eceaa64969a61a46b | [
"Apache-2.0"
] | 2 | 2019-08-22T16:51:58.000Z | 2022-03-21T02:59:18.000Z | snorkel/learning/structure/synthetic.py | Conengmo/snorkel | 36868e8a84de19b94e1c4b8eceaa64969a61a46b | [
"Apache-2.0"
] | 31 | 2019-08-22T19:28:08.000Z | 2022-03-23T12:50:49.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from numbskull import NumbSkull
from numbskull.inference import FACTORS
from numbskull.numbskulltypes import Weight, Variable, Factor, FactorToVar
import numpy as np
import random
import scipy.sparse as sparse
from snorkel.learning import GenerativeModel, GenerativeModelWeights
def generate_model(n, dep_density, class_prior=False, lf_propensity=False, lf_prior=False, lf_class_propensity=False,
                   dep_similar=False, dep_reinforcing=False, dep_fixing=False, dep_exclusive=False, force_dep=False):
    """Randomly generate a synthetic GenerativeModelWeights over n labeling
    functions.

    Each boolean flag enables random weights for the corresponding factor
    type; dep_density is the probability of each pairwise dependency.
    force_dep=True retries (recursively) until at least one dependency is
    sampled. The exact sequence of random.* calls determines the output,
    so restructuring these loops would change generated models.
    """
    weights = GenerativeModelWeights(n)
    # Accuracies drawn uniformly from (0.9, 1.1].
    for i in range(n):
        weights.lf_accuracy[i] = 1.1 - 0.2 * random.random()

    if class_prior:
        weights.class_prior = random.choice((-1.0, -2.0))

    if lf_propensity:
        for i in range(n):
            weights.lf_propensity[i] = random.choice((-1.0, -2.0))

    if lf_prior:
        for i in range(n):
            weights.lf_prior[i] = random.choice((1.0, -1.0))

    if lf_class_propensity:
        for i in range(n):
            weights.lf_class_propensity[i] = random.choice((1.0, -1.0))

    # Pairwise dependencies over the upper triangle (i < j); fixing and
    # reinforcing are directed, so a coin flip picks the direction.
    if dep_similar:
        for i in range(n):
            for j in range(i+1, n):
                if random.random() < dep_density:
                    weights.dep_similar[i, j] = 0.25

    if dep_fixing:
        for i in range(n):
            for j in range(i+1, n):
                if random.random() < dep_density:
                    if random.random() < 0.5:
                        weights.dep_fixing[i, j] = 0.25
                    else:
                        weights.dep_fixing[j, i] = 0.25

    if dep_reinforcing:
        for i in range(n):
            for j in range(i+1, n):
                if random.random() < dep_density:
                    if random.random() < 0.5:
                        weights.dep_reinforcing[i, j] = 0.25
                    else:
                        weights.dep_reinforcing[j, i] = 0.25

    if dep_exclusive:
        for i in range(n):
            for j in range(i+1, n):
                if random.random() < dep_density:
                    weights.dep_exclusive[i, j] = 0.25

    # Retry until at least one dependency exists when force_dep is set.
    if force_dep and weights.dep_similar.getnnz() == 0 and weights.dep_fixing.getnnz() == 0 \
            and weights.dep_reinforcing.getnnz() == 0 and weights.dep_exclusive.getnnz() == 0:
        return generate_model(n, dep_density, class_prior=class_prior, lf_propensity=lf_propensity, lf_prior=lf_prior,
                              lf_class_propensity=lf_class_propensity, dep_similar=dep_similar, dep_fixing=dep_fixing,
                              dep_reinforcing=dep_reinforcing, dep_exclusive=dep_exclusive, force_dep=True)
    else:
        return weights
def generate_label_matrix(weights, m):
    """Compile *weights* into a NumbSkull factor graph and Gibbs-sample m rows.

    Parameters
    ----------
    weights : GenerativeModelWeights describing the generative model.
    m : number of examples to sample.

    Returns
    -------
    (y, L) where y is an int64 array of shape (m,) with the sampled latent
    class encoded as +1/-1, and L is an (m, weights.n) CSR matrix whose
    entries are +1/-1 for a non-abstaining LF vote and 0 for abstain
    (internal LF values: 0 -> +1, 1 -> -1, 2 -> abstain).
    """
    # Fix: NumbSkull is used below but was never imported at module level,
    # so this function previously died with a NameError at runtime.
    from numbskull import NumbSkull

    # Compilation
    # Weights: one optional class prior, one accuracy per LF, then one weight
    # for each non-zero optional (prior/propensity) term and each non-zero
    # pairwise dependency.
    n_weights = 1 if weights.class_prior != 0.0 else 0
    n_weights += weights.n
    for optional_name in GenerativeModel.optional_names:
        for i in range(weights.n):
            if getattr(weights, optional_name)[i] != 0.0:
                n_weights += 1
    for dep_name in GenerativeModel.dep_names:
        for i in range(weights.n):
            for j in range(weights.n):
                if getattr(weights, dep_name)[i, j] != 0.0:
                    n_weights += 1
    weight = np.zeros(n_weights, Weight)
    # All weights are fixed: we sample from the model, we do not learn it.
    for i in range(len(weight)):
        weight[i]['isFixed'] = True
    if weights.class_prior != 0.0:
        weight[0]['initialValue'] = np.float64(weights.class_prior)
        w_off = 1
    else:
        w_off = 0
    for i in range(weights.n):
        weight[w_off + i]['initialValue'] = np.float64(weights.lf_accuracy[i])
    w_off += weights.n
    for optional_name in GenerativeModel.optional_names:
        for i in range(weights.n):
            if getattr(weights, optional_name)[i] != 0.0:
                weight[w_off]['initialValue'] = np.float64(getattr(weights, optional_name)[i])
                w_off += 1
    for dep_name in GenerativeModel.dep_names:
        for i in range(weights.n):
            for j in range(weights.n):
                if getattr(weights, dep_name)[i, j] != 0.0:
                    weight[w_off]['initialValue'] = np.float64(getattr(weights, dep_name)[i, j])
                    w_off += 1
    # Variables: index 0 is the latent class (cardinality 2); indices
    # 1..n are the labeling functions (cardinality 3: vote/abstain states).
    variable = np.zeros(1 + weights.n, Variable)
    variable[0]['isEvidence'] = 0
    variable[0]['initialValue'] = 0
    variable[0]["dataType"] = 0
    variable[0]["cardinality"] = 2
    for i in range(weights.n):
        variable[1 + i]['isEvidence'] = 0
        variable[1 + i]['initialValue'] = 0
        variable[1 + i]["dataType"] = 0
        variable[1 + i]["cardinality"] = 3
    # Factors and FactorToVar: count graph edges per factor type first.
    n_edges = 1 if weights.class_prior != 0.0 else 0
    n_edges += 2 * weights.n
    for optional_name in GenerativeModel.optional_names:
        for i in range(weights.n):
            if getattr(weights, optional_name)[i] != 0.0:
                if optional_name == 'lf_prior' or optional_name == 'lf_propensity':
                    n_edges += 1
                elif optional_name == 'lf_class_propensity':
                    n_edges += 2
                else:
                    raise ValueError()
    for dep_name in GenerativeModel.dep_names:
        for i in range(weights.n):
            for j in range(weights.n):
                if getattr(weights, dep_name)[i, j] != 0.0:
                    if dep_name == 'dep_similar' or dep_name == 'dep_exclusive':
                        n_edges += 2
                    elif dep_name == 'dep_fixing' or dep_name == 'dep_reinforcing':
                        n_edges += 3
                    else:
                        raise ValueError()
    factor = np.zeros(n_weights, Factor)
    ftv = np.zeros(n_edges, FactorToVar)
    if weights.class_prior != 0.0:
        factor[0]["factorFunction"] = FACTORS["DP_GEN_CLASS_PRIOR"]
        factor[0]["weightId"] = 0
        factor[0]["featureValue"] = 1
        factor[0]["arity"] = 1
        factor[0]["ftv_offset"] = 0
        ftv[0]["vid"] = 0
        f_off = 1
        ftv_off = 1
    else:
        f_off = 0
        ftv_off = 0
    # Accuracy factors connect the latent class (vid 0) with each LF.
    for i in range(weights.n):
        factor[f_off + i]["factorFunction"] = FACTORS["DP_GEN_LF_ACCURACY"]
        factor[f_off + i]["weightId"] = f_off + i
        factor[f_off + i]["featureValue"] = 1
        factor[f_off + i]["arity"] = 2
        factor[f_off + i]["ftv_offset"] = ftv_off + 2 * i
        ftv[ftv_off + 2 * i]["vid"] = 0
        ftv[ftv_off + 2 * i + 1]["vid"] = 1 + i
    f_off += weights.n
    ftv_off += 2 * weights.n
    for i in range(weights.n):
        if weights.lf_prior[i] != 0.0:
            factor[f_off]["factorFunction"] = FACTORS["DP_GEN_LF_PRIOR"]
            factor[f_off]["weightId"] = f_off
            factor[f_off]["featureValue"] = 1
            factor[f_off]["arity"] = 1
            factor[f_off]["ftv_offset"] = ftv_off
            ftv[ftv_off]["vid"] = 1 + i
            f_off += 1
            ftv_off += 1
    for i in range(weights.n):
        if weights.lf_propensity[i] != 0.0:
            factor[f_off]["factorFunction"] = FACTORS["DP_GEN_LF_PROPENSITY"]
            factor[f_off]["weightId"] = f_off
            factor[f_off]["featureValue"] = 1
            factor[f_off]["arity"] = 1
            factor[f_off]["ftv_offset"] = ftv_off
            ftv[ftv_off]["vid"] = 1 + i
            f_off += 1
            ftv_off += 1
    for i in range(weights.n):
        if weights.lf_class_propensity[i] != 0.0:
            factor[f_off]["factorFunction"] = FACTORS["DP_GEN_LF_CLASS_PROPENSITY"]
            factor[f_off]["weightId"] = f_off
            factor[f_off]["featureValue"] = 1
            factor[f_off]["arity"] = 2
            factor[f_off]["ftv_offset"] = ftv_off
            ftv[ftv_off]["vid"] = 0
            ftv[ftv_off + 1]["vid"] = 1 + i
            f_off += 1
            ftv_off += 2
    for dep_name in GenerativeModel.dep_names:
        for i in range(weights.n):
            for j in range(weights.n):
                if getattr(weights, dep_name)[i, j] != 0.0:
                    if dep_name == 'dep_similar' or dep_name == 'dep_exclusive':
                        factor[f_off]["factorFunction"] = FACTORS["DP_GEN_DEP_SIMILAR"] if dep_name == 'dep_similar' else FACTORS["DP_GEN_DEP_EXCLUSIVE"]
                        factor[f_off]["weightId"] = f_off
                        factor[f_off]["featureValue"] = 1
                        factor[f_off]["arity"] = 2
                        factor[f_off]["ftv_offset"] = ftv_off
                        ftv[ftv_off]["vid"] = 1 + i
                        ftv[ftv_off + 1]["vid"] = 1 + j
                        f_off += 1
                        ftv_off += 2
                    elif dep_name == 'dep_fixing' or dep_name == 'dep_reinforcing':
                        factor[f_off]["factorFunction"] = FACTORS["DP_GEN_DEP_FIXING"] if dep_name == 'dep_fixing' else FACTORS["DP_GEN_DEP_REINFORCING"]
                        factor[f_off]["weightId"] = f_off
                        factor[f_off]["featureValue"] = 1
                        factor[f_off]["arity"] = 3
                        factor[f_off]["ftv_offset"] = ftv_off
                        ftv[ftv_off]["vid"] = 0
                        ftv[ftv_off + 1]["vid"] = 1 + i
                        ftv[ftv_off + 2]["vid"] = 1 + j
                        f_off += 1
                        ftv_off += 3
                    else:
                        raise ValueError()
    # Domain mask (np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the documented replacement).
    domain_mask = np.zeros(1 + weights.n, bool)
    # Instantiates factor graph
    ns = NumbSkull(n_inference_epoch=100, quiet=True)
    ns.loadFactorGraph(weight, variable, factor, ftv, domain_mask, n_edges)
    fg = ns.getFactorGraph()
    # np.empty is the idiomatic spelling of the raw np.ndarray constructor.
    y = np.empty((m,), np.int64)
    L = sparse.lil_matrix((m, weights.n), dtype=np.int64)
    for i in range(m):
        # 10 burn-in sweeps between samples to decorrelate draws.
        fg.burnIn(10, False)
        y[i] = 1 if fg.var_value[0, 0] == 0 else -1
        for j in range(weights.n):
            if fg.var_value[0, 1 + j] != 2:
                L[i, j] = 1 if fg.var_value[0, 1 + j] == 0 else -1
    return y, L.tocsr()
| 36.784173 | 153 | 0.544886 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,111 | 0.108645 |
7e4e90eca537aa89aa63261527c4a76d2ff939ec | 93 | py | Python | onaws/__init__.py | bbhunter/onaws | ac5a5b7db3765258bb57cb6808f3ed04941434d2 | [
"MIT"
] | 1 | 2021-07-07T22:07:11.000Z | 2021-07-07T22:07:11.000Z | onaws/__init__.py | bbhunter/onaws | ac5a5b7db3765258bb57cb6808f3ed04941434d2 | [
"MIT"
] | null | null | null | onaws/__init__.py | bbhunter/onaws | ac5a5b7db3765258bb57cb6808f3ed04941434d2 | [
"MIT"
] | null | null | null | '''Simple library to check if a hostname belongs to AWS IP space.'''
__version__ = '0.0.12'
| 23.25 | 68 | 0.698925 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.817204 |
7e501995f92826d6c8f5de74e9e473e96ac09f37 | 1,352 | py | Python | tests/interactive/nb-test.py | andrey-puzyr/docker-dotnet-spark | f520af3feb25f7721d8547ff1f73a074b3043944 | [
"Apache-2.0"
] | 18 | 2020-05-26T22:54:59.000Z | 2022-02-25T18:34:54.000Z | tests/interactive/nb-test.py | andrey-puzyr/docker-dotnet-spark | f520af3feb25f7721d8547ff1f73a074b3043944 | [
"Apache-2.0"
] | 5 | 2020-05-27T15:00:10.000Z | 2021-05-28T19:47:35.000Z | tests/interactive/nb-test.py | indy-3rdman/docker-dotnet-spark | ccc5d8ed57e27895d972ee179a6f0e83a57e48f3 | [
"Apache-2.0"
] | 6 | 2020-05-26T22:40:07.000Z | 2022-03-15T10:09:16.000Z | import sys
import argparse
import re
import subprocess
import nbformat
from pathlib import Path
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors import CellExecutionError
# Execute a .NET-kernel Jupyter notebook headlessly and write the executed
# copy next to it with an "-out" suffix; exits 999 if the path is missing.
parser = argparse.ArgumentParser()
parser.add_argument("path", help="the path to the notebook (.ipynb) file")
args = parser.parse_args()

notebook_filename_path = Path(args.path)
if not notebook_filename_path.is_file():
    sys.stderr.write('file does not exist\n')
    sys.exit(999)

notebook_filename = args.path
# Fix: the pattern must be a raw string — '\.' in a normal literal is an
# invalid escape sequence (DeprecationWarning today, an error in future
# Python releases).
notebook_filename_out = re.sub(r'\.ipynb$', '-out.ipynb', notebook_filename)
print(f"in: {notebook_filename}, out: {notebook_filename_out}")

# Start the debug Spark session in the background; it is fed a newline on
# stdin at the very end to let it shut down.
# NOTE(review): shell=True with a single command string is intentional here.
p = subprocess.Popen(["cd dotnet.spark/examples; pwd; start-spark-debug.sh"],
                     stdin=subprocess.PIPE, shell=True)

with open(notebook_filename) as f:
    nb = nbformat.read(f, as_version=4)

ep = ExecutePreprocessor(timeout=300, kernel_name='.net-csharp')

try:
    out = ep.preprocess(nb, {'metadata': {'path': 'dotnet.spark/examples'}})
except CellExecutionError:
    out = None
    msg = 'Error executing the notebook "%s".\n\n' % notebook_filename
    msg += 'See notebook "%s" for the traceback.' % notebook_filename_out
    print(msg)
    raise
finally:
    # Always write the (possibly partially executed) notebook out so the
    # traceback can be inspected even on failure.
    with open(notebook_filename_out, mode='w', encoding='utf-8') as f:
        nbformat.write(nb, f)

p.communicate(input=b'\n')
7e509dea3ed6c92f5ab9b5e7fe011afa000287ec | 10,261 | py | Python | scripts/ersst_everything.py | khandokershanto/climate_data_analysis_with_python | 214b96c42732704880b8714be1c6ab60348c1adf | [
"MIT"
] | null | null | null | scripts/ersst_everything.py | khandokershanto/climate_data_analysis_with_python | 214b96c42732704880b8714be1c6ab60348c1adf | [
"MIT"
] | null | null | null | scripts/ersst_everything.py | khandokershanto/climate_data_analysis_with_python | 214b96c42732704880b8714be1c6ab60348c1adf | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
import geocat.viz.util as gvutil
# Load the NOAA ERSST monthly-mean SST file and build monthly anomalies.
path = r'H:\Python project 2021\climate_data_analysis_with_python\data\sst.mnmean.nc'
ds= xr.open_dataset(path)
# Restrict to the 1920-2020 analysis window.
sst = ds.sst.sel(time=slice('1920-01-01','2020-12-01'))
# Monthly climatology over the 1971-2000 reference period.
clm = ds.sst.sel(time=slice('1971-01-01','2000-12-01')).groupby('time.month').mean(dim='time')
# Anomaly = monthly value minus the matching month's climatology.
anm = (sst.groupby('time.month') - clm)
# Integer time axis used for bar plots below.
time = anm.time
itime=np.arange(time.size)
def wgt_areaave(indat, latS, latN, lonW, lonE):
    """Cosine-latitude-weighted average of *indat* over a lat/lon box.

    Parameters
    ----------
    indat : xarray DataArray with ``lat`` and ``lon`` coordinates.
    latS, latN : southern / northern box edges (degrees north).
    lonW, lonE : western / eastern box edges (degrees; may be negative,
        i.e. degrees west).

    Returns
    -------
    DataArray averaged over the box with cos(lat) weights, skipping NaNs.
    """
    lat = indat.lat
    lon = indat.lon
    # If the request uses negative (degrees-west) longitudes but the data
    # sit on a 0..360 grid, rewrap the longitude coordinate to -180..180
    # so the box selection below works.
    if ((lonW < 0) or (lonE < 0)) and (lon.values.min() > -1):
        anm = indat.assign_coords(lon=((lon + 180) % 360 - 180))
        lon = (lon + 180) % 360 - 180
    else:
        anm = indat
    iplat = lat.where((lat >= latS) & (lat <= latN), drop=True)
    iplon = lon.where((lon >= lonW) & (lon <= lonE), drop=True)
    # Grid-cell area shrinks with cos(latitude).
    wgt = np.cos(np.deg2rad(lat))
    odat = anm.sel(lat=iplat, lon=iplon).weighted(wgt).mean(("lon", "lat"), skipna=True)
    return odat
# Bay of Bengal (BoB, 5-25N / 80-100E) SST anomaly bar plot with a
# 7-month running mean overlaid.
bob_anm = wgt_areaave(anm,5,25,80,100)
bob_ranm = bob_anm.rolling(time=7, center=True).mean('time')
##
# Warm anomalies orange (C1), cold anomalies blue (C0).
colors = ['C1' if (value > 0) else 'C0' for value in bob_anm]
fig = plt.figure(figsize=[8,5])
ax1 = fig.add_subplot(111)
# Bar chart of the raw monthly anomalies plus the smoothed line.
ax1.bar(itime, bob_anm, align='edge', edgecolor="none", color=colors, width=1.0)
ax1.plot(itime, bob_ranm, color="black", linewidth=1.5)
ax1.legend(['7-month running mean'],fontsize=12)
# geocat.viz.util convenience function: minor and major tick lines.
gvutil.add_major_minor_ticks(ax1,
                             x_minor_per_major=4,
                             y_minor_per_major=5,
                             labelsize=12)
# X ticks every 20 years (12*dyr months), located by matching Timestamps.
ystr = 1920
yend = 2020
dyr = 20
ist, = np.where(time == pd.Timestamp(year=ystr, month=1, day=1) )
iet, = np.where(time == pd.Timestamp(year=yend, month=1, day=1) )
gvutil.set_axes_limits_and_ticks(ax1,
                                 ylim=(-1.5, 1),
                                 yticks=np.linspace(-1.5, 1, 6),
                                 yticklabels=np.linspace(-1.5, 1, 6),
                                 xlim=(itime[0], itime[-1]),
                                 xticks=itime[ist[0]:iet[0]+1:12*dyr],
                                 xticklabels=np.arange(ystr, yend+1, dyr))
# Titles and axis labels.
gvutil.set_titles_and_labels(ax1,
                             maintitle="SSTA in BoB (ERSST)",
                             ylabel='Anomalies',
                             xlabel= 'Year',
                             maintitlefontsize=18,
                             labelfontsize=15)
plt.tight_layout()
plt.savefig("bob_anomalies.png",dpi = 300)
########## BoB SST with respect to ENSO and IOD (ERSST)
# Nino3.4 index (5S-5N, 170W-120W) and Dipole Mode Index plotted against
# the smoothed BoB anomaly.
nino = wgt_areaave(anm,-5,5,-170,-120)
nino = nino.rolling(time=7, center=True).mean('time')
# IOD western box: 50E-70E, 10S-10N.
iod_west = wgt_areaave(anm,-10,10,50,70)
# IOD eastern box: 90E-110E, 10S-0 (equator).
iod_east = wgt_areaave(anm,-10,0,90,110)
# DMI = west-box anomaly minus east-box anomaly, then 7-month smoothing.
dmi = iod_west - iod_east
dmi = dmi.rolling(time=7, center=True).mean('time')
# Two stacked panels: BoB vs Nino3.4 (top) and BoB vs DMI (bottom).
# NOTE(review): the title string below misspells "respect" ("repect") —
# left untouched here since it is runtime output.
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
ax1.set_title('BoB anomaly with repect to ENSO')
ax1.plot(time, bob_ranm, '-', linewidth=1)
ax1.plot(time, nino, '-', linewidth=1)
ax1.tick_params(length = 7,right=True,labelsize=12)
ax1.legend(['BoB anomaly','Nino3.4 Index'],fontsize=12,frameon=False)
ax1.set_ylabel('SSTA (°C)',fontsize=12)
ax2.set_title('BoB anomaly with respect to IOD')
ax2.plot(time, bob_ranm, '-', linewidth=1)
ax2.plot(time, dmi, '-', linewidth=1)
ax2.tick_params(length = 7,right=True,labelsize=12)
ax2.legend(['BoB anomaly','Dipole Mode Index'],fontsize=12,frameon=False)
ax2.set_ylabel('SSTA (°C)',fontsize=12)
# Render and save the figure.
plt.draw()
plt.tight_layout()
plt.savefig("nino-bob-dmi.png",dpi = 300)
####################### (Ploting Nino 3.4 Index)
# Raw (top panel) and standardized (bottom panel) Nino3.4 anomaly bars,
# each with a 7-month running mean.
nino = wgt_areaave(anm,-5,5,-170,-120)
rnino = nino.rolling(time=7, center=True).mean('time')
# Standardize by the full-period standard deviation.
ninoSD=nino/nino.std(dim='time')
rninoSD=ninoSD.rolling(time=7, center=True).mean('time')
# -- -- -- -- -- -- -- -- - -- - -- --- -- - -- - -- - - -- - -
# -- figure plot
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
# Positive anomalies orange (C1), negative blue (C0); the same colors are
# reused for both panels.
colors = ['C1' if (value > 0) else 'C0' for value in ninoSD]
# Bars plus smoothed overlay for each panel.
ax1.bar(itime, nino, align='edge', edgecolor="none", color=colors, width=1.0)
ax1.plot(itime, rnino, color="black", linewidth=1.5)
ax1.legend(['7-month running mean'],fontsize=12,frameon=False)
ax2.bar(itime, ninoSD, align='edge', edgecolor="none", color=colors, width=1.0)
ax2.plot(itime, rninoSD, color="black", linewidth=1.5)
# X ticks every 20 years, same scheme as the BoB figure.
ystr = 1920
yend = 2020
dyr = 20
ist, = np.where(time == pd.Timestamp(year=ystr, month=1, day=1) )
iet, = np.where(time == pd.Timestamp(year=yend, month=1, day=1) )
gvutil.set_axes_limits_and_ticks(ax1,
                                 ylim=(-3, 3.5),
                                 yticks=np.linspace(-3, 3, 7),
                                 yticklabels=np.linspace(-3, 3, 7),
                                 xlim=(itime[0], itime[-1]),
                                 xticks=itime[ist[0]:iet[0]+1:12*dyr],
                                 xticklabels=np.arange(ystr, yend+1, dyr) )
gvutil.set_axes_limits_and_ticks(ax2,
                                 ylim=(-3, 3.5),
                                 yticks=np.linspace(-3, 3, 7),
                                 yticklabels=np.linspace(-3, 3, 7),
                                 xlim=(itime[0], itime[-1]),
                                 xticks=itime[ist[0]:iet[0]+1:12*dyr],
                                 xticklabels=np.arange(ystr, yend+1, dyr) )
# Minor/major tick lines for both panels.
gvutil.add_major_minor_ticks(ax1,
                             x_minor_per_major=4,
                             y_minor_per_major=5,
                             labelsize=12)
gvutil.add_major_minor_ticks(ax2,
                             x_minor_per_major=4,
                             y_minor_per_major=5,
                             labelsize=12)
# Titles and labels per panel.
gvutil.set_titles_and_labels(ax1,
                             maintitle="SSTA in Nino3.4 region",
                             ylabel='Anomalies',
                             maintitlefontsize=18,
                             labelfontsize=15)
gvutil.set_titles_and_labels(ax2,
                             maintitle="Nino3.4 Index",
                             ylabel='Standardized',
                             xlabel='Year',
                             maintitlefontsize=18,
                             labelfontsize=15)
plt.draw()
plt.tight_layout()
plt.savefig("nino3.4_ERSST.png",dpi=300)
############### (Ploting DMI Index)
# Dipole Mode Index = IOD west box (50E-70E, 10S-10N) minus east box
# (90E-110E, 10S-0), shown as bars with a 7-month running mean.
iod_west = wgt_areaave(anm,-10,10,50,70)
# IOD east: 90E to 110E and 10S to the equator.
iod_east = wgt_areaave(anm,-10,0,90,110)
dmi = iod_west - iod_east
rdmi = dmi.rolling(time=7, center=True).mean('time')
# Positive DMI orange (C1), negative blue (C0).
colors = ['C1' if (value > 0) else 'C0' for value in dmi]
fig = plt.figure(figsize=[8,5])
ax1 = fig.add_subplot(111)
# Bars plus smoothed overlay.
ax1.bar(itime, dmi, align='edge', edgecolor="none", color=colors, width=1.0)
ax1.plot(itime, rdmi, color="black", linewidth=1.5)
ax1.legend(['7-month running mean'],fontsize=12,frameon=False)
# Minor/major tick lines.
gvutil.add_major_minor_ticks(ax1,
                             x_minor_per_major=4,
                             y_minor_per_major=5,
                             labelsize=12)
# X ticks every 20 years, as in the other figures.
ystr = 1920
yend = 2020
dyr = 20
ist, = np.where(time == pd.Timestamp(year=ystr, month=1, day=1) )
iet, = np.where(time == pd.Timestamp(year=yend, month=1, day=1) )
gvutil.set_axes_limits_and_ticks(ax1,
                                 ylim=(-1.5, 1.90),
                                 yticks=np.linspace(-1, 1.5, 6),
                                 yticklabels=np.linspace(-1, 1.5, 6),
                                 xlim=(itime[0], itime[-1]),
                                 xticks=itime[ist[0]:iet[0]+1:12*dyr],
                                 xticklabels=np.arange(ystr, yend+1, dyr))
# Titles and labels.
gvutil.set_titles_and_labels(ax1,
                             maintitle=" Dipole Mode Index",
                             ylabel='Anomalies',
                             xlabel= 'Year',
                             maintitlefontsize=18,
                             labelfontsize=15)
plt.tight_layout()
plt.savefig("dmi_ersst.png",dpi = 300)
### (Global vs BoB time Series -ERSST v5)
# Globally-averaged vs BoB-averaged anomalies, both smoothed with a
# 12-month running mean.
# NOTE(review): this global mean is a plain lon/lat average with no
# cos(lat) weighting (unlike wgt_areaave), so high-latitude cells are
# over-represented — confirm this is intended.
glob_anom = anm.mean(('lon','lat'),skipna = True)
glob_anom_ra = glob_anom.rolling(time=12, center=True).mean('time')
bob_anm = wgt_areaave(anm,5,25,80,100)
bob_anm_ra = bob_anm.rolling(time=12, center=True).mean('time')
# NOTE(review): the correlation below is computed but its result is
# discarded; the plot annotation hard-codes 0.89 instead.
xr.corr(glob_anom_ra,bob_anm_ra)
# plot
fig = plt.figure(figsize=[8,5])
ax1 = fig.add_subplot(111)
ax1.set_title('Global SSTA & BOB SSTA with 1 year moving average (ERSST v5)')
ax1.plot(time, glob_anom_ra, '-', linewidth=1)
ax1.plot(time, bob_anm_ra, '-', linewidth=1)
ax1.tick_params(length = 7,right=True,labelsize=12)
ax1.legend(['Globally averaged','BoB averaged'],fontsize=12,frameon=False)
ax1.set_ylabel('SSTA (°C)',fontsize=12)
ax1.set_xlabel('Year',fontsize=12)
ax1.text(pd.to_datetime('1975-01-01'),-0.8,'Correlation Coefficient = 0.89',fontsize=12)
#ax1.axis(xmin=pd.Timestamp("1982-01"), xmax=pd.Timestamp("2020-12"))
# Render and save the figure.
plt.draw()
plt.tight_layout()
plt.savefig("bobvsgloobalanom_ersst.png",dpi = 300)
| 38.720755 | 101 | 0.577527 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,616 | 0.254574 |
7e521ccb06bf6f928662759cda6d62177d24e646 | 9,335 | py | Python | scripts/Model Training.py | Nickroll/League-Feature-Importance | 884634aafc480b0a07b31a0fbfc56d7b6aabcdff | [
"MIT"
] | null | null | null | scripts/Model Training.py | Nickroll/League-Feature-Importance | 884634aafc480b0a07b31a0fbfc56d7b6aabcdff | [
"MIT"
] | null | null | null | scripts/Model Training.py | Nickroll/League-Feature-Importance | 884634aafc480b0a07b31a0fbfc56d7b6aabcdff | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier, VotingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
import pickle
import math
# Instantiate one default classifier of each family; GridSearchCV below
# supplies the hyperparameters.
rf_clf = RandomForestClassifier()
ada_clf = AdaBoostClassifier()
bag_clf = BaggingClassifier()
grad_clf = GradientBoostingClassifier()
dt_clf = DecisionTreeClassifier()
et_clf = ExtraTreesClassifier()
xgb_clf = XGBClassifier()
# Hyperparameter search spaces for GridSearchCV.  These are not fully
# optimized: values previously selected by grid search are included, and
# some winners sit at the edge of their range, so the ranges should be
# widened in a future pass.
rand_dict = {'n_estimators':[300, 400, 500, 750, 1000, 1500],
             'max_features': ['auto', 'sqrt', 'log2', 0.2, 0.9],
             'min_samples_split': [3, 5, 7, 10, 15, 20],
             'criterion': ['gini', 'entropy'],
             'oob_score': [True, False]}
dt_dict = {'min_samples_split': [3, 5, 7, 10, 15, 20],
           'max_features': ['auto', 'sqrt', 'log2', 0.2, 0.9],
           'criterion': ['gini', 'entropy'],
           'splitter': ['best', 'random']}
ada_dict = {'n_estimators':[20, 40, 50, 70, 100],
            'learning_rate': [0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1],
            'algorithm': ['SAMME', 'SAMME.R']}
bag_dict = {'n_estimators': [5, 7, 10, 15, 20],
            'max_samples': [0.2, 0.5, 0.9],
            'max_features': [0.2, 0.5, 0.9, 1],
            'oob_score': [True, False]}
grad_dict = {'learning_rate': [0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1],
             'n_estimators': [25, 50, 100, 300, 500, 1000],
             'loss' : ['deviance', 'exponential'],
             'criterion': ['mse', 'mae', 'friedman_mse'],
             'max_features': ['auto', 'sqrt', 'log2', 0.2, 0.9]}
# NOTE(review): 0.025 appears twice in the xgb learning_rate list — the
# second entry in the other grids is 0.0025, so this looks like a typo.
xgb_dict = {'learning_rate': [0.001, 0.025, 0.005, 0.01, 0.025, 0.05, 0.1, 0.5],
            'max_depth' : [6, 7, 8, 10, 15, 20],
            'n_estimators': [750, 900, 1000, 1500, 2000],
            'booster': ['gbtree', 'dart', 'gblinear']}
et_dict = {'n_estimators': [5, 7, 10, 20, 50],
           'criterion': ['gini', 'entropy'],
           'max_features': ['auto', 'sqrt', 'log2', 0.2, 0.9],
           'min_samples_split': [3, 5, 7, 10, 15, 20]}
def train_clf(clf, features, labels, arg_dict, *name_label):
    """Grid-search *clf* over *arg_dict* and score the winning estimator.

    :param clf: the (unfitted) classifier to tune.
    :param features: training features passed to the grid search.
    :param labels: training labels.
    :param arg_dict: hyperparameter grid for GridSearchCV.
    :param name_label: optional tag(s) used only in the printed report.
    :returns: tuple of (best estimator, array of 10-fold roc_auc scores).
    """
    # Exhaustive 5-fold grid search, scored by ROC AUC, using all cores.
    grid = GridSearchCV(clf, param_grid=arg_dict,
                        scoring='roc_auc',
                        cv=5, n_jobs=-1)
    grid.fit(features, labels)
    # Re-score the winner with 10-fold CV for a less noisy estimate.
    winner = grid.best_estimator_
    cv_scores = cross_val_score(winner, features, labels,
                                cv=10, scoring='roc_auc', n_jobs=-1)
    # Report to stdout so the best options can be captured in an outfile.
    print(f'DATA FOR {name_label}')
    print('')
    print(f'Best CLF: {winner}')
    print('')
    return winner, cv_scores
def find_ci(list_of_scores, list_of_models):
    """Summarize CV score arrays as mean plus a 95% confidence interval.

    :param list_of_scores: list of 1-D numpy score arrays (one per model).
    :param list_of_models: model names, used as the DataFrame index.
    :returns: DataFrame indexed by model with columns Mean, Upper, Lower
        and ci (the interval half-width).
    """
    means = [scores.mean() for scores in list_of_scores]
    # 2.262 is the two-tailed t critical value used here; it matches
    # 95% confidence with 9 degrees of freedom (i.e. the 10 CV folds
    # produced upstream).
    half_widths = [2.262 * (scores.std() / math.sqrt(scores.shape[0]))
                   for scores in list_of_scores]
    # Assemble the summary frame.
    summary = pd.DataFrame(index=list_of_models,
                           columns=['Mean', 'Upper', 'Lower', 'ci'])
    summary['Mean'] = means
    summary['ci'] = half_widths
    summary['Upper'] = summary['Mean'] + summary['ci']
    summary['Lower'] = summary['Mean'] - summary['ci']
    return summary
# Read the two feature sets; each CSV has a 'result' column with the
# match-outcome label.
per_min = pd.read_csv('../Data/per_min_teamdf.csv', index_col=0)
# drop(columns=...) replaces the positional-axis form drop('result', 1),
# which was deprecated and then removed in pandas 2.0.
per_feats = per_min.drop(columns='result')
per_labels = per_min['result'].astype('int')
# Hold out 30% for prediction.  random_state=859 is reused when predicting
# so the identical split can be reconstructed there.
per_f_train, per_f_test, per_l_train, per_l_test = train_test_split(
    per_feats, per_labels, test_size=0.3, random_state=859)
# Same treatment for the dropped-feature data set.
dropped = pd.read_csv('../Data/dropped_teamdf.csv', index_col=0)
drop_feats = dropped.drop(columns='result')
drop_labels = dropped['result'].astype('int')
drop_f_train, drop_f_test, drop_l_train, drop_l_test = train_test_split(
    drop_feats, drop_labels, test_size=0.3, random_state=859)
# Tune each classifier on each data set with train_clf (grid search + 10-fold
# CV scoring).  The 'PER'/'DROP' tags only label the printed reports.
# TODO: this could be a loop over (clf, grid) pairs instead of 14 calls.
per_ada, per_ada_score = train_clf(ada_clf, per_f_train, per_l_train,
                                   ada_dict, 'PER')
per_bag, per_bag_score = train_clf(bag_clf, per_f_train, per_l_train,
                                   bag_dict, 'PER')
per_grad, per_grad_score = train_clf(grad_clf, per_f_train, per_l_train,
                                     grad_dict, 'PER')
per_dt, per_dt_score = train_clf(dt_clf, per_f_train, per_l_train,
                                 dt_dict, 'PER')
per_et, per_et_score = train_clf(et_clf, per_f_train, per_l_train,
                                 et_dict, 'PER')
per_rf, per_rf_score = train_clf(rf_clf, per_f_train, per_l_train,
                                 rand_dict, 'PER')
per_xgb, per_xgb_score = train_clf(xgb_clf, per_f_train, per_l_train,
                                   xgb_dict, 'PER')
# Same tuning pass for the dropped-feature data set.
drop_rf, drop_rf_score = train_clf(rf_clf, drop_f_train, drop_l_train,
                                   rand_dict, 'DROP')
drop_ada, drop_ada_score = train_clf(ada_clf, drop_f_train, drop_l_train,
                                     ada_dict, 'DROP')
drop_bag, drop_bag_score = train_clf(bag_clf, drop_f_train, drop_l_train,
                                     bag_dict, 'DROP')
drop_grad, drop_grad_score = train_clf(grad_clf, drop_f_train, drop_l_train,
                                       grad_dict, 'DROP')
drop_dt, drop_dt_score = train_clf(dt_clf, drop_f_train, drop_l_train,
                                   dt_dict, 'DROP')
drop_et, drop_et_score = train_clf(et_clf, drop_f_train, drop_l_train,
                                   et_dict, 'DROP')
drop_xgb, drop_xgb_score = train_clf(xgb_clf, drop_f_train, drop_l_train,
                                     xgb_dict, 'DROP')
# Summarize the per-model CV scores (mean + 95% CI) with find_ci and print
# the resulting DataFrame for each data set.
score_list = [per_rf_score, per_dt_score, per_ada_score, per_bag_score,
              per_grad_score, per_xgb_score, per_et_score]
score_df = find_ci(score_list,
                   ['rf', 'dt', 'ada','bag', 'grad', 'xgb', 'et'])
print('Score DF: {}'.format(score_df))
print('')
# Same summary for the dropped-feature data set (score_df is reused).
score_list = [drop_rf_score, drop_dt_score, drop_ada_score, drop_bag_score,
              drop_grad_score, drop_xgb_score, drop_et_score]
score_df = find_ci(score_list,
                   ['rf', 'dt', 'ada', 'bag', 'grad', 'xgb', 'et'])
print('Score DF: {}'.format(score_df))
print('')
# Combine all tuned estimators into soft-voting ensembles.  Soft voting is
# used because the sub-classifiers were already tuned to the data by
# GridSearchCV, so their predicted probabilities are trusted.
softVoteC_drop = VotingClassifier(estimators=[('rfc', drop_rf), ('dt', drop_dt),
                                              ('ada', drop_ada), ('bag', drop_bag),
                                              ('grad', drop_grad), ('xgb', drop_xgb),
                                              ('et', drop_et)], voting='soft', n_jobs=-1)
# Same ensemble for the per-minute data set.
softVoteC_per = VotingClassifier(estimators=[('rfc', per_rf), ('dt', per_dt),
                                             ('ada', per_ada), ('bag', per_bag),
                                             ('grad', per_grad), ('xgb', per_xgb),
                                             ('et', per_et)], voting='soft', n_jobs=-1)
# Fit both voting ensembles on their training splits.
softVoteC_drop = softVoteC_drop.fit(drop_f_train, drop_l_train)
softVoteC_per = softVoteC_per.fit(per_f_train, per_l_train)
# Persist the fitted ensembles to disk for later prediction.
with open('Soft_voteC_drop.pkl', 'wb') as pf:
    pickle.dump(softVoteC_drop, pf)
with open('Soft_voteC_per.pkl', 'wb') as pf:
    pickle.dump(softVoteC_per, pf)
print('Done!!!!')
| 39.555085 | 83 | 0.653455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,573 | 0.382753 |
7e5256469f13e0289fc5b810bd0173539ecf3b59 | 9,650 | py | Python | src/SimulationControl/SpheralConservation.py | jmikeowen/Spheral | 3e1082a7aefd6b328bd3ae24ca1a477108cfc3c4 | [
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 22 | 2018-07-31T21:38:22.000Z | 2020-06-29T08:58:33.000Z | src/SimulationControl/SpheralConservation.py | markguozhiming/spheral | bbb982102e61edb8a1d00cf780bfa571835e1b61 | [
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 41 | 2020-09-28T23:14:27.000Z | 2022-03-28T17:01:33.000Z | src/SimulationControl/SpheralConservation.py | markguozhiming/spheral | bbb982102e61edb8a1d00cf780bfa571835e1b61 | [
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 7 | 2019-12-01T07:00:06.000Z | 2020-09-15T21:12:39.000Z | # SpheralConservation
import mpi
from SpheralCompiledPackages import *
#-------------------------------------------------------------------------------
# Conservation
#-------------------------------------------------------------------------------
class SpheralConservation:
#---------------------------------------------------------------------------
# Constructor
#---------------------------------------------------------------------------
def __init__(self, dataBase,
packages = []):
self.restart = RestartableObject(self)
self.dataBase = dataBase
self.packages = packages
self.cycleHistory = []
self.timeHistory = []
self.massHistory = []
self.pmomHistory = []
self.amomHistory = []
self.KEHistory = []
self.TEHistory = []
self.EEHistory = []
self.EHistory = []
self.Vector = eval("Vector%id" % dataBase.nDim)
self.origin = self.Vector()
# Start the conservation history
self.updateHistory()
return
#---------------------------------------------------------------------------
# Add the current state to the history variables.
#---------------------------------------------------------------------------
def updateHistory(self, cycle=0, time=0.0):
self.cycleHistory.append(cycle)
self.timeHistory.append(time)
self.massHistory.append(self.findTotalMass())
self.pmomHistory.append(self.findTotalPmom())
self.amomHistory.append(self.findTotalAmom())
self.KEHistory.append(self.findTotalKE())
self.TEHistory.append(self.findTotalTE())
self.EEHistory.append(self.findTotalPackageEnergy())
self.EHistory.append(self.KEHistory[-1] +
self.TEHistory[-1] +
self.EEHistory[-1])
return
#---------------------------------------------------------------------------
# Determine the current total mass.
#---------------------------------------------------------------------------
def findTotalMass(self):
total = 0.0
massFL = self.dataBase.globalMass
for mass in massFL:
massValues = mass.internalValues()
total += sum(list(massValues) + [0.0])
return mpi.allreduce(total, mpi.SUM)
#---------------------------------------------------------------------------
# Determine the current total linear momentum.
#---------------------------------------------------------------------------
def findTotalPmom(self):
total = self.Vector()
massFL = self.dataBase.globalMass
velocityFL = self.dataBase.globalVelocity
for (mass, velocity) in zip(massFL, velocityFL):
massValues = mass.internalValues()
velocityValues = velocity.internalValues()
for mi, vi in zip(massValues, velocityValues):
total += mi*vi
# Tally momentum from packages.
for package in self.packages:
packageValue = package.extraMomentum()
total += packageValue
return mpi.allreduce(total, mpi.SUM)
#---------------------------------------------------------------------------
# Determine the current total angular momentum, with reference to the
# stored origin.
#---------------------------------------------------------------------------
    def findTotalAmom(self):
        """Sum the angular momentum of all nodes about self.origin.

        The accumulator is always a 3-D vector: in 2D only the z component
        is populated; in 3D the full cross product is used.  In 1D neither
        branch fires, so the result is the zero vector.
        NOTE(review): Vector3d presumably comes from the
        SpheralCompiledPackages star import — confirm.
        """
        total = Vector3d()
        massFL = self.dataBase.globalMass
        positionFL = self.dataBase.globalPosition
        velocityFL = self.dataBase.globalVelocity
        for (mass, position, velocity) in zip(massFL, positionFL, velocityFL):
            massValues = mass.internalValues()
            positionValues = position.internalValues()
            velocityValues = velocity.internalValues()
            for (mi, ri, vi) in zip(massValues, positionValues, velocityValues):
                # Find the displacement of this node from the origin.
                dr = ri - self.origin
                # Accumulate this node's angular momentum m * (dr x v).
                if self.dataBase.nDim == 2:
                    total.z += mi*(dr.x*vi.y - dr.y*vi.x)
                elif self.dataBase.nDim == 3:
                    total += mi * dr.cross(vi)
        return mpi.allreduce(total, mpi.SUM)
#---------------------------------------------------------------------------
# Determine the current total kinetic energy.
#---------------------------------------------------------------------------
def findTotalKE(self):
total = 0.0
massFL = self.dataBase.globalMass
velocityFL = self.dataBase.globalVelocity
for (mass, velocity) in zip(massFL, velocityFL):
massValues = mass.internalValues()
velocityValues = velocity.internalValues()
total += sum([mi*vi.magnitude2() for (mi, vi) in zip(massValues, velocityValues)] + [0.0])
return 0.5*mpi.allreduce(total, mpi.SUM)
#---------------------------------------------------------------------------
# Determine the current total thermal energy.
#---------------------------------------------------------------------------
def findTotalTE(self):
total = 0.0
massFL = self.dataBase.fluidMass
epsFL = self.dataBase.fluidSpecificThermalEnergy
for (mass, eps) in zip(massFL, epsFL):
massValues = mass.internalValues()
epsValues = eps.internalValues()
total += sum([mi*epsi for (mi, epsi) in zip(list(mass.internalValues()),
list(eps.internalValues()))] + [0.0])
return mpi.allreduce(total, mpi.SUM)
#---------------------------------------------------------------------------
# Determine the current total package (or "external") energy.
#---------------------------------------------------------------------------
def findTotalPackageEnergy(self):
total = 0.0
for package in self.packages:
total += package.extraEnergy()
return total # Note we assume this has already been parallel summed.
#---------------------------------------------------------------------------
# Write the history to the given file.
#---------------------------------------------------------------------------
def writeHistory(self, filename):
f = open(filename, 'w')
labels = ['"cycle"', '"time"',
'"Mass"',
'"Lin Mom Mag"', '"Lin Mom X"', '"Lin Mom Y"', '"Lin Mom Z"',
'"Ang Mom Mag"', '"Ang Mom X"', '"Ang Mom Y"', '"Ang Mom Z"',
'"Total E"', '"Kin E"', '"Therm E"', '"Pkg E"']
f.write('#')
for lab in labels:
f.write('%14s ' % lab)
f.write('\n')
for i in xrange(len(self.cycleHistory)):
for var in [self.cycleHistory[i], self.timeHistory[i],
self.massHistory[i],
self.pmomHistory[i].magnitude(),
self.pmomHistory[i].x,
self.pmomHistory[i].y,
self.pmomHistory[i].z,
self.amomHistory[i].magnitude(),
self.amomHistory[i].x,
self.amomHistory[i].y,
self.amomHistory[i].z,
self.EHistory[i],
self.KEHistory[i],
self.TEHistory[i],
self.EEHistory[i]]:
f.write('%14.8g ' % var)
f.write('\n')
f.close()
return
#---------------------------------------------------------------------------
# label
#---------------------------------------------------------------------------
    def label(self):
        """Return the tag identifying this object in restart files."""
        return "SpheralConservation"
#---------------------------------------------------------------------------
# dumpState
#---------------------------------------------------------------------------
def dumpState(self, file, path):
file.writeObject(self.cycleHistory, path + "/cycleHistory")
file.writeObject(self.timeHistory, path + "/timeHistory")
file.writeObject(self.massHistory, path + "/massHistory")
file.writeObject(self.pmomHistory, path + "/pmomHistory")
file.writeObject(self.amomHistory, path + "/amomHistory")
file.writeObject(self.KEHistory, path + "/KEHistory")
file.writeObject(self.TEHistory, path + "/TEHistory")
file.writeObject(self.EEHistory, path + "/EEHistory")
file.writeObject(self.EHistory, path + "/EHistory")
file.writeObject(self.origin, path + "/origin")
#---------------------------------------------------------------------------
# restoreState
#---------------------------------------------------------------------------
def restoreState(self, file, path):
self.cycleHistory = file.readObject(path + "/cycleHistory")
self.timeHistory = file.readObject(path + "/timeHistory")
self.massHistory = file.readObject(path + "/massHistory")
self.pmomHistory = file.readObject(path + "/pmomHistory")
self.amomHistory = file.readObject(path + "/amomHistory")
self.KEHistory = file.readObject(path + "/KEHistory")
self.TEHistory = file.readObject(path + "/TEHistory")
self.EEHistory = file.readObject(path + "/EEHistory")
self.EHistory = file.readObject(path + "/EHistory")
self.origin = file.readObject(path + "/origin")
| 44.266055 | 102 | 0.450466 | 9,400 | 0.974093 | 0 | 0 | 0 | 0 | 0 | 0 | 3,165 | 0.327979 |