blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8d13198a10bafeba6b94dad3cf02953c983de332 | 67325192c1e528a39d457f11e61b480d68826708 | /mods/mcpython/Item/gold_block.py | 248d5e0998a17f7d438e81b093ded15dc48a62bd | [
"MIT"
] | permissive | vashistaarav1611/mcpython-a-minecraft-clone-in-python | 5851b377b54fd2b28c106112c7b18f397b71ab50 | c16cd66f319efdeec4130e1a43f5a857caf1ea13 | refs/heads/master | 2023-02-01T22:48:51.787106 | 2020-12-21T15:02:25 | 2020-12-21T15:02:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | from .Item import *
class GoldBlock(Item):
    """Registry entry describing the gold block item."""

    # Canonical item identifier and texture location.
    _NAME = "minecraft:gold_block"
    _TEXTURE = "./assets/textures/items/gold_block.png"

    def getName(self):
        """Return the namespaced item identifier."""
        return self._NAME

    def getTexturFile(self):
        """Return the relative path of the item's texture file."""
        return self._TEXTURE
# Make the item known to the game by registering its class with the item
# handler (`handler` is presumably provided by the `from .Item import *`
# star import above -- verify).
handler.register(GoldBlock)
| [
"baulukas1301@googlemail.com"
] | baulukas1301@googlemail.com |
844b1e4a210ae535d99b8c29e268f0cc6c5624c4 | 145e38280b64a03c68f8c6bd293156cd06f8d149 | /time.py | 1bb03bba1dc7085ef3466e43a5cc394f537aa559 | [] | no_license | Friendktt/My-work | 21cf51509343d04fbed1d211356164a25a504c15 | a9a7143e9affec78516806086259a00f2affa9d5 | refs/heads/master | 2020-03-28T16:04:50.344455 | 2018-10-30T17:52:04 | 2018-10-30T17:52:04 | 148,657,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | """time"""
def main():
    """Read a month abbreviation and an offset from stdin, then print the
    month name that many months later (wrapping around the year)."""
    start = input()
    offset = int(input())
    names = 'JAN FEB MAR APR MAY JUN JUL AUG SEP OCT NOV DEC'
    # Each month occupies 4 characters ("ABC "), 12 months -> 48 chars;
    # the modulo wraps the lookup back to January after December.
    pos = (names.find(start) + offset * 4) % 48
    print(names[pos:pos + 3])
# Runs unconditionally on import/execution -- there is no `__main__` guard,
# so importing this module would also trigger the input prompts.  TODO:
# consider wrapping in `if __name__ == '__main__':`.
main()
| [
"noreply@github.com"
] | noreply@github.com |
fc3ed4e2ab098a3f20123eaa7cb3de9aef831a42 | afa6ba97f05c4c05d935fc87fec6c4457f761871 | /PythonStuff/complex/complex.py | cda2aa884cfda04fd777c88810415d694323e300 | [] | no_license | Rubalicious/PredictionProject | a23a6b6f37000de256def4ad7ff3da630d144a5c | 83b4f2037c5cb243eccb8f1f859858c6413747d4 | refs/heads/master | 2021-06-29T21:39:39.875815 | 2017-02-27T05:21:06 | 2017-02-27T05:21:06 | 35,399,016 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,256 | py | from ast import literal_eval
import numpy as np
import matplotlib.pyplot as plt
import itertools, copy
def sizeof(z):
    """Return the modulus (absolute value) of the complex number *z*."""
    return np.sqrt(z.imag ** 2 + z.real ** 2)
def map_(z, c=0):
    """Apply one step of the quadratic map: return z**2 + c."""
    result = z ** 2 + c
    return result
def to_complex(u):
    """Convert a (real, imag) pair *u* into a complex number.

    ``np.complex`` was a deprecated alias of the built-in ``complex`` and
    was removed in NumPy 1.24, so the built-in is used directly; behavior
    is unchanged for all inputs.
    """
    return complex(u[0], u[1])
def in_M(z):
    """Return True if *z* appears to belong to the Mandelbrot set.

    Iterates the quadratic map z -> z**2 + c with c fixed to the starting
    point, and treats the orbit as bounded (member of the set) once it
    survives 200 iterations without leaving the disc of radius 2.
    """
    c = copy.copy(z)
    iterations = 0
    while sizeof(z) <= 2:
        if iterations > 200:
            return True
        z = map_(z, c)
        iterations += 1
    return False
def main():
    """Plot a rough image of the Mandelbrot set on a 100x100 grid.

    The grid covers the window [-2, 2] x [-2, 2]; every grid point whose
    orbit stays bounded (per ``in_M``) is drawn as a black star.

    Fixed here: the original body used Python 2 ``print`` statements and
    ``xrange``, which are errors under Python 3.
    """
    grid_size = 100
    # make a grid_size x grid_size grid that sits
    # within the window [-2-2i, 2+2i]
    axis = [4 * float(i) / grid_size - 2 for i in range(grid_size)]
    grid = [e for e in itertools.product(axis, axis)]
    # we start with z_0 = 0 and C a point on the grid, so z_1 = C;
    # convert each point in the grid into that starting complex number
    count, total = 0, len(grid)
    for u in grid:
        c = to_complex(u)
        if count % 500 == 0:
            # progress report in percent
            print(100 * float(count) / total)
        count += 1
        if in_M(c):
            plt.plot(u[0], u[1], '*k')
    plt.axis([-3, 2, -2, 2])
    plt.show()
def plot_data():
    """Plot previously computed Mandelbrot points stored in the file 'data'.

    Each whitespace-separated token in the file must be a Python literal
    for an (x, y) pair, e.g. ``(0.5,-0.25)``.

    Fixed here: the original used the Python 2 ``except Exception, e``
    syntax (a SyntaxError under Python 3) only to re-raise unchanged, and
    closed the file by hand; ``with`` handles both concerns.
    """
    with open('data') as f:
        data = f.read()
    points = [literal_eval(token) for token in data.split()]
    for u in points:
        plt.plot(u[0], u[1], '*k')
    plt.axis([-3, 2, -2, 2])
    plt.show()
if __name__ == '__main__':
    # `main` recomputes the whole set from scratch (slow); by default we
    # only re-plot the points previously saved in the 'data' file.
    # main()
    plot_data()
"rabrams12@email.arizona.edu"
] | rabrams12@email.arizona.edu |
0169933bc2e1d5109a0bc4234754bd5af1414393 | ac822274f4fb85d6429fcb668842b9a892f0cf7a | /dynamic-programming/fibonacci/computing_fibonacci3.py | e62ee9126cc8c3f76bf7c671dfd7eda4b61efbab | [] | no_license | Vonewman/algorithm_toolbox | 01738a4c56aebc1ef5e321477af1e74b55521aa0 | 1ce53c2933c6069770fbe36c5fa41d1a2b030138 | refs/heads/master | 2021-06-14T18:17:25.119585 | 2021-05-26T20:48:36 | 2021-05-26T20:48:36 | 195,592,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | """Computation of the fibonacci Number using and iterative algorithm
Time Complexity: O(N)
@AUTHOR: Abdoulaye Diallo (A.D.)"""
def fib(n):
    ''' (int) -> int
    Return the n-th Fibonacci number F[n] (0-indexed, F[0] = 0, F[1] = 1)
    using a bottom-up dynamic-programming table in O(n) time.

    >>> fib(0)
    0
    >>> fib(5)
    5
    >>> fib(10)
    55
    '''
    # Base-case guard: the table below needs at least two slots, so the
    # original `[None] * (n + 1)` crashed with IndexError for n == 0.
    if n < 2:
        return n
    T = [None] * (n + 1)
    T[0], T[1] = 0, 1
    for i in range(2, n + 1):
        T[i] = T[i - 1] + T[i - 2]
    return T[n]
# Run the doctest examples embedded in fib's docstring when this module
# is executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| [
"vonewman7@gmail.com"
] | vonewman7@gmail.com |
1010ec65d867986aaaf4d6b1a73ea4d33920f131 | cc5f61369ce554106252c38d502663ba6306f65a | /albow/demo/screens/LaunchDemosScreen.py | 57b7d4f397ba9f086b51eb392a8f7e1a5ef017d9 | [
"MIT"
] | permissive | hasii2011/albow-python-3 | 884e6ef704b655e6e5a7338b25e3a2c20fda60c4 | 04b9d42705b370b62f0e49d10274eebf3ac54bc1 | refs/heads/master | 2022-02-04T22:43:52.431765 | 2022-01-10T15:41:39 | 2022-01-10T15:41:39 | 182,160,833 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,645 | py |
from typing import cast
import logging
from albow.core.ResourceUtility import ResourceUtility
from albow.core.ui.Screen import Screen
from albow.core.ui.Shell import Shell
from albow.widgets.Button import Button
from albow.widgets.Label import Label
from albow.layout.Column import Column
from albow.layout.Grid import Grid
from albow.themes.Theme import Theme
# Point size for the title font; the button size is referenced only in
# commented-out code below and is currently unused.
DEMO_TITLE_TEXT_SIZE = 24
DEMO_BUTTON_TEXT_SIZE = 12
class LaunchDemosScreen(Screen):
    """Main menu screen for the Albow demo application.

    Presents a title, a grid of equally sized buttons (one per demo
    screen exposed by the shell), and a Quit button, all centred.
    """

    def __init__(self, shell: Shell):
        """
        :param shell: the owning shell; expected to be a DemoShell that
            exposes one attribute per demo screen (text_screen,
            anim_screen, ...)
        """
        self.logger = logging.getLogger(__name__)
        #
        # Python 3 update
        #
        # Screen.__init__(self, shell)
        super().__init__(shell)

        # Local import, presumably to avoid a circular dependency between
        # this module and DemoShell -- verify before moving to module level.
        from albow.demo.DemoShell import DemoShell
        self.shell = cast(DemoShell, shell)

        f1 = ResourceUtility.get_font(DEMO_TITLE_TEXT_SIZE, Theme.BUILT_IN_FONT)
        title = Label("Albow Demonstration", font=f1)
        # emptyButton = Button("Empty", enabled=False)

        # One sub-list per grid row; each entry is a button that switches
        # the shell to the corresponding demo screen.
        menuArray = [
            [
                self.screen_button("Text Screen", self.shell.text_screen),
                self.screen_button("Text Fields", self.shell.fields_screen),
                self.screen_button("Controls", self.shell.controls_screen),
            ],
            [
                self.screen_button("Animation", self.shell.anim_screen),
                self.screen_button("Grid View", self.shell.grid_screen),
                self.screen_button("Palette View", self.shell.palette_screen),
            ],
            [
                self.screen_button("Image Array", self.shell.image_array_screen),
                self.screen_button("Modal Dialogs", self.shell.dialog_screen),
                self.screen_button("Tab Panel", self.shell.tab_panel_screen),
            ],
            [
                self.screen_button("Table View", self.shell.table_screen),
                self.screen_button("MultiChoice", self.shell.multiChoiceScreen),
                self.screen_button("MenuBar", self.shell.menuBarScreen)
            ],
            [
                self.screen_button("Music", self.shell.musicScreen),
                self.screen_button("ListBox", self.shell.listBoxScreen),
                self.screen_button("User Events", self.shell.userEventsScreen)
            ]
        ]
        menuGrid = Grid(rows=menuArray, column_spacing=5, row_spacing=2, margin=5)
        quitButton = Button("Quit", shell.quit)
        # Normalize all button widths so the grid columns line up.
        self.equallySizeButtons(menuArray)

        contents = Column([
            title,
            menuGrid,
            quitButton
        ], align='c', spacing=10)
        self.add_centered(contents)

    def screen_button(self, text: str, screen: Screen) -> Button:
        """Create a button labelled *text* that shows *screen* when clicked."""
        # buttFont = ResourceUtility.get_font(DEMO_BUTTON_TEXT_SIZE, Theme.BUILT_IN_FONT)
        # buttAttrs = {
        #     'font': buttFont
        # }
        retButton = Button(text, action=lambda: self.shell.show_screen(screen))
        return retButton

    def equallySizeButtons(self, menuArray):
        """Widen every button in *menuArray* to the largest button width.

        Returns the (mutated) menuArray for convenience.
        """
        largestWidth: int = 0
        # First pass: find the widest button.
        for buttRow in menuArray:
            for butt in buttRow:
                self.logger.debug("Button text: %s, width: %s", butt.text, butt.width)
                currWidth = butt.width
                if currWidth > largestWidth:
                    largestWidth = currWidth

        self.logger.debug("largestWidth: %s", largestWidth)
        # Second pass: apply that width to every button.
        for buttRow in menuArray:
            for butt in buttRow:
                butt.width = largestWidth

        return menuArray

    def __repr__(self):
        """Use the bare class name as the debug representation."""
        return self.__class__.__name__
| [
"Humberto.A.Sanchez.II@gmail.com"
] | Humberto.A.Sanchez.II@gmail.com |
34701cf143a4d45631f9f624420e2a494c855fec | 666dc64af3f5579dc213ac0ad9c2c36cb0a67c30 | /src/03_practitioner_bundle/10_dogs_vs_cats/crop_accuracy.py | 1cc306655316afad7fcc1c0af81f44163b8c3da3 | [] | no_license | zzingobomi/Opencv | 847e086cadb7429b1898f1a83e939d5aaeba9b5d | 8dc7692f680e84b5bfa4c41ee06251ec3190bf26 | refs/heads/master | 2023-03-03T00:48:08.923019 | 2021-02-08T06:17:14 | 2021-02-08T06:17:14 | 328,566,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,920 | py | # import the necessary packages
from config import dogs_vs_cats_config as config
from pyimagesearch.preprocessing import ImageToArrayPreprocessor
from pyimagesearch.preprocessing import SimplePreprocessor
from pyimagesearch.preprocessing import MeanPreprocessor
from pyimagesearch.preprocessing import CropPreprocessor
from pyimagesearch.io import HDF5DatasetGenerator
from pyimagesearch.utils.ranked import rank5_accuracy
from tensorflow.keras.models import load_model
import numpy as np
import progressbar
import json
# load the RGB means for the training set
# NOTE(review): the open() handle is never closed -- consider `with open(...)`.
means = json.loads(open(config.DATASET_MEAN).read())

# initialize the image preprocessors:
#   sp  - resize to the 227x227 network input size
#   mp  - subtract the per-channel training-set means loaded above
#   cp  - generate the 10 oversampling crops (see loop below)
#   iap - convert images to Keras-ordered arrays
sp = SimplePreprocessor(227, 227)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
cp = CropPreprocessor(227, 227)
iap = ImageToArrayPreprocessor()

# load the pretrained network
print("[INFO] loading model...")
model = load_model(config.MODEL_PATH)

# initialize the testing dataset generator, then make predictions on
# the testing data (single center-resized image per sample, batch of 64)
print("[INFO] predicting on test data (no crops)...")
testGen = HDF5DatasetGenerator(config.TEST_HDF5, 64,
    preprocessors=[sp, mp, iap], classes=2)
predictions = model.predict_generator(testGen.generator(),
    steps=testGen.numImages // 64, max_queue_size=10)

# compute the rank-1 and rank-5 accuracies (only rank-1 is reported;
# the rank-5 value is discarded)
(rank1, _) = rank5_accuracy(predictions, testGen.db["labels"])
print("[INFO] rank-1: {:.2f}%".format(rank1 * 100))
testGen.close()

# re-initialize the testing set generator, this time excluding the
# `SimplePreprocessor` -- the CropPreprocessor below handles sizing
testGen = HDF5DatasetGenerator(config.TEST_HDF5, 64,
    preprocessors=[mp], classes=2)
predictions = []

# initialize the progress bar
widgets = ["Evaluating: ", progressbar.Percentage(), " ",
    progressbar.Bar(), " ", progressbar.ETA()]
pbar = progressbar.ProgressBar(maxval=testGen.numImages // 64,
    widgets=widgets).start()

# loop over a single pass of the test data
for (i, (images, labels)) in enumerate(testGen.generator(passes=1)):
    # loop over each of the individual images
    for image in images:
        # apply the crop preprocessor to the image to generate 10
        # separate crops, then convert them from images to arrays
        crops = cp.preprocess(image)
        crops = np.array([iap.preprocess(c) for c in crops],
            dtype="float32")

        # make predictions on the crops and then average them
        # together to obtain the final (oversampled) prediction
        pred = model.predict(crops)
        predictions.append(pred.mean(axis=0))

    # update the progress bar
    pbar.update(i)

# compute the rank-1 accuracy for the 10-crop predictions
pbar.finish()
print("[INFO] predicting on test data (with crops)...")
(rank1, _) = rank5_accuracy(predictions, testGen.db["labels"])
print("[INFO] rank-1: {:.2f}%".format(rank1 * 100))
testGen.close()
| [
"zzingo5@naver.com"
] | zzingo5@naver.com |
0dc35183393d83eb31bf25b1f1f39d1850886c4d | 17ef1c7483843540ce4d063708afa65430b9301f | /tests/test_allocate.py | b4281fbb11694e1dbc38fd7af714e2195439f9b5 | [
"MIT"
] | permissive | CivicKnowledge/synpums | e01f8815c5fe118ec748c248b84c862a1db15a3f | dd3793388862aa7b43eee2fc2aa96fcf21014267 | refs/heads/main | 2023-01-03T09:04:37.021235 | 2020-10-31T00:17:15 | 2020-10-31T00:17:15 | 304,128,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | import unittest
import warnings
import pandas as pd
import rowgenerators as rg
from synpums import *
from synpums.util import *
# Silence library warnings for the duration of the test run.
warnings.filterwarnings("ignore")

# Dataset coordinates used by the allocation test below.  NOTE(review):
# `year` and `release` are not referenced in the visible test -- confirm
# whether they are still needed.
state = 'RI'
year = 2018
release = 5
cache_dir = '/tmp/synpums'
class TestAllocate(unittest.TestCase):
    """Exercise a single AllocationTask through a vector-walk optimization."""

    def test_basic(self):
        """Run one allocation task end to end, printing its error measures.

        NOTE(review): contains no assertions -- it only prints the errors,
        so it can fail solely by raising; consider asserting on the final
        m90_rms_error.
        """
        tasks = AllocationTask.get_tasks(cache_dir, 'RI', ignore_completed=False)

        # Arbitrary task index chosen for the smoke test.
        task = tasks[24]
        task.init()
        print(task.m90_rms_error)

        task.initialize_weights_sample()
        print(f"te={task.total_error}, rms={task.m90_rms_error}")

        # Hyper-parameters for the stochastic vector walk -- meanings
        # assumed from synpums' vector_walk API; TODO confirm.
        args = dict(N=2000, min_iter=1000, step_size_max=15, step_size_min=1, reversal_rate=.4, max_ssm=150)
        rows = task.vector_walk(**args)

        print(f"te={task.total_error}, rms={task.m90_rms_error}")
# Allow running this test module directly with `python test_allocate.py`.
if __name__ == '__main__':
    unittest.main()
| [
"eric@civicknowledge.com"
] | eric@civicknowledge.com |
f7157930dfe0bda3f2bc5b33961ea2c1aad3ff82 | e9199038f50599735636df8c41606e916eda6c9d | /celery_config.py | 0f71011b9f6b87162a97774fafdbb2a76dfe8174 | [] | no_license | smatton/DashCeleryProject | 00f71fc8bc90762db438107fbcc31403bcd67d18 | ae6ad393f8d0ee511c20bc231d24d5ea21fbb900 | refs/heads/master | 2022-12-12T02:04:28.245822 | 2020-04-05T15:38:11 | 2020-04-05T15:38:11 | 172,076,345 | 0 | 0 | null | 2022-12-08T01:21:20 | 2019-02-22T14:04:56 | Python | UTF-8 | Python | false | false | 196 | py | from celery import Celery
import appConfig
# Broker/backend URLs come from the application's central config module.
BACKEND = appConfig.app_backend
BROKER = appConfig.app_broker

# Shared Celery application; `include` pre-registers the `mytasks` module
# so workers discover its tasks at startup.
celery_app = Celery('celery_config',include=['mytasks'], backend=BACKEND, broker=BROKER)
| [
"slmatton@comcast.net"
] | slmatton@comcast.net |
087bc7514170d26a886ceb157ad850b49b661a4b | adea9fc9697f5201f4cb215571025b0493e96b25 | /napalm_yang/models/openconfig/network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/state/__init__.py | 50aadd5a1163361e23e5c78af06fbd8a3a0a4ce6 | [
"Apache-2.0"
] | permissive | andyjsharp/napalm-yang | d8a8b51896ef7c6490f011fe265db46f63f54248 | ef80ebbfb50e188f09486380c88b058db673c896 | refs/heads/develop | 2021-09-09T02:09:36.151629 | 2018-03-08T22:44:04 | 2018-03-08T22:44:04 | 114,273,455 | 0 | 0 | null | 2018-03-08T22:44:05 | 2017-12-14T16:33:35 | Python | UTF-8 | Python | false | false | 30,938 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
# The generated code below still refers to the Python 2 names `unicode`,
# `long` and `__builtin__`, so alias them to their Python 3 equivalents.
if six.PY3:
    import builtins as __builtin__
    long = int
    unicode = str
elif six.PY2:
    import __builtin__
class state(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/connection-points/connection-point/endpoints/endpoint/state. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    NOTE(review): machine-generated code -- do not hand-edit; regenerate
    from the YANG model if the schema changes.  A second generated
    ``state`` class later in this module rebinds the same module-level
    name -- confirm which definition callers actually receive.

    YANG Description: Operational state parameters relating to the
    endpoint
    """
    # Generated containers use __slots__; leaf values live in the
    # name-mangled attributes listed here.
    __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__endpoint_id','__precedence','__type','__active',)

    _yang_name = 'state'
    _pybind_generated_by = 'container'

    def __init__(self, *args, **kwargs):
        """Build the container; optionally copy changed leaves from a
        compatible object passed as the single positional argument."""
        self._path_helper = False

        self._extmethods = False
        # One YANGDynClass-wrapped holder per YANG leaf; all are
        # is_config=False, i.e. operational (read-only) state.
        self.__active = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
        self.__type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'REMOTE': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'oc-ni-types:LOCAL': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'LOCAL': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'oc-ni-types:REMOTE': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}},), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)
        self.__precedence = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="precedence", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint16', is_config=False)
        self.__endpoint_id = YANGDynClass(base=unicode, is_leaf=True, yang_name="endpoint-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy-constructor path: every element of this container must
            # exist on the supplied object.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Walk up the parent chain when attached to a data tree; otherwise
        # fall back to this container's static schema path.
        if hasattr(self, "_parent"):
            return self._parent._path()+[self._yang_name]
        else:
            return [u'network-instances', u'network-instance', u'connection-points', u'connection-point', u'endpoints', u'endpoint', u'state']

    def _get_endpoint_id(self):
        """
        Getter method for endpoint_id, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/state/endpoint_id (string)

        YANG Description: An identifier for the endpoint
        """
        return self.__endpoint_id

    def _set_endpoint_id(self, v, load=False):
        """
        Setter method for endpoint_id, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/state/endpoint_id (string)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_endpoint_id is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_endpoint_id() directly.

        YANG Description: An identifier for the endpoint
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="endpoint-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """endpoint_id must be of a type compatible with string""",
                'defined-type': "string",
                'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="endpoint-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)""",
            })

        self.__endpoint_id = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_endpoint_id(self):
        self.__endpoint_id = YANGDynClass(base=unicode, is_leaf=True, yang_name="endpoint-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)

    def _get_precedence(self):
        """
        Getter method for precedence, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/state/precedence (uint16)

        YANG Description: The precedence of the endpoint - the lowest precendence
    viable endpoint will be utilised as the active endpoint
    within a connection
        """
        return self.__precedence

    def _set_precedence(self, v, load=False):
        """
        Setter method for precedence, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/state/precedence (uint16)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_precedence is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_precedence() directly.

        YANG Description: The precedence of the endpoint - the lowest precendence
    viable endpoint will be utilised as the active endpoint
    within a connection
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="precedence", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint16', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """precedence must be of a type compatible with uint16""",
                'defined-type': "uint16",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="precedence", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint16', is_config=False)""",
            })

        self.__precedence = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_precedence(self):
        self.__precedence = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="precedence", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint16', is_config=False)

    def _get_type(self):
        """
        Getter method for type, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/state/type (identityref)

        YANG Description: The type of endpoint that is referred to by the current
    endpoint
        """
        return self.__type

    def _set_type(self, v, load=False):
        """
        Setter method for type, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/state/type (identityref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_type is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_type() directly.

        YANG Description: The type of endpoint that is referred to by the current
    endpoint
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'REMOTE': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'oc-ni-types:LOCAL': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'LOCAL': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'oc-ni-types:REMOTE': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}},), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """type must be of a type compatible with identityref""",
                'defined-type': "openconfig-network-instance:identityref",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'REMOTE': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'oc-ni-types:LOCAL': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'LOCAL': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'oc-ni-types:REMOTE': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}},), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
            })

        self.__type = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_type(self):
        self.__type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'REMOTE': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'oc-ni-types:LOCAL': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'LOCAL': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'oc-ni-types:REMOTE': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}},), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)

    def _get_active(self):
        """
        Getter method for active, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/state/active (boolean)

        YANG Description: When the backup endpoint is active, the value of this
    parameter is set to true
        """
        return self.__active

    def _set_active(self, v, load=False):
        """
        Setter method for active, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/state/active (boolean)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_active is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_active() directly.

        YANG Description: When the backup endpoint is active, the value of this
    parameter is set to true
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """active must be of a type compatible with boolean""",
                'defined-type': "boolean",
                'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
            })

        self.__active = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_active(self):
        self.__active = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)

    # Public read-only properties over the private leaf holders.
    endpoint_id = __builtin__.property(_get_endpoint_id)
    precedence = __builtin__.property(_get_precedence)
    type = __builtin__.property(_get_type)
    active = __builtin__.property(_get_active)

    # Element-name -> property map used by the copy-constructor logic in
    # __init__ above.
    _pyangbind_elements = {'endpoint_id': endpoint_id, 'precedence': precedence, 'type': type, 'active': active, }
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/connection-points/connection-point/endpoints/endpoint/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state parameters relating to the
endpoint
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__endpoint_id','__precedence','__type','__active',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__active = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
self.__type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'REMOTE': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'oc-ni-types:LOCAL': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'LOCAL': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'oc-ni-types:REMOTE': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}},), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)
self.__precedence = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="precedence", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint16', is_config=False)
self.__endpoint_id = YANGDynClass(base=unicode, is_leaf=True, yang_name="endpoint-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'connection-points', u'connection-point', u'endpoints', u'endpoint', u'state']
def _get_endpoint_id(self):
"""
Getter method for endpoint_id, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/state/endpoint_id (string)
YANG Description: An identifier for the endpoint
"""
return self.__endpoint_id
def _set_endpoint_id(self, v, load=False):
"""
Setter method for endpoint_id, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/state/endpoint_id (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_endpoint_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_endpoint_id() directly.
YANG Description: An identifier for the endpoint
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="endpoint-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """endpoint_id must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="endpoint-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)""",
})
self.__endpoint_id = t
if hasattr(self, '_set'):
self._set()
def _unset_endpoint_id(self):
self.__endpoint_id = YANGDynClass(base=unicode, is_leaf=True, yang_name="endpoint-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='string', is_config=False)
def _get_precedence(self):
"""
Getter method for precedence, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/state/precedence (uint16)
YANG Description: The precedence of the endpoint - the lowest precendence
viable endpoint will be utilised as the active endpoint
within a connection
"""
return self.__precedence
def _set_precedence(self, v, load=False):
"""
Setter method for precedence, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/state/precedence (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_precedence is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_precedence() directly.
YANG Description: The precedence of the endpoint - the lowest precendence
viable endpoint will be utilised as the active endpoint
within a connection
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="precedence", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint16', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """precedence must be of a type compatible with uint16""",
'defined-type': "uint16",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="precedence", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint16', is_config=False)""",
})
self.__precedence = t
if hasattr(self, '_set'):
self._set()
def _unset_precedence(self):
self.__precedence = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="precedence", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint16', is_config=False)
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/state/type (identityref)
YANG Description: The type of endpoint that is referred to by the current
endpoint
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/state/type (identityref)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: The type of endpoint that is referred to by the current
endpoint
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'REMOTE': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'oc-ni-types:LOCAL': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'LOCAL': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'oc-ni-types:REMOTE': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}},), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """type must be of a type compatible with identityref""",
'defined-type': "openconfig-network-instance:identityref",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'REMOTE': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'oc-ni-types:LOCAL': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'LOCAL': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'oc-ni-types:REMOTE': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}},), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
})
self.__type = t
if hasattr(self, '_set'):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'REMOTE': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'oc-ni-types:LOCAL': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'LOCAL': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}, u'oc-ni-types:REMOTE': {'@namespace': u'http://openconfig.net/yang/network-instance-types', '@module': u'openconfig-network-instance-types'}},), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)
def _get_active(self):
"""
Getter method for active, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/state/active (boolean)
YANG Description: When the backup endpoint is active, the value of this
parameter is set to true
"""
return self.__active
def _set_active(self, v, load=False):
"""
Setter method for active, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/state/active (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_active is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_active() directly.
YANG Description: When the backup endpoint is active, the value of this
parameter is set to true
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """active must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
})
self.__active = t
if hasattr(self, '_set'):
self._set()
def _unset_active(self):
self.__active = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
endpoint_id = __builtin__.property(_get_endpoint_id)
precedence = __builtin__.property(_get_precedence)
type = __builtin__.property(_get_type)
active = __builtin__.property(_get_active)
_pyangbind_elements = {'endpoint_id': endpoint_id, 'precedence': precedence, 'type': type, 'active': active, }
| [
"dbarrosop@dravetech.com"
] | dbarrosop@dravetech.com |
c2a50a2894a8886745a3b0cf6176b87cdd9ff324 | bd14c979335112b7718b0feda18ebf0e3b40fe5c | /contest_093/b_small_and_large_integers_2nd.py | 5090fc480a7ed5adb7ee90d373f591aadebb6a25 | [] | no_license | ababa831/atcoder_beginners | 22c57b15333d110126d1b1afadc0ff5e8784fc4f | 1a30882ce7f20f312045d5dc7bfaa5688cc8a88e | refs/heads/master | 2023-03-07T15:47:19.750682 | 2020-03-04T19:53:45 | 2020-03-04T19:53:45 | 143,360,607 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | # Accepted
# AtCoder ABC093 B: print, in ascending order, every integer in [a, b] that is
# among the k smallest candidates (a..a+k-1) or the k largest (b-k+1..b).
a, b, k = map(int, input().split())
candidates = set(range(a, a + k)) | set(range(b, b - k, -1))
for value in sorted(candidates):
    if a <= value <= b:
        print(value)
"flvonlineconverter@gmail.com"
] | flvonlineconverter@gmail.com |
5a5e0ce76558c3b94ad2149478844745d1f5087a | 67f19ebb1fb3189e4c2f99484c1dc13af5099edb | /wii_packages/enso/gage_don_h/gage_don_h.py | 08da11557b1626666c779f60cf484d446bd3aa80 | [] | no_license | delguoqing/PyLMPlayer | 609c4fe35e56e4ce3ce30eeb2e9244aad5ea1609 | db8a1edf70ac1c11deffddc458788b3a2c2078df | refs/heads/master | 2021-01-22T05:06:00.491732 | 2013-09-13T04:54:23 | 2013-09-13T04:54:23 | 8,878,510 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | def func0(this, _global):
this.stop()
def func1(this, _global):
    # Frame callback: jump the clip to its "fever" animation sequence.
    # `this` is presumably the movie-clip object driven by the LM player.
    this.gotoAndPlay("fever")
def func2(this, _global):
    # Only fall back to the normal gauge while the fever animation is still in
    # its early frames (play head 2-23); always halt this clip's timeline.
    if 2 <= this.fever_gage._play_head <= 23:
        this.fever_gage.gotoAndPlay("toNormal")
    this.stop()
def func3(this, _global):
    # Transition the gauge into fever mode, then halt this clip's timeline.
    this.fever_gage.gotoAndPlay("toFever")
    this.stop()
# Ordered tuple of frame-script callbacks; presumably indexed by the LM
# player's frame table - confirm against the player runtime.
DATA = (
    func0,
    func1,
    func2,
    func3,
)
| [
"delguoqing@hotmail.com"
] | delguoqing@hotmail.com |
dbc0f0130cf61ccefa2cb7304519c144f1dc48bf | a3c34ad9425cf9c16a09423278b81c20edd8d77a | /sms_frame/models/sms_compose.py | 3cbd3cbd6f0bcc7f98da112e58038923a071a292 | [] | no_license | meswapnilwagh/Odoo9 | d1dca7de18ac555abe2da96fb78f0d3bd3835650 | 91f1e545ab597ca89283b8dc5dbf3d7f5bd5df5b | refs/heads/9.0 | 2020-04-08T00:24:25.179940 | 2016-02-22T08:49:56 | 2016-02-22T08:49:56 | 52,294,854 | 0 | 1 | null | 2016-02-22T18:20:41 | 2016-02-22T18:20:40 | null | UTF-8 | Python | false | false | 3,144 | py | # -*- coding: utf-8 -*
from datetime import datetime
from openerp import api, fields, models
class SmsCompose(models.Model):
    """Composer model used to send one SMS tied to an arbitrary record.

    Holds the from/to numbers and the message body; ``send_entity`` performs
    the actual gateway call, reopening the form with the error on failure and
    logging an ``sms.message`` row on success.
    """
    _name = "sms.compose"

    error_message = fields.Char(readonly=True)
    record_id = fields.Integer()
    model = fields.Char()
    sms_template_id = fields.Many2one('sms.template', string="Template")
    from_mobile_id = fields.Many2one('sms.number', required=True, string="From Mobile")
    to_number = fields.Char(required=True, string='To Mobile Number', readonly=True)
    sms_content = fields.Text(string='SMS Content')

    @api.onchange('sms_template_id')
    def _onchange_sms_template_id(self):
        """Prefills from mobile, sms_account and sms_content but allow them to manually change the content after"""
        if self.sms_template_id.id != False:
            sms_rendered_content = self.env['sms.template'].render_template(self.sms_template_id.template_body, self.sms_template_id.model_id.model, self.record_id)
            self.from_mobile_id = self.sms_template_id.from_mobile_verified_id.id
            self.sms_content = sms_rendered_content

    @api.multi
    def send_entity(self):
        """Attempt to send the sms, if any error comes back show it to the user and only log the smses that successfully sent"""
        self.ensure_one()
        my_sms = self.from_mobile_id.account_id.send_message(self.from_mobile_id.mobile_number, self.to_number, self.sms_content.encode('utf-8'), self.model, self.record_id)
        # use the human readable error message if present
        error_message = ""
        if my_sms.human_read_error != "":
            error_message = my_sms.human_read_error
        else:
            error_message = my_sms.response_string
        # display the screen with an error code if the sms/mms was not successfully sent
        if my_sms.delivary_state == "failed":
            return {
                'type':'ir.actions.act_window',
                'res_model':'sms.compose',
                'view_type':'form',
                'view_mode':'form',
                'target':'new',
                'context':{'default_to_number':self.to_number,'default_record_id':self.record_id,'default_model':self.model, 'default_error_message':error_message}
            }
        else:
            my_model = self.env['ir.model'].search([('model','=',self.model)])
            # for single smses we only record successful sms, failed ones reopen the form with the error message
            sms_message = self.env['sms.message'].create({'record_id': self.record_id,'model_id':my_model[0].id,'account_id':self.from_mobile_id.account_id.id,'from_mobile':self.from_mobile_id.mobile_number,'to_mobile':self.to_number,'sms_content':self.sms_content,'status_string':my_sms.response_string, 'direction':'O','message_date':datetime.utcnow(), 'status_code':my_sms.delivary_state, 'sms_gateway_message_id':my_sms.message_id})
            try:
                self.env[self.model].search([('id','=', self.record_id)]).message_post(body=self.sms_content, subject="SMS Sent")
            except Exception:
                # message_post only exists when the mail/CRM module is installed;
                # posting is best-effort. Bug fix: the bare `except:` also swallowed
                # SystemExit/KeyboardInterrupt - narrowed to Exception. The unused
                # local `gateway_model` was also removed.
                pass
"steven@sythiltech.com"
] | steven@sythiltech.com |
8414d656d3e0a08a8377c7717cdb4cabfe93d099 | 4598524620d159bd0d6e2fc2f390299b55ec9bc0 | /Kattis/iwannabe.py | 669ebcb61eb86585403f1d6d7ba54c0049206772 | [] | no_license | skyu0221/Kattis-UVa | a586c15851063828f8b78c66f93b833af1c27fcd | bd3a0835d4bb9a6ab50fca6e0db3c8df7cc4ed1b | refs/heads/master | 2021-08-22T17:39:57.172371 | 2021-01-13T00:15:58 | 2021-01-13T00:15:58 | 69,905,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | from heapq import nlargest
# Kattis "iwannabe": a contestant can make the team if it ranks among the
# top k in at least one of the three stat columns; `sets` collects them.
n, k = input().split()
k = int(k)
data = [tuple(map(int, input().split())) for _ in range(int(n))]
sets = set()
for stat in range(3):
    sets.update(nlargest(k, data, key=lambda row: row[stat]))
print(len(sets)) | [
"skyu0221@gmail.com"
] | skyu0221@gmail.com |
6fb0242d3b682477d14e7b4fe0caebb5edcc2d43 | fd936777d5be7f97accdf8f12ffc5407a7614d84 | /software/openUI/OpenFrameState.py | 8e41b0fe1be1bffa0c54d28914161a436941439b | [] | no_license | GiuX/giux-openwsn-sw | 0f38f3d33b53e39811cecbf0177cfdeb16c948ea | f1e6a29747502bc34fb50908b2838cad5145344d | refs/heads/master | 2021-01-21T00:52:24.105857 | 2013-03-27T00:49:08 | 2013-03-27T00:49:08 | 10,557,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,747 | py | import json
import OpenFrame
import OpenTable
import OpenGuiLib
class OpenFrameState(OpenFrame.OpenFrame):
    """Frame showing a 'data' table (and a hidden 'meta' table) that can
    refresh itself periodically from a user-supplied callback.

    The refresh loop relies on the Tk ``after`` scheduler inherited from
    the parent frame - TODO confirm OpenFrame wraps a Tkinter Frame.
    """
    def __init__(self,guiParent,width=None,height=None,frameName="frame",row=0,column=0,columnspan=1):
        # store params
        self.guiParent = guiParent
        self.frameName = frameName
        self.row = row
        self.column = column
        # initialize the parent class
        OpenFrame.OpenFrame.__init__(self,guiParent,
            width=width,
            height=height,
            frameName=frameName,
            row=row,
            column=column,
            columnspan=columnspan,)
        # local variables
        self.updatePeriod = None
        # header label and table for the data payload (label grid call disabled)
        temp = OpenGuiLib.HeaderLabel(self.container,text="data")
        #temp.grid(row=0,column=0)
        self.data = OpenTable.OpenTable(self.container)
        self.data.grid(row=1,column=0)
        # header label and table for the metadata; currently not displayed
        temp = OpenGuiLib.HeaderLabel(self.container,text="meta")
        #temp.grid(row=2,column=0)
        self.meta = OpenTable.OpenTable(self.container)
        #self.meta.grid(row=3,column=0)
    #======================== public ==========================================
    def startAutoUpdate(self,updatePeriod,updateFunc,updateParams):
        """Refresh every ``updatePeriod`` ms by calling ``updateFunc(*updateParams)``
        (the callback must return an object exposing ``toJson()``)."""
        self.updatePeriod = updatePeriod
        self.updateFunc = updateFunc
        self.updateParams = updateParams
        self.after(self.updatePeriod,self._cb_autoUpdate)
    def stopAutoUpdate(self):
        """Stop the periodic refresh; takes effect at the next scheduled tick."""
        self.updatePeriod = None
    def update(self,dataAndMeta):
        """Refresh both tables from a dict with 'data' and 'meta' lists."""
        assert(isinstance(dataAndMeta,dict))
        assert('meta' in dataAndMeta)
        assert(isinstance(dataAndMeta['meta'],list))
        assert('data' in dataAndMeta)
        assert(isinstance(dataAndMeta['data'],list))
        # honor an explicit column order when the first meta entry provides one
        if len(dataAndMeta['meta'])>0 and ('columnOrder' in dataAndMeta['meta'][0]):
            self.data.update(dataAndMeta['data'],columnOrder=dataAndMeta['meta'][0]['columnOrder'].split('.'))
        else:
            self.data.update(dataAndMeta['data'])
        self.meta.update(dataAndMeta['meta'])
    #======================== private =========================================
    def _cb_autoUpdate(self):
        # pull fresh state, then re-arm the timer while auto-update is active
        self.update(json.loads(self.updateFunc(*self.updateParams).toJson()))
        if self.updatePeriod:
            self.after(self.updatePeriod,self._cb_autoUpdate)
###############################################################################
if __name__=='__main__':
    # Manual smoke test: build a window holding one state frame, feed it a
    # sample data/meta payload, then hand control to the GUI main loop.
    import OpenWindow
    examplewindow = OpenWindow.OpenWindow("OpenFrameState")
    exampleframestate = OpenFrameState(examplewindow,
        frameName='exampleframestate',
        row=0,
        column=0)
    exampleframestate.show()
    exampleframestate.update(
        {
            'data': [
                {
                    'data1': 'dA1',
                    'data2': 'dA2',
                    'data3': 'dA3',
                },
            ],
            'meta': [
                {
                    'meta1': 'm1',
                    'meta2': 'm2',
                },
            ],
        }
    )
    examplewindow.startGui()
| [
"watteyne@eecs.berkeley.edu"
] | watteyne@eecs.berkeley.edu |
7c7b6d5899ee3e4f388506f32f261fbed6508bac | 3649308c5d709100c4dc90e661fc9f564f184877 | /ocs/login/models.py | bc379435ce64eb699e183aa176c7f68a662e65a4 | [] | no_license | anirudhasj441/django | 54171f6141d6938201146a6d3e9475477a3f0078 | 5bb202d13d4b17daca9aedf3b213908c3245757b | refs/heads/master | 2021-07-09T06:18:11.597848 | 2021-03-07T17:58:32 | 2021-03-07T17:58:32 | 230,616,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py | from django.db import models
from datetime import date
# Create your models here.
class Student(models.Model):
    """Student login/profile record; the PNR doubles as the primary key."""
    # s_id = models.AutoField(primary_key=True,default="1")
    s_pnr = models.IntegerField(primary_key=True)  # permanent registration number (PK)
    s_name = models.CharField(max_length=50)
    s_dob = models.DateField(null=True,blank=True)  # date of birth, optional
    s_gender = models.CharField(max_length=50,default="")
    s_passwd = models.CharField(max_length=300)  # NOTE(review): 300 chars suggests a hash - confirm hashing happens before save
    s_roll = models.IntegerField()  # roll number within the class
    s_class = models.CharField(max_length=50)
    s_contact = models.IntegerField()  # NOTE(review): IntegerField drops leading zeros of phone numbers - consider CharField
    s_email = models.EmailField()
    def __str__(self):
        # Human-readable label used by the admin and shell.
        return self.s_name
class Teacher(models.Model):
    """Teacher login/profile record with an auto-generated primary key."""
    t_id = models.AutoField(primary_key=True)
    tnr = models.IntegerField()  # presumably a teacher registration number - confirm
    t_name = models.CharField(max_length=50)
    t_dob = models.DateField(null=True,blank=True)  # date of birth, optional
    t_email = models.EmailField(default="")
    t_cont = models.IntegerField(null=True)  # contact number, optional
    t_passwd = models.CharField(max_length=300)
    def __str__(self):
        # Human-readable label used by the admin and shell.
        return self.t_name | [
"anirudhasj441@gmail.com"
] | anirudhasj441@gmail.com |
1bd34061677f26326c7fd922af3def6d2438d598 | 4b7ddd7c2f6fad3d3f83344543bde765b17c2ad0 | /backend/places/migrations/0035_auto_20200411_2235.py | 2dd2934701edc8225fb4937bb2122e2c2174cfb3 | [
"MIT"
] | permissive | AC-Dap/savingchinatown-backend | dc68e37a6ee3268da4088a5732d6105939b5adb4 | 223b5cff88a114b3894062f11c57b9410e56134b | refs/heads/master | 2023-08-14T13:30:54.098845 | 2020-11-04T14:16:01 | 2020-11-04T14:16:01 | 254,897,902 | 0 | 0 | MIT | 2021-09-22T19:38:36 | 2020-04-11T15:32:11 | Python | UTF-8 | Python | false | false | 422 | py | # Generated by Django 3.0.4 on 2020-04-11 22:35
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the 'lat' and 'lng' columns from
    places.Place. Presumably the coordinates moved to another
    representation - see migration 0034 for context.
    """
    dependencies = [
        ('places', '0034_auto_20200411_2212'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='place',
            name='lat',
        ),
        migrations.RemoveField(
            model_name='place',
            name='lng',
        ),
    ]
| [
"ninjaminer18@gmail.com"
] | ninjaminer18@gmail.com |
0b756182dbb06f11ae85695612e0cb828956ae15 | 7366ce3ba86cb6af2e9ee0923d4a74d028f08c72 | /2d/floatingStructures/floating_caisson_chrono/vof_p.py | 1d9aa0eb3893e686d42ec54c7f00bad3049fd987 | [
"MIT"
] | permissive | erdc/air-water-vv | 56b939280d8d9fb81dc13b79a9de5a489e21e350 | f93ff99432703292b1d62c3e9689537eae44e864 | refs/heads/master | 2022-08-21T23:11:16.912042 | 2022-08-11T16:44:47 | 2022-08-11T16:44:47 | 21,613,939 | 5 | 21 | MIT | 2020-11-04T19:00:46 | 2014-07-08T13:36:39 | Python | UTF-8 | Python | false | false | 1,433 | py | from proteus.default_p import *
from proteus.ctransportCoefficients import smoothedHeaviside
from proteus.mprans import VOF
from proteus import Context
# Shared simulation context populated by the driver script.
ct = Context.get()
domain = ct.domain
nd = domain.nd  # spatial dimension
mesh = domain.MeshOptions
genMesh = mesh.genMesh
movingDomain = ct.movingDomain
T = ct.T  # final simulation time
LevelModelType = VOF.LevelModel
# Model indices for the coupled solvers; with only VOF there is no
# level-set / redistancing model.
# NOTE(review): if ct.useOnlyVF is True, the None values below will make the
# int(...)+LS_model / int(...)+RD_model arithmetic in VOF.Coefficients raise
# a TypeError - confirm this configuration is never used here.
if ct.useOnlyVF:
    RD_model = None
    LS_model = None
else:
    RD_model = 3
    LS_model = 2
# VOF transport coefficients; every model index is shifted by one when the
# domain moves (the moving-mesh model then occupies slot 0).
coefficients = VOF.Coefficients(LS_model=int(ct.movingDomain)+LS_model,
                                V_model=int(ct.movingDomain)+0,
                                RD_model=int(ct.movingDomain)+RD_model,
                                ME_model=int(ct.movingDomain)+1,
                                checkMass=True,
                                useMetrics=ct.useMetrics,
                                epsFact=ct.epsFact_vof,
                                sc_uref=ct.vof_sc_uref,
                                sc_beta=ct.vof_sc_beta,
                                movingDomain=ct.movingDomain)
# Boundary conditions are delegated to the condition objects attached to the
# domain's boundary flags.
dirichletConditions = {0: lambda x, flag: domain.bc[flag].vof_dirichlet.init_cython()}
advectiveFluxBoundaryConditions = {0: lambda x, flag: domain.bc[flag].vof_advective.init_cython()}
diffusiveFluxBoundaryConditions = {0: {}}
class VF_IC:
    """Initial condition for the VOF fraction: smoothed Heaviside of the
    height above the free surface (0 below waterLevel, 1 above, smeared
    over epsFact_consrv_heaviside*he)."""
    def uOfXT(self, x, t):
        # x[nd-1] is the vertical coordinate; t is unused (static IC).
        return smoothedHeaviside(ct.epsFact_consrv_heaviside*ct.he,x[nd-1]-ct.waterLevel)
initialConditions = {0: VF_IC()}
| [
"l.maurel@hrwallingford.com"
] | l.maurel@hrwallingford.com |
10c70540a9623f4e0994a218263f3b689583ef58 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_049/ch25_2019_03_11_12_40_04_650432.py | 1ccce705dfcb58b27c4c448e847adbc6418c6bc3 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | distancia=int(input('Qual a distância do trajeto? '))
def preco(distancia):
    """Fare: R$0.50/km for the first 200 km, R$0.45/km for every km beyond."""
    base = min(distancia, 200) * 0.5
    if distancia <= 200:
        return base
    return base + (distancia - 200) * 0.45
print ("{:.2f}".format(preco(distancia)) | [
"you@example.com"
] | you@example.com |
961eff9041b1e3e22e3e0d9232574196a25542bb | 1d05fb71820d118397f5973c5eeca64786e0e7af | /Optimization_Project.py | b5b5951be2ee36b8f5c0d4250a8ad0114e22fd6d | [] | no_license | aymanelghotni/Conflict_of_interests_optimization_and_surrogate_modeling | 11aa71391023800214fcd08ee93a93b619d676f9 | 7817d847d3fc28d27d1e9866293142738b116a07 | refs/heads/master | 2023-03-01T11:55:12.930210 | 2021-01-29T02:44:27 | 2021-01-29T02:44:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,619 | py | #!/usr/bin/env python
# coding: utf-8
# <font size=6>Task 1: Creating the Surrogate Model</font>
# In[373]:
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Input,Dense
from tensorflow.keras.layers.experimental import preprocessing
import pandas as pd
import seaborn as sns
# In[105]:
# Load the raw experiment data (Width/Height/PPI/time columns).
dataset=pd.read_csv("./Desktop/ds_opt/ds1.csv")
# In[106]:
# Total pixel count; only used for the exploratory pair plot below.
dataset['Res']=dataset['Width']*dataset['Height']
dataset
# In[107]:
# 60/40 train/test split with a fixed seed for reproducibility.
train_data=dataset.sample(frac=0.6,random_state=0)
test_data=dataset.drop(train_data.index)
# <font size=5>Sanity check that train and test set are unique</font>
# In[108]:
print(test_data.merge(train_data).empty)
# <font size=5>As we can see below, the time and quality are functions of Resolution</font>
# In[109]:
sns.pairplot(train_data[['Res','time(Seconds)','PPI(Normalized)']],diag_kind='kde')
# <font size=5>Getting rid of useless features, because we are interested in the Width and Height only</font>
# In[110]:
train_data.pop('PPI')
train_data.pop('size(MB)')
train_data.pop('Resolution')
train_data.pop('Res')
test_data.pop('PPI')
test_data.pop('size(MB)')
test_data.pop('Resolution')
test_data.pop('Res')
# <font size=5>Separate features from labels in both Training and Test set</font>
# In[112]:
train_features=train_data.copy()
test_features=test_data.copy()
# Labels are (time, normalized PPI) pairs stacked into an (n, 2) array.
train_labels1=train_features.pop('time(Seconds)').tolist()
train_labels2=train_features.pop('PPI(Normalized)').tolist()
test_labels1=test_features.pop('time(Seconds)').tolist()
test_labels2=test_features.pop('PPI(Normalized)').tolist()
train_labels=np.concatenate((np.array(train_labels1).reshape((len(train_features),1)),np.array(train_labels2).reshape(len(train_features),1)),axis=1)
test_labels=np.concatenate((np.array(test_labels1).reshape((len(test_features),1)),np.array(test_labels2).reshape(len(test_features),1)),axis=1)
# <font size=5>Creating the model</font>
# In[359]:
# The Normalization layer learns per-feature mean/variance from the train set.
normalizer = preprocessing.Normalization()
normalizer.adapt(np.array(train_features))
# In[368]:
# Surrogate: a small MLP mapping (Width, Height) -> (time, PPI).
model=keras.Sequential([
    normalizer,
    Dense(256,activation='relu'),
    Dense(512,activation='relu'),
    Dense(256,activation='relu'),
    Dense(128,activation='relu'),
    Dense(2)
])
model.compile(optimizer='adam',loss='mse')
# In[369]:
hist_train=model.fit(train_features,train_labels,validation_split=0.2,epochs=1000)
# In[374]:
hist_acc=model.evaluate(test_features,test_labels)
# In[375]:
# NOTE(review): the arguments look swapped - this puts val_loss on the x axis
# and the epoch index on y; plt.plot(hist_train.history['val_loss']) is
# probably what was intended. Confirm before changing.
plt.plot(hist_train.history['val_loss'],range(len(hist_train.history['val_loss'])))
# In[ ]:
| [
"aymanelghotni@gmail.com"
] | aymanelghotni@gmail.com |
5c9f2854ab21e6a4f0a2279da1cc6f59fa67e162 | acb00f2d6bd40522a7a7a3375a2138572da46533 | /games/Sokoban/screens/game.py | cccd4c9ce8a55f64b4209dc54e64e26a4ea7e499 | [] | no_license | Nonju/rcade | 377d605a2ab8188a56c63bb1a3d343a42fd2c6b5 | 7428bf1aaea95896713ec75b47f727bc9f40704c | refs/heads/master | 2023-08-07T16:23:12.882868 | 2021-09-18T22:26:42 | 2021-09-18T22:26:42 | 400,798,058 | 0 | 0 | null | 2021-09-13T18:59:55 | 2021-08-28T13:21:52 | Python | UTF-8 | Python | false | false | 8,706 | py |
import pygame
import math
import os
from enum import Enum
from constants import window
from utils import KeyState, ThrottledUpdate, Delay
from ..constants import colors
from ..states import GameState
from ..events import GOTOMENU
LEVEL_DIR = '/../levels/'
class Direction(Enum):
    # Values double as the number of 90-degree rotations applied to sprite
    # images (see MySprite: image rotated by 90 * direction.value).
    UP = 0
    RIGHT = 1
    DOWN = 2
    LEFT = 3
class Tile:
    # Characters used in the .sokoban level files (see Game.loadLevel).
    FLOOR = ' '
    WALL = '#'
    BOX = 'o'
    TARGET = '.'
    PLAYER = '@'
    SPACER = '-'
class MySprite(pygame.sprite.Sprite):
    """Static sprite built from a base image rotated to face a direction."""

    def __init__(self, image, pos, direction):
        super().__init__()
        rotated = pygame.transform.rotate(image, 90 * direction.value)
        self.image = rotated
        self.rect = rotated.get_rect(topleft=pos)
class Game:
    """Sokoban gameplay screen.

    Owns the level grid, implements player movement and box pushing,
    detects the win condition, and renders both the board and the
    victory overlay onto `surface`.
    """

    def __init__(self, surface, level=''):
        super().__init__()
        self.surface = surface
        self.state = GameState.PLAY
        self.throttledUpdate = ThrottledUpdate()
        self.playerPos = (1, 1) # position x / y
        self.targetPos = [] # List of box target positions [(x,y), (x,y)]
        self.level = self.loadLevel(level) # TODO: Check if should store in separate "level" object
        # Scale tiles so the level's longest side fits the smaller screen dimension.
        longestSide = max(len(self.level), * map(len, self.level))
        self.tileWidth = math.ceil(min([window.SCREEN_WIDTH, window.SCREEN_HEIGHT]) / longestSide)
        self.offset = self.tileWidth * 2
        self.wallImage = pygame.image.load(os.path.join(os.path.dirname(__file__), '../assets','wall.png')).convert_alpha()
        self.wallImage = pygame.transform.scale(self.wallImage, (self.tileWidth, self.tileWidth))
        self.wallGroup = pygame.sprite.Group()
        # Victory screen
        self.menuEvent = pygame.event.Event(GOTOMENU)
        self.victoryKeyInputCooldown = 2000 # 2s before key presses dismiss the victory screen
        self.victoryCooldown = True
        self.victoryFont = pygame.font.Font('fonts/Roboto-Bold.ttf', int(window.SCREEN_HEIGHT * 0.2))
        self.victorySurf = self.victoryFont.render('Du vann!!', False, colors.WHITE)
        self.gotoMenuFont = pygame.font.Font('fonts/Roboto-MediumItalic.ttf', int(window.SCREEN_HEIGHT * 0.05))
        self.gotoMenuSurf = self.gotoMenuFont.render(u'Tryck på valfri tangent för att fortsätta', False, colors.WHITE)

    def loadLevel(self, level=''):
        """Parse the named .sokoban file into a 2D list of tile chars.

        Also records the player start position and all box targets.
        Raises ValueError when no level name is given.
        """
        print('loadLevel - level', level)
        if not level:
            # Raising a bare string is a TypeError in Python 3; use a real exception.
            raise ValueError('Invalid level path')
        filepath = '{}{}.sokoban'.format(os.path.dirname(os.path.abspath(__file__)) + LEVEL_DIR, level)
        with open(filepath, 'r') as f:
            level = f.read()
        tileMap = [list(row) for row in level.split('\n') if row]
        for y in range(len(tileMap)):
            hitWall = False
            for x in range(len(tileMap[y])):
                char = tileMap[y][x]
                # Floor chars before the first wall of a row lie outside the playfield.
                if not hitWall and char == Tile.FLOOR:
                    tileMap[y][x] = ''
                    continue
                elif char == Tile.WALL:
                    hitWall = True
                if char == Tile.PLAYER:
                    self.playerPos = (x, y)
                elif char == Tile.TARGET:
                    self.targetPos.append((x, y))
        return tileMap

    def getTile(self, pos):
        """Return the tile char at (x, y), or '' when out of bounds."""
        x, y = pos
        try: return self.level[y][x]
        except: return ''

    def setTile(self, pos, char=' '):
        """Overwrite the tile char at (x, y)."""
        self.level[pos[1]][pos[0]] = char

    def validateNext(self, direction, current=None):
        ''' Check if positions x / y are a valid destination '''
        next = self.getNext(direction, current)
        nextChar = self.getTile(next)
        if nextChar == Tile.WALL:
            return False
        elif nextChar == Tile.BOX:
            # A box can only move if the square behind it is also free (recursively).
            return self.validateNext(direction, next)
        return True

    def getNext(self, direction, current=None):
        """Return the (x, y) neighbour of `current` (player by default) in `direction`."""
        if current is None:
            current = self.playerPos
        if direction == Direction.UP:
            return (current[0], current[1]-1)
        elif direction == Direction.DOWN:
            return (current[0], current[1]+1)
        elif direction == Direction.LEFT:
            return (current[0]-1, current[1])
        elif direction == Direction.RIGHT:
            return (current[0]+1, current[1])

    def swapTiles(self, direction, current, dest):
        ''' Recursively move / push tiles '''
        if self.getTile(dest) == Tile.BOX:
            self.swapTiles(direction, dest, self.getNext(direction, dest))
        tmp = self.getTile(dest)
        # Preserve target markers: a tile leaving a target square restores it,
        # and a tile entering a target square leaves plain floor behind.
        if not (current in self.targetPos and dest in self.targetPos):
            if current in self.targetPos:
                tmp = Tile.TARGET
            elif dest in self.targetPos:
                tmp = Tile.FLOOR
        self.setTile(dest, self.getTile(current))
        self.setTile(current, tmp)

    def move(self, direction=None):
        """Attempt to move the player (pushing boxes) one step in `direction`."""
        if not direction:
            return
        nextPos = self.getNext(direction)
        if not nextPos or not self.validateNext(direction):
            return
        self.swapTiles(direction, self.playerPos, nextPos)
        self.playerPos = nextPos

    def checkWin(self):
        """Switch to WIN state once every target square holds a box."""
        def endVictoryCooldown():
            self.victoryCooldown = False
        if all(bool(self.getTile(target) == Tile.BOX) for target in self.targetPos):
            self.state = GameState.WIN
            # Ignore key presses briefly so the player doesn't skip the screen by accident.
            Delay.call(f=endVictoryCooldown, ms=self.victoryKeyInputCooldown)

    def gotoMenu(self):
        """Post the event that returns control to the game's main menu."""
        pygame.event.post(self.menuEvent)

    def togglePause(self):
        ## TODO
        # Implement pause menu here
        # - "Continue" --> unpauses game
        # - "Exit" --> asks user if sure ('yes' --> calls gotoMenu, 'no' --> returns to pause menu)
        # For now --> just go back to game main menu
        self.gotoMenu()

    def update(self, events):
        """Handle one (throttled) input tick for the current game state."""
        if not self.throttledUpdate.shouldUpdate(events):
            return
        if self.state in [GameState.PLAY, GameState.PAUSE]:
            if KeyState.up():
                self.move(Direction.UP)
            elif KeyState.down():
                self.move(Direction.DOWN)
            elif KeyState.left():
                self.move(Direction.LEFT)
            elif KeyState.right():
                self.move(Direction.RIGHT)
            elif KeyState.escape():
                self.togglePause()
            self.checkWin()
        elif self.state == GameState.WIN:
            if not self.victoryCooldown and KeyState.any():
                self.gotoMenu()

    def drawTile(self, pos):
        """Draw the tile at grid position `pos`; walls also queue edge sprites."""
        tile = self.getTile(pos)
        if not tile:
            return
        x, y = pos
        color = colors.DARKBROWN
        if tile == Tile.FLOOR:
            color = colors.BROWN
        # elif tile == Tile.WALL:
        #     color = colors.WHITE
        #     color = colors.BLACK
        elif tile == Tile.BOX:
            color = colors.BLUE
        elif tile == Tile.TARGET:
            color = colors.GREEN
        elif tile == Tile.PLAYER:
            color = colors.RED
        posX = x * self.tileWidth + self.offset
        posY = y * self.tileWidth + self.offset
        pygame.draw.rect(self.surface, color, pygame.Rect((posX, posY), (self.tileWidth, self.tileWidth)))
        if tile == Tile.WALL:
            # self.wallImage.get_rect().move((x,y))
            # self.surface.blit(self.wallImage, (self.tileWidth, self.tileWidth))
            def notWall(pos):
                return self.getTile(pos) not in [Tile.WALL, Tile.SPACER, '']
            # Add a wall-edge sprite facing each neighbouring open square.
            if notWall((x, y-1)):
                self.wallGroup.add(MySprite(self.wallImage, (posX, posY), Direction.UP))
            if notWall((x, y+1)):
                self.wallGroup.add(MySprite(self.wallImage, (posX, posY), Direction.DOWN))
            if notWall((x-1, y)):
                self.wallGroup.add(MySprite(self.wallImage, (posX, posY), Direction.RIGHT))
            if notWall((x+1, y)):
                self.wallGroup.add(MySprite(self.wallImage, (posX, posY), Direction.LEFT))

    def draw(self):
        """Render the current state (board or victory overlay) to the surface."""
        self.surface.fill(colors.DARKBROWN)
        self.wallGroup.empty()
        if self.state in [GameState.PLAY, GameState.PAUSE]:
            for y in range(len(self.level)):
                for x in range(len(self.level[y])):
                    self.drawTile((x, y))
            self.wallGroup.draw(self.surface)
        elif self.state == GameState.WIN:
            victoryRect = self.victorySurf.get_rect(center=(window.SCREEN_WIDTH / 2, window.SCREEN_HEIGHT * 0.3))
            self.surface.blit(self.victorySurf, victoryRect)
            if not self.victoryCooldown:
                gotoMenuRect = self.gotoMenuSurf.get_rect(center=(window.SCREEN_WIDTH / 2, window.SCREEN_HEIGHT * 0.55))
                self.surface.blit(self.gotoMenuSurf, gotoMenuRect)
| [
"albinzon@gmail.com"
] | albinzon@gmail.com |
08d4d9c16e7f74c35e5cd54f0d4bb362e7e40170 | 2196d3798497fc7d3d80cd02b008dbb35dbd64d8 | /d05/ex03/d05/d05/settings.py | db954e17e154e3496ce94053aa82cf1a75e97ac7 | [] | no_license | avallete/Python-Django-Pool | d59965e1c6315520da54205848d4f59bf05a9928 | cee52fe4b30cbc53135caf89a8a8843842e2daeb | refs/heads/master | 2021-03-30T06:36:37.546122 | 2016-10-13T15:08:40 | 2016-10-13T15:08:40 | 69,851,733 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,262 | py | """
Django settings for d05 project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# from the environment before any production use.
SECRET_KEY = 'p!^#lra&!#sl7+tbv$^=v)3l7l6@1t*-s)r+3janh97lb6bjjp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'ex03'
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'd05.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'd05.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Local PostgreSQL instance; credentials are development-only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'formationdjango',
        'USER': 'djangouser',
        'PASSWORD': 'secret',
        'HOST': 'localhost',
        'PORT': '5432',
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"avallete@student.42.fr"
] | avallete@student.42.fr |
38a9d372205ce1eadcf83995b5d1a84961c7f6a4 | ed083578e7facb03ab7f1b1e0462894e381e1837 | /PresentationScripts/hysteresis_prl/collisionality_plots_better_test.py | 6fe8a3cfc0593e999736b7932c810b640a74cc4f | [] | no_license | Maplenormandy/psfc-misc | 8cf9dc3a5c66993b72523bf1d74f33eac6326aa4 | e73a70c0e38efcda33452b2fd6439897ddbb1d7f | refs/heads/master | 2021-05-23T07:05:05.706105 | 2020-05-18T07:05:48 | 2020-05-18T07:05:48 | 42,888,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,740 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 13 17:15:04 2016
@author: normandy
"""
from __future__ import division
import profiletools
import gptools
import eqtools
import numpy as np
import scipy
import readline
import MDSplus
import matplotlib.pyplot as plt
import sys
sys.path.append('/home/normandy/git/psfc-misc')
import shotAnalysisTools as sat
import copy
import scipy.optimize as op
readline
# %% Initial defines
def downsampleZave(ztime, z_ave, tmin, tmax):
newtime = np.arange(tmin, tmax, 0.02)
searchtime = np.append(newtime, tmax + 0.02) - 0.01
#b, a = scipy.signal.butter(2, 0.005)
#z_avef = scipy.signal.filtfilt(b, a, z_ave, method="gust")
z_avef = z_ave
newz = np.zeros(newtime.shape)
bounds = np.searchsorted(ztime, searchtime)
for i in range(len(bounds)-1):
newz[i] = np.median(z_avef[bounds[i]:bounds[i+1]])
return newtime, newz
# %% Te fitting
def getTeRanges(p_Te, tmin, tmax):
    """Per-ECE-channel low/median/high Te percentiles over [tmin, tmax).

    For each channel, the samples in the time window are sorted and three
    representative Te values (roughly the 1-10%, 50%, and 90-99% levels)
    are returned together with the matching r/a locations and an error
    estimate.  Channels with too little variance (< 0.015) are dropped.

    NOTE: relies on Python 2 `map` returning a list (the results are used
    directly as fancy indices); under Python 3 this would need list(...).
    """
    channels = np.unique(p_Te.channels[:,1])
    fitr = np.zeros((len(channels), 3))
    fitTe = np.zeros((len(channels), 3))
    fitTeerr = np.zeros((len(channels)))
    t = p_Te.X[:,0]
    for i in range(len(channels)):
        validTimes = np.logical_and(t >= tmin, t < tmax)
        valid = np.logical_and(p_Te.channels[:,1] == channels[i], validTimes)
        cr = p_Te.X[valid, 1]
        cTe = p_Te.y[valid]
        # Need at least 3 samples for meaningful percentiles.
        if len(cTe) < 3:
            continue
        sort = cTe.argsort()
        n = len(sort)
        # Index pairs bracketing the low / median / high percentiles.
        i0 = [np.floor(n*0.1), np.ceil(n*0.01)]
        i1 = [np.floor(n*0.5), np.ceil(n*0.5)]
        i2 = [np.floor(n*0.9), np.ceil(n*0.99)]
        i2 = map(lambda x: np.clip(x, 0, len(sort)-1), i2)
        i0 = map(int, i0)
        i1 = map(int, i1)
        i2 = map(int, i2)
        fitTe[i,0] = np.mean(cTe[sort[i0]])
        fitTe[i,1] = np.mean(cTe[sort[i1]])
        fitTe[i,2] = np.mean(cTe[sort[i2]])
        fitr[i,0] = np.mean(cr[sort[i0]])
        fitr[i,1] = np.mean(cr[sort[i1]])
        fitr[i,2] = np.mean(cr[sort[i2]])
        # Sample variance plus a 10%-of-median systematic floor.
        fitTeerr[i] = np.sqrt(np.var(cTe, ddof=1) + (fitTe[i,1]/10.0)**2)
    fitr = fitr[fitTeerr > 0.015, :]
    fitTe = fitTe[fitTeerr > 0.015, :]
    fitTeerr = fitTeerr[fitTeerr > 0.015]
    return fitr, fitTe, fitTeerr
def sliceTeCrashPoints(p_Te, sawtimes):
    """Copy `p_Te`, keeping only samples in a narrow window at each sawtooth time.

    Points farther than 1e-3 before or 1e-5 after every entry of `sawtimes`
    are removed; the original profile object is left untouched.
    """
    prof = copy.deepcopy(p_Te)
    t = prof.X[:, 0]
    keepaway = np.ones(t.shape, dtype=bool)
    for st in sawtimes:
        keepaway &= (t > st + 1e-5) | (t < st - 1e-3)
    prof.remove_points(keepaway)
    return prof
def fitTeBeforeCrash(p_Te, p_Te2, sawtimes):
    """Fit a MAP Gaussian process to Te points taken just before sawtooth crashes.

    Points from the second ECE system are merged in when available; a flat
    10% relative error is imposed on the time-averaged data before fitting.
    Returns the fitted profile object.
    """
    p = sliceTeCrashPoints(p_Te, sawtimes)
    try:
        p2 = sliceTeCrashPoints(p_Te2, sawtimes)
        p.add_profile(p2)
    except Exception:
        # Second ECE profile may be empty or unavailable; a bare `except:`
        # here would also swallow KeyboardInterrupt/SystemExit.
        pass
    p.time_average()
    p.err_y = p.y / 10.0
    p.create_gp(k='SE', constrain_at_limiter=False, use_hyper_deriv=True)
    p.gp.k.hyperprior = (
        gptools.UniformJointPrior([(0, 8)]) *
        gptools.GammaJointPrior([1 + 1 * 5], [5])
    )
    p.find_gp_MAP_estimate(random_starts=2)
    return p
def fitTe(r, Te, err):
    """Build and MAP-optimize a squared-exponential GP for Te(r)."""
    kernel = gptools.SquaredExponentialKernel()
    kernel.hyperprior = (
        gptools.UniformJointPrior([(0, 8)]) *
        gptools.GammaJointPrior([1 + 1 * 5], [5])
    )
    gp = gptools.GaussianProcess(kernel, use_hyper_deriv=True)
    gp.add_data(r, Te, err_y=err)
    gp.optimize_hyperparameters(random_starts=2)
    return gp
def getTeFit(p_Te, p_Te2, gpc0time, gpc0te, t):
    """Fit low/median/high and pre-crash Te profiles around time `t`.

    Sawteeth are located within +/-12 ms of `t` from the core ECE trace,
    and GPs are fitted to the per-channel percentile data from that window.
    Returns five callables of r/a: Te_min, Te_med, Te_max, Te at the crash
    peak, and its first derivative.  (Python 2: `print t`.)
    """
    print t
    peaks = sat.findSawteeth(gpc0time, gpc0te, t-0.012, t+0.012)
    tmin = gpc0time[peaks[0]]
    tmax = gpc0time[peaks[-1]]
    r, Te, sTe = getTeRanges(p_Te, tmin, tmax)
    """
    try:
        r1, Te1, sTe1 = getTeRanges(p_Te, tmin, tmax)
        try:
            r2, Te2, sTe2 = getTeRanges(p_Te2, tmin, tmax)
        except:
            r = r1
            Te = Te1
            sTe = sTe1
        sTe = np.concatenate((sTe1, sTe2))
        r = np.vstack((r1, r2))
        Te = np.vstack((Te1, Te2))
    except:
        r, Te, sTe = getTeRanges(p_Te2, tmin, tmax)
    """
    # One GP per percentile column (low / median / high).
    gp0 = fitTe(r[:,0], Te[:,0], sTe)
    gp1 = fitTe(r[:,1], Te[:,1], sTe)
    gp2 = fitTe(r[:,2], Te[:,2], sTe)
    maxPeaks = sat.rephaseToNearbyMax(peaks, gpc0te, 4)
    p_peak = fitTeBeforeCrash(p_Te, p_Te2, gpc0time[maxPeaks])
    # Note the standard deviations are meaningless due to the random guess errors
    return (lambda roa: gp0.predict(roa)[0],
            lambda roa: gp1.predict(roa)[0],
            lambda roa: gp2.predict(roa)[0],
            lambda roa: p_peak.smooth(roa)[0],
            lambda roa: p_peak.smooth(roa, n=1)[0])
# %% ne fitting; need to load every time
def fitNe(p_ne, t):
    """MAP-fit a GP to the ne profile using points within +/-90 ms of `t`.

    Low-density points (< 0.1) are discarded before fitting.  Works on a
    deep copy, so the master profile is not modified.  (Python 2 print.)
    """
    p = copy.deepcopy(p_ne)
    p.remove_points((p.X[:, 0] < t-0.09) | (p.X[:, 0] > t+0.09))
    p.remove_points(p.y < 0.1)
    p.create_gp(k='SE', constrain_at_limiter=False, use_hyper_deriv=True)
    p.gp.k.hyperprior = (
        gptools.UniformJointPrior([(0, 5)]) *
        gptools.UniformJointPrior([(0, 0.1)]) *
        gptools.GammaJointPrior([1 + 1 * 5], [5])
    )
    print p.find_gp_MAP_estimate(random_starts=2)
    return p
def evalNeFit(p_ne, roa, t, n=0):
    """Evaluate the fitted ne profile (or its n-th r/a derivative) at time t."""
    roa = np.array(roa)
    # Build (len(roa), 2) query points: column 0 is the fixed time, column 1 is r/a.
    pts = np.column_stack((np.full_like(roa, t, dtype=float), roa))
    return p_ne.smooth(pts, n=n)
def getNeFit(p_ne, t):
    """Return two callables of r/a: the ne fit and its first derivative at time t."""
    def ne(roa):
        return evalNeFit(p_ne, roa, t)[0]

    def dne(roa):
        return evalNeFit(p_ne, roa, t, n=1)[0]

    return ne, dne
# %% Collisionality class
class NustarProfile:
    """Collisionality (nu*) profiles for a single C-Mod shot.

    Loads density/temperature profile data, Zeff, and EFIT magnetics,
    then combines them into nu*(r/a) callables for requested times.
    """
    def __init__(self, shot, tmin, tmax):
        # Load data
        self.e = eqtools.CModEFITTree(shot)
        self.p_ne_master = profiletools.ne(shot, abscissa='r/a', t_min=tmin, t_max=tmax, include=['CTS', 'ETS'], efit_tree=self.e)
        self.p_Te_master = profiletools.Te(shot, abscissa='r/a', t_min=tmin, t_max=tmax, include=['GPC'], efit_tree=self.e)
        self.p_Te2_master = profiletools.Te(shot, abscissa='r/a', t_min=tmin, t_max=tmax, include=['GPC2'], efit_tree=self.e)
        # Zeff from the spectroscopy tree, median-downsampled to a 20 ms grid.
        specTree = MDSplus.Tree('spectroscopy', shot)
        z_aveNode = specTree.getNode(r'\z_ave')
        z_ave = z_aveNode.data()
        ztime = z_aveNode.dim_of().data()
        ztime, zeff = downsampleZave(ztime, z_ave, tmin, tmax)
        self.zefff = scipy.interpolate.interp1d(ztime, zeff)
        # Core ECE Te trace, used for sawtooth detection.
        electrons = MDSplus.Tree('electrons', shot)
        gpc0 = electrons.getNode(r'\ELECTRONS::GPC2_TE0')
        self.gpc0time = gpc0.dim_of().data()
        self.gpc0te = gpc0.data()
        # Magnetics manipulations
        qp = self.e.getQProfile()
        qt = self.e.getTimeBase()
        psin = np.linspace(0, 1, qp.shape[1])
        qfpsi = scipy.interpolate.interp2d(psin, qt, qp)
        # q(r/a) at time t, via the r/a -> psi_norm mapping.
        self.qfroa = lambda t: lambda roa: qfpsi(self.e.roa2psinorm(roa, t, each_t=True), t)
        magRf = self.e.getMagRSpline(kind='linear')
        magaf = self.e.getAOutSpline(kind='linear')
        # Inverse aspect ratio eps(r/a) = r/R at time t.
        self.epsfroa = lambda t: lambda roa: roa * magaf(t) / magRf(t)
    def fitNe(self, tnefits):
        """Fit one ne GP per requested time (calls the module-level fitNe)."""
        self.tnefits = tnefits
        self.p_ne = [None] * len(tnefits)
        for i in range(len(tnefits)):
            self.p_ne[i] = fitNe(self.p_ne_master, tnefits[i])
    def evalProfile(self, tfits):
        """Build nu* min/med/max callables of r/a for each time in `tfits`."""
        neFit = [None] * len(tfits)
        dne = [None] * len(tfits)
        TeMin = [None] * len(tfits)
        TeMed = [None] * len(tfits)
        TeMax = [None] * len(tfits)
        TeCrash = [None] * len(tfits)
        dTeCrash = [None] * len(tfits)
        q = [None] * len(tfits)
        eps = [None] * len(tfits)
        self.collMin = [None] * len(tfits)
        self.collMed = [None] * len(tfits)
        self.collMax = [None] * len(tfits)
        def coll(eps, q, ne, zeff, Te):
            # Standard nu* formula; units follow the profile fits (assumed
            # ne in 1e20 m^-3 and Te in keV -- TODO confirm).
            return lambda roa: 0.0118/(eps(roa)**1.5)*q(roa)*ne(roa)*zeff/(Te(roa)**2)
        for j in range(len(tfits)):
            # Calculate the relevant profile fit
            i = np.argmin(np.abs(self.tnefits - tfits[j]))
            # Get things as a function of r/a
            neFit[j], dne[j] = getNeFit(self.p_ne[i], tfits[j])
            TeMin[j], TeMed[j], TeMax[j], TeCrash[j], dTeCrash[j] = getTeFit(self.p_Te_master, self.p_Te2_master, self.gpc0time, self.gpc0te, tfits[j])
            q[j] = self.qfroa(tfits[j])
            eps[j] = self.epsfroa(tfits[j])
            # Calculate collisionality
            self.collMin[j] = coll(eps[j], q[j], neFit[j], self.zefff(tfits[j]), TeMax[j])
            self.collMed[j] = coll(eps[j], q[j], neFit[j], self.zefff(tfits[j]), TeMed[j])
            self.collMax[j] = coll(eps[j], q[j], neFit[j], self.zefff(tfits[j]), TeMin[j])
        self.neFit = neFit
        self.dne = dne
        self.TeMin = TeMin
        self.TeMed = TeMed
        self.TeMax = TeMax
        self.TeCrash = TeCrash
        self.dTeCrash = dTeCrash
        self.q = q
        self.eps = eps
        self.tfits = tfits
# %% Temporary construction functions
def calcTraces(slf):
    """Locate the minimum of each nu* profile over r/a in [0.2, 0.8].

    Stores the minimum values in slf.numinTrace and their r/a locations
    in slf.xminTrace, one entry per fitted time.
    """
    n = len(slf.tfits)
    slf.numinTrace = np.zeros(n)
    slf.xminTrace = np.zeros(n)
    for j in range(n):
        f = slf.collMin[j]
        # collMin returns an array-like; take the scalar first element.
        res = op.minimize_scalar(lambda x: f(x)[0], bounds=[0.2, 0.8], method='bounded')
        slf.numinTrace[j] = res.fun
        slf.xminTrace[j] = res.x
# %% Collisionality plot
# Build nu* profiles for shot 1160506007 over 0.4-1.6 s, fitting ne at two times.
nustar = NustarProfile(1160506007, 0.4, 1.6)
nustar.fitNe([0.6, 0.96])
nustar.evalProfile(np.array([0.6, 0.96]))
rho = np.linspace(0.0,0.9)
plt.figure()
plt.plot(rho, (nustar.neFit[0](rho)))
#plt.plot(rho, (nustar.neFit[1](rho)))
# NOTE(review): d1 recomputes the gradient of neFit[0]; the commented line
# above suggests neFit[1] was intended -- looks like a copy-paste slip.
d0 = np.gradient(nustar.neFit[0](rho), np.median(np.diff(rho)))
d1 = np.gradient(nustar.neFit[0](rho), np.median(np.diff(rho)))
# %%
# Overlay the ne fits at both times.
plt.figure()
plt.plot(rho, (nustar.neFit[0](rho)), c='b')
plt.plot(rho, (nustar.neFit[1](rho)), c='r')
# %% ne0, te0, etc...
# Central density versus fit time.
plt.figure()
ne0 = np.array([ne(0) for ne in nustar.neFit])
plt.plot(nustar.tfits, ne0)
"""
plt.errorbar(np.linspace(0,1), Temean0, yerr=Testd0, c='b')
plt.errorbar(np.linspace(0,1), Temean1, yerr=Testd1, c='g')
plt.errorbar(np.linspace(0,1), Temean2, yerr=Testd2, c='r')
plt.scatter(r1[:,0], Te1[:,0], c='b')
plt.scatter(r2[:,0], Te2[:,0], c='b', marker='^')
plt.scatter(r1[:,1], Te1[:,1], c='g')
plt.scatter(r2[:,1], Te2[:,1], c='g', marker='^')
plt.scatter(r1[:,2], Te1[:,2], c='r')
plt.scatter(r2[:,2], Te2[:,2], c='r', marker='^')
"""
| [
"maplenormandy@gmail.com"
] | maplenormandy@gmail.com |
098f68ce0de1a4e85ab1ea096ed45ccf2fff3eeb | 4bed9030031fc99f6ea3d5267bd9e773f54320f8 | /sparse/repos/Calysto/matlab_kernel/setup.py | 313419fcbb79751dd03972ceb291c85638644417 | [
"BSD-3-Clause"
] | permissive | yuvipanda/mybinder.org-analytics | c5f4b939541d29727bc8d3c023b4d140de756f69 | 7b654e3e21dea790505c626d688aa15640ea5808 | refs/heads/master | 2021-06-13T05:49:12.447172 | 2018-12-22T21:48:12 | 2018-12-22T21:48:12 | 162,839,358 | 1 | 1 | BSD-3-Clause | 2021-06-10T21:05:50 | 2018-12-22T20:01:52 | Jupyter Notebook | UTF-8 | Python | false | false | 1,680 | py | import glob
from setuptools import setup, find_packages
# Extract __version__ from the package without importing it (avoids pulling
# in runtime dependencies at build time).
with open('matlab_kernel/__init__.py', 'rb') as fid:
    for line in fid:
        line = line.decode('utf-8')
        if line.startswith('__version__'):
            version = line.strip().split()[-1][1:-1]
            break
DISTNAME = 'matlab_kernel'
# Ship the MATLAB helper scripts and all nested package resources.
PACKAGE_DATA = {
    DISTNAME: ['*.m'] + glob.glob('%s/**/*.*' % DISTNAME)
}
# Kernel spec + logos installed into Jupyter's shared kernels directory.
DATA_FILES = [
    ('share/jupyter/kernels/matlab', [
        '%s/kernel.json' % DISTNAME
    ] + glob.glob('%s/images/*.png' % DISTNAME)
    )
]
if __name__ == "__main__":
    setup(name="matlab_kernel",
          author="Steven Silvester, Antony Lee",
          version=version,
          url="https://github.com/Calysto/matlab_kernel",
          license="BSD",
          long_description=open("README.rst").read(),
          classifiers=["Framework :: IPython",
                       "License :: OSI Approved :: BSD License",
                       "Programming Language :: Python :: 3.4",
                       "Programming Language :: Python :: 3.5",
                       "Topic :: System :: Shells"],
          packages=find_packages(include=["matlab_kernel", "matlab_kernel.*"]),
          package_data=PACKAGE_DATA,
          include_package_data=True,
          data_files=DATA_FILES,
          requires=["metakernel (>0.20.8)", "jupyter_client (>=4.4.0)",
                    "ipython (>=4.0.0)"],
          install_requires=["metakernel>=0.20.8", "jupyter_client >=4.4.0",
                            "ipython>=4.0.0",
                            "backports.tempfile;python_version<'3.0'",
                            'wurlitzer>=1.0.2;platform_system!="Windows"']
          )
| [
"yuvipanda@gmail.com"
] | yuvipanda@gmail.com |
210bc7bd0293918d3ca37014a57b68ebe2823f96 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03408/s214379251.py | fba6c07029d057c1512feb87f8d481f483ef4cb4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | N = int(input())
from collections import Counter

# Blue (ListP) and red (ListN) word lists; N was read just above.
ListP = [input() for _ in range(N)]
M = int(input())
ListN = [input() for _ in range(M)]
# Tally each word once instead of rescanning both lists for every entry:
# O(N + M) instead of the original O(N * (N + M)).
cntP = Counter(ListP)
cntN = Counter(ListN)
# Best score: max over blue words of (#blue copies - #red copies), never below 0.
res = 0
for word, nb in cntP.items():
    res = max(res, nb - cntN[word])
print(res)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
07f87234adb59300c6bb17578632811553a04257 | 8cf633e92a0671c8201268620a0372f250c8aeb2 | /205.同构字符串.py | f76217c78e58ac420845c37b25d7da82a86ce71d | [
"Unlicense"
] | permissive | SprintGhost/LeetCode | 76da5c785009d474542e5f2cdac275675b8e60b8 | cdf1a86c83f2daedf674a871c4161da7e8fad17c | refs/heads/develop | 2021-06-06T04:04:28.883692 | 2021-01-01T14:09:26 | 2021-01-01T14:09:26 | 230,635,046 | 0 | 0 | Unlicense | 2020-12-11T14:55:36 | 2019-12-28T16:34:39 | Python | UTF-8 | Python | false | false | 1,636 | py | #
# @lc app=leetcode.cn id=205 lang=python3
#
# [205] 同构字符串
#
# Accepted
# 30/30 cases passed (48 ms)
# Your runtime beats 55.2 % of python3 submissions
# Your memory usage beats 16.3 % of python3 submissions (14.1 MB)
# @lc code=start
class Solution:
def isIsomorphic(self, s: str, t: str) -> bool:
if (not s and t) or (not t and s):
return False
temp_s = dict()
list_s = list()
temp_t = dict()
list_t = list()
cs = 0
ct = 0
for index in range(0,len(s)):
if (s[index] in temp_s):
list_s.append(temp_s[s[index]])
else:
temp_s[s[index]] = cs
list_s.append(cs)
cs += 1
if (t[index] in temp_t):
list_t.append(temp_t[t[index]])
else:
temp_t[t[index]] = ct
list_t.append(ct)
ct += 1
if list_t[index] != list_s[index]:
return False
return True
# Accepted
# 30/30 cases passed (36 ms)
# Your runtime beats 93.12 % of python3 submissions
# Your memory usage beats 40.24 % of python3 submissions (13.7 MB)
class Solution:
    def eigenValues(self, x):
        """Canonical form of x: each char replaced by its 1-based first-seen index."""
        code = {}
        parts = []
        for ch in x:
            if ch not in code:
                code[ch] = str(len(code) + 1)
            parts.append(code[ch])
        return ''.join(parts)

    def isIsomorphic(self, s: str, t: str) -> bool:
        """Two strings are isomorphic iff their canonical forms match."""
        return self.eigenValues(s) == self.eigenValues(t)
# A = Solution()
# print (A.isIsomorphic("aba", "baa"))
# @lc code=end
| [
"864047435@qq.com"
] | 864047435@qq.com |
737ec07de6c5ea89bf1610e81acecb3e9200babb | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5708284669460480_0/Python/zdan/B.py | e89eff79728bb389faaa4be1f8d9b26f813576ea | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,651 | py | import sys
import itertools
import numpy as np
def occurrences(string, target):
if len(target) > 1 and target[0] == target[-1]:
count = start = 0
while True:
start = string.find(target, start) + 1
if start > 0:
count += 1
else:
return count
else:
return string.count(target)
def solve(K, L, S, keyboard, target):
    """Expected prize for the typewriter-monkey setup.

    Enumerates every length-S string typeable on `keyboard` (all equally
    likely), counts occurrences of `target` in each, and returns the best
    possible count minus the mean count.  Degenerate cases (target longer
    than S, target uses keys not on the keyboard, single-key keyboard)
    yield 0.
    """
    if S < L:
        return 0.
    if not set(target).issubset(set(keyboard)):
        return 0.
    if len(set(keyboard)) == 1:
        return 0.
    counts = [occurrences(''.join(keys), target)
              for keys in itertools.product(keyboard, repeat=S)]
    return max(counts) - float(sum(counts)) / len(counts)
if __name__ == '__main__':
    # Read the GCJ-format input file named on the command line and write
    # one "Case #i: <answer>" line per test case to <name>.out.
    # (Python 2 file: note the `print >> fout` chevron syntax.)
    filename_in = sys.argv[1]
    filename_out = filename_in.partition('.')[0] + '.out'
    with open(filename_out, "w") as fout:
        with open(filename_in, "r") as fin:
            T = int(fin.readline())
            for case in range(1, T+1):
                # Each case: K L S line, then the keyboard and target strings.
                K, L, S = [int(x) for x in fin.readline().split()]
                keyboard = fin.readline().strip()
                target = fin.readline().strip()
                print >> fout, "Case #%i:" % case, solve(K, L, S, keyboard, target)
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
431bf8d8bc783d040640116cb8ab3624990c4cd1 | eb8ce153f2e4aac8d88b08bd77a1bfb452433735 | /58168_Nicolas_Carratala/clase3/diccionario_console_app/interface.py | ebde543f8ea95dc0a712688f4169c7ae509169d5 | [] | no_license | nicolascarratala/version-2019 | 63acc0328ed7012f02bf6b5ae784f2fd25bfe34c | 0c6c4d88d3284b509d6c2338e8667903fc29178a | refs/heads/master | 2020-05-18T07:35:21.328037 | 2019-05-22T13:15:38 | 2019-05-22T13:15:38 | 184,269,148 | 0 | 0 | null | 2019-04-30T13:40:52 | 2019-04-30T13:40:51 | null | UTF-8 | Python | false | false | 166 | py | from dictionary import dictionary_creator
def main():
    """Prompt for a word and print the dictionary built from it."""
    # Prompt text is user-facing (Spanish) and intentionally left as-is.
    a=input('ingrese una plabra para introducirla en un diccionario:')
    print(dictionary_creator(a))
main()
"ncarratala@aconcaguasf.com.ar"
] | ncarratala@aconcaguasf.com.ar |
554b02c0fd1b8bac352fe742a597f5be3d13b43d | 8222dcbb226682a9112720927361877a92185407 | /fluent_contents/plugins/sharedcontent/managers.py | 7bd0a8f6915af76928eb41ced0dc3898c6d93cf6 | [
"Apache-2.0"
] | permissive | acolorbright/django-fluent-contents | ada4a5fedb590e5f679463221fce2f965730bac1 | 4e5c6e99134ceee804bb42391ec37e5e17ff5a7e | refs/heads/master | 2023-04-12T05:31:19.179528 | 2018-05-14T11:10:16 | 2018-05-14T11:10:16 | 108,149,326 | 0 | 0 | Apache-2.0 | 2023-04-04T00:22:27 | 2017-10-24T15:48:46 | Python | UTF-8 | Python | false | false | 1,888 | py | from django.conf import settings
from django.db.models import Q, Manager
from parler.managers import TranslatableQuerySet
from fluent_contents import appsettings
from fluent_contents.plugins.sharedcontent import appsettings as sharedcontent_appsettings
class SharedContentQuerySet(TranslatableQuerySet):
    """
    The QuerySet for SharedContent models.

    Tracks which parent site the queryset has been filtered on so the
    site filter is applied at most once (see ``_single_site``).
    """
    def __init__(self, *args, **kwargs):
        super(SharedContentQuerySet, self).__init__(*args, **kwargs)
        # Site this queryset is scoped to; None means "not filtered yet".
        self._parent_site = None
    def _clone(self, klass=None, setup=False, **kw):
        # Propagate the site scoping to clones (filter(), order_by(), ...).
        # NOTE(review): the klass/setup signature matches older Django
        # versions of QuerySet._clone -- verify against the Django in use.
        c = super(SharedContentQuerySet, self)._clone(klass, setup, **kw)
        c._parent_site = self._parent_site
        return c
    def parent_site(self, site):
        """
        Filter to the given site, only give content relevant for that site.
        """
        # Avoid auto filter if site is already set.
        self._parent_site = site
        if sharedcontent_appsettings.FLUENT_SHARED_CONTENT_ENABLE_CROSS_SITE:
            # Allow content to be shared between all sites:
            return self.filter(Q(parent_site=site) | Q(is_cross_site=True))
        else:
            return self.filter(parent_site=site)
    def _single_site(self):
        """
        Make sure the queryset is filtered on a parent site, if that didn't happen already.
        """
        if appsettings.FLUENT_CONTENTS_FILTER_SITE_ID and self._parent_site is None:
            return self.parent_site(settings.SITE_ID)
        else:
            return self
    def get_for_slug(self, slug):
        """
        .. versionadded:: 1.0 Return the content for the given slug.
        """
        return self._single_site().get(slug=slug)
class SharedContentManager(Manager.from_queryset(SharedContentQuerySet)):
    """
    Extra methods attached to ``SharedContent.objects``, see :class:`SharedContentQuerySet`.
    """
    # All behavior comes from the generated queryset-backed manager base.
    pass
| [
"vdboor@edoburu.nl"
] | vdboor@edoburu.nl |
956b57a9281f9184ba90e3a56f5b32bd170bcffc | c96c0fda81a141aa949c9e93eda09f0b081e2b42 | /Database_connectivity.py | 38efb5c8b784008fee3eb2e3f569609bd2188e9b | [] | no_license | hritik1228/Python_MySQL_connectivity | 30bfbd5889c0dc919a86f027a039fbe38e9cc2f5 | 8fee934d98e85b2ee8f0615aa4721f9dc7c56df4 | refs/heads/master | 2022-12-01T01:19:48.865170 | 2020-08-13T13:46:51 | 2020-08-13T13:46:51 | 287,292,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | #Database Connection using MySQL
#1.Import mysql.connector module
import mysql.connector
#2.Creating the connection
# NOTE(review): credentials are hard-coded (empty root password) -- fine for
# a local demo only; load from config/environment for anything real.
mydb=mysql.connector.connect(host='localhost',user='root',password='')
#printing the connection id
print(mydb.connection_id)
#3.creating the cursor object
cur = mydb.cursor()
#4.Execute the query
#To create a database in MySQL
cur.execute("CREATE DATABASE DB1_PYTHON")
| [
"hritikkumar0015@gmail.com"
] | hritikkumar0015@gmail.com |
9bc0bd0fb038d8aa88277056cb4e45322b6ec02c | 3f6a92eb101d1835e5878f1e899a03826d2120b8 | /lattedb/project/ga_q2/migrations/0002_auto_20191107_0016.py | 28ae99c7dcc1b75045cb82135741e0c4d99a185d | [
"BSD-3-Clause"
] | permissive | callat-qcd/lattedb | 5c96efc9bd670733d7f80e1f6e8c6dba17da04e4 | 75c06748f3d59332a84ec1b5794c215c5974a46f | refs/heads/master | 2021-06-14T20:03:14.701433 | 2020-08-07T15:11:59 | 2020-08-07T15:11:59 | 193,587,635 | 1 | 0 | BSD-3-Clause | 2021-05-31T14:40:55 | 2019-06-24T22:09:00 | Python | UTF-8 | Python | false | false | 450 | py | # Generated by Django 2.2.6 on 2019-11-07 00:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make OneToAllStatus.tag optional (blank/null allowed)."""

    dependencies = [
        ('ga_q2', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='onetoallstatus',
            name='tag',
            field=models.CharField(blank=True, help_text='User defined tag for easy searches', max_length=200, null=True),
        ),
    ]
| [
"ithems@ithems.lbl.gov"
] | ithems@ithems.lbl.gov |
168e1711849c37b0ffbf22b2f5269a96f1f3fc6b | 4717914686d97dfbefa1f782056a731eb9d097d5 | /dagbart.py | 4d5ad71e2b2fe57eab09a448138b96d6cce82515 | [] | no_license | bartvanp/dagbart | 0d6fa5c27f4941b84dfa8707e2e5b5653901228b | 9dd40ba18d724a87ccf2ec23a9cab6e479bebd35 | refs/heads/master | 2020-03-22T05:59:45.872420 | 2018-07-03T15:45:18 | 2018-07-03T15:45:18 | 139,605,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | class person():
    # Class-level species tag ('mens' = human in Dutch).
    soort = 'mens'
    def __init__(self, voornaam, naam):
        """Store first name (voornaam) and surname (naam)."""
        self.voornaam = voornaam
        self.naam = naam
    def geef_naam(self):
        """Return the surname ('geef naam' = give name)."""
        return self.naam
def __str__(self):
return self.voornaam + " " + self.naam
def isBart(self):
return self.voornaam.lower()== 'bart'
# Quick demo: build a person and exercise isBart() and __str__.
bart = person('bart','vanparys')
print(bart.isBart())
print(bart)
| [
"noreply@github.com"
] | noreply@github.com |
0dc6107d0d2034aa1278595b927968daa55d67b6 | 7c89ecb375e359e55bf7c4295e04a1ee6231cf9e | /python/utils/get_means_stds2D.py | 503c200e9d329d9212a63f5cfd0a4e88f429de88 | [
"BSD-3-Clause"
] | permissive | JakubicekRoman/CTDeepRot | 2a33afd86c056a1a858b963fd4877c14238b294e | db99bce34b5f69eafc78ba21f53b5e57c86117cc | refs/heads/final | 2022-11-21T06:11:24.342126 | 2020-07-21T08:29:38 | 2020-07-21T08:29:38 | 269,278,216 | 5 | 1 | null | 2020-07-01T14:32:06 | 2020-06-04T06:26:56 | MATLAB | UTF-8 | Python | false | false | 1,583 | py |
import numpy as np
import matplotlib.pyplot as plt
from IPython.core.debugger import set_trace
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import glob
import os
from skimage.io import imread
from skimage.transform import resize
from torch.utils import data
import os
import pandas as pd
from skimage.transform import resize
path= '../../../CT_rotation_data_2D'
xl_file = pd.ExcelFile(path + os.sep+'ListOfData.xlsx')
data = pd.read_excel(xl_file,header=None)
folders=data.loc[:,0].tolist()
names=data.loc[:,1].tolist()
file_names=[]
for folder,name in zip(folders,names):
file_names.append((path + os.sep + folder.split('\\')[-1] + os.sep + name).replace('.mhd',''))
file_names=file_names[:int(len(file_names)*0.8)]
folders=['mean','max','std']
STDS = { i : 0 for i in folders }
MEANS = { i : 0 for i in folders }
for folder in folders:
means=[]
stds=[]
for k in range(3):
meas_tmp=[]
stds_tmp=[]
for i,file_name in enumerate(file_names):
print(i)
tmp=imread(file_name + '_' + folder + '_'+ str(k+1) +'.png' )
tmp=tmp.astype(np.float32)/255
meas_tmp.append(np.mean(tmp))
stds_tmp.append(np.std(tmp))
means.append(np.mean(meas_tmp))
stds.append(np.mean(stds_tmp))
MEANS[folder]=means
STDS[folder]=stds
| [
"tomasvicar@gmail.com"
] | tomasvicar@gmail.com |
3e7e38af79168d9c95965e84323985f613c85858 | 119c699d78749bf5578cf80dfd97b4e0de7e9ff3 | /setor/migrations/0015_auto_20210909_0102.py | 558b554ef8cc2eaa69507be812f155fc234d0f7f | [] | no_license | badrussholeh0110/sppskud3 | 14cc1e8087f8b1a21913ac3d596ffdf0266f5147 | 516f98df776145df2ce68d46ce476f1170fa1c08 | refs/heads/master | 2023-08-06T12:21:16.700454 | 2021-09-13T12:25:36 | 2021-09-13T12:25:36 | 404,664,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # Generated by Django 3.1.5 on 2021-09-08 18:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('setor', '0014_auto_20210908_0324'),
]
operations = [
migrations.AlterField(
model_name='setoran',
name='tgl',
field=models.DateField(auto_now_add=True, null=True),
),
]
| [
"badrussholeh0110@gmail.com"
] | badrussholeh0110@gmail.com |
2e9eaf643c78e585909b2256592c83e8642e3c85 | 728cf4b85d8331e2325760613cc70db5bb71b939 | /block_matching.py | 811cd87c27596cd78a5f225aa80c4e28b2114dbd | [] | no_license | fdlci/Imagerie-projet | 864c140de9deb7c387a032dc7af31014a26c54a7 | 50068d5585631bebb9463256cde2d37d40f3f414 | refs/heads/main | 2023-02-21T05:39:37.580711 | 2021-01-24T15:48:25 | 2021-01-24T15:48:25 | 323,638,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,278 | py | # In NN3D, the BM is done once on an estimate of y. We use the output
# y_tilde_1 of the CNNF.
# Expected result: A look-up-table of group coordinates S = {S1, ..., SN}
# Each Sj contains the coordinates of N2 mutually similar blocks of size N1xN1
# Each pixel in the image is covered by at least one block.
# Main steps of block matching:
# - divide the image into patches of size N1xN1 (non overlapping patches
# except at the edges because the image does not have a size which is a
# multiple of N1=10)
# - Build the look up table (choose the N2 most similar blocks for every
# patch: I included the actual patch in the similar blocks to ensure the
# presence of every patch in the look up table). The table is a dictionnary
# of the form: S = {'patch 0': [0,24,37,...,458...], 'patch 1' : [1, ...]}
# where the elements of the list are the indices of the patches most similar
# to the one considered.
import numpy as np
import matplotlib.pyplot as plt
N1, N2 = 10, 32
def dividing_into_patches(img):
"""Divides an input image into patches of size N1xN1"""
n, p = img.shape[0], img.shape[1]
patches = []
n_10, p_10 = n//10, p//10
for i in range(n_10+1):
for j in range(p_10+1):
if i < n_10 and j < p_10:
patches.append(img[i*N1:i*N1+10, j*N1:j*N1+10, :])
elif i == n_10 and j < p_10:
patches.append(img[n-10:n, j*N1:j*N1+10, :])
elif i < n_10 and j == p_10:
patches.append(img[i*N1:i*N1+10, p-10:p, :])
elif i == n_10 and j == p_10:
patches.append(img[n-10:n, p-10:p, :])
return np.array(patches)
def similarity_matrix(patches):
"""Computes the similarity matrix between the patches
Returns a matrix of size N1xN1"""
size = patches.shape[0]
similarity = np.zeros((size,size))
sim = 0
# similarity computed with the Frobenius norm
# leave the 0 similarity with itself to make sure every patch will be in S
for i in range(size):
for j in range(size):
sim = np.linalg.norm(patches[i] - patches[j])
similarity[i][j] = sim
return similarity
def building_the_look_up_table(similarity):
"""Builds a dictionnary by taking for each patch, the
N2 most similar patches (including itself to make sure
that all patches are in S"""
S = {}
n = similarity.shape[0]
for i in range(n):
idx = np.argpartition(similarity[i], N2)
S['patch ' + str(i)] = idx[:N2]
return S
def block_matching(img):
# Dividing image into patches
patches = dividing_into_patches(img)
# Computing similarity matrix
similarity = similarity_matrix(patches)
# Building look-up-table
look_up_table = building_the_look_up_table(similarity)
return patches, look_up_table
# img = plt.imread('FFDNET_IPOL\input.png')
# patches, look_up_table = block_matching(img)
# plt.figure(figsize=(12,6))
# plt.subplot(1, 2, 1)
# plt.imshow(img)
# plt.axis('off')
# plt.title('Original')
# plt.subplot(1, 2, 2)
# plt.imshow(patches[0])
# plt.axis('off')
# plt.title('Patch of size N1xN1 (top left of original)')
# plt.show()
# print(look_up_table['patch 0']) | [
"ines.florezdelacolina@student-cs.fr"
] | ines.florezdelacolina@student-cs.fr |
f79cf879ab91898933f340992e31d799a7e9b21a | 870285250bf8cebca70f852b304494ece6f23677 | /bubba-album-inotifyd | 18e110b792c7d5257c852bab4d16a3fa7fc4b340 | [] | no_license | Excito/bubba-album | 3ce6765fe71f528ce8ce17f33744f52b1913c3e7 | 00fb98879b5d98a3ee46692a425a506bef4ea073 | refs/heads/master | 2020-05-18T12:52:02.954464 | 2013-06-26T10:56:28 | 2013-06-26T10:56:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,002 | #!/usr/bin/python
# Requires Python >= 2.5
import sys
import pyinotify
import logging
import logging.handlers
from configobj import ConfigObj
import MySQLdb
from PIL import Image
import pyexiv2
from datetime import datetime
import os
import subprocess
import psutil
config = ConfigObj("/etc/dbconfig-common/bubba-album.conf")
my_logger = logging.getLogger('MyLogger')
my_logger.setLevel(logging.INFO)
handler = logging.handlers.SysLogHandler(address='/dev/log')
my_logger.addHandler(handler)
class EventHandler(pyinotify.ProcessEvent):
def process_IN_CLOSE_WRITE(self, event):
return self.process_IN_CREATE(event)
def process_IN_MOVED_TO(self, event):
return self.process_IN_CREATE(event)
def process_IN_MOVED_FROM(self, event):
return self.process_IN_DELETE(event)
def process_IN_CREATE(self, event):
my_logger.info("Starting to process %s" % event.pathname)
if event.dir:
return
db = MySQLdb.connect(passwd=config["dbc_dbpass"], user=config["dbc_dbuser"], db=config["dbc_dbname"])
c = db.cursor()
c.execute("""SELECT COUNT(id) FROM image WHERE path = %s""", (event.pathname))
count = c.fetchone()
if count[0] > 0:
my_logger.debug("Image %s already found in database." % event.pathname)
return
try:
meta = pyexiv2.Image(event.pathname)
meta.readMetadata()
except IOError as e:
my_logger.info(e)
return
if 'Exif.Image.DateTime' in meta.exifKeys():
dt = meta['Exif.Image.DateTime']
else:
dt = datetime.today()
d = dt.date()
year_id = self.set_year(c, db, d)
month_id = self.set_month(year_id, db, c, d)
day_id = self.set_day(month_id, db, c, d)
width = height = 0
try:
im = Image.open(event.pathname)
(width, height) = im.size
except:
my_logger.warning("Unable to open %s as an image. Ignoring file" % event.pathname)
c.execute(
"""INSERT INTO image (path, name, width, height, created, album) VALUES (%s,%s,%s,%s,%s,%s)""",
(
event.pathname,
event.name,
width,
height,
dt.isoformat(),
day_id
)
)
db.commit()
image_id = c.lastrowid
my_logger.info("Added %s (%sx%s) with ID %s" % (event.pathname, width, height, c.lastrowid))
spool = "/var/spool/album"
if os.path.exists(spool) and os.path.isdir(spool):
try:
os.symlink(event.pathname, os.path.join(spool, str(image_id)))
except:
pass
if os.path.exists('/tmp/bubba-album.pid'):
pid = open('/tmp/bubba-album.pid', 'r').read()
try:
p = psutil.Process(int(pid))
if not p.is_running():
subprocess.call(['/usr/sbin/album_import.pl'])
except psutil.error.NoSuchProcess:
subprocess.call(['/usr/sbin/album_import.pl'])
else:
subprocess.call(['/usr/sbin/album_import.pl'])
def process_IN_DELETE(self, event):
if event.dir:
return
db = MySQLdb.connect(passwd=config["dbc_dbpass"], user=config["dbc_dbuser"], db=config["dbc_dbname"])
c = db.cursor()
c.execute("""SELECT id FROM image WHERE path = %s""", (event.pathname,))
ids = [str(i[0]) for i in c.fetchall()]
c.execute("""DELETE FROM image WHERE path = %s""", (event.pathname,))
db.commit()
for id in ids:
try:
os.unlink(os.path.join('/var/lib/album/thumbs/hdtv', str(id)))
my_logger.info("Tailed to remove %s" % (os.path.join('/var/lib/album/thumbs/hdtv', str(id))))
except Exception:
sys.exc_clear()
try:
os.unlink(os.path.join('/var/lib/album/thumbs/rescaled', str(id)))
except Exception:
my_logger.info("Tailed to remove %s" % (os.path.join('/var/lib/album/thumbs/thumbs', str(id))))
sys.exc_clear()
try:
os.unlink(os.path.join('/var/lib/album/thumbs/thumbs', str(id)))
except Exception:
my_logger.info("Tailed to remove %s" % (os.path.join('/var/lib/album/thumbs/thumbs', str(id))))
sys.exc_clear()
my_logger.info("Removed %s with ID %s" % (event.pathname, ", ".join(ids)))
def set_day(self, month_id, database, cursor, date):
if cursor.execute("""SELECT id FROM album WHERE name = %s AND parent = %s""", (
date.day,
month_id
)):
day_id = cursor.fetchone()[0]
else:
cursor.execute("""INSERT INTO album (name,parent,caption,path,public) VALUES (%s,%s,%s,'',0)""", (
date.day,
month_id,
"Images created on the day %s of the month %s of the year %s" % (date.day, date.month, date.year)
))
database.commit()
day_id = cursor.lastrowid
return day_id
def set_month(self, year_id, database, cursor, date):
if cursor.execute("""SELECT id FROM album WHERE name = %s AND parent = %s""", (
date.month,
year_id
)):
month_id = cursor.fetchone()[0]
else:
my_logger.debug("created for month %s for year %s" % (date.month, date.year))
cursor.execute("""INSERT INTO album (name,parent,caption,path,public) VALUES (%s,%s,%s,'',0)""", (
date.month,
year_id,
"Images created on the month %s of the year %s" % (date.month, date.year)
))
database.commit()
month_id = cursor.lastrowid
return month_id
def set_year(self, cursor, database, date):
if cursor.execute("""SELECT id FROM album WHERE name = %s AND parent IS NULL""", (date.year,)):
year_id = cursor.fetchone()[0]
else:
my_logger.debug("created for year %s" % date.year)
cursor.execute("""INSERT INTO album (name,caption,path,public) VALUES (%s,%s,'',0)""", (
date.year,
"Images created on the year %s" % date.year
))
database.commit()
year_id = cursor.lastrowid
return year_id
wm = pyinotify.WatchManager()
mask = pyinotify.ALL_EVENTS # watched events
path = '/home/storage/pictures'
handler = EventHandler()
notifier = pyinotify.Notifier(wm, handler, read_freq=10)
notifier.coalesce_events()
wm.add_watch(path, mask, rec=True, auto_add=True)
try:
notifier.loop(daemonize=True)
except pyinotify.NotifierError, err:
print >> sys.stderr, err
except:
my_logger.exception("Exception thrown :(")
| [
"carl@excito.com"
] | carl@excito.com | |
1c156904e052529e3b104bb6492ca6837c5f78f2 | a65e5dc54092a318fc469543c3b96f6699d0c60b | /Personel/Rajkumar/Python/feb17/swap.py | 017dbe26ac5f78334e999640b352f86530f059e5 | [] | no_license | shankar7791/MI-10-DevOps | e15bfda460ffd0afce63274f2f430445d04261fe | f0b9e8c5be7b28298eb6d3fb6badf11cd033881d | refs/heads/main | 2023-07-04T15:25:08.673757 | 2021-08-12T09:12:37 | 2021-08-12T09:12:37 | 339,016,230 | 1 | 0 | null | 2021-08-12T09:12:37 | 2021-02-15T08:50:08 | JavaScript | UTF-8 | Python | false | false | 166 | py | n1=int(input("Enter a 1st Number:-"))
n2=int(input("Enter a 2nd Number:-"))
print("Before Swaping:-",n1,n2)
n1=n1+n2
n2=n1-n2
n1=n1-n2
print("After Swaping:-",n1,n2)
| [
"varmarajkumar745@gmail.com"
] | varmarajkumar745@gmail.com |
efb8b0a7ec90c3adb05a59a93c8c6d7319c7e1ae | b1db906c391c4966238bc95c67dbef2a2381e3d9 | /crawler/weibo_comments_extract_top20.py | 4791dbe7dcbb13880eaa116242a11a88be2b3d9d | [] | no_license | daiyunbin/hyys2018 | 985fa1b3a13513ba301ebb4c8680d696ede67b67 | c637976c54cde1fd422c480f004d4237a14b5b11 | refs/heads/master | 2021-09-11T19:14:45.363091 | 2018-04-11T09:04:22 | 2018-04-11T09:04:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | import csv
if __name__=='__main__':
file_path = 'C:/Users/luopc/Desktop/hyys_2016_2018/weibo/2018/2018_热门.csv'
top_file_path = 'C:/Users/luopc/Desktop/hyys_2016_2018/weibo/2018/2018_热门_top20.csv'
with open(file_path, mode='r', encoding='GBK') as input_file,\
open(top_file_path, mode='w', encoding='GBK', newline='') as output_file:
reader = csv.reader(input_file)
head = reader.__next__()
data = []
all = set()
for row in reader:
if row[0]+'_'+row[1] not in all:
data.append(row)
all.add(row[0]+'_'+row[1])
data.sort(key=lambda x:(x[0], -int(x[4])))
count = 0
writer = csv.writer(output_file)
writer.writerow(head)
keyword = ''
for row in data:
if keyword != row[0]:
count = 0
keyword = row[0]
count += 1
if count <= 20 and int(row[4]) > 0:
writer.writerow(row)
| [
"luopengcheng_love@126.com"
] | luopengcheng_love@126.com |
4f95ffbb37ddcbd7d1965a4ed8a986c5e52274fa | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_193/ch11_2019_08_22_19_37_28_357381.py | 1ee4b70ca39ef725a52d1d1a1e107d4d7747a66f | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | def celsius_para_fahrenheit(x):
y = (x * 1.8) + 32
return y
a = 0
b = celsius_para_fahrenheit(a)
print(b) | [
"you@example.com"
] | you@example.com |
dd6cf28a28553b7038d8b7d0360d010b0db61caa | df6490256730a40661ec367fcb58c886702682fa | /twitter_verification/twitter_verification/settings.py | 2e59a0f6e580eb6946b5355bbcd5a513ea08840e | [] | no_license | SebastianCanoRuiz/TwitterAccountVerification_version_alterna | 388cb85977e0f245aa7402268e84f71586e0f87d | 31b16834bffdfeb40b25d4c4b1c9965d8a2f4b2f | refs/heads/master | 2022-12-14T10:44:06.319929 | 2020-08-29T22:48:23 | 2020-08-29T22:48:23 | 290,207,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,387 | py | """
Django settings for twitter_verification project.
Generated by 'django-admin startproject' using Django 1.11.20.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$2j%h1@kzqsvwi)ox13&7v#bwhrza9o@r_hcz5djmr&(^@v1)u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'cuentas_falsas',
'rest_framework',
'corsheaders',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
ROOT_URLCONF = 'twitter_verification.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'twitter_verification.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'twitter_dataset',
'USER': 'root',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"jsh7590cr@gmail.com"
] | jsh7590cr@gmail.com |
f543fbc7e0ea3e9731253b8af9d9c447f1941914 | 8f9857c97b7e49d0f6922da709d3902d1fa82716 | /inference/inf_inference_3d.py | 93c1fad42f8b5ea7caaf29e6601801bf385f23b2 | [] | no_license | huangzih/WADNet | f4996fa7b26fc7871b6d9fa3354a7692e5ff0e08 | fa8360a3b2abe7973c7b344c03547685c3e9e32a | refs/heads/main | 2023-05-24T07:04:04.105677 | 2021-06-16T03:56:50 | 2021-06-16T03:56:50 | 376,763,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,278 | py | import numpy as np
import pandas as pd
import sys
import gc
import os
from os.path import isfile
from copy import deepcopy
from torch import nn, optim
from torch.nn import functional as F
from torch.nn import LSTM
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import *
from tqdm import tqdm
Model_Path = 'models_wavelstm'
# Data Preprocess
with open('./data/task1.txt', 'r') as f:
words = f.readlines()
f.close()
d3_data = []
for word in words:
idx = int(float(word.split(';')[0]))
if idx == 3: d3_data.append(','.join(word.split(';')[1:])[:-2])
with open('./data/task1-3d.csv', 'w') as f:
f.write('pos;length\n')
for word in d3_data:
f.write(word+';')
length = int(len(word.split(','))/3)
f.write(str(length)+'\n')
f.close()
del words, d3_data
gc.collect()
data = pd.read_csv('./data/task1-3d.csv', sep=';')
def SepX(df):
return ','.join(df['pos'].split(',')[:df['length']])
def SepY(df):
return ','.join(df['pos'].split(',')[df['length']:2*df['length']])
def SepZ(df):
return ','.join(df['pos'].split(',')[2*df['length']:])
data['pos_x'] = data.apply(SepX, axis=1)
data['pos_y'] = data.apply(SepY, axis=1)
data['pos_z'] = data.apply(SepZ, axis=1)
def normalize(x):
data = np.array([float(i) for i in x.split(',')])
mean = np.mean(data)
std = np.std(data)
data2 = (data - mean)/std
return ','.join([str(i) for i in data2])
data['pos_x'] = data['pos_x'].apply(lambda x: normalize(x))
data['pos_y'] = data['pos_y'].apply(lambda x: normalize(x))
data['pos_z'] = data['pos_z'].apply(lambda x: normalize(x))
# Check Model File
MarkLength = [10,15,20,25,30,40,45,50,55,60,70,80,90,100,
105,110,115,120,125,150,175,200,225,250,
275,300,325,350,375,
400,425,450,475,500,550,600,650,700,750,800,850,900,950]
flag = False
for fold in range(5):
for mark in MarkLength:
if not isfile('./{}/Fold{}/{}/bestmodel.pth'.format(Model_Path, fold, mark)):
print('Model file is missing for length {} at fold {}'.format(mark, fold))
flag = True
if flag: sys.exit(0)
# PyTorch Dataset
def fixlength(x):
assert (x>=10)
if x in MarkLength:
return x
MarkLengthTemp = deepcopy(MarkLength)
MarkLengthTemp.append(x)
MarkLengthTemp.sort()
Mark = MarkLengthTemp.index(x)
return MarkLengthTemp[Mark-1]
data['fix_length'] = data['length'].apply(lambda x: fixlength(x))
class AnDiDataset(Dataset):
def __init__(self, df):
self.df = df.copy()
def __getitem__(self, index):
data_seq_x = torch.Tensor([float(i) for i in self.df['pos_x'].iloc[index].split(',')])
data_seq_y = torch.Tensor([float(i) for i in self.df['pos_y'].iloc[index].split(',')])
data_seq_z = torch.Tensor([float(i) for i in self.df['pos_z'].iloc[index].split(',')])
ori_length = self.df['length'].iloc[index]
fix_length = self.df['fix_length'].iloc[index]
if fix_length == ori_length:
data_seq = torch.stack((data_seq_x, data_seq_y, data_seq_z), dim = 0)
return data_seq, fix_length, 1
else:
data_seq_list = []
for i in [0, ori_length-fix_length]:
seq_x = data_seq_x[i:i+fix_length]
seq_y = data_seq_y[i:i+fix_length]
seq_z = data_seq_z[i:i+fix_length]
data_seq_list.append(torch.stack((seq_x, seq_y, seq_z), dim = 0))
return data_seq_list, fix_length, 2
def __len__(self):
return len(self.df)
test_loader = DataLoader(AnDiDataset(data), batch_size=1, shuffle=False, num_workers=2)
# PyTorch Model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
#https://github.com/odie2630463/WaveNet/blob/master/model.py#L70
class Wave_LSTM_Layer(nn.Module):
def __init__(self, filters, kernel_size, dilation_depth, input_dim, hidden_dim, layer_dim):
super().__init__()
self.filters = filters
self.kernel_size = kernel_size
self.dilation_depth = dilation_depth
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.layer_dim = layer_dim
self.dilations = [2**i for i in range(dilation_depth)]
self.conv1d_tanh = nn.ModuleList([nn.Conv1d(in_channels=filters, out_channels=filters, kernel_size=kernel_size,
padding=dilation, dilation=dilation) for dilation in self.dilations])
self.conv1d_sigm = nn.ModuleList([nn.Conv1d(in_channels=filters, out_channels=filters, kernel_size=kernel_size,
padding=dilation, dilation=dilation) for dilation in self.dilations])
self.conv1d_0 = nn.Conv1d(in_channels=input_dim, out_channels=filters,
kernel_size=kernel_size, padding=1)
self.conv1d_1 = nn.Conv1d(in_channels=filters, out_channels=filters,
kernel_size=1, padding=0)
self.post = nn.Sequential(nn.BatchNorm1d(filters), nn.Dropout(0.1))
self.lstm = LSTM(filters, hidden_dim, layer_dim, batch_first=True)
def forward(self, x):
# WaveNet Block
x = self.conv1d_0(x)
res_x = x
for i in range(self.dilation_depth):
tahn_out = torch.tanh(self.conv1d_tanh[i](x))
sigm_out = torch.sigmoid(self.conv1d_sigm[i](x))
x = tahn_out * sigm_out
x = self.conv1d_1(x)
res_x = res_x + x
#x = res_x
x = self.post(res_x)
# LSTM Block
x = x.permute(0,2,1)
h0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim).requires_grad_().to(device)
c0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim).requires_grad_().to(device)
out, state = self.lstm(x, (h0.detach(), c0.detach()))
#out = self.fc(out[:,-1,:])
return out.permute(0,2,1)
class AnDiModel(nn.Module):
def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):
super().__init__()
self.wave_lstm_1 = Wave_LSTM_Layer(32, 3, 16, input_dim, hidden_dim, layer_dim)
#self.wave_lstm_2 = Wave_LSTM_Layer(32, 3, 8, 16, 32, layer_dim)
#self.wave_lstm_3 = Wave_LSTM_Layer(64, 3, 4, 32, hidden_dim, layer_dim)
self.fc = nn.Sequential(nn.Dropout(p=0.1), nn.Linear(hidden_dim, output_dim))
def forward(self, x):
x = self.wave_lstm_1(x)
#x = self.wave_lstm_2(x)
#x = self.wave_lstm_3(x)
return self.fc(x.permute(0,2,1)[:,-1,:])
model = AnDiModel(3, 64, 3, 1).to(device)
# Check PyTorch Version
try:
model.load_state_dict(torch.load('./{}/Fold0/10/bestmodel.pth'.format(Model_Path)))
except:
print('fail to load model file, please check the PyTorch version (1.6.0 is required).')
# Inference
output_list_folds = []
for fold in range(5):
output_list = []
for seq_batch, seq_length, seq_mark in tqdm(test_loader):
model.load_state_dict(torch.load('./{}/Fold{}/{}/bestmodel.pth'.format(Model_Path, fold, int(seq_length))));
model.eval()
with torch.no_grad():
if int(seq_mark) == 1:
seq_batch = seq_batch.to(device)
output = model(seq_batch)
elif int(seq_mark) == 2:
output_sum = 0.
for seq in seq_batch:
output = model(seq.to(device))
output_sum += output
output = output_sum/len(seq_batch)
output_list.append(output.detach().cpu())
output_list = np.array(torch.tensor(output_list).detach().numpy())
output_list_folds.append(deepcopy(output_list))
output_list = sum(output_list_folds)/5.
output_list_final = (output_list*1.0083).clip(0.05,2.0)
with open('./output/task1-3d.txt', 'w') as f:
for i in output_list_final:
f.write('3;'+str(i)+'\n')
f.close()
| [
"noreply@github.com"
] | noreply@github.com |
4af1a97e3d67f049f346cc7b4760ac232eb1d942 | c62040636877dc3584bcf4d22988fc71739c8a78 | /lbworkflow/tests/test_process.py | 828d4ebd11d173132620237557b9f9d4b02ff56d | [
"MIT"
] | permissive | felixcheruiyot/django-lb-workflow | 82de680f37aa68707640022cb3b99435f54ea09e | 0fb4be2d39848374d60ec27c6ee1b72913e2f674 | refs/heads/master | 2022-04-12T19:11:41.673818 | 2020-04-09T12:03:53 | 2020-04-09T12:03:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,312 | py | from django.contrib.auth import get_user_model
from django.urls import reverse
from lbworkflow.views.helper import user_wf_info_as_dict
from .leave.models import Leave
from .test_base import BaseTests
User = get_user_model()
class HelperTests(BaseTests):
def test_user_wf_info_as_dict(self):
leave = self.leave
leave.submit_process()
info = user_wf_info_as_dict(leave, self.users['tom'])
self.assertIsNotNone(info['task'])
self.assertIsNotNone(info['object'])
self.assertFalse(info['can_give_up'])
self.assertEqual(info['wf_code'], 'leave')
info = user_wf_info_as_dict(leave, self.users['owner'])
self.assertIsNone(info['task'])
self.assertTrue(info['can_give_up'])
info = user_wf_info_as_dict(leave, self.users['vicalloy'])
self.assertIsNone(info['task'])
class ViewTests(BaseTests):
def setUp(self):
super().setUp()
self.client.login(username='owner', password='password')
def test_start_wf(self):
resp = self.client.get(reverse('wf_start_wf'))
self.assertEqual(resp.status_code, 200)
def test_wf_list(self):
resp = self.client.get(reverse('wf_list', args=('leave', )))
self.assertEqual(resp.status_code, 200)
def test_wf_report_list(self):
resp = self.client.get(reverse('wf_report_list'))
self.assertEqual(resp.status_code, 200)
def test_wf_list_export(self):
resp = self.client.get(reverse('wf_list', args=('leave', )), {'export': 1})
self.assertEqual(resp.status_code, 200)
def test_detail(self):
resp = self.client.get(reverse('wf_detail', args=('1', )))
self.assertEqual(resp.status_code, 200)
def test_submit(self):
self.client.login(username='owner', password='password')
url = reverse('wf_new', args=('leave', ))
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
data = {
'start_on': '2017-04-19 09:01',
'end_on': '2017-04-20 09:01',
'leave_days': '1',
'reason': 'test save',
}
resp = self.client.post(url, data)
leave = Leave.objects.get(reason='test save')
self.assertRedirects(resp, '/wf/%s/' % leave.pinstance.pk)
self.assertEqual('Draft', leave.pinstance.cur_node.name)
data['act_submit'] = 'Submit'
data['reason'] = 'test submit'
resp = self.client.post(url, data)
leave = Leave.objects.get(reason='test submit')
self.assertRedirects(resp, '/wf/%s/' % leave.pinstance.pk)
self.assertEqual('A2', leave.pinstance.cur_node.name)
def test_edit(self):
self.client.login(username='owner', password='password')
data = {
'start_on': '2017-04-19 09:01',
'end_on': '2017-04-20 09:01',
'leave_days': '1',
'reason': 'test save',
}
url = reverse('wf_new', args=('leave', ))
resp = self.client.post(url, data)
leave = Leave.objects.get(reason='test save')
self.assertRedirects(resp, '/wf/%s/' % leave.pinstance.pk)
self.assertEqual('Draft', leave.pinstance.cur_node.name)
url = reverse('wf_edit', args=(leave.pinstance.pk, ))
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
data['act_submit'] = 'Submit'
data['reason'] = 'test submit'
resp = self.client.post(url, data)
leave = Leave.objects.get(reason='test submit')
self.assertRedirects(resp, '/wf/%s/' % leave.pinstance.pk)
self.assertEqual('A2', leave.pinstance.cur_node.name)
def test_delete(self):
self.client.login(username='admin', password='password')
# POST
url = reverse('wf_delete')
leave = self.create_leave('to delete')
data = {'pk': leave.pinstance.pk}
resp = self.client.post(url, data)
self.assertRedirects(resp, '/wf/list/')
self.assertIsNone(self.get_leave('to delete'))
# GET
leave = self.create_leave('to delete')
data = {'pk': leave.pinstance.pk}
resp = self.client.get(url, data)
self.assertRedirects(resp, '/wf/list/')
self.assertIsNone(self.get_leave('to delete'))
| [
"zbirder@gmail.com"
] | zbirder@gmail.com |
f4cbdc74bb0c59adc2091f47616c224a592f7092 | 7f64a86da168cab37b92999d572392c58091abd9 | /python_ex85.py | 23b32f42dc70ed48fe79bf731c0a41c89ce9b80a | [] | no_license | tomdefeo/Self_Taught_Examples | a093444872732d34fe48d4b49718cbad47baa033 | ec2f85acd8d5a919f8e299186b4b418a22294201 | refs/heads/master | 2020-12-30T13:20:28.119565 | 2017-03-06T17:07:25 | 2017-03-06T17:07:25 | 91,199,883 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | # Do not run.
def [function_name]([parameters]):
[function_definition]
| [
"tomdefeo@goalkeep.com"
] | tomdefeo@goalkeep.com |
5d5e7857208cf90f00e2c2887a70506d899dc5f6 | 98c75d19034575fed021979d91ba909bb30cddce | /thaifin/sources/finnomena.py | 18264c1df2cde4c3ef14950e90690b782872b1a4 | [
"ISC"
] | permissive | webclinic017/thaifin | 791101ae3ce8a8a8f5d7c822139aff50471b0a53 | 5f5f3d99092379a3425a70a6ef54412da5d10633 | refs/heads/master | 2023-01-29T00:57:52.570826 | 2020-11-22T08:51:32 | 2020-11-22T08:51:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,884 | py | from typing import Dict
from typing import List
from typing import Optional
import requests
from pydantic import BaseModel
class BaseResponse(BaseModel):
statusOK: bool
data: List[Dict]
class FinancialSheet(BaseModel):
SecurityID: Optional[int]
Fiscal: Optional[int]
Quarter: Optional[int]
Cash: Optional[float]
DA: Optional[float]
DebtToEquity: Optional[float]
Equity: Optional[float]
EarningPerShare: Optional[float]
EarningPerShareYoY: Optional[float]
EarningPerShareQoQ: Optional[float]
GPM: Optional[float]
GrossProfit: Optional[float]
NetProfit: Optional[float]
NetProfitYoY: Optional[float]
NetProfitQoQ: Optional[float]
NPM: Optional[float]
Revenue: Optional[float]
RevenueYoY: Optional[float]
RevenueQoQ: Optional[float]
ROA: Optional[float]
ROE: Optional[float]
SGA: Optional[float]
SGAPerRevenue: Optional[float]
TotalDebt: Optional[float]
DividendYield: Optional[float]
BookValuePerShare: Optional[float]
Close: Optional[float]
MKTCap: Optional[float]
PriceEarningRatio: Optional[float]
PriceBookValue: Optional[float]
EVPerEbitDA: Optional[float]
EbitDATTM: Optional[float]
PaidUpCapital: Optional[float]
CashCycle: Optional[float]
OperatingActivities: Optional[float]
InvestingActivities: Optional[float]
FinancingActivities: Optional[float]
Asset: Optional[float]
class FinancialSheetsResponse(BaseResponse):
data: List[FinancialSheet]
def get_financial_sheet(securityID, fiscal=2009):
url = "https://www.finnomena.com/fn3/api/stock/financial"
querystring = {"securityID": str(securityID), "fiscal": str(fiscal)}
payload = ""
headers = {
"connection": "keep-alive",
"accept": "application/json, text/plain, */*",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36 Edg/83.0.478.58",
"sec-fetch-site": "same-origin",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
"referer": "https://www.finnomena.com/stock/SCB?bt_id=60",
"accept-language": "en-US,en;q=0.9,th;q=0.8",
}
response = requests.request(
"GET", url, data=payload, headers=headers, params=querystring
)
return FinancialSheetsResponse.parse_raw(response.text)
class StockInfo(BaseModel):
name: str
thName: str
enName: str
company_id: int
security_id: int
class StockInfosResponse(BaseResponse):
data: List[StockInfo]
def get_stock_list() -> StockInfosResponse:
url = "https://www.finnomena.com/fn3/api/stock/list"
payload = ""
headers = {
"connection": "keep-alive",
"accept": "application/json, text/plain, */*",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36 Edg/83.0.478.58",
"sec-fetch-site": "same-origin",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
"referer": "https://www.finnomena.com/stock/SCB?bt_id=60",
"accept-language": "en-US,en;q=0.9,th;q=0.8",
# 'cookie': "finnakies=5MLMJIS20200601; _ga=GA1.2.1303959403.1591006631; _fbp=fb.1.1591006631416.2138659500; PHPSESSID=on32ipkohq41hfv1dcu2eutuou; anspress_session=83fa3179f4e427a9e00e328d0d2ba568; _gid=GA1.2.1759981110.1594035335; terms_cond_accepted=false; _cbclose=1; _cbclose47370=1; _uid47370=595254B6.3; _ctout47370=1; verify=test; finnakies-ss=635VtLn20200706; _hjid=053a69da-8d52-4064-b1ce-82118f02ac68; _hjAbsoluteSessionInProgress=1; __cfduid=dad57d546f7805625889ebd80a183dc091594035361; _hjDonePolls=376740; visit_time=10"
}
response = requests.request("GET", url, data=payload, headers=headers)
return StockInfosResponse.parse_raw(response.text)
| [
"me@nutchanon.org"
] | me@nutchanon.org |
5444b3bc92c299c2c3ba4d46f4e76ec7031ee469 | e941e45481c849132b8e096ff2e018c93fda86d1 | /testing.py | 99951cfbf1daaa47e9994416990e7ba7b2e7189b | [] | no_license | dvndra/audio_anomaly_detection | 1440861c56cfa5bc2c90796ecf1f58f980816b39 | 48425579d0ad33830db9834df515963394eabc88 | refs/heads/master | 2020-07-30T16:15:12.307553 | 2019-09-23T07:11:14 | 2019-09-23T07:11:14 | 210,287,581 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,008 | py | import pyaudio
import numpy as np
from numpy.linalg import inv
from matplotlib import pyplot as plt
from scipy import signal
from sklearn import mixture
import pickle
import wave
from numpy import newaxis
import contextlib
import os
import re
import math
import lmdb
import sys
import copy
from sklearn.externals import joblib
##sys.path.append("/home/axis-inside/Downloads/caffe/python")
##sys.path.append("/home/axis-inside/Downloads/caffe/distribute/python")
##sys.path.append("/usr/local/cuda-8.0/lib64")
##import caffe
np.set_printoptions(threshold=np.nan) ## To print all elements of an array
os.getcwd() ## Get the current directory
os.chdir('/home/axis-inside/Audio_Data/anomaly_detection/audio_files/demo_validation') ## change the directory
training_list = os.listdir('.') ## list all the files in the current directory
## MAIN Function
data=[]
for file in training_list:
def audio_window(window_time, window_size, CHUNKSIZE,RATE,file):
buffer_data = [] ## declaring variable for buffering data
wf = wave.open(file, 'rb')
moving_avg_data1 = np.zeros((20)) ## Initialising array to store moving average of last 20 frames
moving_avg_data2 = np.zeros((200))
while True:
data = wf.readframes(CHUNKSIZE) ## Read & stream data based on the chunk size
numpydata = np.fromstring(data, dtype=np.int16) ## Convert audio dataframe into numpy data for further analysis
## Writing initial half of first window ###
if window_time < window_size:
buffer_data = copy.deepcopy(numpydata)
## Writing and analysing one window size with retention of half of the previous window ##
elif window_time>= window_size:
buffer_data = np.hstack((buffer_data, numpydata)) ## appending numpydata of latter half to already available buffer_data for intial half
Z = [] ## declaring variable for computing DFT of single window
n = len(buffer_data) ## length of the signal of a window
w = np.hanning(n) ## applying hanning window to avoid aliasing
buffer_data = w*buffer_data
k = np.arange(n)
T = (n*1.0)/RATE
frq = k/T # frequency range till sampling RATE (Nyquist frequency)
freq = frq[range(int(n/2))] # frequency range till half of sampling rate
Z = np.fft.fft(buffer_data)/n # fft computing and normalization
Z = Z[range(int(n/2))]
## Bark Frequency Binning of fft computed for a window ##
barc_freq = [0, 100, 200, 300, 400, 510, 630, 770, 920, 1080, 1270, 1480, 1720, 2000, 2320, 2700, 3150, 3700, 4400, 5300, 6400, 7700, 9500, 12000, 15500, 20500, 27000]
bark = []
bark, barc_freq = np.histogram(freq, bins = barc_freq,weights = abs(Z))
bark = bark.reshape((26,1))
## MOVING AVERAGE OF LAST 20 FRAMES AMPLITUDE ACROSS ALL FREQUENCY BINS #################################
moving_avg_data1 = np.delete(moving_avg_data1,0)
moving_avg_data1 = np.append(moving_avg_data1,np.mean(bark))
moving_avg1 = np.average(moving_avg_data1, weights = np.linspace(0.00, 0.1, num=20, endpoint=True, retstep=False, dtype=np.float32) ) # moving average plank/ladder-type
moving_avg1 = 20*(np.log10(moving_avg1)) # moving average in decibel
## MOVING AVERAGE OF LAST 200 FRAMES AMPLITUDE ACROSS ALL FREQUENCY BINS #################################
moving_avg_data2 = np.delete(moving_avg_data2,0)
moving_avg_data2 = np.append(moving_avg_data2,np.mean(bark))
moving_avg2 = np.average(moving_avg_data2, weights = np.linspace(0.00, 0.01, num=200, endpoint=True, retstep=False, dtype=np.float32) ) # moving average
moving_avg2 = 20*(np.log10(moving_avg2)) # moving average in decibel
######################################################################################################
bark = 20*(np.log10(bark+0.000000001)) # bark amplitude to decibel conversion
bark1 = bark - moving_avg1 # moving average subtracted fft in decibel
bark2 = bark - moving_avg2
yield bark, bark1, bark2, freq, window_time # yield all frequency amplitude
buffer_data = copy.deepcopy(numpydata) ## Updating last half of present window for next window
window_time = window_time + (CHUNKSIZE/float(RATE)) ## increement of playtime to next chunk
wf = wave.open(file, 'rb')
p = pyaudio.PyAudio() ## create an audio object
stream = p.open(format = p.get_format_from_width(wf.getsampwidth()),
channels = wf.getnchannels(),
rate = wf.getframerate(),
output = True) # open stream based on the wave object which has been input
CHANNELS = 1
RATE = 44100 # sampling rate
overlapping = 0.5
window_size = 0.04 # 100ms window size is used here
CHUNKSIZE = int(RATE*window_size*overlapping)
window_time = 0.00
print 'Number of channels in input audio = %d' %wf.getnchannels()
print 'Sampling rate = %d' %wf.getframerate()
### Computing duration of wave file ###
with contextlib.closing (wf) as wf:
frames = wf.getnframes()
rate = wf.getframerate()
duration = frames / float(rate)
print 'Audio duration = %.3f' %duration
print "Analyzing music file %s" %file
window = audio_window(window_time, window_size, CHUNKSIZE,RATE, file)
history0 = [] ## array to store amplitude feature without mean subtraction
history1 = [] ## array to store first amplitude data frames for the duration
history2 = [] ## array to store second amplitude data frames for the duration
bark, bark1, bark2, freq, window_time = next (window)
history0 = copy.deepcopy(bark)
history1 = copy.deepcopy(bark1)
history2= copy.deepcopy(bark2)
temp=0
while (window_time < duration):
##print ('window_time=',window_time, ', duration=', duration)
bark, bark1, bark2, freq, window_time = next (window)
## Bark Frequency Binning of moving average subtracted fft computed for a window ##
history0 = np.hstack((history0,bark))
history1 = np.hstack((history1,bark1))
history2= np.hstack((history2,bark2))
if (round(window_time/duration*100,1))%10==0:
if temp != round(window_time/duration*100,1):
print 'Finished '+repr(round(window_time/duration*100,1))+' %'
temp = round(window_time/duration*100,1)
history0 = np.vstack((history0,np.sum(history0,axis=0)))
history1 = np.vstack((history1,np.sum(history1,axis=0)))
history2 = np.vstack((history2,np.sum(history2,axis=0)))
all_combined = np.vstack((history0,history1,history2))
data.append(all_combined)
# CLEANUP STUFF
stream.close()
p.terminate()
##print np.shape(data)
data= np.hstack(data[:])
data= data.astype(np.float32)
print np.shape(data)
data= np.transpose(data) # converting into format samples x features
print np.shape(data)
## Feature Scaling & Mean Normalization
##### Reading variables from pickle ######
os.chdir('..')
pic_data_mean, pic_data_min, pic_data_max = joblib.load('40ms_3ft_demo_train_parameters.pkl')
data[:,0:26:1]=(data[:,0:26:1]-pic_data_mean[:,0:26:1])/(np.maximum((abs(pic_data_min[:,0:26:1] - pic_data_mean[:,0:26:1])),(pic_data_max[:,0:26:1] - pic_data_mean[:,0:26:1])))
data[:,27:53:1]=(data[:,27:53:1]-pic_data_mean[:,27:53:1])/(np.maximum((abs(pic_data_min[:,27:53:1] - pic_data_mean[:,27:53:1])),(pic_data_max[:,27:53:1] - pic_data_mean[:,27:53:1])))
data[:,54:80:1]=(data[:,54:80:1]-pic_data_mean[:,54:80:1])/(np.maximum((abs(pic_data_min[:,54:80:1] - pic_data_mean[:,54:80:1])),(pic_data_max[:,54:80:1] - pic_data_mean[:,54:80:1])))
data[:,26]=(data[:,26]-np.mean(data[:,26]))/(max((abs(np.min(data[:,26]) - np.mean(data[:,26])),(np.max(data[:,26])) - (np.mean(data[:,26])))))
data[:,53]=(data[:,53]-np.mean(data[:,53]))/(max((abs(np.min(data[:,53]) - np.mean(data[:,53])),(np.max(data[:,53])) - (np.mean(data[:,53])))))
data[:,80]=(data[:,80]-np.mean(data[:,80]))/(max((abs(np.min(data[:,80]) - np.mean(data[:,80])),(np.max(data[:,80])) - (np.mean(data[:,80])))))
print np.min(data), np.max(data)
print np.shape (data)
data = data[:,:,newaxis] # 2-D to 3-D array
total_frames, total_features, channels = np.shape(data)
# Converting data into picture format for caffe #
num_frames_append = 50
pic_data_array = np.ones((num_frames_append,total_features,(total_frames + 1 - num_frames_append) ))
for k in range(0, (total_frames + 1 - num_frames_append)):
for j in range(0,total_features):
for i in range(0,num_frames_append):
pic_data_array[i,j,k]= data[(i+k),j,0]
print np.shape(pic_data_array)
pic_data_array = pic_data_array[:,:,:,newaxis] # 3-D to 4-D array
pic_data_array= np.transpose(pic_data_array,(2,3,0,1))
print np.shape(pic_data_array)
##samples, channels, height, width = np.shape(pic_data.array)
##### Dumping numpy array ######
joblib.dump(pic_data_array, '40ms_3ft_demo_validation.pkl')
### Converting numpy array to lmdb ######
# Let's pretend this is interesting data
##X = joblib.load('pic_data.pkl')
##y = np.zeros(samples, dtype=np.int64)
##
### We need to prepare the database for the size. We'll set it 10 times
### greater than what we theoretically need. There is little drawback to
### setting this too big. If you still run into problem after raising
### this, you might want to try saving fewer entries in a single
### transaction.
##map_size = 429496729600
##
##env = lmdb.open('40ms_same_bkgd_train', map_size=map_size)
##
##with env.begin(write=True) as txn:
## # txn is a Transaction object
## for i in range(samples):
## datum = caffe.proto.caffe_pb2.Datum()
## datum.channels = X.shape[1]
## datum.height = X.shape[2]
## datum.width = X.shape[3]
## print i
## datum.float_data.extend(X[i].astype(float).flat)
## datum.label = int(y[i])
## str_id = '{:08}'.format(i)
## txn.put(str_id.encode('ascii'), datum.SerializeToString())
####
| [
"noreply@github.com"
] | noreply@github.com |
a84694963bcde152fccfede78a8a6750f7ce6697 | 2eb1d157ea8e919a4488128986fe7a346d7c3ee4 | /Topics/Scopes/Hero damage/main.py | 07053fc504c85a9e206e90c683528af918216d17 | [] | no_license | charmipatel06/coffee-machine | 4b3fbe639fbfb95e0dec90375f3f6bac74d4f2d2 | 92e911404ebfd27fff2980cc3faf891ab0a43025 | refs/heads/main | 2023-06-14T15:31:45.075784 | 2021-07-16T23:53:40 | 2021-07-16T23:53:40 | 384,454,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | hero_damage = 100
def double_damage():
global hero_damage
hero_damage *= 2
pass
def disarmed():
global hero_damage
hero_damage = (hero_damage * 10) / 100
pass
def power_potion():
global hero_damage
hero_damage += 100
pass | [
"patelcharmis1998@gmail.com"
] | patelcharmis1998@gmail.com |
f6d795471ab5c5702ac952424d2d647e135627a7 | 006201f615b36c47e1b9e234d76c3195402c79be | /chapter_2/problem_29.py | 924e84b12af849aeeae3824fbd45d0927682f6e8 | [] | no_license | dheerajgopi/anand-python | c83c238fb05a7f1670e3c686be93557d106da39d | 4605efb32cbc52f1b444df2b4416f3be45e9686e | refs/heads/master | 2016-09-05T09:15:38.616248 | 2015-03-09T06:15:55 | 2015-03-09T06:15:55 | 30,246,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | def array(list_num, list_length):
array_list = []
for i in range(0, list_num):
array_element = []
for j in range(0, list_length):
array_element.append(None)
array_list.append(array_element)
return array_list
a = array(3,4)
print a
| [
"dheerajgopinath@gmail.com"
] | dheerajgopinath@gmail.com |
d3b4035a272a5eb72e4f2f4c440fdf2010dd82ec | 1f3cc0ccd78ec3b8b3f42250244667e11dea5504 | /client/views.py | 9226c49e4240f0d8908a1daebcfccadf5a59c11b | [] | no_license | ZongoMathieu/crm | 0e0537d746f2a97288cf9bbbb7f701aeab860bb3 | 1b6f3be72bc4ab59c14135ae6267847b16db68a9 | refs/heads/master | 2023-01-31T15:49:11.948806 | 2020-12-14T10:09:28 | 2020-12-14T10:09:28 | 318,765,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from django.shortcuts import render
from django.http import HttpResponse
from .models import Client
# Create your views here.
def list_client(request,pk):
client=Client.objects.get(id=pk)
commande=client.commande_set.all()
total_commande=commande.count()
context={'client':client, 'commande':commande,'total_commande':total_commande}
return render(request, 'client/list_client.html',context)
| [
"holding@CHO-65.coris-bank.com"
] | holding@CHO-65.coris-bank.com |
b462a6735937a88891a9bfed8ca0a81a82d0ce72 | c98a9c357d1d6d66420bc57a9f875b85868ddcef | /api/messagers_api.py | 8b04cb7a2c46ae18fc6d15623c2830329d4242ed | [] | no_license | Angelika1212/foreing-talker | d94e5d6d0387e4196630d4202cfce7c2d64d2d96 | a456d50175e798a37cfe123b29184b6d28d040b6 | refs/heads/master | 2022-04-21T11:27:40.373018 | 2020-04-18T15:04:50 | 2020-04-18T15:04:50 | 256,545,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,413 | py | import flask
from flask import render_template, redirect
from data import db_session, users, message, friends
from forms import message_form
from flask_login import login_required, current_user
blueprint = flask.Blueprint('messagers_api', __name__, template_folder='templates')
@blueprint.route('/messager/<int:id>', methods=['GET', 'POST'])
def new_chat(id):
session = db_session.create_session()
interlocutor = session.query(users.User).filter(users.User.id == id).first().name
user = current_user.name
names = '_'.join(sorted([user, interlocutor]))
form = message_form.MessageForm()
if form.validate_on_submit():
new_dialog = message.Message()
new_dialog.names = names
new_dialog.message = form.messages.data
new_dialog.from_who = user
session.add(new_dialog)
session.commit()
messages = session.query(message.Message).filter(message.Message.names == names).all()
if session.query(friends.Friend).filter(friends.Friend.friend_name == interlocutor,
friends.Friend.user_name == user).first() is None:
add_friend_user(user, interlocutor)
add_friend_interlocutor(interlocutor, user)
if messages is not None:
return render_template('messages.html', title='messages', form=form, previous_sms=messages)
return render_template('messages.html', title='messages', form=form, previous_sms='')
def add_friend_user(user, new_user_friend):
session = db_session.create_session()
new_friend = friends.Friend()
new_friend.user_name = user
new_friend.friend_name = new_user_friend
session.add(new_friend)
session.commit()
def add_friend_interlocutor(interlocator, user):
session = db_session.create_session()
user_friend = friends.Friend()
user_friend.user_name = interlocator
user_friend.friend_name = user
session.add(user_friend)
session.commit()
@blueprint.route('/Message', methods=['GET'])
@login_required
def user_interlocutors():
session = db_session.create_session()
all_interlocutors = session.query(friends.Friend).filter(friends.Friend.user_name == current_user.name).all()
if all_interlocutors is not None:
return render_template('all_interlocutors.html', title='Friends', friends=all_interlocutors)
return render_template('all_interlocutors.html', title='Friends', friends='')
| [
"ZAnzelika@ya.ru"
] | ZAnzelika@ya.ru |
302e9c632f39d2176d58841878f3d48caf0ce900 | f9a360071761d908d1a844225a87bd6eedf38780 | /day3/test/test2.py | 8c551128ce88770d08804e14c632379a7d8ba849 | [] | no_license | xiaolang/python_test | 40de0084b8268ea0fadefa7d8a50487769148f54 | 2b6bb883ed7e79cf1fd5f5e40357c224b372da76 | refs/heads/master | 2020-05-17T12:35:16.057296 | 2014-06-01T11:53:30 | 2014-06-01T11:53:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | #!/usr/bin/env python
test = "test.txt"
f = file(test)
c = f.readlines()
while True:
while True:
user_input = raw_input("\033[32;1muser:\033[0m")
if len(user_input) == 0 :
print "empty input!"
continue
else :
break
for line in c:
if user_input in line:
print line
break
else:
print "\033[31;1mmuyou!\033[0m"
| [
"xiaolang@foxmail.com"
] | xiaolang@foxmail.com |
48b56952ac3dc1fd3a8bd513d93bad85874010cd | 3927b135bd77100532e3dc82c405a2d377fc8517 | /vndk/tools/definition-tool/tests/test_vndk.py | 8938e68aa18145dd971748268f9c1f6e06f6e889 | [
"Apache-2.0"
] | permissive | eggfly/platform_development | b9367c9ecd775c766dd552bf0b417c29bc4cc1cc | 52c291d53c8f58cfe67cd3251db19b0d94b4a9c8 | refs/heads/master | 2020-05-20T22:54:41.470361 | 2017-03-10T02:06:38 | 2017-03-10T02:06:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,623 | py | #!/usr/bin/env python3
from __future__ import print_function
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import unittest
from compat import StringIO
from vndk_definition_tool import ELF, ELFLinker, PT_SYSTEM, PT_VENDOR
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
TESTDATA_DIR = os.path.join(SCRIPT_DIR ,'testdata', 'test_vndk')
class ELFLinkerVNDKTest(unittest.TestCase):
def _get_paths_from_nodes(self, nodes):
return sorted([node.path for node in nodes])
def test_compute_vndk(self):
class MockBannedLibs(object):
def is_banned(self, name):
return False
input_dir = os.path.join(TESTDATA_DIR, 'pre_treble')
graph = ELFLinker.create_from_dump(
system_dirs=[os.path.join(input_dir, 'system')],
vendor_dirs=[os.path.join(input_dir, 'vendor')])
vndk = graph.compute_vndk(sp_hals=set(), vndk_stable=set(),
vndk_customized_for_system=set(),
vndk_customized_for_vendor=set(),
generic_refs=None,
banned_libs=MockBannedLibs())
self.assertEqual(['/system/lib/libcutils.so',
'/system/lib64/libcutils.so'],
self._get_paths_from_nodes(vndk.vndk_core))
self.assertEqual([], self._get_paths_from_nodes(vndk.vndk_fwk_ext))
self.assertEqual([], self._get_paths_from_nodes(vndk.vndk_vnd_ext))
if __name__ == '__main__':
unittest.main()
| [
"loganchien@google.com"
] | loganchien@google.com |
32869036f4fdb417fb2ca1431988aa2a720d81fe | 00aef5565d72fe558e29735af2d6dba2f1d91200 | /simulations/consistent_crab_cavity_scan/restart_sim_nobunch.py | 85c72154adc2d971aa93541c5be76860e358cac1 | [] | no_license | lgiacome/WarPyECLOUD | c450dc43bc7976cddb804d5ea18089cab9a8ab32 | e164af63932f34f582329d3d0a69a9c59b3a0bd7 | refs/heads/master | 2022-02-21T04:51:57.305951 | 2022-02-09T13:52:29 | 2022-02-09T13:52:29 | 227,175,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,341 | py | import faulthandler; faulthandler.enable()
import sys
import os
from pathlib import Path
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
from warp import restart, me, picmi
BIN = os.path.expanduser("../../")
if BIN not in sys.path:
sys.path.append(BIN)
from dump_restart import reinit
import warp_pyecloud_sim
import numpy as np
def plot_dens(self, l_force=0):
fontsz = 16
plt.rcParams['axes.labelsize'] = fontsz
plt.rcParams['axes.titlesize'] = fontsz
plt.rcParams['xtick.labelsize'] = fontsz
plt.rcParams['ytick.labelsize'] = fontsz
plt.rcParams['legend.fontsize'] = fontsz
plt.rcParams['legend.title_fontsize'] = fontsz
chamber = self.chamber
if l_force or picmi.warp.top.it%1 == 0: #self.stride_imgs == 0:
(Nx, Ny, Nz) = np.shape(self.ecloud.wspecies.get_density())
fig, axs = plt.subplots(1, 2, figsize = (13.5, 5))
fig.subplots_adjust(left = 0.1, bottom = 0.07, right = 0.99,
top = 0.87)
d = (self.beam.wspecies.get_density()+self.ecloud.wspecies.get_density())
im1 = axs[0].imshow(d[:, :, int(Nz/2)] .T, cmap = 'jet',
origin = 'lower', vmin = 0,
vmax = 1e10,
extent = [chamber.xmin, chamber.xmax ,
chamber.ymin, chamber.ymax])
axs[0].set_xlabel('x [m]')
axs[0].set_ylabel('y [m]')
axs[0].set_title('rho')
fig.colorbar(im1, ax = axs[0])
im2 = axs[1].imshow(d[int(Nx/2), :, :], cmap = 'jet',
origin = 'lower',
vmin = 0,
vmax = 1e10,
extent=[chamber.zmin, chamber.zmax,
chamber.ymin, chamber.ymax])
axs[1].set_aspect((chamber.zmax-chamber.zmin)/(chamber.xmax-chamber.xmin))
axs[1].set_xlabel('z [m]')
axs[1].set_ylabel('y [m]')
axs[1].set_title('rho')
fig.suptitle('t = %1.6e' %picmi.warp.top.time, fontsize=fontsz)
fig.colorbar(im2, ax = axs[1])
figname = 'images_dens'+ '/' + repr(int(picmi.warp.top.it)).zfill(4) + '.png'
plt.savefig(figname)
del fig
plt.close('all')
#print(np.max(self.solver.solver.fields.Ey))
def plots_crab(self, l_force = 0):
#fontsz = 16
#plt.rcParams['axes.labelsize'] = fontsz
#plt.rcParams['axes.titlesize'] = fontsz
#plt.rcParams['xtick.labelsize'] = fontsz
#plt.rcParams['ytick.labelsize'] = fontsz
#plt.rcParams['legend.fontsize'] = fontsz
#plt.rcParams['legend.title_fontsize'] = fontsz
chamber = self.chamber
if self.laser_source_z is None: here_laser_source_z = chamber.zmin + 0.5*(-chamber.l_main_z/2-chamber.zmin)
else: here_laser_source_z = self.laser_source_z
em = self.solver.solver
k_antenna = int((here_laser_source_z - chamber.zmin)/em.dz)
j_mid_waveguide = int((chamber.ycen6 - chamber.ymin)/em.dy)
flist = ['Ex','Ey','Ez','Bx','By','Bz']
flist = ['Jy']
pw = picmi.warp
if pw.top.it%10==0 or l_force:
#fig = plt.figure( figsize=(7,7))
for ffstr in flist:
if ffstr == 'Ex': ff = em.gatherex()
if ffstr == 'Ey':
ff = em.gatherey()
maxe =35e6*self.em_scale_fac #np.max(ey[:,:,:])
mine = -35e6*self.em_scale_fac #np.min(ey[:,:,:])
if ffstr == 'Ez': ff = em.gatherez()
if ffstr == 'Bx': ff = em.gatherbx()
if ffstr == 'By': ff = em.gatherby()
if ffstr == 'Bz': ff = em.gatherbz()
if ffstr == 'elecs':
ff = self.ecloud.wspecies.get_density()
maxe = 5e9 #np.max(ey[:,:,:])
mine = 0 #np.min(ey[:,:,:])
if ffstr == 'Jy':
ff = em.gatherjy()
maxe = np.max(ff[:,:,:])
mine = np.min(ff[:,:,:])
if me==0:
plot_field_crab(ff, ffstr, mine, maxe, k_antenna, j_mid_waveguide, chamber)
restart('cavity.0.dump')
reinit(sim, laser_func, plot_dens)
dt = picmi.warp.top.dt
n_bunches = 3
newsteps = int(np.round(25e-9*n_bunches/dt))
sim.tot_nsteps += newsteps
sim.saver.tot_nsteps += newsteps
sim.saver.extend_probe_vectors(newsteps)
sim.t_offs = 1000
sim.all_steps_no_ecloud()
| [
"lorenzo.giacomel@cern.ch"
] | lorenzo.giacomel@cern.ch |
7e520c862eed41e71e45a0087e0640e51b7a216c | 7f0bcc27cc74cbe901e4a31bbff71e299a2fa681 | /apps/language_voice_diction_chinese_yesir/restudy.py | 1ba03f64d436b0cca6a36c8fc9ff7bae67f7b800 | [] | no_license | JiangEndian/learngit | 4682826149472f942a6219b8d8b3cb7c276635e5 | e479b7efcc97c12120bd284259e8da0407c17a53 | refs/heads/master | 2021-07-18T20:48:28.063374 | 2019-05-11T11:16:02 | 2019-05-11T11:16:02 | 90,952,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,990 | py | #!/usr/bin/env python3
from tableDefine import *
def addEveryWeek():
global every_week
global common
#day = input('Day(%s):' % getnowtime('week')) or getnowtime('week')
day = getnowtime('week')
con = input('word:')
if not con:
print('wordEmpty...')
exit()
env = input('mean:') #[ɪnˈvaɪrənmənt]环境
#print('请确认:%s\n%s\n%s' % (day, env, con))
#if input('InputYES2Save:') == 'YES':
if True:
every_week.add(Day=day, Con=con, Other1=env, Other3=3)
print(every_week.find('Con', con))
common.add(Ymd=getdaystime(1), Con=con, Other1=env)
common.add(Ymd=getdaystime(3), Con=con, Other1=env)
def addExt(dataobj, con):
extend = dataobj.find(NAME='Con', value=con)[0][4]
if extend == None:
extend = input('InputExampleAndIt\'sMean:')
else:
extend = extend + '\n' + input('InputExampleAndIt\'sMean:')
#print(extend)
dataobj.update(NAME='Con', value=con, Other2=extend)
print(dataobj.find(NAME='Con', value=con))
def acceptInput(dataobj, con):
CMD = input() or '2'
print(CMD)
#if CMD == 'EXT':
#addExt(dataobj, con)
#CMD = input()
if CMD == 'ADD':
while CMD == 'ADD':
addEveryWeek()
CMD = input()
elif CMD == 'NO':
#print(dataobj.find(NAME='Con', value=con))
#input()
#dataobj.delete(NAME='Con', value=con)
#input()
pass
elif CMD == 'exit':
exit()
return CMD
def addEveryMonth():
global every_month
day = input('Day(%s):' % getnowtime('d')) or getnowtime('d')
con = input('EveryMonthContent:')
print('请确认(%s:%s).' % (day, con))
if input('InputYES2Save:') == 'YES':
every_month.add(Day=day, Con=con)
def addEveryYear():
global every_year
monthday = input('MonthDay(%s):' % getnowtime('md')) or getnowtime('md')
con = input('EveryYearContent:')
print('请确认(%s:%s).' % (monthday, con))
if input('InputYES2Save:') == 'YES':
every_year.add(MonthDay=monthday, Con=con)
def addCommon(*days):
global common
con = input('Content:')
print('请确认(%s:%s).' % (','.join(days), con))
if input('InputYES2Save:') == 'YES':
for day in days:
common.add(Ymd=day, Con=con)
def showAllTable():
global common
global every_week
global every_month
global every_year
print('common:', common.find())
print('every_week:', every_week.find())
print('every_month:', every_month.find())
print('every_year:', every_year.find())
try:
conn, cursor = opendb('PlanDatabase.sqlite')
class Common(MyORM): #通用的计划放这里
conn = conn
cursor = cursor
tableInfo = tableCommon
class EveryWeek(MyORM): #通用的计划放这里
conn = conn
cursor = cursor
tableInfo = tableEveryWeek
class EveryMonth(MyORM): #通用的计划放这里
conn = conn
cursor = cursor
tableInfo = tableEveryMonth
class EveryYear(MyORM): #通用的计划放这里
conn = conn
cursor = cursor
tableInfo = tableEveryYear
global common
global every_week
global every_month
global every_year
common = Common()
every_week = EveryWeek()
every_month = EveryMonth()
every_year = EveryYear()
def showOneDay(daydelta=0, limit='no'):
#ISp = '______________________________________________'
ISp = ''
global common
global every_week
global every_month
global every_year
#print('\t%s' % getdaystime(0))
#input(ISp)
day = datetime.now() + timedelta(days=daydelta)
week_day = day.strftime('%w')
month_day = day.strftime('%d')
monthday = day.strftime('%m%d')
ymd = day.strftime('%Y%m%d')
def printExtConEnv(ext, con, env, times=1, limit='no', iscommon=False):
print(con)
if limit != 'no':
print('\n_________提前年的,能跟读,则可直接回车删\n')
CMD = play_enter('~/grace_voice_file/%s.mp3' % ext, time4input=1, times=times)
print(env)
if CMD == '':
CMD = '2'
if iscommon:
input()
elif CMD == 'p':
CMD = input('输入相应命令2/8继续:')
elif CMD == 'dd':
if input('单词及意思正确,则可y删除\n') != 'y':
CMD = 'NO'
elif CMD == '8':
if env:
input()
else:
if env:
input()
print(CMD)
return CMD
if ext and env:
#print('%s\n\n%s\n\n补充/感想:\n%s' % (env, con, ext))
#print('%s\n\n%s\n\n补充/感想:\n%s' % (con, env, ext))
#print(ISp)
print(con)
input()
print(env)
input()
print(ext)
print(ISp)
print(ISp)
elif env:
#print('%s\n\n%s\n' % (env, con))
#print('%s\n\n%s\n' % (con, env))
print(con)
input()
print(env)
print(ISp)
print(ISp)
elif ext:
#print('%s\n\n补充/感想:\n%s' % (con, ext))
print(con)
input()
print(ext)
print(ISp)
print(ISp)
else:
print('%s' % (con))
print(ISp)
print(ISp)
#统一先查好,避免因为数据变表而重复查询
every_year_info = every_year.find('MonthDay', monthday)
every_month_info = every_month.find('Day', month_day)
every_week_info = every_week.find('Day', week_day)
common_info = common.find('Ymd', ymd)
if limit == 'year':
every_month_info = False
every_week_info = False
common_info = False
#打印안내서...
#input('YES, NO, EXT, exit')
if every_year_info:
for ey_info in every_year_info:
runsyscmd()
print('__________________________________\n\nYear\n__________________________________')
print(ISp)
con = ey_info[2]
env = ey_info[3]
ext = ey_info[4]
CMD = printExtConEnv(ext, con, env, times=2, limit=limit)
#CMD = acceptInput(every_year, con)
if CMD == 'NO' or CMD == '8':
every_year.delete('Con', con)
every_month.add(Day=getnowtime('d'), Con=con, Other1=env, Other2=ext)
elif CMD == 'YES' or CMD == 'dd' or CMD == '2':
every_year.delete('Con', con)
if every_month_info:
for em_info in every_month_info:
runsyscmd()
print('_________________________________\n\nMonth\n_________________________________')
print(ISp)
con = em_info[2]
env = em_info[3]
ext = em_info[4]
CMD = printExtConEnv(ext, con, env)
#CMD = acceptInput(every_month, con)
if CMD == 'YES' or CMD == '2':
#print('delete%s' % con)
every_month.delete(NAME='Con', value=con)
#print('insert%s' % con)
every_year.add(MonthDay=getnowtime('md'), Con=con, Other1=env, Other2=ext)
elif CMD == 'NO' or CMD == '8':
#print('%s-1' % times)
every_month.delete(NAME='Con', value=con)
every_week.add(Day=getnowtime('week'), Con=con, Other1=env, Other2=ext)
if every_week_info:
for ew_info in every_week_info:
runsyscmd()
print('_________________________________\n\nWeek\n_________________________________')
print(ISp)
con = ew_info[2]
env = ew_info[3]
ext = ew_info[4]
CMD = printExtConEnv(ext, con, env)
#CMD = acceptInput(every_week, con)
if CMD == 'YES' or CMD == '2':
every_week.delete(NAME='Con', value=con)
every_month.add(Day=getnowtime('d'), Con=con, Other1=env, Other2=ext, Other3=2)
elif CMD == 'NO' or CMD == '8':
#print('%s-1' % times)
#every_week.update(NAME='Other3', value=times, Other3=int(times)-1)
#print('下周继续,加油!')
common.add(Ymd=getdaystime(1), Con=con, Other1=env, Other2=ext)
common.add(Ymd=getdaystime(3), Con=con, Other1=env, Other2=ext)
common.add(Ymd=getdaystime(5), Con=con, Other1=env, Other2=ext)
#input()
if common_info:
for c_info in common_info:
runsyscmd()
print('common')
print(ISp)
con = c_info[2]
env = c_info[3]
ext = c_info[4]
CMD = printExtConEnv(ext, con, env, 1, iscommon=True)
#printExtConEnv(ext, con, env)
#acceptInput(common, con)
runsyscmd()
#print('축하합니다.今天任务完成!')
yesterday(common) #删除昨天的
############显示今天的###########
#print('___________________________\n\n')
#everyday = readffile('everydaytoread.txt')
#if everyday:
#print(everyday.strip())
#input()
#showOneDay(298, 'year')
showOneDay()
#showAllTable()
finally:
closedb(conn,cursor)
| [
"c_cstudy@126.com"
] | c_cstudy@126.com |
90224c175a2dc4e34b4109beb4d8f1f44191a42f | d55ff94ebcf05fc796656e92a2c670f3973a384f | /Webscrape-Sunburst_mongo/app.py | 69c15f002f42b316fa13b665aefd3433b8ed83c3 | [] | no_license | taureanh/Project2_trial | d2f50f3fffc39d1a339bce2512dc73ac8e7b4475 | 314a5f1987f40ef32a514c22b4c3572d53cb6e24 | refs/heads/main | 2022-12-30T19:45:44.241086 | 2020-10-17T13:28:10 | 2020-10-17T13:28:10 | 304,882,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | #import dependencies for SQL
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import renewable_scrape
app = Flask(__name__)
#Need to fix
app.config["MONGO_URI"] = "mongodb://localhost:27017/renewables"
mongo = PyMongo(app)
@app.route("/")
def home():
data = mongo.db.renewables.find_one()
return render_template("index.html",r_last_refresh=data["renewable_refresh"],renewable_title_0=data["renewable_titles"][0],renewable_link_0=data["renewable_links"][0],renewable_title_1=data["renewable_titles"][1],renewable_link_1=data["renewable_links"][1], renewable_title_2 = data["renewable_titles"][2],renewable_link_2=data["renewable_links"][2],renewable_title_3=data["renewable_titles"][3],renewable_link_3=data["renewable_links"][3])
# Route that will trigger the scrape function
@app.route("/scrape")
def scrape():
# Run the scrape function
renewable_data = renewable_scrape.renewable_scrape()
# Update the Mongo database using update and upsert=True
mongo.db.renewables.replace_one({}, renewable_data, upsert=True)
# Redirect back to home page
return redirect("/")
@app.route("/maps")
def maps():
#put html for maps
return
if __name__ == "__main__":
app.run(debug=True)
| [
"taurean.hutchinson@gmail.com"
] | taurean.hutchinson@gmail.com |
b4c39c047d688dd6539210be500c5952408ac905 | 77465eb5bfbb7635aa6a2fcaf717a2655f5cf17f | /src/models/DenseDet_SKU_fusion_bfp_x101_32x4d_v1.py | b6eef9c8391686a6f3caf41388ef6097b1decbd0 | [] | no_license | SebasGarcia08/sku110-dense-object-detection | 451f29a56ae6a58c2ee0d1f4692496bfad95f7f3 | e31a0c35bbaf1037f583cc57042a8d5e724b5ee1 | refs/heads/master | 2023-04-21T17:58:28.479373 | 2021-05-06T18:51:25 | 2021-05-06T18:51:25 | 364,759,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,822 | py | # model settings
model = dict(
type='CascadeRCNN',
num_stages=2,
pretrained='open-mmlab://resnext101_32x4d',
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
dict(
type='BFP',
in_channels=256,
num_levels=5,
refine_level=2,
refine_type='non_local')
],
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32,64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=2,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=2,
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
]
)
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=1024,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=3072,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=3072,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
],
stage_loss_weights=[1, 0.5, 0.25])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=3000,
nms_post=3000,
max_num=3000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.7), max_per_img=400)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(3000, 1800), keep_ratio=True),
dict(type='UniformRandomCrop', crop_size=(1200,1200)),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(3000, 1800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=1,
workers_per_gpu=1,
train=dict(
type=dataset_type,
ann_file='/DATA/home/tianzerong/sku/SKU_COCO_train.json',
img_prefix='/DATA/data/SKU110K_fixed/images/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file='/DATA/home/tianzerong/sku/SKU_COCO_train.json',
img_prefix='/DATA/data/SKU110K_fixed/images/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file='/DATA/home/tianzerong/sku/SKU_COCO_val.json',
img_prefix='/DATA/data/SKU110K_fixed/images/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[16, 22])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/SKU_fusion_bifpn_x50'
load_from = None
resume_from = None
workflow = [('train', 1)]
| [
"segaracos@outlook.com"
] | segaracos@outlook.com |
e1682205360b4928220bbc12cb3953be8221e9f8 | 14252ea933a08056363230c6df89223b996a0da2 | /app/enquiry/admin.py | 71f3c238e9b152b6658810ef408539597e9ec865 | [
"MIT"
] | permissive | S3Infosoft/mvr-insights | eeb02aa2e6767e6a23818d4e09f7be7ce29f80cb | ac73feff03c1592d5efd8e0b82f72dd4dbd3e921 | refs/heads/master | 2020-05-29T14:08:11.070784 | 2020-04-23T19:46:57 | 2020-04-23T19:46:57 | 189,184,619 | 0 | 1 | MIT | 2020-04-23T19:46:58 | 2019-05-29T08:35:56 | CSS | UTF-8 | Python | false | false | 918 | py | from . import models
from django.contrib import admin
@admin.register(models.OTA)
class OTAAdmin(admin.ModelAdmin):
list_display = "name", "registration", "contact_person", "contact_number",\
"contact_email",
search_fields = "name", "contact_person",
@admin.register(models.Partner)
class PartnerAdmin(admin.ModelAdmin):
list_display = "name", "partner_type", "created", "contact_person", \
"contact_number", "contact_email",
search_fields = "name", "contact_person",
@admin.register(models.Review)
class ReviewAdmin(admin.ModelAdmin):
list_display = "headline_slim", "source_slim", "rating", "created",
list_filter = "rating",
search_fields = "headline",
list_editable = "rating",
@staticmethod
def headline_slim(inst):
return inst.headline[:70]
@staticmethod
def source_slim(inst):
return inst.source[:70]
| [
"abhie.lp@gmail.com"
] | abhie.lp@gmail.com |
7e97dec12b5a269ee009a038ff2b1bb48711aff7 | 5577a04c006e73b8a40f68055b2173ffe34ce83e | /htsint/database/fetchTimeExperiment.py | 52b01c5ccf358b0f3acfe468ea3b6ae2dc535dfc | [
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | changanla/htsint | 1617c56bd5f02ab01e0de80d3d06d2d75983a376 | a343aff9b833979b4f5d4ba6d16fc2b65d8ccfc1 | refs/heads/master | 2020-03-16T13:10:15.082839 | 2017-05-24T21:27:27 | 2017-05-24T21:27:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | #!/usr/bin/python
import sys,time
from sqlalchemy.sql import select
from htsint.database import db_connect,fetch_annotations,fetch_taxa_annotations
from htsint.database import Taxon,taxa_mapper,Gene,gene_mapper
session,engine = db_connect()
conn = engine.connect()
#timeStart = time.time()
#annotations = fetch_annotations(['31251'],engine,idType='ncbi',useIea=False,aspect='biological_process')
#print("end: %s"%time.strftime('%H:%M:%S',time.gmtime(time.time()-timeStart)))
#print annotations
##7091(small), 7227(large)
timeStart = time.time()
annotations,goTerms = fetch_taxa_annotations(['7227'],engine,idType='ncbi',useIea=False,aspect='biological_process')
print("end: %s"%time.strftime('%H:%M:%S',time.gmtime(time.time()-timeStart)))
#print annotations
sys.exit()
###########
widget = Gene#Taxon
print("scanning %s"%widget.__tablename__)
timeStart = time.time()
myDict = {}
s = select([widget.id,widget.ncbi_id])
_result = conn.execute(s)
result = [row for row in _result]
print("core: %s"%time.strftime('%H:%M:%S',time.gmtime(time.time()-timeStart)))
sys.exit()
timeStart = time.time()
for t in session.query(widget).yield_per(5):
myDict[t.ncbi_id] = t.id
print("yield per: %s"%time.strftime('%H:%M:%S',time.gmtime(time.time()-timeStart)))
| [
"adamricha@gmail.com"
] | adamricha@gmail.com |
96c271f4ba502360e86ae8b36745e783d53d418e | d3f30c67faf0b593565fc5fa526d6b96a8a9f65f | /tests/test_dates.py | 9c3a7b40745a472ca8520756a080d082d887c101 | [
"BSD-3-Clause"
] | permissive | has2k1/mizani | 4b3732b13380c6f2660f313877d95f63095781f3 | 90b0a54dd3a76528fae7997083d2ab8d31f82a58 | refs/heads/main | 2023-09-02T00:47:17.321472 | 2023-09-01T09:44:57 | 2023-09-01T13:45:36 | 62,319,878 | 41 | 15 | BSD-3-Clause | 2022-04-04T04:26:51 | 2016-06-30T15:02:41 | Python | UTF-8 | Python | false | false | 2,210 | py | from datetime import datetime
from zoneinfo import ZoneInfo
import pytest
from mizani._core.date_utils import (
align_limits,
ceil_mid_year,
ceil_second,
ceil_week,
floor_mid_year,
floor_second,
floor_week,
)
from mizani._core.dates import (
datetime_to_num,
get_tzinfo,
num_to_datetime,
)
def test_tzinfo():
tz = ZoneInfo("Africa/Kampala")
assert get_tzinfo("Africa/Kampala") == tz
assert get_tzinfo(tz) is tz
with pytest.raises(TypeError):
assert get_tzinfo(10) # type: ignore
def test_floor_mid_year():
d1 = datetime(2022, 3, 1)
d2 = datetime(2022, 11, 9)
assert floor_mid_year(d1) == datetime(2022, 1, 1)
assert floor_mid_year(d2) == datetime(2022, 7, 1)
def test_ceil_mid_year():
d1 = datetime(2022, 1, 1)
d2 = datetime(2022, 1, 2)
d3 = datetime(2022, 8, 2)
assert ceil_mid_year(d1) == datetime(2022, 1, 1)
assert ceil_mid_year(d2) == datetime(2022, 7, 1)
assert ceil_mid_year(d3) == datetime(2023, 1, 1)
def test_floor_week():
d1 = datetime(2000, 1, 11)
d2 = datetime(2000, 8, 21)
assert floor_week(d1) == datetime(2000, 1, 8)
assert floor_week(d2) == datetime(2000, 8, 15)
def test_ceil_week():
d1 = datetime(2000, 1, 15)
d2 = datetime(2000, 8, 20)
assert ceil_week(d1) == datetime(2000, 1, 15)
assert ceil_week(d2) == datetime(2000, 8, 22)
def test_floor_second():
d1 = datetime(2000, 1, 1, 10, 10, 24, 1000)
assert floor_second(d1) == datetime(2000, 1, 1, 10, 10, 24)
def test_ceil_second():
d1 = datetime(2000, 1, 1, 10, 10, 24, 1000)
assert ceil_second(d1) == datetime(2000, 1, 1, 10, 10, 25)
def test_num_to_datetime():
limits = num_to_datetime((25552, 27743))
assert limits[0] == datetime(2039, 12, 17, tzinfo=ZoneInfo("UTC"))
assert limits[1] == datetime(2045, 12, 16, tzinfo=ZoneInfo("UTC"))
d = num_to_datetime((27742 + 1.9999999999,))[0]
assert d.microsecond == 0
def test_datetime_to_num():
x = []
res = datetime_to_num([])
assert len(res) == 0
# Just for test coverage
# TODO: Find a better test
def test_align_limits():
limits = (2009, 2010)
align_limits(limits, 1 + 1e-14)
| [
"has2k1@gmail.com"
] | has2k1@gmail.com |
43722d5fe07102d70d34d9f72fb9ab3df7e72248 | 6f407956c4eb87d46c35776a61e1d788ac148f26 | /Scripts/ProcessExperimentUtils/analyseResults.py | 1713755f1a1da3fb98d7aff21519e5d036efb486 | [] | no_license | decobeirne/collab-rob-fwork | 3b99402132d49568ffb745732d10530a80575fb7 | 527c9f09c8a49af28a33fe2dccffd0ffa9bbd547 | refs/heads/master | 2021-01-01T18:18:36.408989 | 2013-11-24T20:13:16 | 2013-11-24T20:13:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,666 | py | import math
from pdb import set_trace
STD_DEV_COST_COEFF = 200.0
BATTERY_LOSS_MOVE = 20.0
STD_DEV_MAX = 20
'''
gross - profit we est we'll get when we adopt the target
grossPerStep - how much the robot ests it will be getting for each step for this target
gross_subd - the profit we calc that this target will get when we redraw the local map for submission
gross_actual - gross_subd * profitRatio for the subd map (profit actually incd into global map / profit that the robot ests it will get when submitting)
'''
def analyseLocalMaps(robotIndex, incdMapsOnBoard, subdMapsOnRobot, moveList, mapScanList):
'''
Get relationship between estd profit, and actual profit.
'''
profitPerMoveOverStdDevs = []
profitRatioOverStdDevs = []
localMapAvgs = {}
gtepSessionList = {}
gtepSessionDict = {}
incdMapsOnBoardFromMe = [x for x in incdMapsOnBoard if x['robotIndex'] == robotIndex]
incdAndSubdMaps = zip(incdMapsOnBoardFromMe, subdMapsOnRobot)
currentIteration = -1
for mapTuple in incdAndSubdMaps:
incdMap, subdMapProfit = mapTuple
assert incdMap['robotIndex'] == subdMapProfit['robotIndex']
assert incdMap['localMapIndex'] == subdMapProfit['localMapIndex']
# Get further info about submitted local map: map scans integrated while exploring and moves made.
movesThisMap = [x for x in moveList if x['iteration'] > currentIteration and x['iteration'] <= subdMapProfit['iteration']]
#scansThisMap = [x for x in mapScanList if x['iteration'] > currentIteration and x['iteration'] <= subdMapProfit['iteration']]
#estdGainFromScans = sum(x['estdGain'] for x in scansThisMap)
nMoves = len(movesThisMap)
# Take other interesting data and store in subdMapProfit.
#subdMapProfit['estdGainFromScans'] = estdGainFromScans
subdMapProfit['nMoves'] = nMoves
profitRatio = (incdMap['profit'] / subdMapProfit['estdProfit']) if subdMapProfit['estdProfit'] else 0
subdMapProfit['incdToSubdProfit'] = profitRatio
subdMapProfit['incdToSubdNCells'] = (incdMap['nMapCells'] / subdMapProfit['estdGain']) if subdMapProfit['estdGain'] else 0
subdMapProfit['incdToSubdAvgStdDev'] = (incdMap['avgMapCellStdDev'] / subdMapProfit['avgStdDev']) if subdMapProfit['avgStdDev'] else 0
subdMapProfit['expProfit_actual'] = subdMapProfit['expProfit'] * profitRatio
subdMapProfit['gtepProfit_actual'] = subdMapProfit['gtepProfit'] * profitRatio
if nMoves:
profitPerMoveOverStdDev = (incdMap['profit'] / nMoves, incdMap['avgMapCellStdDev'])
profitPerMoveOverStdDevs.append(profitPerMoveOverStdDev)
# TODO calculate actual profitRatio for each target in each local map. can really only do for exp tars
# we already have for each target: gross, resources, expenditure
# and for the subd local map we have the profit for each tar. therfore can use actualGross for incd map to
# calc actual gross for each target. can update the targets with this info
# (targets should have the id for each local map they have. then need a dict for local maps (with id as key))
profitRatioOverStdDevs.append(profitRatioOverStdDev)
for gtepSessionId, estdGtepProfit, nMapScans in subdMapProfit['gtepProfitList']:
allocdGtepProfit = estdGtepProfit * profitRatio
if not gtepSessionId in gtepSessionList:
gtepSessionList[gtepSessionId] = {'estdProfit': 0.0, 'allocdProfit': 0.0, 'nLocalMaps': 0, 'nMapScans': 0}
gtepSessionList[gtepSessionId]['estdProfit'] += estdGtepProfit
gtepSessionList[gtepSessionId]['allocdProfit'] += allocdGtepProfit
gtepSessionList[gtepSessionId]['nLocalMaps'] += 1
gtepSessionList[gtepSessionId]['nMapScans'] += nMapScans
currentIteration = subdMapProfit['iteration']
# Get averages for all local maps.
nMaps = len(subdMapsOnRobot)
localMapAvgs['estdProfit'] = sum(x['estdProfit'] for x in subdMapsOnRobot) / nMaps
localMapAvgs['incdToSubdProfit'] = sum(x['incdToSubdProfit'] for x in subdMapsOnRobot) / nMaps
localMapAvgs['estdGain'] = sum(x['estdGain'] for x in subdMapsOnRobot) / nMaps
localMapAvgs['incdToSubdNCells'] = sum(x['incdToSubdNCells'] for x in subdMapsOnRobot) / nMaps
localMapAvgs['avgStdDev'] = sum(x['avgStdDev'] for x in subdMapsOnRobot) / nMaps
localMapAvgs['nMoves'] = sum(x['nMoves'] for x in subdMapsOnRobot) / nMaps
# Get averages for GOTO_EXPLORATION_PT sessions
nSessions = len(gtepSessionList.keys())
gtepSessionDict['avgEstdProfit'] = sum(x['estdProfit'] for x in gtepSessionList.values()) / nSessions if nSessions else 0
gtepSessionDict['avgAllocdProfit'] = sum(x['allocdProfit'] for x in gtepSessionList.values()) / nSessions if nSessions else 0
gtepSessionDict['avgNLocalMaps'] = sum(x['nLocalMaps'] for x in gtepSessionList.values()) / nSessions if nSessions else 0 # Remember, should be 1 apart from loop-closing
gtepSessionDict['avgNMapScans'] = sum(x['nMapScans'] for x in gtepSessionList.values())
# Setup dict to allow quick accessing of actual/estimated profit for each local map.
localMapProfitPtrs = {}
for localMap in subdMapsOnRobot:
index = localMap['localMapIndex']
incdToSubdProfit = localMap['incdToSubdProfit']
assert index not in localMapProfitPtrs
localMapProfitPtrs[index] = incdToSubdProfit
return localMapAvgs, localMapProfitPtrs, gtepSessionDict, gtepSessionList, profitPerMoveOverStdDevs
def getAvgErrorPerMove(errorPerMoveTuples):
'''Given tuples containing (nMoves, avgError), return the overall average error per move.'''
nTotal = 0
avgTotal = 0.0
for (nMoves, avgError) in errorPerMoveTuples:
if nMoves:
avgTotal += avgError * nMoves
nTotal += nMoves
if nTotal:
return (avgTotal / nTotal, nTotal)
return (0.0, 0)
def getMoveBreakdown(moveList):
total = len(moveList)
fwdMoves = [x for x in moveList if x['direction'] == 0]
return len(fwdMoves), total - len(fwdMoves)
def updateExpTargetsWrtEnvir(expTargetsList, profitWrtEnvir):
"""
When printing what exploration targets look like relative to the environment, i.e. the state of the local map and the sup area,
it will also be useful to determine how the envir typically changes with each target.
"""
for iteration in profitWrtEnvir:
iterationDict = profitWrtEnvir[iteration]
if iterationDict['exp']:
matchingExpTargets = [x for x in expTargetsList if x['iteration'] == iteration]
if not matchingExpTargets:
print('WARNING: should have a matching exp dict for iteration %d' % iteration)
iterationDict['exp']['mapGain_subd'] = 0
else:
iterationDict['exp']['mapGain_subd'] = matchingExpTargets[0]['mapGain_subd']
def analyseResultsDictList(resultsDictList):
temp_nExpTars = 0
temp_nExpTarsWithCoalSet = 0
for resultsDict in resultsDictList:
index = 0
MAX_INDEX = 20
while index < MAX_INDEX:
if 'subdMapList_%d' % index not in resultsDict:
index += 1
continue
avgErrorPerMove, nMoves = getAvgErrorPerMove(resultsDict['globalMapDetails']['errorPerMoveList'])
resultsDict['globalMapDetails']['movesAvgError'] = avgErrorPerMove
resultsDict['globalMapDetails']['movesN'] = nMoves
nRotations, nFwdMoves = getMoveBreakdown(resultsDict['globalMapDetails']['moveList'])
resultsDict['globalMapDetails']['movesDirRotations'] = nRotations
resultsDict['globalMapDetails']['movesDirFwd'] = nFwdMoves
resultsDict['globalMapDetails']['movesDirRatioRotations'] = nRotations / float(nFwdMoves)
subdMapsList = resultsDict['subdMapList_%d' % index]
tempSubdMapDict = {} # Map localMapIndex to subdMap dict object
for subdMap in subdMapsList:
if subdMap['estdGross'] == 0.0:
subdMap['estdGross'] = 1.0
subdMap['profitRatio'] = subdMap['incdProfit'] / subdMap['estdGross']
subdMap['profitRatioList'] = []
assert subdMap['localMapIndex'] not in tempSubdMapDict
tempSubdMapDict[subdMap['localMapIndex']] = subdMap
coalitionInfoList = resultsDict['coalitionInfo_%d' % index]
gtepTargetsList = resultsDict['gtepTargetList_%d' % index]
tempGtepTargetDict = {}
for gtepTarget in gtepTargetsList:
if gtepTarget['localMapIndex'] not in tempSubdMapDict:
set_trace()
print("ERROR")
assert gtepTarget['localMapIndex'] in tempSubdMapDict
gtepTarget['nScans_actual'] = 0
gtepTarget['gross_subd'] = 0
gtepTarget['gross_actual'] = 0
gtepTarget['nLocalMaps'] = 0
for subdMap in subdMapsList:
targetProfits = [x for x in subdMap['targetProfits'] if x['id'] == gtepTarget['targetIndex']]
assert len(targetProfits) in (0, 1)
if targetProfits:
gtepTarget['nScans_actual'] += targetProfits[0]['nScans']
gtepTarget['gross_subd'] += targetProfits[0]['profit']
gtepTarget['gross_actual'] += targetProfits[0]['profit'] * subdMap['profitRatio']
gtepTarget['nLocalMaps'] += 1
grossRatio = gtepTarget['gross_actual'] / gtepTarget['gross_subd'] if gtepTarget['gross_subd'] else 0.0
gtepTarget['expenditure_actual'] = gtepTarget['expenditure'] * grossRatio
# TODO: N.B. Make sure these match values in c code
gtepTarget['resources_actual'] = gtepTarget['nScans_actual_redundant'] * BATTERY_LOSS_MOVE + gtepTarget['stdDevInc_actual'] * STD_DEV_COST_COEFF
gtepTarget['ratio_actual'] = (gtepTarget['gross_actual'] - gtepTarget['expenditure_actual']) / gtepTarget['resources_actual'] if gtepTarget['resources_actual'] else 0
# Arbitrary - add it to the first local map that this target had any scans in - mostly only one anyway.
tempSubdMapDict[gtepTarget['localMapIndex']]['profitRatioList'].append(gtepTarget['ratio_actual'])
# List for allocated profit. There may be multiple gtep targets with the same
# potentialGtepSessionId, i.e. some will be adopted but the session not started. This will
# be ok tho, as the last gtep target in the list will overwrite the previous ones.
gtepTarget['allocatedProfitList'] = []
tempGtepTargetDict[gtepTarget['potentialGtepSessionId']] = gtepTarget
expTargetsList = resultsDict['expTargetList_%d' % index]
for expTarget in expTargetsList:
temp_nExpTars += 1
assert expTarget['localMapIndex'] in tempSubdMapDict
expTarget['nScans_actual'] = 0
expTarget['gross_subd'] = 0
expTarget['mapGain_subd'] = 0
expTarget['gross_actual'] = 0
expTarget['nLocalMaps'] = 0
for subdMap in subdMapsList:
targetProfits = [x for x in subdMap['targetProfits'] if x['id'] == expTarget['targetIndex']]
assert len(targetProfits) in (0, 1)
if targetProfits:
expTarget['nScans_actual'] += targetProfits[0]['nScans']
expTarget['gross_subd'] += targetProfits[0]['profit']
expTarget['mapGain_subd'] += targetProfits[0]['estdGain']
expTarget['gross_actual'] += targetProfits[0]['profit'] * subdMap['profitRatio']
expTarget['nLocalMaps'] += 1
grossRatio = expTarget['gross_actual'] / expTarget['gross_subd'] if expTarget['gross_subd'] else 0.0
expTarget['expenditure_actual'] = expTarget['expenditure'] * grossRatio
expTarget['grossPerStep'] = expTarget['gross'] / expTarget['nSteps']
expTarget['grossPerStep_actual'] = expTarget['gross_actual'] / expTarget['nSteps']
# TODO: N.B. Make sure these match values in c code
expTarget['resources_actual'] = expTarget['nScans_actual_redundant'] * BATTERY_LOSS_MOVE + expTarget['stdDevInc_actual'] * STD_DEV_COST_COEFF
expTarget['ratio_actual'] = (expTarget['gross_actual'] - expTarget['expenditure_actual']) / expTarget['resources_actual'] if expTarget['resources_actual'] else 0
# Arbitrary - add it to the first local map that this target had any scans in - mostly only one anyway.
tempSubdMapDict[expTarget['localMapIndex']]['profitRatioList'].append(expTarget['ratio_actual'])
if expTarget['gtepSession'] != -1:
profitToGtep_actual = expTarget['profitToGtep'] * grossRatio
tempGtepTargetDict[expTarget['gtepSession']]['allocatedProfitList'].append(profitToGtep_actual)
if expTarget['profitToCoal'] != 0:
temp_nExpTarsWithCoalSet += 1
assert expTarget['iteration'] in coalitionInfoList
coalition = coalitionInfoList[expTarget['iteration']]['explorationCoalition']
if coalition not in resultsDict['globalMapDetails']['coalInfo']:
resultsDict['globalMapDetails']['coalInfo'][coalition] = {'coalAllocs': [], 'subdMapProfits': []}
profitToCoal_actual = expTarget['profitToCoal'] * grossRatio
resultsDict['globalMapDetails']['coalInfo'][coalition]['coalAllocs'].append(profitToCoal_actual)
# Count up profit from allocatedProfitList allocd above
for gtepTarget in gtepTargetsList:
gtepTarget['allocd_income'] = sum(gtepTarget['allocatedProfitList'])
gtepTarget['allocd_nPayments'] = len(gtepTarget['allocatedProfitList'])
gtepTarget['allocd_income_gross_ratio'] = gtepTarget['allocd_income'] / gtepTarget['gross'] if gtepTarget['gross'] else 0
#gtepTarget['allocd_ratio_actual_after'] = (gtepTarget['gross_actual'] + gtepTarget['income'] - gtepTarget['expenditure_actual']) / gtepTarget['resources_actual'] if gtepTarget['resources_actual'] else 0
# Calc average profit ratio for each target in each local map
for subdMap in subdMapsList:
profitRatioList = subdMap['profitRatioList']
subdMap['avgProfitRatio'] = sum(profitRatioList) / len(profitRatioList) if profitRatioList else 0.0
# Process close loop data
closeLoopSessionList = resultsDict['closeLoopSessions_%d' % index]
for session in closeLoopSessionList:
target = session['target']
session['nScansTotalWhenAdopting'] = target['additionalInfo']['nScansTotal']
session['nScansInitial'] = target['additionalInfo']['nScansInitial']
session['nScansLoopClosingWhenAdopting'] = target['additionalInfo']['nScansCloseLoop']
session['nScansAdjustedWhenAdopting'] = target['additionalInfo']['nScansAdjusted']
session['nScansAdjustedInitialWhenAdopting'] = max(0, session['nScansAdjustedWhenAdopting'] - session['nScansLoopClosingWhenAdopting'])
maxScanWhenAdopting = max(target['scanBehaviours'].keys())
session['lastStdDevWhenAdopting'] = target['scanBehaviours'][maxScanWhenAdopting]['origStdDev']
adjustedScansWhenSubmitting = [x for x in session['scanAdjustments'] if session['scanAdjustments'][x]['event'] == 'adjust']
session['nScansAdjustedWhenSubmitting'] = len(adjustedScansWhenSubmitting)
session['nScansTotalWhenSubmitting'] = len(session['perScanProfits'])
session['nScansLoopClosingWhenSubmitting'] = len(session['perScanProfits']) - target['additionalInfo']['nScansInitial']
session['nScansAdjustedWhenSubmitting'] = max(0, session['nScansAdjustedWhenSubmitting'] - session['nScansLoopClosingWhenSubmitting'])
maxScanWhenSubmitting = max(session['scanAdjustments'].keys())
session['lastStdDevWhenSubmitting'] = session['scanAdjustments'][maxScanWhenSubmitting]['origStdDev']
# From submitted scans, calc gross from new scans and from adjustments
#l1 = [x for x in session['scanAdjustments'].keys() if x not in session['perScanProfits'].keys()]
#if l1:
# set_trace()
for scanIndex in session['scanAdjustments']:
scanAdj = session['scanAdjustments'][scanIndex]
# A scan may not appear in 'perScanProfits', if when doing a loop-close, one map had no map data in it (i.e. all scans had too high std dev)
if scanIndex in session['perScanProfits']:
scanAdj['profit'] = session['perScanProfits'][scanIndex]['profit']
scanAdj['gain'] = session['perScanProfits'][scanIndex]['gain']
else:
scanAdj['profit'] = 0
scanAdj['gain'] = 0
scanAdj['wasInitialScan'] = (scanIndex < session['nScansInitial'])
initialScans = [x for x in session['scanAdjustments'].values() if x['wasInitialScan']]
newScans = [x for x in session['scanAdjustments'].values() if not x['wasInitialScan']]
adjustmentGross = 0
for scanAdj in initialScans:
if scanAdj['event'] == 'adjust':
if scanAdj['adjStdDev'] >= scanAdj['origStdDev']:
print('WEIRD: adjusted std dev should have been smaller than original std dev')
# Calc gross as in calcStdDevReductionProfit in code
red = scanAdj['origStdDev'] - scanAdj['adjStdDev']
adjustmentGross += scanAdj['gain'] * (red / STD_DEV_MAX)
session['grossAdjsWhenSubmitting'] = adjustmentGross
newScanGross = 0
for scanAdj in newScans:
newScanGross += scanAdj['profit']
session['grossNewScansWhenSubmitting'] = newScanGross
session['grossAdjsWhenAdopting'] = target['additionalInfo']['grossAdjs']
session['grossNewScansWhenAdopting'] = target['additionalInfo']['grossNewScans']
# Calc errors before/after adjustment in adjusted scans
def _pointDist(pt1, pt2):
assert len(pt1) == 2
assert len(pt2) == 2
xDist = abs(pt1[0] - pt2[0])
yDist = abs(pt1[1] - pt2[1])
dist = math.sqrt(xDist * xDist + yDist * yDist)
return dist
def __diffLen(pt):
assert len(pt) == 2
dist = math.sqrt(pt[0] * pt[0] + pt[1] * pt[1])
return dist
for scanIndex in session['scanAdjustments']:
scanAdj = session['scanAdjustments'][scanIndex]
scanAdj['offsetOrig'] = __diffLen(scanAdj['origDiff'])
if 'adjDiff' in scanAdj:
scanAdj['offsetAdj'] = __diffLen(scanAdj['adjDiff'])
reversedKeys = list(reversed(sorted(session['scanAdjustments'].keys())))
scanAdj = session['scanAdjustments'][reversedKeys[0]]
session['scanAdjsAvgIncOffset'] = ''
session['scanAdjsNormdIncOffset'] = ''
session['scanAdjsAvgIncStdDev'] = ''
if 'offsetAdj' in scanAdj:
currentOffset = scanAdj['offsetAdj']
currentStdDev = scanAdj['adjStdDev']
offsetIncs = []
stdDevIncs = []
for scanIndex in reversedKeys[1:]:
scanAdj = session['scanAdjustments'][scanIndex]
if 'offsetAdj' in scanAdj:
offsetInc = scanAdj['offsetAdj'] - currentOffset
offsetIncs.append(offsetInc)
currentOffset = scanAdj['offsetAdj']
stdDevInc = scanAdj['adjStdDev'] - currentStdDev
stdDevIncs.append(stdDevInc)
currentStdDev = scanAdj['adjStdDev']
if offsetIncs:
#set_trace()
session['scanAdjsAvgIncOffset'] = sum(offsetIncs) / len(offsetIncs)
session['scanAdjsNormdIncOffset'] = session['scanAdjsAvgIncOffset'] * len(offsetIncs)
session['scanAdjsAvgIncStdDev'] = sum(stdDevIncs) / len(stdDevIncs)
profitWrtEnvir = resultsDict['profitWrtEnvir_%d' % index]
updateExpTargetsWrtEnvir(expTargetsList, profitWrtEnvir)
index += 1
# Iterate again, once profit allocd to coalitions has been counted for all robots
index = 0
MAX_INDEX = 20
while index < MAX_INDEX:
if 'subdMapList_%d' % index not in resultsDict:
index += 1
continue
# For exploration (which is the only behaviour for which profit is directly attribd for map gain ), we
# have already subtracted (in c code) the profit to attrib to the coal (this is in the step below). It
# would/may be more correct to attrib profit from all profits to coal though. This will not mess up our
# graphs either, as this profit is not counted anyway.
coalitionInfoList = resultsDict['coalitionInfo_%d' % index]
subdMapsList = resultsDict['subdMapList_%d' % index]
for subdMap in subdMapsList:
if subdMap['iteration'] in coalitionInfoList:
coalitionId = coalitionInfoList[subdMap['iteration']]['explorationCoalition']
if coalitionId != -1:
# It may occur that a coal gets no exp targets or subd maps, so it won't be added to this dict,
# so add a dict with empty lists in this case
if coalitionId not in resultsDict['globalMapDetails']['coalInfo']:
print('WARNING: looks like no profit attribd to coal id=%d' % coalitionId)
resultsDict['globalMapDetails']['coalInfo'][coalitionId] = {'coalAllocs': [], 'subdMapProfits': []}
coalitionDict = resultsDict['globalMapDetails']['coalInfo'][coalitionId]
profitToCoal = subdMap['incdProfit'] * 0.01 # moveCost=20, idleCost=0.2
subdMap['profitToCoal'] = profitToCoal
coalitionDict['subdMapProfits'].append(profitToCoal)
supTargetsList = resultsDict['supTargetList_%d' % index]
for supTarget in supTargetsList:
if supTarget['coalId'] not in resultsDict['globalMapDetails']['coalInfo']:
supTarget['nAllocs'] = 0
supTarget['profitAllocd'] = 0
supTarget['nSubdMaps'] = 0
supTarget['profitSubdMaps'] = 0
else:
coalDict = resultsDict['globalMapDetails']['coalInfo'][supTarget['coalId']]
allocList = coalDict['coalAllocs']
subdMapList = coalDict['subdMapProfits']
supTarget['nAllocs'] = len(allocList)
supTarget['profitAllocd'] = sum(allocList)
supTarget['nSubdMaps'] = len(subdMapList)
supTarget['profitSubdMaps'] = sum(subdMapList)
index += 1
print('temp_nExpTars=%d temp_nExpTarsWithCoalSet=%d\n' % (temp_nExpTars, temp_nExpTarsWithCoalSet)) | [
"declan.obeirne@gmail.com"
] | declan.obeirne@gmail.com |
8d8a4e9070cec42c52b01be5f9fe0ab168fa0a4d | 7e25e1542ed2b7ef8a628495eff7c87b88e6e868 | /WD-Firmware/demo/build/toolchain/comrv/ovlymgr.py | de03a0066e890e56997873ba7080a947f0db72bb | [
"Apache-2.0"
] | permissive | Global19-atlassian-net/riscv-fw-infrastructure | ff24e649fa97aef392a366eadcad657e77033570 | 34eca62724b750ee1459f9e4de460e3be2efd860 | refs/heads/master | 2023-03-16T01:07:08.921974 | 2021-02-28T15:13:46 | 2021-02-28T15:28:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106,865 | py | '''
SPDX-License-Identifier: Apache-2.0
Copyright 2021 Western Digital Corporation or its affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import gdb
import re
import sys
from gdb.unwinder import Unwinder
from gdb.FrameDecorator import FrameDecorator
# Required to make calls to super () work in python2.
__metaclass__ = type
#=====================================================================#
# Notes on ComRV stack management.
#
# The ComRV stack is full-descending.
#
# Register $t3 points to the current comrv stack frame, that is the
# frame that was used for the last call into ComRV.
#
# Register $t4 points to the next comrv stack frame to use, this is the
# frame that will be used by the next call into ComRV.
#
# Each ComRV stack frame can potentially be of different sizes, though
# at the time of writing this, I have never seen a non-standard sized
# ComRV stack frame in the wild. It might be the case that only when
# using an RTOS will ComRV stack frames be anything other than the
# standard size(s).
#
# The layout and size of a standard ComRV stack frame depends on
# whether the application has multi-group support compiled in, and
# then there are two versions of multi-group support, depending on how
# large your multi-groups might be.
#
# This is one layout of a stack frame (size 12-bytes):
#
# |0|1|2|3| [ Byte numbering, for information, not part of the stack. ]
# | A|B|C| A=Offset (2-bytes), B=Alignment (1-byte), C=Multi-group Index (1-byte)
# | | 4-byte token.
# | | 4-byte return address.
#
# This is another layout of a stack frame (size 16-bytes):
#
# |0|1|2|3| [ Byte numbering, for information, not part of the stack. ]
# | D| E| D=Padding (2-bytes), E=Multi-group Index (2-bytes)
# | A|B|C| A=Offset (2-bytes), B=Alignment (1-byte), C=Padding (1-byte)
# | | 4-byte token.
# | | 4-byte return address.
#
# The first stack frame entry, created when the comrv engine is
# initialised will have return address, token, and alignment fields
# all set to 0. The Offset field will be set to 0xdead. This entry
# marks the outer most entry of the comrv stack, we never unwind past
# this. To try to do so is a mistake.
#
# The 'Offset' field: This contains the offset in bytes between the
# current comrv stack frame pointer and the previous comrv-stack frame
# pointer.
#
# The 'Alignment' is used during the process of unwinding. When we
# have a return address within the cache, and we know which overlay
# group used to be mapped in, the alignment is used as part of the
# process of figuring out the correct return address.
#
# The 'Token' field contains the token that was requested when ComRV
# was called into. This will represent the overlay that is going to
# be loaded into memory with this call (or maybe the overlay will
# already be loaded).
#
# When calling a non-overlay function from an overlay function we
# still need to pass through ComRV, so returns from the non-overlay
# function will, upon passing through ComRV trigger the overlay
# function to be mapped back in. As a result, the 'Token' field can
# also contain real addresses in non-overlay functions.
#
# The 'return address' field is the value of $ra on entry into ComRV.
# If we call to ComRV from non-overlay code, then this will be a real
# address we can return too. If we call into ComRV from overlay code
# then this address will be in the cache and we will need to figure
# out which overlay used to be mapped, and recompute a new return
# address.
#=====================================================================#
# Should debug messages be printed?
DEFAULT_DEBUG = False
# The default maximum group size.
DEFAULT_MAX_GROUP_SIZE = 4096
# The default size for the "pages" in the ComRV cache and storage area.
DEFAULT_MIN_COMRV_CACHE_ENTRY_SIZE_IN_BYTES = 512
OVERLAY_MIN_CACHE_ENTRY_SIZE_IN_BYTES = 512
COMRV_INFO_EVICTION_FIELD = 0x30
COMRV_INFO_EVICT_POLICY_LRU = 0x10
COMRV_INFO_EVICT_POLICY_LFU = 0x20
COMRV_INFO_EVICT_POLICY_MIX = 0x30
# Various symbols that are read in order to parse ComRV.
MULTI_GROUP_OFFSET_SYMBOL = "g_stComrvCB.ucMultiGroupOffset"
COMRV_INFO_SYMBOL = "g_uiComrvInfo"
OVERLAY_STORAGE_START_SYMBOL = "OVERLAY_START_OF_OVERLAYS"
OVERLAY_STORAGE_END_SYMBOL = "OVERLAY_END_OF_OVERLAYS"
OVERLAY_CACHE_START_SYMBOL = "__OVERLAY_CACHE_START__"
OVERLAY_CACHE_END_SYMBOL = "__OVERLAY_CACHE_END__"
COMRV_RETURN_FROM_CALLEE_LABEL = "comrv_ret_from_callee"
COMRV_RETURN_FROM_CALLEE_CONTEXT_SWITCH_LABEL = "comrv_ret_from_callee_context_switch"
COMRV_IGONR_CALLER_THUNK_STACK_FRAME = "comrv_igonr_caller_thunk_stack_frame"
COMRV_INVOKE_CALLEE_LABEL = "comrv_invoke_callee"
COMRV_ENTRY_LABEL = "comrvEntry"
COMRV_END_LABEL = "comrvEntryDisable"
COMRV_ENTRY_CONTEXT_SWITCH_LABEL = "comrvEntry_context_switch"
COMRV_EXIT_LABEL = "comrv_exit_ret_to_caller"
# The following symbols are actually used as format strings. They must
# include a single '%d' format specified which is replaced with the ComRV
# cache index.
OVERLAY_CACHE_AT_INDEX_TO_GROUP_ID \
= "g_stComrvCB.stOverlayCache[%d].unToken.stFields.uiOverlayGroupID"
OVERLAY_CACHE_AT_INDEX_TO_SIZE_IN_MIN_UNITS \
= "g_stComrvCB.stOverlayCache[%d].unProperties.stFields.usSizeInMinGroupSizeUnits"
OVERLAY_TABLE_ENTRY_EVICT_LOCK_VAL \
= "g_stComrvCB.stOverlayCache[%d].unProperties.stFields.usEvictLock"
OVERLAY_TABLE_ENTRY_GROUP_LOCK_VAL \
= "g_stComrvCB.stOverlayCache[%d].unProperties.stFields.usEntryLock"
OVERLAY_TABLE_ENTRY_DATA_OVL_VAL \
= "g_stComrvCB.stOverlayCache[%d].unProperties.stFields.usData"
OVERLAY_TABLE_ENTRY_LRU_EVICTION_VAL \
= "g_stComrvCB.stOverlayCache[%d].unLru.stFields.typNextLruIndex"
OVERLAY_TABLE_ENTRY_LRU_INDEX_VAL \
= "g_stComrvCB.ucLruIndex"
OVERLAY_TABLE_ENTRY_MRU_INDEX_VAL \
= "g_stComrvCB.ucMruIndex"
OVERLAY_TABLE_ENTRY_TOKEN_VAL \
= "g_stComrvCB.stOverlayCache[%d].unToken.uiValue"
#=====================================================================#
# A class for the control variable 'set/show debug comrv on|off'.
class debug_parameter (gdb.Parameter):
    '''Controls debugging messages from the Python Overlay Manager. This
    should only be needed to help track down bugs in the Python code.'''
    set_doc = "Set whether debugging from the Python Overlay Manager is on."
    show_doc = "Show whether debugging from the Python Overlay Manager is on."

    def __init__ (self):
        # Registers the boolean maintenance parameter 'debug comrv'.
        super (debug_parameter, self).__init__ ("debug comrv",
                                                gdb.COMMAND_MAINTENANCE,
                                                gdb.PARAM_BOOLEAN)
        self.value = DEFAULT_DEBUG

    def get_show_string (self, value):
        return ("Debugging of ComRV Python overlay manager is %s."
                % (value))

    # Truth testing under Python 2.
    def __nonzero__ (self):
        return 1 if self.value else 0

    # Truth testing under Python 3.
    def __bool__ (self):
        return self.value

# An instance of the debug parameter.  The truth-testing methods above
# mean this object can be used directly as a boolean:
#   if (overlay_debug):
#     ...
overlay_debug = debug_parameter ()
# Class to create the 'set comrv' prefix command.
class set_comrv_prefix_command (gdb.Command):
    def __init__ (self):
        # Final 'True' marks this command as a prefix.
        super (set_comrv_prefix_command, self).__init__ ("set comrv",
                                                         gdb.COMMAND_NONE,
                                                         gdb.COMPLETE_NONE,
                                                         True)
# Class to create the 'show comrv' prefix command.
class show_comrv_prefix_command (gdb.Command):
    def __init__ (self):
        # Final 'True' marks this command as a prefix.
        super (show_comrv_prefix_command, self).__init__ ("show comrv",
                                                          gdb.COMMAND_NONE,
                                                          gdb.COMPLETE_NONE,
                                                          True)

# Now instantiate the above classes, actually creating the 'set comrv' and
# 'show comrv' prefix commands.
set_comrv_prefix_command ()
show_comrv_prefix_command ()
# Class that represents the maximum overlay group size.  This is used when
# unwinding the ComRV stack.
class max_group_size_parameter (gdb.Parameter):
    '''The maximum group size. This is a hard coded constant within the
    toolchain and ComRV. The value here must be adjusted to match. This value
    is required in order to correctly unwind the ComRV stack.
    Changing this once GDB has already parsed the ComRV data structures will
    cause undefined behaviour. This should only be modified once, immediately
    after initially loading the ComRV support Python script.'''
    set_doc = "Set the maximum overlay group size."
    show_doc = "Show the maximum overlay group size."

    def __init__ (self):
        # Registers the unsigned integer parameter 'comrv max-group-size'.
        super (max_group_size_parameter, self).__init__ ("comrv max-group-size",
                                                         gdb.COMMAND_STACK,
                                                         gdb.PARAM_ZUINTEGER)
        self.value = DEFAULT_MAX_GROUP_SIZE

    def get_show_string (self, value):
        return ("Maximum ComRV overlay group size is %s." % (value))

# Instance of parameter object.  Use the value field of this object.
max_group_size = max_group_size_parameter ()
# Class that represents the size of the "pages" in the ComRV cache and
# storage area.  This is the minimum unit size in which overlays are
# measured.
class min_overlay_entry_size_parameter (gdb.Parameter):
    '''The minimum size for a ComRV overlay. Many aspects of Comrv are
    measured in multiples of this minimum size. This value must match the
    value used by the compiler, linker, and ComRV enginee.
    Changing this once GDB has already parsed the ComRV data structures will
    cause undefined behaviour. This should only be modified once, immediately
    after initially loading the ComRV support Python script.'''
    set_doc = "Set the minimum ComRV entry size."
    show_doc = "Show the minimum ComRV entry size."

    def __init__ (self):
        # Registers the unsigned integer parameter 'comrv min-entry-size'.
        super (min_overlay_entry_size_parameter,
               self).__init__ ("comrv min-entry-size",
                               gdb.COMMAND_STACK,
                               gdb.PARAM_ZUINTEGER)
        self.value = DEFAULT_MIN_COMRV_CACHE_ENTRY_SIZE_IN_BYTES

    def get_show_string (self, value):
        return ("Minimum ComRV entry size, in bytes, is %s." % (value))

# Instance of parameter object.  Use the value field of this object.
min_entry_size = min_overlay_entry_size_parameter ()
# A class for the control variable 'set/show comrv show-frames'.
class show_comrv_frames_parameter (gdb.Parameter):
    '''Controls whether to show the comrv frames in the backtrace. When
    this is off 'comrv' frames will be hidden unless they are the
    currently selected frame.'''
    set_doc = "Set whether ComRV frames are shown in the backtrace."
    show_doc = "Show whether ComRV frames are shown in the backtrace."

    def __init__ (self):
        # Registers the boolean parameter 'comrv show-frames',
        # defaulting to on.
        super (show_comrv_frames_parameter,
               self).__init__ ("comrv show-frames",
                               gdb.COMMAND_STACK,
                               gdb.PARAM_BOOLEAN)
        self.value = True

    def get_show_string (self, value):
        return ("Display of ComRV frames in the backtrace is %s."
                % (value))

    # Truth testing under Python 2.
    def __nonzero__ (self):
        return 1 if self.value else 0

    # Truth testing under Python 3.
    def __bool__ (self):
        return self.value

# Instance object, usable directly as a boolean.
show_comrv_frames = show_comrv_frames_parameter ()
# A class for the control variable 'set/show comrv show-tokens'.
class show_comrv_tokens_parameter (gdb.Parameter):
    '''Controls whether to show the comrv token in the backtrace. When
    this is on GDB will display the ComRV token passed to each 'comrv'
    frame.
    The tokens can only be displayed when 'comrv show-frames' is on.'''
    set_doc = "Set whether ComRV tokens are shown in the backtrace."
    show_doc = "Show whether ComRV tokens are shown in the backtrace."

    def __init__ (self):
        # Registers the boolean parameter 'comrv show-tokens',
        # defaulting to on.
        super (show_comrv_tokens_parameter,
               self).__init__ ("comrv show-tokens",
                               gdb.COMMAND_STACK,
                               gdb.PARAM_BOOLEAN)
        self.value = True

    def get_show_string (self, value):
        return ("Display of ComRV tokens in the backtrace is %s."
                % (value))

    # Truth testing under Python 2.
    def __nonzero__ (self):
        return 1 if self.value else 0

    # Truth testing under Python 3.
    def __bool__ (self):
        return self.value

# Instance object, usable directly as a boolean.
show_comrv_tokens = show_comrv_tokens_parameter ()
# A class for the control variable 'set/show comrv initialized'.
class comrv_initialized_parameter (gdb.Parameter):
    '''This parameter displays whether the ComRV engine is currently
    initialized. This parameter will initially be `off`, but will
    automatically enter the `on` state when GDB detects that ComRV
    must now be initialized.
    Manually switching this parameter back to `off` will cause GDB
    to discard the currently cached ComRV state. However, the next
    time GDB detects that ComRV must be enabled this parameter will
    be switched back to `on`.
    Alternatively, manually switching this parameter to `on` will
    cause GDB to assume that ComRV is initialized. GDB will parse
    the static ComRV and cache it.'''
    set_doc = "Set whether ComRV is currently initialized."
    show_doc = "Show whether ComRV is currently initialized."

    def __init__ (self):
        # Registers the boolean parameter 'comrv initialized',
        # initially off.
        super (comrv_initialized_parameter,
               self).__init__ ("comrv initialized",
                               gdb.COMMAND_STACK,
                               gdb.PARAM_BOOLEAN)
        self.value = False

    # Called to display this property.
    def get_show_string (self, value):
        return ("The ComRV engine is considered initialized."
                if value
                else "The ComRV engine is NOT considered initialized.")

    # Called to print a string when the user sets this property.  We
    # make use of this to tweak the state when this property is
    # adjusted.
    def get_set_string (self):
        # When the user switches this off, discard any cached ComRV
        # state; it will be re-read when next required.
        if (not self.value):
            overlay_data.clear ()
        return ""

    # Truth testing under Python 2.
    def __nonzero__ (self):
        return 1 if self.value else 0

    # Truth testing under Python 3.
    def __bool__ (self):
        return self.value

# An instance of this property.
is_comrv_initialized_p = comrv_initialized_parameter ()
#=====================================================================#
# Return True if we believe that ComRV should have been initialised,
# and it is therefore safe to try and read the ComRV tables from
# memory.  Otherwise, return False.
def global_has_comrv_been_initialised_yet ():
    # Reading a global needs no 'global' declaration.
    return is_comrv_initialized_p.value
# Mark the ComRV engine as initialised.
def global_mark_comrv_as_initialised ():
    # Mutating an attribute of a global object needs no 'global'
    # declaration.
    is_comrv_initialized_p.value = True
#=====================================================================#
# Print STRING as a debug message if OVERLAY_DEBUG is True.
def debug (string):
    global overlay_debug
    if (overlay_debug):
        print (string)
        sys.stdout.flush ()
# Helper class: creating an instance turns global debug output on for
# the enclosing scope; the previous setting is restored when the
# instance is destroyed (i.e. when it goes out of scope).
class temp_debug_on:
    def __init__ (self):
        global overlay_debug
        self._saved_debug = overlay_debug
        overlay_debug = True

    def __del__ (self):
        global overlay_debug
        overlay_debug = self._saved_debug
#=====================================================================#
# Sign extend VALUE, treating it as a BITS wide two's complement
# number, and return the (possibly negative) Python integer result.
# Any bits of VALUE above BITS are ignored.
def sign_extend (value, bits):
    value &= (1 << bits) - 1
    # If the sign bit is set, pull the result down below zero.
    if (value & (1 << (bits - 1))):
        value -= (1 << bits)
    return value
# Wrapper around access to the global configuration parameter.
def get_comrv_min_entry_size ():
    # Reading a global needs no 'global' declaration.
    return min_entry_size.value
# Get the address of LABEL which is a string.  If the address of LABEL
# can't be found then return None.
def get_symbol_address (label):
    try:
        return int (gdb.parse_and_eval ("&%s" % (label)))
    except Exception:
        # Narrowed from a bare 'except'; a bare except would also
        # swallow KeyboardInterrupt and SystemExit.  Any evaluation
        # failure (e.g. missing symbol) simply yields None.
        return None
# Return the gdb.Block of the function containing address ADDR, or
# None if the block could not be found.
def function_block_at (addr):
    blk = gdb.current_progspace().block_for_pc(addr)
    if (blk == None or blk.global_block == None):
        return None
    # Walk up the superblock chain until the parent is the global or
    # static scope; BLK is then the function's outermost block.
    while (True):
        parent = blk.superblock
        if (parent.is_global or parent.is_static):
            return blk
        blk = parent
# Takes TOKEN_OR_ADDRESS which could be a 32-bit token, or a valid
# 32-bit RISC-V code address, and returns true if it is a ComRV token,
# or false if it is a code address.
#
# The least-significant bit decides: a valid code address always has
# its least significant bit clear, while ComRV tokens always have the
# least significant bit set.
def is_overlay_token_p (token_or_address):
    return ((token_or_address & 0x1) != 0)
# Takes TOKEN, a 32-bit ComRV token, and returns true if TOKEN is a
# multi-group token (bit 31 set), otherwise returns false.
def is_multi_group_token_p (token):
    assert (is_overlay_token_p (token))
    return ((token & 0x80000000) != 0)
# Takes TOKEN which is a 32-bit multi-group token and returns the
# overlay group number extracted from the token (bits [16:1]).
def mg_token_group_id (token):
    assert (is_multi_group_token_p (token))
    return ((token & 0x1fffe) >> 1)
# Takes TOKEN, a non-multi-group token, and extracts the group-id
# from the token (bits [16:1]).
def overlay_token_group_id (token):
    assert (not is_multi_group_token_p (token))
    group_id = (token >> 1) & 0xffff
    return group_id
# Takes TOKEN, a non-multi-group token, and extracts the function
# offset in bytes for the function referenced by this token.  The
# token holds the offset in 4-byte units in bits [26:17].
def overlay_token_func_offset (token):
    assert (not is_multi_group_token_p (token))
    words = (token >> 17) & 0x3ff
    return (words * 4)
# Class to wrap reading memory.  Provides an API for reading unsigned
# values of various sizes from memory.
class mem_reader:
    # Read a value LENGTH bytes long from ADDRESS.  The bytes are
    # combined little-endian, and the returned value is unsigned.
    @staticmethod
    def _read_generic (address, length):
        inf = gdb.selected_inferior ()
        b = inf.read_memory (address, length)
        shift = 0
        val = 0
        for i in range(len(b)):
            # Indexing the buffer returned by read_memory yields a
            # one-character string under Python 2, but may yield an
            # int directly under Python 3 (memoryview); calling ord()
            # on an int would raise TypeError, so only convert when
            # needed.
            t = b[i]
            if (not isinstance (t, int)):
                t = ord (t)
            val |= (t << shift)
            shift += 8
        return val

    @staticmethod
    def read_8_bit (address):
        return mem_reader._read_generic ((address & 0xffffffff), 1)

    @staticmethod
    def read_16_bit (address):
        return mem_reader._read_generic ((address & 0xffffffff), 2)

    @staticmethod
    def read_32_bit (address):
        return mem_reader._read_generic ((address & 0xffffffff), 4)
# The Overlay Cache Area is defined by a start and end label, this is
# the area into which code (and data?) is loaded in order to use it.
# This area is divided into "pages", each page is (currently) 512
# bytes (0x200) in size, but this can be modified by the user.
# The overlay tables are loaded into the last page of this cache
# area.
class overlay_data:
_instance = None
# Holds information about all the groups and multi-groups.
class _overlay_group_data:
def __init__ (self, groups, multi_groups, multi_group_table):
self._groups = groups
self._multi_groups = multi_groups
self._multi_group_table = multi_group_table
def get_group (self, index):
return self._groups[index]
def get_group_count (self):
return len (self._groups)
def get_multi_group (self, index):
return self._multi_groups[index]
def get_multi_group_count (self):
return len (self._multi_groups)
def get_token_from_multi_group_table (self, index):
return self._multi_group_table[index]
# Holds information about a single group.
class _overlay_group:
def __init__ (self, base_address, size_in_bytes, id):
self._base_address = base_address
self._size_in_bytes = size_in_bytes
self._id = id
def base_address (self):
return self._base_address
def size_in_bytes (self):
return self._size_in_bytes
@property
def id (self):
return self._id
# Holds information about a single member of a multi-group.
class _overlay_multi_group_member:
def __init__ (self, overlay_group, token):
self._overlay_group = overlay_group
self._token = token
self._offset = overlay_token_func_offset (token)
@property
def token (self):
return self._token
@property
def overlay_group (self):
return self._overlay_group
@property
def offset (self):
return self._offset
# Holds information about a single multi-group. NUMBER is
# assigned to each multi-group in the order they are encountered
# in the multi-group table, with 0 assigned to the first
# multi-group, then 1, etc.
#
# The INDEX is the index into the multi-group table for the first
# token of that multi-group, so the first multi-group always has
# index 0, but the second multi-group could have any index value.
#
# The MEMBERS is the list of multi-group member objects.
#
# The SIZE is the size in bytes of the function that is the
# goal of this multi-group.
#
# The FUNC is a string, the name of the function this is a
# multi-group for, or None if this couldn't be figured out.
class _overlay_multi_group:
def __init__ (self, number, index, members, size, func):
self._number = number
self._index = index
self._members = members
self._size_in_bytes = size
self._function = func
@property
def tokens (self):
return map (lambda m : m.token,
self._members)
def index (self):
return self._index
def number (self):
return self._number
@property
def members (self):
return self._members
@property
def size_in_bytes (self):
return self._size_in_bytes
@property
def function_name (self):
return self._function
# A class to describe an area of memory. This serves as a base
# class for the cache region descriptor, and the storage region
# descriptor classes.
class _memory_region:
# The START is the first address within the region, while END
# is the first address just beyond the region.
def __init__ (self, start, end):
self._start_address = start
self._end_address = end
# Returns the first address within the region.
def start_address (self):
return self._start_address
# Return the first address past the end of the region.
def end_address (self):
return self._end_address
# A static description of the overlay cache area. This is the
# area of memory into which overlays are loaded so they can be
# used.
class _cache_descriptor (_memory_region):
def __init__ (self, start, end):
super (overlay_data._cache_descriptor, self).__init__ (start, end)
# Return the address for the start of the cache region.
def base_address (self):
return self.start_address ()
# Return the total size of the cache in bytes, including the tables
# region.
def size_in_bytes (self):
return self.end_address () - self.start_address ()
# Return the number of entries that are available for holding
# overlays. This excludes the area that is given up to hold the
# overlay tables. Currently the tables are copied into the last entry
# in the cache.
def number_of_working_entries (self):
entry_size = self.entry_size_in_bytes ()
return ((self.size_in_bytes () / entry_size)
- (self.tables_size_in_bytes () / entry_size))
# Return the total number of entries that are in the cache, this
# includes any entries being used to hold the overlay tables.
def total_number_of_entries (self):
entry_size = self.entry_size_in_bytes ()
return (self.cache_size_in_bytes () / entry_size)
# The address of the overlay tables within the cache. Currently these
# are always in the last entry of the cache, and are one entry in size.
def tables_base_address (self):
entry_size = self.entry_size_in_bytes ()
return self.end_address () - self.tables_size_in_bytes ()
# Return the size of the overlay tables region in bytes. This is
# currently always a single page of the cache.
def tables_size_in_bytes (self):
return self.entry_size_in_bytes ()
# Return the size in bytes of a single entry (or page) within the
# cache.
def entry_size_in_bytes (self):
return get_comrv_min_entry_size ()
# A class that describes the overlay systems storage area. This
# is the area of memory from which the overlays are loaded. The
# debug information will refer to this area,
class _storage_descriptor (_memory_region):
def __init__ (self, start, end):
super (overlay_data._storage_descriptor, self).__init__ (start, end)
class _comrv_labels ():
def __init__ (self):
self.comrv_invoke_callee \
= get_symbol_address (COMRV_INVOKE_CALLEE_LABEL)
self.ret_from_callee \
= get_symbol_address (COMRV_RETURN_FROM_CALLEE_LABEL)
self.comrv_ret_from_callee_context_switch \
= get_symbol_address (COMRV_RETURN_FROM_CALLEE_CONTEXT_SWITCH_LABEL)
self.comrv_igonr_caller_thunk_stack_frame \
= get_symbol_address (COMRV_IGONR_CALLER_THUNK_STACK_FRAME)
self.comrv_entry \
= get_symbol_address (COMRV_ENTRY_LABEL)
self.comrv_end \
= get_symbol_address (COMRV_END_LABEL)
self.comrv_entry_context_switch \
= get_symbol_address (COMRV_ENTRY_CONTEXT_SWITCH_LABEL)
self.comrv_exit \
= get_symbol_address (COMRV_EXIT_LABEL)
self.enabled = (self.comrv_invoke_callee
and self.ret_from_callee
and self.comrv_entry and self.comrv_exit)
# A wrapper class to hold all the different information we loaded from
# target memory. An instance of this is what we return from the fetch
# method.
class _overlay_data_inner:
def __init__ (self, cache_descriptor, storage_descriptor, groups_data,
mg_index_offset, info_sym):
self._cache_descriptor = cache_descriptor
self._groups_data = groups_data
self._storage_descriptor = storage_descriptor
self._multi_group_index_offset = mg_index_offset
self._info_sym = info_sym
def cache (self):
return self._cache_descriptor
def storage (self):
return self._storage_descriptor
def group (self, index):
return self._groups_data.get_group (index)
def group_count (self):
return self._groups_data.get_group_count ()
def multi_group (self, index):
return self._groups_data.get_multi_group (index)
def multi_group_count (self):
return self._groups_data.get_multi_group_count ()
def is_multi_group_enabled (self):
return self._multi_group_index_offset > 0
def multi_group_index_offset (self):
return self._multi_group_index_offset
def get_token_from_multi_group_table (self, index):
return self._groups_data.get_token_from_multi_group_table (index)
def comrv_initialised (self):
return (not self._groups_data == None)
def labels (self):
# TODO: Maybe we could do some caching here?
return overlay_data._comrv_labels ()
def comrv_info (self):
return self._info_sym
# Read the group offset for overlay group GROUP_NUMBER. The
# overlay data starts at address BASE_ADDRESS in memory.
#
# Return the offset in bytes for the specified overlay group.
@staticmethod
def _read_overlay_offset (base_address, end_address, group_number):
base_address = base_address + (2 * group_number)
if ((base_address + 1) >= end_address):
raise RuntimeError ("out of bounds access while reading offset "
+ "table for group %d" % (group_number))
scaled_offset = mem_reader.read_16_bit (base_address)
offset = get_comrv_min_entry_size () * scaled_offset
return offset
# Read a 32-bit overlay token from the multi-group table. ADDRESS
# is the exact address from which the token should be loaded.
@staticmethod
def _read_overlay_token (address):
token = mem_reader.read_32_bit (address)
return token
# Load information about all of the groups and multi-groups from the
# overlay cache tables, and return an instance of an object holding all of
# this data.
@staticmethod
def _load_group_data (table_start, table_size, storage_desc,
multi_group_offset):
def _load_overlay_groups (table_start, table_end, storage_start):
groups = list ()
# Read all of the overlay group offsets from memory, adding
# entries to the overlay group list as we go.
grp = 0
# Read the offset of the very first overlay group. This
# should always be 0, but lets check it anyway.
prev_offset \
= overlay_data._read_overlay_offset (table_start,
table_end,
grp)
if (prev_offset != 0):
raise RuntimeError ("offset of first overlay group is 0x%x not 0"
% (prev_offset))
while (True):
# Read the offset for the start of the next overlay group.
next_offset \
= overlay_data._read_overlay_offset (table_start,
table_end,
(grp + 1))
# An offset of 0 indicates the end of the group table.
if (next_offset == 0):
break
# Calculate the size of this overlay group, and create an
# object to represent it.
size = next_offset - prev_offset
groups.append (overlay_data.
_overlay_group (storage_start + prev_offset,
size, grp))
grp += 1
prev_offset = next_offset
return groups
def _mg_members_to_func_and_size (id, members):
mg_block = None
for m in members:
addr = m.overlay_group.base_address () + m.offset
#size = size_of_function_at (addr)
b = function_block_at (addr)
if (mg_block == None):
mg_block = b
elif (b != None and b != mg_block):
raise RuntimeError ("multiple sizes for multi-group %d" % id)
if (mg_block == None):
raise RuntimeError ("unable to find size of multi-group %d" % id)
mg_name = None
mg_size = None
if (mg_block != None):
if (mg_block.function != None):
mg_name = mg_block.function.name
mg_size = mg_block.end - mg_block.start
return (mg_name, mg_size)
def _load_overlay_multi_groups (table_start, table_end, overlay_groups):
multi_groups = list ()
all_tokens = list ()
# The start and end of the region containing the
# multi-group table.
mg_start = table_start
mg_end = table_end
# A number assigned to each multi-group. Starts at 0, and
# increments by one for each multi-group.
mg_num = 0
# An index assigned to each multi-group. This is the
# index of the first member of the multi-group.
mg_idx = 0
# Used to track the index into the multi-group table.
idx = 0
# The tokens within the current multi-group.
mg_tokens = list ()
while (mg_start < mg_end):
# Read a 32-bit overlay token from the multi-group table.
ovly_token = overlay_data._read_overlay_token (mg_start)
all_tokens.append (ovly_token)
idx += 1
# A token of 0 indicates the end of a multi-group.
if (ovly_token == 0):
# If this is the first entry in a multi-group then we
# have reached the end of all multi-group data, and
# we're done.
if (len (mg_tokens) == 0):
break
# Otherwise, we've reached the end of this
# multi-group, but there might be more after this.
# Finalise this multi-group, and prepare to parse the
# next.
else:
# Take TOKEN, a non-multi-group token
# extracted from the multi-group table, and
# return a new multi-group member object.
def token_to_member (token):
g = overlay_token_group_id (token)
og = overlay_groups[g]
return overlay_data.\
_overlay_multi_group_member (og, token)
# Convert MG_TOKENS, a list of all the
# non-multi-group tokens that are within this
# multi-group, into a list of multi-group
# member objects (in MG_MEMBERS).
mg_members = map (token_to_member, mg_tokens)
(mg_func, mg_size) \
= _mg_members_to_func_and_size (mg_num, mg_members)
multi_groups.append \
(overlay_data._overlay_multi_group \
(mg_num, mg_idx, mg_members, mg_size,
mg_func))
# Now reset ready to read the next multi-group.
mg_num += 1
mg_idx = idx
mg_tokens = list ()
# Otherwise a non-zero token is a member of the multi-group.
else:
mg_tokens.append (ovly_token)
mg_start += 4 # The size of one overlay token.
return multi_groups, all_tokens
storage_start = storage_desc.start_address ()
if (multi_group_offset >= 0):
table_end = table_start + multi_group_offset
else:
table_end = table_start + table_size
groups = _load_overlay_groups (table_start,
table_end,
storage_start)
if (multi_group_offset >= 0):
table_end = table_start + table_size
table_start += multi_group_offset
multi_groups, all_tokens \
= _load_overlay_multi_groups (table_start, table_end, groups)
else:
multi_groups = list ()
all_tokens = list ()
return (overlay_data.
_overlay_group_data (groups, multi_groups, all_tokens))
# Read the address of symbol NAME from the inferior, return the
# address as an integer. If an error is thrown (missing symbol?)
# then None is returned.
@staticmethod
def _read_symbol_address_as_integer (name):
return get_symbol_address (name)
# Read the value of symbol NAME from the inferior, return the
# value as an integer. If the symbol can't be read (missing
# symbol?) then return None.
@staticmethod
def _read_symbol_value_as_integer (name):
try:
return int (gdb.parse_and_eval ("%s" % (name)))
except:
return None
# Load from target memory information about the overlay cache and the
# overlay groups.
@staticmethod
def fetch ():
if (overlay_data._instance != None):
return overlay_data._instance
# The overlay cache is defined by two symbols, a start and end
# symbol. Read these and create a cache descriptor object.
cache_start = overlay_data.\
_read_symbol_address_as_integer \
(OVERLAY_CACHE_START_SYMBOL)
cache_end = overlay_data.\
_read_symbol_address_as_integer \
(OVERLAY_CACHE_END_SYMBOL)
if (cache_start and cache_end):
cache_desc = overlay_data._cache_descriptor (cache_start, cache_end)
else:
cache_desc = None
# Similarly, the storage area, where overlays are loaded from, is
# defined by a start and end symbol.
storage_start = overlay_data.\
_read_symbol_address_as_integer \
(OVERLAY_STORAGE_START_SYMBOL)
storage_end = overlay_data.\
_read_symbol_address_as_integer \
(OVERLAY_STORAGE_END_SYMBOL)
if (storage_start and storage_end):
storage_desc \
= overlay_data._storage_descriptor (storage_start, storage_end)
else:
storage_desc = None
# This is the offset to the start of the multi-group table
# from the start of the overlay tables. We set this to -1
# here, if this ComRV doesn't have multi-group support then
# this is left as -1.
multi_group_offset = -1
# Finally, if ComRV has been initialised then load the current state
# from memory.
init_been_called = global_has_comrv_been_initialised_yet ()
if (init_been_called):
try:
multi_group_offset = overlay_data.\
_read_symbol_value_as_integer (MULTI_GROUP_OFFSET_SYMBOL)
# The multi-group offset is held in the number of
# 2-byte chunks, so convert this into a byte offset.
multi_group_offset *= 2
except:
pass
# read the overlay info value
info_sym = overlay_data.\
_read_symbol_value_as_integer (COMRV_INFO_SYMBOL)
if (info_sym == None):
raise RuntimeError ("Couldn't read info symbol `%s'"
% COMRV_INFO_SYMBOL)
groups_data = overlay_data.\
_load_group_data (cache_desc.tables_base_address (),
cache_desc.tables_size_in_bytes (),
storage_desc, multi_group_offset)
else:
groups_data = None
info_sym = None
# Work out the size in bits of the multi-group index on the comrv stack.
# A size of zero means this ComRV does not have multi-group support.
if multi_group_offset > 0:
multi_group_index_offset = info_sym & 0xF
if (multi_group_index_offset not in [11, 14]):
raise RuntimeError ("Invalid multi-group index offset (expected "
+ " 11 or 14, but got " + str(multi_group_index_offset) + ")")
else:
multi_group_index_offset = 0
# Now package all of the components into a single class
# instance that we return. We only cache the object if ComRV
# has been initialised, in this way we shouldn't get stuck
# with a cached, not initialised object.
obj = overlay_data._overlay_data_inner (cache_desc, storage_desc,
groups_data,
multi_group_index_offset,
info_sym)
if (init_been_called):
overlay_data._instance = obj
return obj
# Discard the information loaded from the cache. The next time fetch is
# called the information will be reread.
@staticmethod
def clear ():
    # Drop the cached overlay_data instance; the next call to
    # overlay_data.fetch will re-read all state from the inferior.
    overlay_data._instance = None
# Class for walking the overlay data structures and calling the
# visit_mapped_overlay method for every mapped overlay group.
class mapped_overlay_group_walker:
    """Walk ComRV's in-memory cache tables and call visit_mapped_overlay
    for every overlay group that is currently mapped into the cache."""

    class eviction_lru(object):
        # Decodes the LRU linked list that ComRV maintains over cache
        # entries into an eviction ordering per cache index.
        def __init__(self):
            # Cache indexes ordered least-recently-used first.
            self.eviction_values = []

        def read_values(self):
            # get the lru and mru values
            lru = gdb.parse_and_eval(OVERLAY_TABLE_ENTRY_LRU_INDEX_VAL)
            mru = gdb.parse_and_eval(OVERLAY_TABLE_ENTRY_MRU_INDEX_VAL)
            mru = int(mru)
            lru = int(lru)
            # this is a case where cache is fully occupied with one
            # group - so lru and mru point to the same location
            if lru == mru:
                self.eviction_values.append(0)
            else:
                # walk through lru list and save eviction index;
                # 255 terminates the list.
                while (lru != 255):
                    self.eviction_values.append(lru)
                    lru = gdb.parse_and_eval(OVERLAY_TABLE_ENTRY_LRU_EVICTION_VAL % (lru))
                    lru = int(lru)

        def get_eviction_value(self, index):
            # Position of cache entry INDEX in the LRU ordering.
            return self.eviction_values.index(index)

    class eviction_factory(object):
        # Builds the eviction-policy decoder matching this ComRV build and
        # validates it implements the expected two-method API.
        def __init__(self, eviction_type):
            if eviction_type == COMRV_INFO_EVICT_POLICY_LRU:
                self.evict_obj = mapped_overlay_group_walker.eviction_lru()
            else:
                raise RuntimeError ("Unknown eviction type")
            if not getattr(self.evict_obj, "read_values", None):
                raise RuntimeError ("missing read_values implementation")
            if not getattr(self.evict_obj, "get_eviction_value", None):
                raise RuntimeError ("missing get_eviction_value implementation")
            self.evict_obj.read_values()

        def get_eviction_value(self, index):
            return self.evict_obj.get_eviction_value(index)

    # Call this to walk the overlay manager data structures in memory and
    # call the visit_mapped_overlay method for each mapped overlay group.
    def walk_mapped_overlays (self):
        # Grab the data that describes the current overlay state.
        ovly_data = overlay_data.fetch ()
        if (not ovly_data.comrv_initialised ()):
            self.comrv_not_initialised ()
            return
        # Now walk the overlay cache and see which entries are mapped in.
        index = 0
        # read eviction values list
        evict_obj = self.eviction_factory(ovly_data.comrv_info() & COMRV_INFO_EVICTION_FIELD)
        while (index < ovly_data.cache ().number_of_working_entries ()):
            group = gdb.parse_and_eval (OVERLAY_CACHE_AT_INDEX_TO_GROUP_ID % (index))
            group = int (group)
            offset = None
            # 0xffff marks an unoccupied cache entry.
            if (group != 0xffff):
                # Found an entry that is mapped in.
                group_desc = ovly_data.group (group)
                src_addr = group_desc.base_address ()
                length = group_desc.size_in_bytes ()
                dst_addr = (ovly_data.cache ().base_address ()
                            + (index
                               * ovly_data.cache ().entry_size_in_bytes ()))
                # get entry token
                token_val = gdb.parse_and_eval (OVERLAY_TABLE_ENTRY_TOKEN_VAL % (index))
                token_val = int (token_val)
                # get cache entry evict lock property
                evict_lock = gdb.parse_and_eval (OVERLAY_TABLE_ENTRY_EVICT_LOCK_VAL % (index))
                evict_lock = int (evict_lock)
                # get cache entry lock property
                entry_lock = gdb.parse_and_eval (OVERLAY_TABLE_ENTRY_GROUP_LOCK_VAL % (index))
                entry_lock = int (entry_lock)
                # get cache data property
                data = gdb.parse_and_eval (OVERLAY_TABLE_ENTRY_DATA_OVL_VAL % (index))
                data = int (data)
                # The visitor returns False to stop the walk early.
                if (not self.visit_mapped_overlay (src_addr, dst_addr, length,
                                                   index, group, evict_lock, entry_lock,
                                                   evict_obj.get_eviction_value(index), data, token_val)):
                    break
                offset = gdb.parse_and_eval (OVERLAY_CACHE_AT_INDEX_TO_SIZE_IN_MIN_UNITS % (index))
                offset = int (offset)
                if (offset == 0):
                    # Something has gone wrong here. An overlay
                    # appears to be mapped, but has 0 size. Maybe we
                    # could load the overlay size from the static
                    # data, after all we do know it. For now just
                    # force to 1 so we don't get stuck.
                    offset = 1
            else:
                # Found an entry that is not currently mapped - get the entry size
                offset = gdb.parse_and_eval (OVERLAY_CACHE_AT_INDEX_TO_SIZE_IN_MIN_UNITS % (index))
                offset = int (offset)
            # Move to the next cache entry.
            index += offset

    # Default implementation of visit_mapped_overlay, sub-classes should
    # override this method. Return true to continue walking the list of
    # mapped overlays, or return false to stop.
    def visit_mapped_overlay (self, src_addr, dst_addr, length,
                              cache_index, group_number, evict_lock = 0,
                              entry_lock = 0, evict_value = 0, data = 0,
                              token_val = 0):
        return True

    # Default implementation of comrv_not_initialised, sub-classes
    # should override this method. This is called if
    # walk_mapped_overlays is called before ComRV is initialised.
    def comrv_not_initialised (self):
        None
def print_current_comrv_state ():
    # Print a human-readable summary of ComRV state: the storage/cache
    # regions, the group and multi-group tables, and the overlay groups
    # that are currently mapped into the cache.
    ovly_data = overlay_data.fetch ()
    if (not ovly_data.comrv_initialised ()):
        # NOTE(review): "initialisd" looks like a typo in this runtime
        # message; left unchanged here to keep output byte-identical.
        print ("ComRV not yet initialisd:")
        return
    print ("Overlay Regions:")
    print (" %-9s%-12s%-12s%-8s" % ("Region", "Start", "End", "Size"))
    print (" %-9s0x%-10x0x%-10x0x%-6x"
           % ("storage",
              ovly_data.storage ().start_address (),
              ovly_data.storage ().end_address (),
              (ovly_data.storage ().end_address () -
               ovly_data.storage ().start_address ())))
    print (" %-9s0x%-10x0x%-10x0x%-6x"
           % ("cache",
              ovly_data.cache ().start_address (),
              ovly_data.cache ().end_address (),
              (ovly_data.cache ().end_address () -
               ovly_data.cache ().start_address ())))
    print ("")
    print ("Overlay groups:")
    grp_num = 0
    while (grp_num < ovly_data.group_count ()):
        grp = ovly_data.group (grp_num)
        if (grp == None):
            break
        if (grp_num == 0):
            # Header row, printed once before the first group.
            print (" %-7s%-12s%-12s%-8s" % ("Group", "Start", "End", "Size"))
        print (" %-7d0x%-10X0x%-10X0x%-6X"
               % (grp_num, grp.base_address (),
                  (grp.base_address () + grp.size_in_bytes ()),
                  grp.size_in_bytes ()))
        grp_num += 1
    print ("")
    print ("Overlay multi-groups:")
    if (ovly_data.is_multi_group_enabled ()):
        for grp_num in range (0, ovly_data.multi_group_count ()):
            mg = ovly_data.multi_group (grp_num)
            if (grp_num == 0):
                # Two-line header before the first multi-group.
                print (" %6s%-7s%-12s%-9s%-10s%-10s%-10s"
                       % ("", "", "", "Overlay", "Function",
                          "Function", "Function"))
                print (" %-6s%-7s%-12s%-9s%-10s%-10s%-10s"
                       % ("Num", "Index", "Token", "Group", "Offset",
                          "Size", "Name"))
            else:
                # Separator row between multi-groups.
                print (" %-6s%-7s%-12s%-9s%-10s%-10s%-10s"
                       % ("---", "---", "---", "---", "---", "---",
                          "---"))
            for m in mg.members:
                print (" %-6d%-7d0x%08x %-9d0x%-8x0x%-8x%s"
                       % (grp_num, mg.index (), m.token,
                          m.overlay_group.id, m.offset,
                          mg.size_in_bytes, mg.function_name))
    else:
        print (" Not supported in this ComRV build.")
    print ("")
    print ("Current overlay mappings:")
    # Class to walk the currently mapped overlays and print a summary.
    class print_mapped_overlays (mapped_overlay_group_walker):
        def __init__ (self):
            self._shown_header = False
            self.walk_mapped_overlays ()
            if (not self._shown_header):
                # The header is only printed from visit_mapped_overlay,
                # so if it never appeared, nothing was mapped.
                self.nothing_is_mapped ()
        def visit_mapped_overlay (self, src_addr, dst_addr, length,
                                  cache_index, group_number, evict_lock,
                                  entry_lock, evict_value, data, token_val):
            if (not self._shown_header):
                self._shown_header = True
                print (" %-7s%-9s%-12s%-12s%-9s%-7s%-7s%-12s%-9s%-12s"
                       % ("Cache", "Overlay", "Storage", "Cache", "Group", "Evict", "Entry", "Evict", "Data", "Token"))
                print (" %-7s%-9s%-12s%-12s%-9s%-7s%-7s%-12s%-9s%-12s"
                       % ("Index", "Group", "Addr", "Addr", "Size", "Lock", "Lock", "Value", "Overlay", "Addr"))
            print (" %-7d%-9d0x%-10X0x%-10X0x%-7X%-7d%-7d0x%-10X%-9d0x%-10X"
                   % (cache_index, group_number, src_addr, dst_addr, length,
                      evict_lock, entry_lock, evict_value, data, token_val))
            return True
        def nothing_is_mapped (self):
            print (" No overlays groups are currently mapped.")
    print_mapped_overlays ()
# Model a single frame on the ComRV stack.
class comrv_stack_frame:
    """Raw view of a single frame on the ComRV stack at address ADDR.

    MG_INDEX_OFFSET is the byte offset of the multi-group index field
    within the frame (11 or 14), or zero/negative when this ComRV build
    has no multi-group support."""
    def __init__ (self, addr, mg_index_offset):
        self._frame_addr = addr
        # Frame layout as read from memory: 32-bit return address,
        # 32-bit token, 16-bit offset to the next (outer) frame, and an
        # 8-bit alignment value.
        self._return_addr = mem_reader.read_32_bit (addr)
        self._token = mem_reader.read_32_bit (addr + 4)
        self._offset = mem_reader.read_16_bit (addr + 8)
        self._align = mem_reader.read_8_bit (addr + 10)
        if (mg_index_offset > 0):
            if (mg_index_offset == 11):
                # Multi-group index stored as a signed 8-bit value.
                index = mem_reader.read_8_bit (addr + 11)
                self._mg_index = sign_extend (index, 8)
            else:
                # Otherwise a signed 16-bit value at offset 14.
                index = mem_reader.read_16_bit (addr + 14)
                self._mg_index = sign_extend (index, 16)
        else:
            # No multi-group support in this ComRV build.
            self._mg_index = 0
    def frame_address (self):
        # Address of this frame (the t3 value), truncated to 32 bits.
        return (self._frame_addr & 0xffffffff)
    def return_address (self):
        return (self._return_addr & 0xffffffff)
    def token (self):
        return (self._token & 0xffffffff)
    def align (self):
        # The alignment field is held in units of the minimum cache
        # entry size; convert to bytes.
        return ((self._align & 0xffff) * get_comrv_min_entry_size ())
    def multi_group_index (self):
        return (self._mg_index & 0xffff)
    def offset (self):
        # Size of this frame in bytes; 0xdead marks the outermost frame.
        return self._offset
class comrv_prefix_command (gdb.Command):
    # Defines 'comrv' as a prefix command so that sub-commands such as
    # 'comrv status' and 'comrv stack' can be registered beneath it.
    def __init__ (self):
        gdb.Command.__init__ (self, "comrv", gdb.COMMAND_NONE, gdb.COMPLETE_NONE, True)
# The class represents a new GDB command 'comrv status' that reads the current
# overlay status and prints a summary to the screen.
class comrv_status_command (gdb.Command):
    '''Display the current state of ComRV overlays.
    This command only works once ComRV has been initialised.
    The information displayed includes the addresses of the ComRV
    cache and storage areas, a summary of all the groups and multi-groups
    as well as which overlay groups are currently mapped in.'''

    def __init__ (self):
        # Register the 'comrv status' sub-command with GDB.
        gdb.Command.__init__ (self, "comrv status", gdb.COMMAND_NONE)

    def invoke (self, args, from_tty):
        # Called by GDB when the user runs 'comrv status'.
        print_current_comrv_state ()
        # Discard the cached cache data, in case we ran this command at the
        # wrong time and the cache information is invalid. This will force
        # GDB to reload the information each time this command is run.
        overlay_data.clear ()
# The class represents a new GDB command 'comrv stack' that reads the
# current ComRV stack, and prints a summary. This is related to, but
# not the same as backtracing, as the backtrace interprets the ComRV
# stack, while this is a raw peek into the stack.
class comrv_stack_command (gdb.Command):
    '''Display the ComRV stack.
    This is different to the normal GDB backtrace in that backtrace interprets
    the ComRV stack, while this command just dumps the raw stack contents.
    This requires ComRV to be initialised before any stack can be displayed.
    The fields are:
      Frame     - The frame number, just an index, with the lowest number being
                  the most recent frame.
      Address   - The address of the frame, that is the value of register t3
                  that points to this frame.
      R/A       - The return address field for this stack frame.
      Token     - The token field for this stack frame.
      Alignment - The alignment field from the ComRV stack, alignment to size
                  of maximum group size.
      M/G       - (Only in multi-group builds of ComRV) The overlay group token
                  for the specific overlay that was used.
      Size      - The size of this stack frame. The outermost frame should have
                  a size of 0xdead, this indicates the end of the stack.'''

    def __init__ (self):
        gdb.Command.__init__ (self, "comrv stack", gdb.COMMAND_NONE)

    def invoke (self, args, from_tty):
        # Snapshot the state we need, then drop the cached overlay data
        # so stale information cannot outlive this command.
        ovly_data = overlay_data.fetch ()
        is_initialised = ovly_data.comrv_initialised ()
        is_mg = ovly_data.is_multi_group_enabled ()
        mg_index_offset = ovly_data.multi_group_index_offset ()
        overlay_data.clear ()
        if (not is_initialised):
            print ("ComRV not yet initialised")
            return
        # Register t3 points at the innermost ComRV stack frame.
        t3_addr = int (gdb.parse_and_eval ("$t3"))
        depth = 0
        if (is_mg):
            print ("%5s %10s %10s %10s %10s %6s %6s"
                   % ("Frame", "Address", "R/A", "Token", "Alignment", "M/G", "Size"))
        else:
            print ("%5s %10s %10s %10s %10s %6s"
                   % ("Frame", "Address", "R/A", "Token", "Alignment", "Size"))
        while (True):
            frame = comrv_stack_frame (t3_addr, mg_index_offset)
            if (is_mg):
                print ("%5s %10s %10s %10s %10s %6s %6s"
                       % (("#%d" % (depth)),
                          ("0x%08x" % (frame.frame_address ())),
                          ("0x%08x" % (frame.return_address ())),
                          ("0x%08x" % (frame.token ())),
                          ("0x%08x" % (frame.align ())),
                          ("0x%04x" % (frame.multi_group_index ())),
                          ("0x%x" % (frame.offset ()))))
            else:
                print ("%5s %10s %10s %10s %10s %6s"
                       % (("#%d" % (depth)),
                          ("0x%08x" % (frame.frame_address ())),
                          ("0x%08x" % (frame.return_address ())),
                          ("0x%08x" % (frame.token ())),
                          ("0x%08x" % (frame.align ())),
                          ("0x%x" % (frame.offset ()))))
            depth += 1
            # 0xdead marks the outermost frame; 0 is a safety stop so a
            # corrupt stack cannot loop forever.
            if (frame.offset () == 0xdead or frame.offset () == 0x0):
                break
            t3_addr += frame.offset ()
# The command 'parse-comrv' existed once, but is now deprecated.
class ParseComRV (gdb.Command):
    'Parse the ComRV data table.'

    def __init__ (self):
        gdb.Command.__init__ (self, "parse-comrv", gdb.COMMAND_NONE)

    def invoke (self, args, from_tty):
        # The command survives in name only, to redirect users of the
        # old 'parse-comrv' spelling to its replacement.
        raise RuntimeError ("this command is deprecated, use 'comrv status' instead")
# The class represents a new GDB command 'comrv group <LOC>' that
# takes a location specifier, as taken by the 'break' command, and
# reports which groups that location is in.
class comrv_groups_command (gdb.Command):
    '''Display the overlay groups a particular location is in.
    This command only works once ComRV has been initialised.
    Takes a single argument that is a string describing a location in the program
    being debugged, in the same format as the breakpoint command. This location
    is translated to an address (or multiple addresses), and then the group or
    groups those addresses appear in are listed.'''

    def __init__ (self):
        # Use breakpoint-location completion for the argument.
        gdb.Command.__init__ (self, "comrv groups", gdb.COMMAND_NONE,
                              gdb.COMPLETE_LOCATION)

    # If ADDR is inside a multi-group then return a list of all the
    # storage area addresses that are duplicates of ADDR. Otherwise
    # return a single entry list containing just ADDR.
    def _expand_mg_addresses (self, addr, ovly_data):
        # If we have multi-groups in this program, we need to expand
        # them now and figure out if our address is in any of them.
        for i in range (0, ovly_data.multi_group_count ()):
            mg = ovly_data.multi_group (i)
            for m in mg.members:
                low = m.overlay_group.base_address () + m.offset
                high = low + mg.size_in_bytes
                if (addr >= low and addr < high):
                    # Is in this multi-group! Map the offset within this
                    # member onto every member of the multi-group.
                    offset = addr - low
                    return map (
                        lambda x : (x.overlay_group.base_address ()
                                    + x.offset + offset),
                        mg.members)
        # Not in any multi-groups.
        return [addr]

    # Find an overlay group containing ADDR and return it, otherwise, return
    # None.
    def _find_group (self, ovly_data, addr):
        for grp_num in range (0, ovly_data.group_count ()):
            grp = ovly_data.group (grp_num)
            if (addr >= grp.base_address ()
                and addr < (grp.base_address () + grp.size_in_bytes ())):
                return (grp, grp_num)
        return (None, None)

    def invoke (self, args, from_tty):
        ovly_data = overlay_data.fetch ()
        if (not ovly_data.comrv_initialised ()):
            # NOTE(review): this only warns ("initialisd" typo preserved)
            # and then carries on; the group tables may be invalid at this
            # point - confirm this is intended.
            print ("ComRV not yet initialisd:")
        if (args == ""):
            raise RuntimeError ("missing location argument");
        (junk,sal) = gdb.decode_line (args)
        if (junk != None):
            raise RuntimeError ("junk at end of line: %s" % (junk))
        print ("%-12s%-7s%-12s%-12s%-8s%-8s"
               % ("", "Group", "Group", "Group", "Group", ""))
        print ("%-12s%-7s%-12s%-12s%-8s%-8s"
               % ("Address", "Number", "Start", "End", "Size", "Offset"))
        for s in (sal):
            pc = s.pc
            if (pc >= ovly_data.storage ().start_address ()
                and pc < ovly_data.storage ().end_address ()):
                all_addresses = self._expand_mg_addresses (pc, ovly_data)
                for addr in (all_addresses):
                    # Figure out which overlay group address ADDR is in.
                    (grp, idx) = self._find_group (ovly_data, addr)
                    if (grp == None):
                        print ("0x%-10x\t** not in an overlay group **"
                               % (addr))
                    else:
                        print ("0x%-10x%-7d0x%-10x0x%-10x0x%-6x0x%-6x"
                               % (addr, idx, grp.base_address (),
                                  (grp.base_address () + grp.size_in_bytes ()),
                                  grp.size_in_bytes (),
                                  (addr - grp.base_address ())))
            else:
                print ("0x%-10x\t** is not in overlay storage area **"
                       % (pc))
        # Discard the cached cache data, in case we ran this command at the
        # wrong time and the cache information is invalid. This will force
        # GDB to reload the information each time this command is run.
        overlay_data.clear ()
class MyOverlayManager (gdb.OverlayManager):
    """Concrete gdb.OverlayManager backed by ComRV's in-memory tables."""

    def __init__ (self):
        gdb.OverlayManager.__init__ (self, True)
        # STOP !
        #
        # No code should be placed here that assumes the ELF being
        # debugged is currently loaded. It is highly likely that this
        # file is sourced before the ELF being debugged is loaded (for
        # example in Eclipse) in which case non of the required
        # symbols will exist.
        pass

    def get_region_data (self):
        # Tell GDB where the ComRV storage and cache regions live.
        ovly_data = overlay_data.fetch ()
        debug ("Setting up overlay manager region data:")
        debug ("Cache:")
        debug (" Start: 0x%x" % (ovly_data.cache ().start_address ()))
        debug (" End: 0x%x" % (ovly_data.cache ().end_address ()))
        debug ("Storage:")
        debug (" Start: 0x%x" % (ovly_data.storage ().start_address ()))
        debug (" End: 0x%x" % (ovly_data.storage ().end_address ()))
        self.set_storage_region (ovly_data.storage ().start_address (),
                                 ovly_data.storage ().end_address ())
        self.set_cache_region (ovly_data.cache ().start_address (),
                               ovly_data.cache ().end_address ())

    def __del__ (self):
        print ('Destructor called for MyOverlayManager')

    # Return a string, where GDB should place the overlay event
    # breakpoint.
    def event_symbol_name (self):
        debug ("In Python code, event_symbol_name")
        return "_ovly_debug_event"

    # Return an integer, the number of multi-groups. Return the
    # special value -1 to indicate ComRV is not yet initialised, and
    # so we don't know how many multi-groups there are, in this case
    # GDB will ask again later. Otherwise return a value greater
    # than, or equal to zero, GDB will cache this answer and not ask
    # again.
    def get_multi_group_count (self):
        debug ("In Python get_multi_group_count method")
        mg_count = -1
        ovly_data = overlay_data.fetch ()
        if (ovly_data.comrv_initialised ()):
            mg_count = ovly_data.multi_group_count ()
        debug ("In Python get_multi_group_count method = %d" % (mg_count))
        return mg_count

    # For multi-group number ID return a list of all the storage area
    # addresses of all the functions within this multi-group.
    def get_multi_group (self, id):
        ovly_data = overlay_data.fetch ()
        if (not ovly_data.comrv_initialised ()):
            raise RuntimeError ("ComRV not yet initialised")
        if (id >= ovly_data.multi_group_count ()):
            raise RuntimeError ("Multi-group index out of range")
        res = list ()
        mg = ovly_data.multi_group (id)
        for m in mg.members:
            addr = m.overlay_group.base_address () + m.offset
            res.append (addr)
        return res

    # Called to read the current state of ComRV, which overlays are
    # mapped in. Should call the ADD_MAPPING method on ourselves
    # (implemented inside GDB) to inform GDB about an active overlay
    # mapping.
    def read_mappings (self):
        debug ("In Python code, read_mappings")
        # If we're reading mappings then ComRV must be initialised.
        global_mark_comrv_as_initialised ()
        global overlay_debug
        if (overlay_debug):
            print_current_comrv_state ()
        # Class to walk mapped overlays and add them to the list of currently
        # mapped overlays.
        class map_overlays (mapped_overlay_group_walker):
            def __init__ (self, manager):
                self._manager = manager
                self.walk_mapped_overlays ()
            def visit_mapped_overlay (self, src_addr, dst_addr, length,
                                      cache_index, group_number, evict_lock = 0,
                                      entry_lock = 0, evict_value = 0, data = 0,
                                      token_val = 0):
                self._manager.add_mapping (src_addr, dst_addr, length)
                return True
        # Create an instance of the previous class, this does all the work in
        # its constructor.
        map_overlays (self)
        debug ("All mappings added")
        return True

    # Return the base address, within the storage area, for overlay
    # group ID. The base address is the first address of an overlay
    # group.
    def get_group_storage_area_address (self, id):
        debug ("get_group_storage_area_address (%d) = ..." % (id))
        ovly_data = overlay_data.fetch ()
        if (not ovly_data.comrv_initialised ()):
            raise RuntimeError ("ComRV not initialised, overlay "
                                + "storage area address unknown")
        group_desc = ovly_data.group (id)
        tmp = group_desc.base_address ()
        debug ("get_group_storage_area_address (%d) = 0x%x"
               % (id, tmp))
        return tmp

    # Get the callee that the overlay manager is calling. This method should
    # only be called when the pc is at one of the comrv entry points for a call.
    def get_callee_primary_storage_area_address (self):
        global_mark_comrv_as_initialised ()
        ovly_data = overlay_data.fetch ()
        if (not ovly_data.comrv_initialised ()):
            raise RuntimeError ("ComRV is not initialised")
        # Assert pc is at one of the comrv entry points for a call.
        labels = ovly_data.labels()
        pc = int (gdb.parse_and_eval ("$pc"))
        assert (pc in [labels.comrv_entry,
                       labels.comrv_entry_context_switch])
        # The token is passed to ComRV in register t5.
        token = int (gdb.parse_and_eval ("$t5"))
        if (not is_overlay_token_p (token)):
            # The callee is a non-overlay function and token is destination
            # address.
            return token;
        if is_multi_group_token_p (token):
            # Resolve a multi-group token to a concrete overlay token.
            multi_group_id = mg_token_group_id (token)
            token = ovly_data.get_token_from_multi_group_table (multi_group_id)
        # TOKEN is now a non-multi-group token.
        assert (not is_multi_group_token_p (token))
        group_id = overlay_token_group_id (token)
        func_offset = overlay_token_func_offset (token)
        ba = self.get_group_storage_area_address (group_id);
        addr = ba + func_offset;
        return addr
#=====================================================================#
# Frame Filters
#=====================================================================#
#
# The frame filter modifies how GDB displays certain frames in the
# backtrace.
#
# As the assembler core of ComRV is split up into parts by the
# different global labels, normally GDB would display a different
# function name depending on which part of the ComRV core you are in.
#
# However, we install a frame filter that groups all of these parts
# together and labels them all as 'comrv'.
#
# The frame filter can also provide, or modify, the arguments that are
# displayed for a particular frame. For ComRV we create a single
# pseudo-argument 'token', in which we try to display the token that
# was passed in to ComRV.
#
# Obviously, we can't always figure out the ComRV token, for example,
# after calling the callee, during the return phase, the previous
# token is gone. In this case we return an optimised out value for
# the token. The intention is that _if_ GDB displays a token value,
# then it should be the correct token value.
#
# The user command 'set comrv show-token on|off' can be used to
# control whether GDB displays the pseudo token parameter or not.
#
#=====================================================================#
class comrv_frame_filter ():
    """
    A class for filtering ComRV stack frame entries.
    This class does one of two jobs based on the current value of
    SHOW_COMRV_FRAMES. When SHOW_COMRV_FRAMES is true then this class
    identifies ComRV stack frames and applies the DECORATOR sub-class
    to those frames. When SHOW_COMRV_FRAMES is false this class
    causes the ComRV stack frames to be skipped so they will not be
    printed in the backtrace.
    """

    class decorator (FrameDecorator):
        """
        A FrameDecorator to change the name of the ComRV stack frames.
        This class is applied to ComRV stack frames when
        SHOW_COMRV_FRAMES is true, and changes the name of the frame
        to be simply "comrv".
        """
        def __init__(self, frame):
            FrameDecorator.__init__ (self, frame)
            # Used to cast raw register values for display.
            self.uint_t = gdb.lookup_type ("unsigned int")
            self._frame = frame

        def function (self):
            # Every part of the ComRV core displays as a single "comrv"
            # frame.
            return "comrv"

        def frame_args (self):
            '''Add pseudo-parameters to comrv frames. When SHOW_COMRV_TOKENS is
            true this function returns a description of the 'token'
            parameter for the comrv frame.'''
            # Minimal symbol/value pair in the shape GDB's frame-filter
            # API expects from frame_args.
            class _sym_value ():
                def __init__ (self, name, value):
                    self._name = name
                    self._value = value
                def symbol (self):
                    return self._name
                def value (self):
                    return self._value
            if (not show_comrv_tokens):
                return None
            def _get_token_from_comrv_stack (obj):
                # Find the token on the ComRV stack.
                t3 = obj._frame.inferior_frame ()\
                    .read_register ("t3").cast (obj.uint_t)
                t3 &= 0xfffffffe
                ovly_data = overlay_data.fetch ()
                mg_index_offset = ovly_data.multi_group_index_offset ()
                comrv_frame = comrv_stack_frame (t3, mg_index_offset)
                labels = ovly_data.labels ()
                assert (labels.ret_from_callee != None)
                # Walk outwards past return-from-callee frames, stopping
                # at the end-of-stack marker or a null return address.
                while (comrv_frame.return_address () == labels.ret_from_callee
                       and comrv_frame.return_address () != 0
                       and comrv_frame.offset () != 0xdead):
                    t3 += comrv_frame.offset ()
                    comrv_frame = comrv_stack_frame (t3, mg_index_offset)
                token = comrv_frame.token ()
                return token
            addr = self._frame.address ()
            labels = overlay_data.fetch ().labels ()
            if (addr <= labels.comrv_entry_context_switch):
                # Early in the entry sequence the token is still in t5.
                token = self._frame.inferior_frame ().read_register ("t5")
            elif (addr < labels.comrv_igonr_caller_thunk_stack_frame):
                # Otherwise the token has been pushed to the ComRV stack.
                token = _get_token_from_comrv_stack (self)
            else:
                # In the return phase the original token is gone.
                token = self.uint_t.optimized_out ()
            return [_sym_value ("token", gdb.Value (token).cast (self.uint_t))]

    class iterator ():
        """
        An iterator to wrap the default iterator and filter frames.
        An instance of this iterator is created around GDB's default
        FrameDecorator iterator. As frames are extracted from this
        iterator, if the frame looks like a ComRV frame then we apply
        an extra decorator to it.
        """
        def __init__ (self, iter):
            self.iter = iter
        def __iter__(self):
            return self
        def next (self):
            """Called each time GDB needs the next frame. If the frame
            looks like a ComRV frame (based on its $pc value) then we
            either apply the comrv frame decorator, or we skip the
            frame (based on the value of SHOW_COMRV_FRAMES)."""
            frame = next (self.iter)
            addr = frame.address ()
            labels = overlay_data.fetch ().labels ()
            if (addr >= labels.comrv_entry
                and addr <= labels.comrv_end):
                if (not show_comrv_frames
                    and (frame.inferior_frame ()
                         != gdb.selected_frame ())):
                    # Hide this ComRV frame by returning the next one
                    # instead (the selected frame is never hidden).
                    return next (self.iter)
                else:
                    return comrv_frame_filter.decorator (frame)
            return frame
        def __next__ (self):
            # Python 3 iteration protocol delegates to next.
            return self.next ()

    def __init__ (self):
        self.name = "comrv filter"
        self.priority = 100
        self.enabled = True
        # Register this filter with GDB.
        gdb.frame_filters [self.name] = self

    def filter (self, frame_iter):
        return self.iterator (frame_iter)

# Register the frame filter.
comrv_frame_filter ()
#=====================================================================#
# Disassembly Analysis
#=====================================================================#
#
# The following provides a mechanism for analysing assembly code at a
# very basic level. These utilities are used by the stack unwinders.
# These analysis routines have just enough logic to analyse the ComRV
# assembler core, and are not sufficient for general assembler analysis.
#
#=====================================================================#
class pv_type:
    """Base class for the pseudo-values used by the prologue analyser."""

    @property
    def type (self):
        # The subclass name doubles as the value's discriminator tag.
        return type (self).__name__
class pv_register (pv_type):
    """A pseudo-value known to equal register NAME plus a constant ADDEND."""

    def __init__ (self, name, addend = 0):
        self._name = name
        self._addend = addend

    def __str__ (self):
        if self._addend == 0:
            return "PV_register (%s)" % (self._name)
        sign = "+" if self._addend > 0 else "-"
        return "PV_register (%s %s %d)" % (self._name, sign, abs (self._addend))

    @property
    def addend (self):
        # Constant byte offset from the register's value.
        return self._addend

    @property
    def reg (self):
        # Name of the underlying register.
        return self._name
class pv_unknown (pv_type):
    """A pseudo-value about which nothing at all is known."""

    def __str__ (self):
        return "PV_unknown ()"
class pv_constant (pv_type):
    """A pseudo-value known to be the constant integer IMM."""

    def __init__ (self, imm):
        # Normalise to int so arithmetic on .imm is always integral.
        self._imm = int (imm)

    @property
    def imm (self):
        return self._imm

    def __str__ (self):
        return "PV_constant (%d)" % (self._imm)
def pv_add (src1, src2):
    """Symbolically add two pseudo-values.

    Register plus constant keeps the register and grows the addend;
    constant plus constant folds; anything else degrades to unknown."""
    kinds = (src1.type, src2.type)
    if kinds == ("pv_register", "pv_constant"):
        return pv_register (src1.reg, src1.addend + src2.imm)
    if kinds == ("pv_constant", "pv_constant"):
        return pv_constant (src1.imm + src2.imm)
    return pv_unknown ()
def pv_sub (src1, src2):
    """Symbolically subtract pseudo-value SRC2 from SRC1.

    Register minus constant keeps the register and shrinks the addend;
    constant minus constant folds; anything else degrades to unknown."""
    kinds = (src1.type, src2.type)
    if kinds == ("pv_register", "pv_constant"):
        return pv_register (src1.reg, src1.addend - src2.imm)
    if kinds == ("pv_constant", "pv_constant"):
        return pv_constant (src1.imm - src2.imm)
    return pv_unknown ()
# INSN is a decoded_instruction, and REGS is a register_tracker.
def pv_simulate (insn, regs):
    """Apply the effect of INSN to the symbolic register state REGS."""
    mnem = insn.mnemonic
    if insn.is_2_reg_insn ("mv"):
        regs[insn.rd] = regs[insn.rs1]
    elif mnem in ("lw", "lh", "lb"):
        # Loads produce a value we know nothing about.
        regs[insn.rd] = pv_unknown ()
    elif mnem in ("sw", "sh", "sb"):
        # Stores don't modify any register.
        pass
    elif insn.is_3_reg_insn ("add"):
        regs[insn.rd] = pv_add (regs[insn.rs1], regs[insn.rs2])
    elif insn.is_2_reg_and_imm_insn ("addi"):
        regs[insn.rd] = pv_add (regs[insn.rs1], pv_constant (insn.imm))
    elif insn.is_3_reg_insn ("sub"):
        regs[insn.rd] = pv_sub (regs[insn.rs1], regs[insn.rs2])
    elif mnem in ("andi", "ori", "slli") and insn.rd != None:
        # Bit operations are not modelled; the result is unknown.
        regs[insn.rd] = pv_unknown ()
    elif mnem in ("beqz", "bltz"):
        # Branches don't modify any register.
        pass
    elif mnem == "jal":
        # A call clobbers the return address and argument registers.
        for clobbered in ("ra", "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7"):
            regs[clobbered] = pv_unknown ()
    else:
        print ("Unknown instruction: %s" % str (insn))
class register_tracker:
    """Track a symbolic pseudo-value for each RISC-V integer register.

    Iterating the tracker yields the register names; indexing reads or
    writes the pseudo-value for a given register name. Iteration state
    lives on the instance, so nested iteration is not supported."""

    def __init__ (self):
        names = ["x0", "ra", "sp", "gp", "tp", "t0", "t1", "t2", "s0", "s1",
                 "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
                 "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
                 "t3", "t4", "t5", "t6"]
        self._reg_names = names
        self._iter_index = 0
        # Initially every register holds exactly its own value.
        self._regs = {name: pv_register (name) for name in names}

    def __getitem__ (self, key):
        return self._regs[key]

    def __setitem__ (self, key, value):
        self._regs[key] = value

    def __iter__ (self):
        # Restart iteration from the first register name.
        self._iter_index = 0
        return self

    def next (self):
        # Python 2 style alias for the iteration protocol.
        return self.__next__ ()

    def __next__ (self):
        if self._iter_index >= len (self._reg_names):
            raise StopIteration
        name = self._reg_names[self._iter_index]
        self._iter_index += 1
        return name
class decoded_instruction:
    """A crude decode of one line of RISC-V disassembly text.

    Parses "MNEMONIC\\tOPERANDS" into a mnemonic, a destination register
    (rd), up to two source registers (rs1/rs2) and an immediate (imm,
    kept as its original text). Only the operand shapes produced by
    GDB's disassembler for the instructions pv_simulate understands are
    recognised; anything else is parsed on a best-effort basis.

    Fixes over the previous version: the redundant lstrip().strip()
    chain is a single strip(), and unused partition results are no
    longer bound to named variables."""

    def __init__ (self, insn):
        # Everything up to the first tab is the mnemonic, everything
        # after it is the operand string.
        insn = insn.strip ()
        self._raw = insn
        (mnemonic, _, operands) = insn.partition ("\t")
        self._mnemonic = mnemonic
        self._rd = None
        self._rs1 = None
        self._rs2 = None
        self._imm = None
        # Now parse the operands.
        if (operands.find (",") == -1):
            # A single operand: an immediate (e.g. a jump target); drop
            # any trailing text after a space (such as a symbol name).
            (imm, _, _) = operands.partition (" ")
            self._imm = imm
            return
        (rd, _, operands) = operands.partition (",")
        self._rd = rd
        if (operands.find ("(") != -1):
            # We have IMM(REG) remaining - a memory operand.
            (imm, _, reg) = operands.partition ("(")
            (reg, _, _) = reg.partition (")")
            self._imm = imm
            self._rs1 = reg
            return
        if (operands.find (",") != -1):
            # We have REG,REG or REG,IMM remaining.
            (reg, _, operands) = operands.partition (",")
            self._rs1 = reg
        # We should now have either REG or IMM left.
        m = re.match (r"^((?:0x[0-9a-f]+)|(?:-?[0-9]+))", operands)
        if (m):
            self._imm = m.group (0)
            return
        if (self._rs1 != None):
            self._rs2 = operands
        else:
            self._rs1 = operands

    def __str__ (self):
        return "[%s] MNEM=%s RD=%s, RS1=%s, RS2=%s, IMM=%s" % (
            self._raw, self._mnemonic, self._rd,
            self._rs1, self._rs2, self._imm
        )

    @property
    def mnemonic (self):
        return self._mnemonic

    @property
    def rd (self):
        # Destination register name, or None.
        return self._rd

    @property
    def rs1 (self):
        # First source register name, or None.
        return self._rs1

    @property
    def rs2 (self):
        # Second source register name, or None.
        return self._rs2

    @property
    def imm (self):
        # Immediate as original text (decimal or 0x-hex), or None.
        return self._imm

    def is_2_reg_insn (self, mnem):
        # True for MNEM with a destination and one source register.
        return (self.mnemonic == mnem
                and self.rd != None
                and self.rs1 != None)

    def is_2_reg_and_imm_insn (self, mnem):
        # True for MNEM with a destination, one source register and an
        # immediate.
        return (self.mnemonic == mnem
                and self.rd != None
                and self.rs1 != None
                and self.imm != None)

    def is_3_reg_insn (self, mnem):
        # True for MNEM with a destination and two source registers.
        return (self.mnemonic == mnem
                and self.rd != None
                and self.rs1 != None
                and self.rs2 != None)
def comrv_disassemble_and_analyse (start, end):
    # Disassemble the address range [START,END) and symbolically execute
    # the instructions, returning a register_tracker describing what is
    # known about each register at the end of the analysed range.
    regs = register_tracker ()
    dis = gdb.execute (("disassemble 0x%x,0x%x" % (start, end)), False, True)
    dis = dis.splitlines ()
    found_header = False
    insns = []
    for l in (dis):
        if (l.startswith ("Dump of assembler code from ")):
            # Instructions follow this header line.
            found_header = True
            continue
        elif (l.startswith ("End of assembler dump")):
            break
        elif (not found_header):
            continue
        elif (l.startswith ("=> ")):
            # "=>" marks the current pc; stop the analysis there.
            break
        # Each line is "ADDRESS:\tINSN"; keep only the instruction text.
        (b,s,a) = l.partition (":")
        insns.append (decoded_instruction (a))
    for i in (insns):
        pv_simulate (i, regs)
    return regs
#=====================================================================#
# Stack Unwinder
#=====================================================================#
#
# The following stack unwinder performs unwinding for the ComRV
# assembler core. The stack unwinder should be sufficient to unwind
# from any location within the assembler core.
#
#=====================================================================#
class comrv_unwinder (Unwinder):
"""
A class to aid in unwinding through the ComRV engine.
Implements GDB's Unwinder API in order to add support for
unwinding through the ComRV engine.
"""
class frame_id (object):
    """
    Holds information about a ComRV stack frame.
    An instance of this class is created for each identified ComRV
    stack frame. The attributes of this class are as needed to
    satisfy GDB's frame unwinder API.
    """
    def __init__ (self, sp, pc):
        """Create an instance of this class, SP and PC are
        gdb.Value objects."""
        # GDB's unwinder API reads these two attributes directly.
        self.sp = sp
        self.pc = pc
def __init__ (self):
Unwinder.__init__ (self, "comrv stack unwinder")
# If no executable is set, void pointer length will default to 8 bytes.
# Setting a new executable may change this, so void_ptr_t is now updated
# in __call__.
self.void_ptr_t = gdb.lookup_type("void").pointer()
def _get_multi_group_table_by_index (self, index):
"""Return the overlay token at position INDEX in the
multi-group table."""
ovly_data = overlay_data.fetch ()
if (not ovly_data.comrv_initialised ()):
raise RuntimeError ("ComRV not yet initialised")
if (not ovly_data.is_multi_group_enabled ()):
raise RuntimeError ("Multi-group not supported")
return ovly_data.get_token_from_multi_group_table (index)
# If ra is a cache address return the corresponding primary storage address,
# otherwise return ra unchanged.
def _get_primary_storage_area_ra (self, ra, addr):
global max_group_size
orig_ra = ra
ovly_data = overlay_data.fetch ()
if (not ovly_data.comrv_initialised ()):
raise RuntimeError ("ComRV is not initialised")
mg_index_offset = ovly_data.multi_group_index_offset ()
cache_start = ovly_data.cache ().start_address ()
cache_end = ovly_data.cache ().end_address ()
if (ra >= cache_start and ra < cache_end):
prev_frame = comrv_stack_frame (addr, mg_index_offset)
if ((prev_frame.token () & 0x1) != 0x1):
raise RuntimeError ("returning to overlay function, "
+ "second stack frame token is "
+ str (prev_frame.token ()))
token = prev_frame.token ()
if is_multi_group_token_p (token):
idx = mg_token_group_id (token)
token = self._get_multi_group_table_by_index (idx)
group_id = (token >> 1) & 0xffff
func_offset = (token >> 17) & 0x3ff
alignment = prev_frame.align ()
group_size = ovly_data.group (group_id).size_in_bytes ()
max_grp_size = max_group_size.value
group_offset = (func_offset
+ ((orig_ra - func_offset
- alignment) & (max_grp_size - 1)))
base_addr = ovly_data.group (group_id).base_address ()
ra = base_addr + group_offset
debug ("Unwinder:")
debug (" frame.return_addr: " + hex (orig_ra))
debug (" group_id: " + str (group_id))
debug (" func_offset: " + hex (func_offset))
debug (" alignment: " + hex (alignment))
debug (" group_size: " + hex (group_size))
debug (" max_group_size: " + hex (max_grp_size))
debug (" group_offset: " + hex (group_offset))
debug (" base_addr: " + hex (base_addr))
debug (" ra: " + str (ra))
return ra
def _unwind (self, addr, allow_uninitialised_frame_p = False):
"""Perform an unwind of one ComRV stack frame. ADDR is the
address of a frame on the ComRV stack. This function returns
a tuple of the address to return to and the previous ComRV
stack frame pointer.
If the stack can't be unwound then an error is thrown."""
ovly_data = overlay_data.fetch ()
if (not ovly_data.comrv_initialised ()):
raise RuntimeError ("ComRV is not initialised")
mg_index_offset = ovly_data.multi_group_index_offset ()
# Create a stack frame object at ADDR to represent the stack
# frame we are unwinding.
labels = ovly_data.labels ()
frame = comrv_stack_frame (addr, mg_index_offset)
assert (labels.ret_from_callee != None)
while (frame.return_address () == labels.ret_from_callee
and frame.return_address () != 0
and frame.offset () != 0xdead):
addr += frame.offset ()
frame = comrv_stack_frame (addr, mg_index_offset)
# Check for an uninitialised stack frame. We allow this case when
# performing stack unwinding as we need to be able to unwind at the
# point when the stack is in the process of being set up.
if (not allow_uninitialised_frame_p
and (frame.return_address () == 0
and frame.token () == 0)):
raise RuntimeError ("uninitialized comrv stack frame")
# Check to see if we have hit the top of the ComRV Stack.
if (frame.offset () == 0xdead):
raise RuntimeError ("hit top of ComRV stack (2)")
# Adjust the ComRV stack pointer; ADDR is now the ComRV stack
# pointer as it was in the previous frame.
addr += frame.offset ()
# Grab the return address from the ComRV stack. This can be
# the address of an overlay, or non-overlay function.
ra = frame.return_address ()
return self._get_primary_storage_area_ra (ra, addr), addr
def _unwind_before_comrv_exit (self, pending_frame, labels):
# Check we're in the valid range of $pc values for this unwinder.
pc = pending_frame.read_register ("pc").cast (self.void_ptr_t)
assert (pc > labels.comrv_ret_from_callee_context_switch
and pc < labels.comrv_exit)
# This disassembly deliberately starts early so we can figure
# out the stack adjustment that is required.
regs = comrv_disassemble_and_analyse (labels.comrv_igonr_caller_thunk_stack_frame,
pc)
# Now we can calculate the stack pointer value for this frame-id.
sp = pending_frame.read_register ("sp")
if (regs["sp"].type == "pv_register" and regs["sp"].reg == "sp"):
sp = int (sp) - regs["sp"].addend
frame_pc = gdb.Value (labels.comrv_entry).cast (self.void_ptr_t)
sp = gdb.Value (sp).cast (self.void_ptr_t)
unwind_info = pending_frame.create_unwind_info (self.frame_id (sp, frame_pc))
# Read the current values for $t3 and $ra.
t3 = pending_frame.read_register ("t3").cast (self.void_ptr_t)
ra = int (pending_frame.read_register ("ra").cast (self.void_ptr_t))
# The token or address we're going to retunr too will have
# been placed into $a0 already if this unwinder is reached,
# but within the scope of this unwinder the value in $a0 is
# moved back into $ra.
if (not (regs["a0"].type == "pv_register" and regs["a0"].reg == "ra")
and (ra == int (pc))):
ra = int (pending_frame.read_register ("a0").cast (self.void_ptr_t))
ovly_data = overlay_data.fetch ()
if (not ovly_data.comrv_initialised ()):
raise RuntimeError ("ComRV is not initialised")
cache_start = ovly_data.cache ().start_address ()
cache_end = ovly_data.cache ().end_address ()
if (ra >= cache_start and ra < cache_end):
ra = self._get_primary_storage_area_ra (ra, int (t3))
ra = gdb.Value (ra).cast (self.void_ptr_t)
unwind_info.add_saved_register ("sp", sp)
unwind_info.add_saved_register ("pc", ra)
unwind_info.add_saved_register ("t3", t3)
unwind_info.add_saved_register ("t4", pending_frame.read_register ("t4"))
return unwind_info
def _unwind_through_comrv_stack (self, pending_frame, labels):
# Build the unwind info. No stack adjustment is needed here,
# the stack pointer has been restored to the value it had when
# comrvEntry was called.
sp = pending_frame.read_register ("sp").cast (self.void_ptr_t)
frame_pc = gdb.Value (labels.comrv_entry).cast (self.void_ptr_t)
unwind_info = pending_frame.create_unwind_info (self.frame_id (sp, frame_pc))
# Find the values of the registers in the caller's frame and
# save them in the result:
t3 = pending_frame.read_register ("t3").cast (self.void_ptr_t)
prev_t4 = t3
ra, t3 = self._unwind (int (t3))
unwind_info.add_saved_register("pc", gdb.Value (ra).cast (self.void_ptr_t))
unwind_info.add_saved_register("t3", gdb.Value (t3).cast (self.void_ptr_t))
unwind_info.add_saved_register("t4", prev_t4)
unwind_info.add_saved_register("sp", sp)
# Return the result:
return unwind_info
def _unwind_early_common (self, pending_frame, labels):
pc = pending_frame.read_register ("pc").cast (self.void_ptr_t)
# Disassemble from the start of comrvEntry up to the current
# value of the program counter. Use this disassembly to
# analyse the current state of the machine, and apply our
# understanding of how ComRV operates to this state in order
# to build a picture of how to unwind from here.
regs = comrv_disassemble_and_analyse (labels.comrv_entry, pc)
# Create UnwindInfo. Usually the frame is identified by the
# stack pointer and the program counter. We try to be good
# and always use the stack pointer as it was at the start of
# this frame.
sp = pending_frame.read_register ("sp")
if (regs["sp"].type == "pv_register"
and regs["sp"].reg == "sp"):
sp = int (sp) - regs["sp"].addend
frame_pc = gdb.Value (labels.comrv_entry).cast (self.void_ptr_t)
sp = gdb.Value (sp).cast (self.void_ptr_t)
unwind_info = pending_frame.create_unwind_info (self.frame_id (sp, frame_pc))
# Setup unwinding for the `s` registers. This logic is correct in a
# non-rtos world, but is not going to be correct once we start seeing
# builds of ComRV for RTOS as these registers get spilt to the stack.
unwind_info.add_saved_register ("fp", pending_frame.read_register ("fp"))
unwind_info.add_saved_register ("s1", pending_frame.read_register ("s1"))
unwind_info.add_saved_register ("s2", pending_frame.read_register ("s2"))
unwind_info.add_saved_register ("s3", pending_frame.read_register ("s3"))
unwind_info.add_saved_register ("s4", pending_frame.read_register ("s4"))
unwind_info.add_saved_register ("s5", pending_frame.read_register ("s5"))
unwind_info.add_saved_register ("s6", pending_frame.read_register ("s6"))
unwind_info.add_saved_register ("s7", pending_frame.read_register ("s7"))
unwind_info.add_saved_register ("s8", pending_frame.read_register ("s8"))
unwind_info.add_saved_register ("s9", pending_frame.read_register ("s9"))
unwind_info.add_saved_register ("s10", pending_frame.read_register ("s10"))
unwind_info.add_saved_register ("s11", pending_frame.read_register ("s11"))
return unwind_info, regs, pc, sp
def _unwind_before_context_switch (self, pending_frame, labels):
unwind_info, regs, pc, sp = self._unwind_early_common (pending_frame,
labels)
# This is what we know about the range of possible pc values.
assert (pc >= labels.comrv_entry
and pc <= labels.comrv_entry_context_switch)
# Unwind $t4 which contains the "next" comrv stack frame pointer.
# Once we've setup the comrv stack frame we're about to use then the
# old "next" frame, is the frame we're now about to use.
if (regs["t4"].type == "pv_register"
and regs["t4"].reg == "t4"):
unwind_info.add_saved_register("t4", pending_frame.read_register ("t4"))
elif (regs["t2"].type == "pv_register"
and regs["t2"].reg == "t4"):
unwind_info.add_saved_register("t4", pending_frame.read_register ("t2"))
else:
t3 = int (pending_frame.read_register ("t3").cast (self.void_ptr_t))
t3 &= 0xfffffffe
t3 = gdb.Value (t3).cast (self.void_ptr_t)
unwind_info.add_saved_register("t4", t3)
# Unwinding of $t3 is tricky.
if (regs["t3"].type == "pv_unknown"
and regs["t2"].type == "pv_register"
and regs["t2"].reg == "t4"):
t2 = int (pending_frame.read_register ("t2").cast (self.void_ptr_t))
t3 = int (pending_frame.read_register ("t3").cast (self.void_ptr_t))
t3 = gdb.Value (t2 + t3).cast (self.void_ptr_t)
elif (regs["t3"].type == "pv_register"
and regs["t3"].reg == "t3"):
t3 = pending_frame.read_register ("t3")
elif ((regs["t3"].type == "pv_register"
and regs["t3"].reg == "t4")
or (regs["t2"].type == "pv_unknown"
and regs["t3"].type == "pv_unknown"
and regs["t4"].type == "pv_unknown")):
t3 = int (pending_frame.read_register ("t3").cast (self.void_ptr_t))
t3 &= 0xfffffffe
ra, t3 = self._unwind (t3, True)
# The return address we just fetched is not valid as the actual
# return address might not yet have been written to the stack.
# Set ra to None here, just to ensure this doesn't get used be
# accident.
ra = None
# Pack the t3 integer back into a Value object.
t3 = gdb.Value (t3).cast (self.void_ptr_t)
unwind_info.add_saved_register("t3", t3)
# If the return address is in the cache region then we need to
# map the cache address to the address within the storage area
# that this cache region represented.
ra = int (pending_frame.read_register ("ra"))
ovly_data = overlay_data.fetch ()
if (not ovly_data.comrv_initialised ()):
raise RuntimeError ("ComRV is not initialised")
cache_start = ovly_data.cache ().start_address ()
cache_end = ovly_data.cache ().end_address ()
if (ra >= cache_start and ra < cache_end):
ra = self._get_primary_storage_area_ra (ra, int (t3))
ra = gdb.Value (ra).cast (self.void_ptr_t)
unwind_info.add_saved_register("pc", ra)
unwind_info.add_saved_register("sp", sp)
return unwind_info
def _unwind_before_invoke_callee (self, pending_frame, labels):
unwind_info, regs, pc, sp = self._unwind_early_common (pending_frame,
labels)
# This is what we know about the range of possible pc values.
assert (pc > labels.comrv_entry_context_switch
and pc <= labels.comrv_invoke_callee)
# Find the values of the registers in the caller's frame and save them
# in the result. The stack-pointer is super easy as the early unwind
# logic figured this out for us. The other two values need to be
# pulled from the comrv stack.
unwind_info.add_saved_register("sp", sp)
t3 = pending_frame.read_register ("t3").cast (self.void_ptr_t)
prev_t4 = t3
ra, t3 = self._unwind (int (t3))
unwind_info.add_saved_register("pc", gdb.Value (ra).cast (self.void_ptr_t))
unwind_info.add_saved_register("t3", gdb.Value (t3).cast (self.void_ptr_t))
unwind_info.add_saved_register("t4", prev_t4)
return unwind_info
def _unwind_before_ret_from_callee_context_switch (self, pending_frame, labels):
# This is what we know about the range of possible pc values.
pc = int (pending_frame.read_register ("pc").cast (self.void_ptr_t))
assert (pc >= labels.comrv_igonr_caller_thunk_stack_frame
and pc <= labels.comrv_ret_from_callee_context_switch)
# First thing we need to do is figure out the stack pointer at entry
# to this function.
#
# For the region we're looking at the stack pointer does get adjusted,
# but at the start of the block the stack pointer should be correct.
regs = comrv_disassemble_and_analyse (labels.comrv_igonr_caller_thunk_stack_frame,
pc)
# Check we understand the current contents of the stack pointer.
if (regs["sp"].type != "pv_register" or regs["sp"].reg != "sp"):
return None
sp = int (pending_frame.read_register ("sp").cast (self.void_ptr_t))
sp = sp - regs["sp"].addend
sp = gdb.Value (sp).cast (self.void_ptr_t)
frame_pc = gdb.Value (labels.comrv_entry).cast (self.void_ptr_t)
unwind_info = pending_frame.create_unwind_info (self.frame_id (sp, frame_pc))
unwind_info.add_saved_register("sp", sp)
if (regs["t3"].type == "pv_register" and regs["t3"].reg == "t5"):
prev_t3 = pending_frame.read_register ("t3")
else:
prev_t3 = pending_frame.read_register ("t5")
unwind_info.add_saved_register("t3", prev_t3)
# If the return address is in the cache region then we need to
# map the cache address to the address within the storage area
# that this cache region represented.
ra = int (pending_frame.read_register ("ra"))
ovly_data = overlay_data.fetch ()
if (not ovly_data.comrv_initialised ()):
raise RuntimeError ("ComRV is not initialised")
cache_start = ovly_data.cache ().start_address ()
cache_end = ovly_data.cache ().end_address ()
if (ra >= cache_start and ra < cache_end):
ra = self._get_primary_storage_area_ra (ra, int (prev_t3))
ra = gdb.Value (ra).cast (self.void_ptr_t)
unwind_info.add_saved_register("pc", ra)
if (regs["t4"].type == "pv_register" and regs["t4"].reg == "t3"):
unwind_info.add_saved_register("t4", pending_frame.read_register ("t4"))
else:
unwind_info.add_saved_register("t4", pending_frame.read_register ("t3"))
return unwind_info
def _unwind_at_return (self, pending_frame, labels):
# Check where we are.
pc = int (pending_frame.read_register ("pc").cast (self.void_ptr_t))
assert (pc == labels.comrv_exit)
# We are at the return point. All registers that are going to be
# restored should have been restored by now.
frame_pc = gdb.Value (labels.comrv_entry).cast (self.void_ptr_t)
sp = pending_frame.read_register ("sp").cast (self.void_ptr_t)
unwind_info = pending_frame.create_unwind_info (self.frame_id (sp, frame_pc))
unwind_info.add_saved_register("sp", sp)
unwind_info.add_saved_register("pc", pending_frame.read_register ("ra"))
unwind_info.add_saved_register("t3", pending_frame.read_register ("t3"))
unwind_info.add_saved_register("t4", pending_frame.read_register ("t4"))
return unwind_info
def _unwind_before_comrv_end (self, pending_frame, labels):
# Check where we are.
pc = int (pending_frame.read_register ("pc").cast (self.void_ptr_t))
assert (pc > labels.comrv_exit and pc < labels.comrv_end)
# Build the unwind info.
sp = pending_frame.read_register ("sp").cast (self.void_ptr_t)
frame_pc = gdb.Value (labels.comrv_entry).cast (self.void_ptr_t)
unwind_info = pending_frame.create_unwind_info (self.frame_id (sp, frame_pc))
prev_t3 = pending_frame.read_register ("t3").cast (self.void_ptr_t)
# If the return address is in the cache region then we need to
# map the cache address to the address within the storage area
# that this cache region represented.
ra = int (pending_frame.read_register ("ra"))
ovly_data = overlay_data.fetch ()
if (not ovly_data.comrv_initialised ()):
raise RuntimeError ("ComRV is not initialised")
cache_start = ovly_data.cache ().start_address ()
cache_end = ovly_data.cache ().end_address ()
if (ra >= cache_start and ra < cache_end):
ra = self._get_primary_storage_area_ra (ra, int (prev_t3))
ra = gdb.Value (ra).cast (self.void_ptr_t)
unwind_info.add_saved_register("pc", ra)
#t3 = pending_frame.read_register ("t3").cast (self.void_ptr_t)
t4 = pending_frame.read_register ("t4").cast (self.void_ptr_t)
unwind_info.add_saved_register("t3", prev_t3)
unwind_info.add_saved_register("t4", t4)
unwind_info.add_saved_register("sp", sp)
return unwind_info
def __call__ (self, pending_frame):
# Check if we are inside the core ComRV function that runs
# from the comrv entry label to the comrv exit label.
labels = overlay_data.fetch ().labels ()
# Lookup void pointer type again in case setting an executable changed
# it. If void_ptr_t has the wrong length, it will cause an invalid cast
# error.
self.void_ptr_t = gdb.lookup_type("void").pointer()
pc = pending_frame.read_register ("pc").cast (self.void_ptr_t)
if (not labels.enabled
or pc < labels.comrv_entry or pc >= labels.comrv_end):
return None
# We know that the user is unside the comrv entry point. At this
# point we are allowed to assume that comrv is initialised. If comrv
# is not initialised then the user has messed up (calling into comrv
# before initialisation) and so all bets are off.
global_mark_comrv_as_initialised ()
# For unwinding we split the ComRV assembler core into
# regions, and use a different unwinder for each region. When
# working on this unwinder please consider that the frame_id
# created in each different unwinder should use the $pc value
# for 'comrvEntry' and should use the $sp value as it was at
# the point of entry into 'comrvEntry'; this ensures that the
# frame-id will not change as the user single steps through
# the assembler core, a changing frame-id will confuse GDB.
#
# Each unwind handler should start with an assertion for the
# range of $pc values that it handles.
if (pc <= labels.comrv_entry_context_switch):
return self._unwind_before_context_switch (pending_frame, labels)
elif (pc <= labels.comrv_invoke_callee):
return self._unwind_before_invoke_callee (pending_frame, labels)
elif (pc < labels.comrv_igonr_caller_thunk_stack_frame):
return self._unwind_through_comrv_stack (pending_frame, labels)
elif (pc <= labels.comrv_ret_from_callee_context_switch):
return self._unwind_before_ret_from_callee_context_switch (pending_frame, labels)
elif (pc < labels.comrv_exit):
return self._unwind_before_comrv_exit (pending_frame, labels)
elif (pc == labels.comrv_exit):
return self._unwind_at_return (pending_frame, labels)
elif (pc < labels.comrv_end):
return self._unwind_before_comrv_end (pending_frame, labels)
raise RuntimeError ("no unwinder logic for address 0x%x" % pc)
# Register the ComRV stack unwinder globally (locus None); the final
# True argument replaces any existing unwinder with the same name.
gdb.unwinder.register_unwinder (None, comrv_unwinder (), True)
#=====================================================================#
#                            Final Setup                              #
#=====================================================================#
#
# Perform some final initialisation steps.
#
#=====================================================================#
# Create an instance of each command class so the commands register
# themselves with GDB.
ParseComRV ()
comrv_prefix_command ()
comrv_status_command ()
comrv_stack_command ()
comrv_groups_command ()
# Create an instance of the overlay manager class.
MyOverlayManager ()
gdb.execute ("overlay auto", False, False)
# We need to disable use of the 'Z' packet while using overlays. The
# problem case is:
# 1. Breakpoint set using 'z' packet.
# 2. Overlay group is replaced with a new overlay.
# 3. GDB wants to remove the overlay using 'Z', but this is not
# possible.
#
# At step 3 the remote will remove the breakpoint and write back the
# original memory contents. Unfortunately the original contents are
# from the first overlay group, not the new group.
#
# It's possible to work around this if we only use memory breakpoints.
gdb.execute ("set remote software-breakpoint-packet off", False, False)
# NOTE(review): the commented-out line below duplicates the command
# just above; it is left in at the request of WD. Turning this packet
# off forces GDB to use read/write software breakpoints, which don't
# appear to play well on some targets, probably with pipeline caching
# or some such.
#
# gdb.execute ("set remote software-breakpoint-packet off", False, False)
| [
"noreply@github.com"
] | noreply@github.com |
1b0049bfc9938f275b5f4738645dbe5761b7609c | dd0e9a9c0824a1808282e91925eec67fd185d27f | /project4/network/migrations/0016_auto_20210210_0612.py | a2f864a9a978a98cbaacd4f710f875183b1f68e7 | [] | no_license | daniel-mota/cs50W | 041830c1a9ce5ade7f742069fbc1bad9befd5a89 | 648b5cb551176f5756157194167e5088ec6f63bf | refs/heads/main | 2023-03-09T22:01:27.429039 | 2021-02-27T00:12:50 | 2021-02-27T00:12:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,242 | py | # Generated by Django 3.1.5 on 2021-02-10 06:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: removes the userFollowers and
    userFollowing fields from User and introduces an explicit
    UserFollowing relation model (one row per follower/followed pair).
    Generated files like this should not be hand-edited; create a new
    migration for further schema changes."""
    dependencies = [
        ('network', '0015_auto_20210210_0543'),
    ]
    operations = [
        # Drop the old per-user follow fields.
        migrations.RemoveField(
            model_name='user',
            name='userFollowers',
        ),
        migrations.RemoveField(
            model_name='user',
            name='userFollowing',
        ),
        # Explicit follow relation; both sides point at AUTH_USER_MODEL.
        migrations.CreateModel(
            name='UserFollowing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)),
                ('userFollower', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='followers', to=settings.AUTH_USER_MODEL)),
                ('userFollowing', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='following', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"DanielMota@Daniels-MacBook-Pro-6.local"
] | DanielMota@Daniels-MacBook-Pro-6.local |
f87a42e88289c0861d982737506948170ed9af0a | 8bfc1058c3f72871d588693c6d843a67485ab2a2 | /Project Euler/crap.py | 13d6cba06cec6fd28a6b549cda22fdfaf5f20cb7 | [] | no_license | Kylar42/wvup | f357bcf3da2c8c80b0af76a3f6ad71d508c9c844 | d60a7dfe719a380ba22aa685133411d1a0f58ffb | refs/heads/master | 2021-01-01T16:39:26.301329 | 2014-09-30T21:13:33 | 2014-09-30T21:13:33 | 3,193,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | import math
# 2 ** 52 computed via pow().
crap = pow(2, 52)
# Fixed: the original used Python 2 statement syntax ("print crap"),
# which is a SyntaxError under Python 3; the function form works on both.
print(crap)
| [
"kylar42@gmail.com"
] | kylar42@gmail.com |
58b7f2c696ee6df680f34658e112ba3ceb045e99 | 4503c155a0252eea7f4c80ec499999a8b52bc8b6 | /nntool/model/sequential.py | 8a9984b8e4fd3d8c6aec1d4526eff9e3e02fa3b0 | [
"MIT"
] | permissive | NLP-Deeplearning-Club/nntool | e76f7be29dd184be18a6fde509b89918a8692639 | 1bbf0a20c7526d423f351ba9a854902a669d3713 | refs/heads/master | 2020-12-03T01:42:44.316321 | 2017-07-12T16:08:30 | 2017-07-12T16:08:30 | 95,854,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,719 | py | from nntool.abc.modelabc import ModelABC
import numpy as np
class Sequential(ModelABC):
    """Sequential model (in the Keras sense): the model is a stack of
    layers applied one after another.

    Layers are added with :meth:`add`; each layer must expose a
    ``forward(x)`` method.
    """

    def __init__(self):
        super().__init__()
        # Per-instance state. These were previously *class* attributes,
        # which made every Sequential instance share one layer list.
        self._layers = []
        self._trained = False

    def add(self, layer: 'layer'):
        """Append a layer to the end of the stack."""
        self._layers.append(layer)

    @property
    def trained(self):
        """Whether the model has already been trained."""
        return self._trained

    def train(self, trainner):
        """Train the model by passing it to the TRAINNER callable."""
        trainner(self)
        self._trained = True

    def fit(self, dev_x, dev_y):
        """Evaluate accuracy on the dev set and print a summary.

        dev_x/dev_y are parallel sequences; labels are compared by
        argmax, so dev_y entries are expected to be one-hot arrays.
        """
        total = 0
        correct = 0
        for i in range(len(dev_y)):
            total += 1
            if self.predict(dev_x[i]).argmax() == dev_y[i].argmax():
                correct += 1
        correct_rate = correct / total
        # (typo "corrcet" in the original message fixed)
        print('total:{total},correct:{correct},Correct rate:{correct_rate}'.format(
            total=total, correct=correct, correct_rate=correct_rate))

    def _forward(self, x, i=0):
        """Run X through layers i..end recursively; return the output."""
        if i == len(self._layers):
            return x
        y = self._layers[i].forward(x)
        return self._forward(y, i + 1)

    def predict_probability(self, x_test):
        """Return the raw network output for X_TEST."""
        return self._forward(x_test)

    def predict(self, x_test):
        """Return a boolean one-hot array marking the argmax class."""
        probabilitys = self.predict_probability(x_test)
        maxindex = probabilitys.argmax()
        return np.array([i == maxindex for i in range(len(probabilitys))])
| [
"hsz1273327@gmail.com"
] | hsz1273327@gmail.com |
1c044ba4743b38aac2ab64ae6c77a3240977fd40 | b5a6487105c530ca02d973e78aefaa7703de556e | /heechan/testProj/testProj/asgi.py | 0869363eaf90b22ef3beb10abdf3b4a5a6fdc5b2 | [] | no_license | jaehoonkimm/likelion-crawling_bgm | 885d7d126af85d601fb07b6dabf7132ea88351a7 | 997406413b3abb50fa6bc4fc6a45dee0f45ac131 | refs/heads/yebeen | 2022-12-02T09:14:31.685585 | 2020-08-18T12:51:10 | 2020-08-18T12:51:10 | 279,583,449 | 1 | 1 | null | 2020-08-03T12:43:02 | 2020-07-14T12:48:54 | Jupyter Notebook | UTF-8 | Python | false | false | 409 | py | """
ASGI config for testProj project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings module before the application
# object is built (setdefault so an explicit environment value wins).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'testProj.settings')
# Module-level ASGI callable picked up by ASGI servers.
application = get_asgi_application()
| [
"kimhecan@gmail.com"
] | kimhecan@gmail.com |
5a85f68337da49fec9d664ec55a0ccab7bb51369 | fdcb2cdee4d5b398eed4eefc830213234e3e83a5 | /00_DataCamp/07_Functions/error_handling/more_error_handling.py | 5d65abc9f7d11cc8d26ffaba9af4fec231b1c483 | [] | no_license | daftstar/learn_python | be1bbfd8d7ea6b9be8407a30ca47baa7075c0d4b | 4e8727154a24c7a1d05361a559a997c8d076480d | refs/heads/master | 2021-01-20T08:53:29.817701 | 2018-01-15T22:21:02 | 2018-01-15T22:21:02 | 90,194,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,459 | py | # #####################################################
# ERROR HANDLING W/ TRY EXCEPT
# #####################################################
def shout_echo(word1, echo=1):
    """Return WORD1 repeated ECHO times with "!!!" appended.

    If the concatenation fails (for example ECHO is not an integer),
    an error message is printed and the empty string is returned.
    """
    result = ""
    try:
        # Both the repetition and the "!!!" concatenation can raise.
        result = word1 * echo + "!!!"
    except:
        print ("word1 must be a string and echo must be an integer")
    return result
# Demo: a non-integer echo triggers the except branch, so the error
# message below is printed and the empty string is returned.
print (shout_echo("particle", "ddj"))
# word1 must be a string and echo must be an integer
# #####################################################
# ERROR HANDLING BY RAISING AN ERROR
# #####################################################
def shout_echo(word1, echo=1):
    """Return WORD1 repeated ECHO times with "!!!" appended.

    Raises:
        ValueError: if ECHO is negative.
    """
    # Guard clause: reject negative repeat counts up front.
    if echo < 0:
        raise ValueError('echo must be greater than 0')
    return word1 * echo + '!!!'
# Call shout_echo (a negative echo here would raise ValueError).
shout_echo("particle", echo=2) # change echo to negative value
| [
"nikdaftary@gmail.com"
] | nikdaftary@gmail.com |
1ab6e4c0f3db1e7bd0fcf7b43b83ad6d74ae09f0 | b05a3cd3de6618a5029d8b360a4185cd808fccef | /Tic-Tac-Toe with AI/task/tictactoe/tictactoe.py | 93c7a298ab050b109ba6b895cf65a5bf7eae6b53 | [] | no_license | VitaliiBandyl/Tic-Tac-Toe-with-AI | a7dd474e7197c727dff4588327c667534e5b6b8a | 4d3dfb332a9ea9483a370bce3eabc83aec001aa5 | refs/heads/master | 2022-11-15T00:11:59.112056 | 2020-07-03T14:14:37 | 2020-07-03T14:14:37 | 275,857,091 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,250 | py | import random
from abc import ABC, abstractmethod
from typing import List, Tuple
class TicTacToeField:
    """3x3 tic-tac-toe board with current-turn tracking."""

    def __init__(self):
        # Blank 3x3 board; 'X' always moves first.
        self.field = [[' '] * 3 for _ in range(3)]
        self.turn = 'X'

    def print_game_field(self):
        """Render the board to stdout."""
        print('---------')
        for row in self.field:
            print('| ' + ' '.join(row) + ' |')
        print('---------')

    def get_empty_cells(self) -> List[Tuple]:
        """Return (row, col) coordinates of all blank cells, row-major."""
        return [(r, c)
                for r, row in enumerate(self.field)
                for c, cell in enumerate(row)
                if cell == ' ']

    def move(self, coordinates: Tuple[int, int]):
        """Place the current player's mark at COORDINATES, pass the turn."""
        row, col = coordinates
        self.field[row][col] = self.turn
        self.next_turn()

    def undo(self, coordinates: Tuple[int, int]):
        """Blank the cell at COORDINATES and pass the turn back."""
        row, col = coordinates
        self.field[row][col] = ' '
        self.next_turn()

    def next_turn(self):
        """Toggle whose turn it is ('X' <-> 'O')."""
        self.turn = 'O' if self.turn == 'X' else 'X'

    def check_winner(self) -> str:
        """Return 'O' or 'X' for a winner, 'Draw' for a full board with
        no winner, or None while the game is still in progress."""
        for mark in ('O', 'X'):
            if self._win_condition(mark):
                return mark
        if not self.get_empty_cells():
            return 'Draw'

    def _win_condition(self, elem: str) -> bool:
        """True if ELEM occupies a full row, column, or diagonal."""
        line = [elem] * 3
        rows = self.field
        cols = [list(col) for col in zip(*self.field)]
        diag = [self.field[i][i] for i in range(3)]
        anti = [self.field[i][2 - i] for i in range(3)]
        return line in rows or line in cols or diag == line or anti == line
class AbstractPlayer(ABC):
    """Abstract base class for players (human or AI).

    Stores the shared game field and the player's mark, and requires
    subclasses to implement make_move().
    """

    # Fixed: the original annotated game_field as the list literal
    # ``[TicTacToeField]``, which is not a valid type form and is
    # evaluated eagerly at class-creation time; a string forward
    # reference is used instead.
    def __init__(self, game_field: 'TicTacToeField', mark: str):
        """GAME_FIELD is the shared board; MARK is 'X' or 'O'."""
        self.game_field = game_field
        # Last chosen move; set by make_move() implementations.
        self.coordinates = None
        self.mark = mark
        self.opponent_mark = 'X' if self.mark == 'O' else 'O'

    @abstractmethod
    def make_move(self):
        """Choose and perform one move on the game field."""
class AI(AbstractPlayer):
    """Computer player with three difficulty levels."""

    def __init__(self, game_field: 'TicTacToeField', mark: str, difficult: str):
        """DIFFICULT is one of 'easy', 'medium' or 'hard'."""
        super(AI, self).__init__(game_field, mark)
        self.difficult = difficult

    def make_move(self):
        """Make one move according to the configured difficulty."""
        if self.difficult == 'easy':
            self.easy_move()
        elif self.difficult == 'medium':
            self.medium_move()
        elif self.difficult == 'hard':
            self.hard_move()
        print(f'Making move level "{self.difficult}"')

    def easy_move(self):
        """Easy difficulty: play a uniformly random empty cell."""
        # Fixed: the original read a same-named module-level global
        # (``game_field``) here, which only worked by name coincidence.
        empty_cells = self.game_field.get_empty_cells()
        self.coordinates = random.choice(empty_cells)
        self.game_field.move(self.coordinates)

    def medium_move(self):
        """
        Medium difficulty.
        If it can win in one move (two own marks in a line), it completes
        the line; otherwise, if the opponent can win in one move, it blocks
        that line; otherwise it falls back to an easy (random) move.
        """
        # Own winning move first, then blocking the opponent.
        for mark in (self.mark, self.opponent_mark):
            if self._get_priority_cell(mark):
                self.game_field.move(self.coordinates)
                return
        self.easy_move()

    def hard_move(self):
        """Hard difficulty: pick the best move via MiniMax search."""
        # On an empty board every opening scores the same, so play a
        # random cell instead of searching the whole game tree.  (This
        # invariant check used to sit inside the loop below.)
        if len(self.game_field.get_empty_cells()) == 9:
            return self.easy_move()
        best_score = -2
        best_move = None
        for move in self.game_field.get_empty_cells():
            self.game_field.move(move)
            score = self.mini_max(False)
            self.game_field.undo(move)
            if score > best_score:
                best_score = score
                best_move = move
        self.game_field.move(best_move)

    def _get_priority_cell(self, mark: str) -> bool:
        """Find a line holding two MARKs and one blank; if found, store
        the blank cell in self.coordinates and return True, else False."""
        board = self.game_field.field
        size = len(board)
        # Rows: two marks plus exactly one blank.
        for row in range(size):
            if board[row].count(mark) == 2 and board[row].count(' ') == 1:
                self.coordinates = [row, board[row].index(' ')]
                return True
        # Columns.
        for col in range(size):
            column = [board[row][col] for row in range(size)]
            if column.count(mark) == 2 and column.count(' ') == 1:
                self.coordinates = [column.index(' '), col]
                return True
        # Main diagonal (top-left to bottom-right).
        diagonal = [board[i][i] for i in range(size)]
        if diagonal.count(mark) == 2 and diagonal.count(' ') == 1:
            idx = diagonal.index(' ')
            self.coordinates = [idx, idx]
            return True
        # Anti-diagonal (top-right to bottom-left).
        diagonal = [board[i][size - 1 - i] for i in range(size)]
        if diagonal.count(mark) == 2 and diagonal.count(' ') == 1:
            idx = diagonal.index(' ')
            self.coordinates = [idx, size - 1 - idx]
            return True
        return False

    def mini_max(self, is_max_turn: bool) -> int:
        """MiniMax (game theory): score the current position from this
        AI's point of view: +1 win, -1 loss, 0 draw."""
        # Fixed: the original consulted the module-level ``game_field``
        # global here instead of this player's own field.
        result = self.game_field.check_winner()
        if result == self.mark:
            return 1
        elif result == self.opponent_mark:
            return -1
        elif result == 'Draw':
            return 0
        scores = []
        for move in self.game_field.get_empty_cells():
            self.game_field.move(move)
            scores.append(self.mini_max(not is_max_turn))
            self.game_field.undo(move)
        return max(scores) if is_max_turn else min(scores)
class User(AbstractPlayer):
    """Human player: reads coordinates from stdin and validates them."""

    # Maps the user's "column row" input (1..3, origin bottom-left) onto
    # internal [row][col] indices of the 3x3 field.
    CONVERT_COORDINATES = {
        ('1', '3'): (0, 0), ('2', '3'): (0, 1), ('3', '3'): (0, 2),
        ('1', '2'): (1, 0), ('2', '2'): (1, 1), ('3', '2'): (1, 2),
        ('1', '1'): (2, 0), ('2', '1'): (2, 1), ('3', '1'): (2, 2),
    }

    def make_move(self):
        """User makes a move"""
        prompts = {
            'Not a Number': 'You should enter numbers!',
            'Incorrect coordinates': 'Coordinates should be from 1 to 3!',
            'Cell is occupied': 'This cell is occupied! Choose another one!',
        }
        while True:
            self.coordinates = tuple(input('Enter the coordinates: ').split())
            outcome = self.validate_coordinates(self.coordinates)
            if outcome == 'Valid':
                return self.game_field.move(self.coordinates)
            print(prompts[outcome])

    def validate_coordinates(self, coordinates: Tuple) -> str:
        """Input validation. Returns validation result"""
        try:
            col = int(coordinates[0])
            row = int(coordinates[1])
        except (ValueError, IndexError):
            return 'Not a Number'
        if min(col, row) < 1 or max(col, row) > 3:
            return 'Incorrect coordinates'
        self.coordinates = self.CONVERT_COORDINATES.get(coordinates)
        if self.coordinates not in self.game_field.get_empty_cells():
            return 'Cell is occupied'
        return 'Valid'
class GameFactory:
    """Creates setup game"""

    def __init__(self, player_1: [User, AI], player_2: [User, AI], game_field: [TicTacToeField]):
        self.player_1 = self._make_player(player_1, 'X', game_field)
        self.player_2 = self._make_player(player_2, 'O', game_field)
        # 'X' always opens the game.
        self.turn = self.player_1

    @staticmethod
    def _make_player(kind, mark, game_field):
        # 'user' means a human player; anything else is an AI difficulty.
        if kind == 'user':
            return User(game_field, mark=mark)
        return AI(game_field, mark, kind)

    def next_move(self):
        """Makes a move and passes the move to another player."""
        self.turn.make_move()
        self.turn = self.player_2 if self.turn is self.player_1 else self.player_1
class ParametersError(Exception):
    """Raised when the 'start' command carries invalid parameters."""
def parse_command():
    """Parse user commands for setup game.

    Returns:
        'break' when the user types ``exit``, otherwise a
        ``(player_1, player_2)`` pair of configurations from a valid
        ``start <p1> <p2>`` command. Re-prompts on anything else.
    """
    game_configuration = ('user', 'easy', 'medium', 'hard')
    while True:
        command = input('Input command: ').split(' ')
        try:
            if len(command) == 1 and command[0] == 'exit':
                return 'break'
            if (len(command) == 3 and command[0] == 'start'
                    and command[1] in game_configuration
                    and command[2] in game_configuration):
                return command[1], command[2]
            # Fix: the original silently re-prompted on commands of the
            # wrong shape (e.g. "start easy" or "foo"); report them too.
            raise ParametersError
        except (IndexError, ParametersError):
            print('Bad parameters!')
if __name__ == '__main__':
    # Outer loop: one iteration per game session; 'exit' ends the program.
    while True:
        command = parse_command()
        if command == 'break':
            break
        player_1 = command[0]
        player_2 = command[1]
        # NOTE(review): AI.easy_move/mini_max read this module-level
        # ``game_field`` directly, so it must stay a global assigned here.
        game_field = TicTacToeField()
        game = GameFactory(player_1, player_2, game_field)
        game_field.print_game_field()
        # Inner loop: alternate moves until a winner or a draw is found.
        while True:
            game.next_move()
            game_field.print_game_field()
            winner = game_field.check_winner()
            if winner:
                print(winner if winner == 'Draw' else f'{winner} wins!')
                break
| [
"vitaliibandyl@gmail.com"
] | vitaliibandyl@gmail.com |
dd1ec64e97316958fca29fdf779f144563f25a42 | 8e51b3bf1c4cd1febfb1b9d0c12ce75aa33eb1f8 | /代码/展示项目代码/Locating/asgi.py | ab72fec9cc5453352b4fa974ed86fd019c23aa24 | [] | no_license | GuardingDog/eventLocationExtraction | cf63783b01360e303848de99192610adac24af7c | 9df0f80d1d71cedea08450633e8363aa0fb03950 | refs/heads/main | 2023-02-09T06:22:53.158080 | 2020-12-25T06:25:32 | 2020-12-25T06:25:32 | 324,302,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
ASGI config for Locating project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Use the project's settings module unless one is already configured.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Locating.settings')

# Module-level ASGI callable that ASGI servers import as the entry point.
application = get_asgi_application()
| [
"sun2387353@163.com"
] | sun2387353@163.com |
173495b72c3e825cf142425e2d4e4510a87a9075 | 80361b1bf21066abb83efff65d748c49a354fc30 | /20171205.py | ab9e694e6f37b636c93859ceba55a62d25d5254a | [] | no_license | wsdxl/python-20171126 | fa973ade255010ba64d8a13830fdc2988305d560 | df99efc95a9552c2435d0327397a4399adefbfa5 | refs/heads/master | 2021-09-05T01:17:51.546966 | 2018-01-23T09:54:06 | 2018-01-23T09:54:06 | 112,087,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | list=['hello','world',2,6]
list2=[1,3,5,6]
list3=['how','are','you']
list4=['thinks','i','am','fine']
list5=[7,8,9,0]
# print(list[0])
# print(list2[1:3])
# print(list3[0])
# list3[0]='who'
# print(list3[0])
# print(list)
# del list[2]
# print(list)
# print(len(list))
# print(list.__len__())
# print(list2+list5)
# print(list3+list4)
print(list+list2)
print( | [
"506615839@qq.com"
] | 506615839@qq.com |
2037b65f41e66d5efd97fb4037f35830d3fbc814 | b1c578ce83d94848a1c2ec0bcb91ae791ef419cd | /src/ggrc/migrations/versions/20180319122658_679480cbd712_add_risk_propagation_roles.py | ed07384e4deaf9cf32f022902852e518f9698b63 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | zdqf/ggrc-core | 0d1575557af3c49980fe6dbad586d045ad73d5ad | 29dea12d189bc6be21006369efc0aae617bbab6f | refs/heads/master | 2020-03-27T19:29:00.536374 | 2018-08-28T15:29:56 | 2018-08-28T15:29:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | # Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Add risk propagation roles
Create Date: 2018-03-19 12:26:58.016090
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from ggrc.migrations.utils import acr_propagation
from ggrc.migrations.utils import acr_propagation_constants as const
# revision identifiers, used by Alembic.
revision = '679480cbd712'
down_revision = '3e667570f21f'
def upgrade():
    """Upgrade database schema and/or data, creating a new revision."""
    # Create every propagated access-control role defined in the Risk
    # propagation tree.
    acr_propagation.propagate_roles(const.GGRC_RISKS_PROPAGATION)
def downgrade():
    """Remove Risk propagated roles"""
    # Walk the same propagation tree used by upgrade() and delete the
    # top-level roles it created for each object type.
    for object_type, roles_tree in const.GGRC_RISKS_PROPAGATION.items():
        acr_propagation.remove_propagated_roles(object_type, roles_tree.keys())
| [
"zidarsk8@gmail.com"
] | zidarsk8@gmail.com |
54cad0c1b793017f42dfe9dadc1607ef7b373dd9 | adcef785263b5aabff50a5ebac5deccc90ff5c43 | /apps/orders/migrations/0031_auto_20210210_2159.py | f416025059e53cdb5a74aa7d7f7d7949e1b84f9a | [] | no_license | jtaningco/Quicklink | f2af25939a28feb3d7fa9b68c66226963161d6d6 | 4cbaf71a2606b123913d52f693fceff97d339a98 | refs/heads/main | 2023-04-06T22:41:50.995658 | 2021-03-27T09:04:37 | 2021-03-27T09:04:37 | 323,894,414 | 0 | 0 | null | 2021-03-18T15:01:53 | 2020-12-23T12:19:10 | HTML | UTF-8 | Python | false | false | 467 | py | # Generated by Django 3.0.6 on 2021-02-10 13:59
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):

    # Migration that must be applied before this one.
    dependencies = [
        ('orders', '0030_auto_20210210_2125'),
    ]

    operations = [
        migrations.AlterField(
            model_name='order',
            name='delivery_date',
            # NOTE(review): the default is a fixed timestamp captured when
            # makemigrations ran, not "now at migration time".
            field=models.DateTimeField(default=datetime.datetime(2021, 2, 13, 21, 59, 52, 813536), null=True),
        ),
    ]
| [
"64685600+jtaningco@users.noreply.github.com"
] | 64685600+jtaningco@users.noreply.github.com |
94fc23f50dc7a7ff000b88944de86dd6618b33fc | d29d44c9f97a8d7ea0bcf6bdbff22c0d32061bef | /FLaREON/__init__.py | dcce541d9b18f210ebbb3ee26963e9eee4507a61 | [
"MIT"
] | permissive | sidgurun/FLaREON | a8d648a01f5a76709caa67a5ea473aea7a0c66cc | 58672e49bb6fdf5ad2802fa02310724b9c3198ac | refs/heads/master | 2021-06-09T12:40:29.285875 | 2019-09-03T21:48:16 | 2019-09-03T21:48:16 | 155,904,404 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60,941 | py | import os
import os.path
#import imp
import sys
import shutil
import urllib
import numpy as np
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.neighbors import KNeighborsRegressor
import pickle
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Check_if_DATA_files_are_found():
    """Return True when every file listed in DATA/List_of_DATA_files
    exists inside this package's DATA directory.

    The manifest lives next to this module; each line names one file.
    """
    this_dir, this_filename = os.path.split(__file__)
    arxiv_with_file_names = this_dir + '/DATA/List_of_DATA_files'
    # all() replaces the original boolean-multiply accumulator (which
    # returned an int) and short-circuits on the first missing file.
    with open(arxiv_with_file_names) as fd:
        return all(
            os.path.isfile(this_dir + '/DATA/' + line.strip('\n'))
            for line in fd
        )
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Download_data():
    """Download every file listed in DATA/List_of_DATA_files from the
    project web server and move it into the package DATA directory.

    Calls sys.exit() if the files are still missing after the download.
    """
    # Bug fix: urllib.URLopener was removed in Python 3; urlretrieve is
    # the supported one-call replacement for simple downloads.
    from urllib.request import urlretrieve
    this_dir, this_filename = os.path.split(__file__)
    arxiv_with_file_names = this_dir + '/DATA/List_of_DATA_files'
    file_where_to_store_data = this_dir + '/DATA/'
    print( 'This package is stored in ', this_dir , '(Please, note that we are not spying you.)' )
    http_url = 'http://www.cefca.es/people/~sidgurung/ShouT/ShouT/DATA/'
    with open( arxiv_with_file_names ) as fd:
        for line in fd:
            arxiv_name = line.strip('\n')
            print( 'Downloaing...' , http_url + arxiv_name )
            urlretrieve( http_url + arxiv_name , arxiv_name )
            print( '--> Done!' )
            print( 'Moving Downloaded file to' , file_where_to_store_data )
            shutil.move( arxiv_name , file_where_to_store_data + arxiv_name )
            print( '--> Done' )
    if Check_if_DATA_files_are_found():
        print( '\nHey man, looks like everything is done! That is brilliant!' )
    else:
        print( 'This is weird... We just downloaded everthing but the files are not found...Exiting...')
        print( 'Error. Human is dead. Mismatch.')
        sys.exit()
    return
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def load_machine_fesc( Machine , property_name , Geometry , INSIDE_BICONE=True ):
    '''
    Return the trained (pickled) regression model for an escape-fraction
    property.

    Machine        : str, algorithm tag used in the file name (e.g. 'Tree').
    property_name  : str, one of 'KKK', 'CCC', 'LLL', 'f_esc'.
    Geometry       : str, 'Thin_Shell', 'Galactic_Wind' or 'Bicone_X_Slab'.
    INSIDE_BICONE  : bool, only used for the bicone geometry file names.
    '''
    Geometry_Set = [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab' ]
    geo_code = [ 'thin' , 'wind' , 'Bicone_X_Slab' ]
    Property_Set = [ 'KKK' , 'CCC' , 'LLL' , 'f_esc' ]
    assert property_name in Property_Set , "Houston we've got a problem, Error Code = 23452345.7523"
    index = np.where( Geometry == np.array(Geometry_Set) )[0][0]
    this_dir, this_filename = os.path.split(__file__)
    filename_root = 'DATA/finalized_model_'+ geo_code[index] +'_f_esc_' + Machine + '_' + property_name
    if Geometry == 'Bicone_X_Slab':
        filename_root += '_Inside_Bicone_' + str(INSIDE_BICONE)
    filename = os.path.join( this_dir , filename_root + '.sav' )
    # Fix: close the file handle (the original leaked an open file).
    with open( filename , 'rb' ) as f:
        loaded_model = pickle.load( f )
    return loaded_model
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Analytic_f_esc_Thin_Shell( V_Arr , logNH_Arr , ta_Arr ):
    """Analytic escape-fraction fit for the Thin Shell geometry.

    Evaluates f_esc = 1 / cosh( sqrt( K1 * ta**K2 ) ), where K1 and K2
    are power laws of the expansion velocity whose coefficients are
    polynomials in log10(NH / 1e18). Constants come from an MCMC fit.
    """
    NH18 = 10 ** ( logNH_Arr - 18 )
    y = np.log10( NH18 )
    # Fit constants, stored as powers of ten.
    k11, k12, k13 = 10 ** 1.90526, -(10 ** 2.0399), 10 ** 2.34829
    k21, k22 = 10 ** -3.138837, -(10 ** -1.92151)
    k23, k24 = 10 ** -1.1860205000000001, -(10 ** -0.1480042)
    k3 = 10 ** 0.0530715
    k4 = 10 ** -2.743455
    coef_1 = k11 * y * y + k12 * y + k13
    coef_2 = k21 * y * y * y + k22 * y * y + k23 * y + k24
    K1 = coef_1 * ( V_Arr ** coef_2 )
    K2 = k3 * ( V_Arr ** k4 )
    return 1. / np.cosh( np.sqrt( K1 * ( ta_Arr ** K2 ) ) )
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Analytic_f_esc_Wind( V_Arr , logNH_Arr , ta_Arr ):
    """Analytic escape-fraction fit for the Galactic Wind geometry.

    Same functional form as the Thin Shell fit,
    f_esc = 1 / cosh( sqrt( K1 * ta**K2 ) ), with wind-specific
    MCMC-calibrated constants.
    """
    NH18 = 10 ** ( logNH_Arr - 18 )
    y = np.log10( NH18 )
    # Fit constants, stored as powers of ten.
    k11, k12 = 10 ** 0.4852541, 10 ** -0.2006394
    k21, k22 = 10 ** -1.912059, -(10 ** -0.6380347)
    k3 = 10 ** 0.046314074999999996
    k4 = 10 ** -1.782037
    coef_1 = k11 * ( NH18 ** k12 )
    coef_2 = k21 * y * y + k22 * y
    K1 = coef_1 * ( V_Arr ** coef_2 )
    K2 = k3 * ( V_Arr ** k4 )
    return 1. / np.cosh( np.sqrt( K1 * ( ta_Arr ** K2 ) ) )
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def RT_f_esc_Analytic( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr=None , Machine_Learning_Algorithm=None ):
    """Dispatch to the analytic escape-fraction fit of the chosen geometry.

    Only 'Thin_Shell' and 'Galactic_Wind' have analytic fits; the extra
    keyword arguments are accepted for interface compatibility with the
    other RT_f_esc_* back-ends and are ignored here.
    """
    dispatch = {
        'Thin_Shell': Analytic_f_esc_Thin_Shell,
        'Galactic_Wind': Analytic_f_esc_Wind,
    }
    assert Geometry in dispatch , 'The geometry ' + Geometry + ' is nor supported in MODE=Analytic , only Thin_Shell and Galactic_Wind'
    V_Arr = np.atleast_1d( V_Arr )
    logNH_Arr = np.atleast_1d( logNH_Arr )
    ta_Arr = np.atleast_1d( ta_Arr )
    return dispatch[ Geometry ]( V_Arr , logNH_Arr , ta_Arr )
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def fesc_of_ta_Thin_and_Wind( ta , CCC , KKK ):
    """Escape fraction as a function of dust optical depth:
    f_esc = 1 / cosh( sqrt( C * ta**K ) )."""
    argument = CCC * ( ta ** KKK )
    return 1. / np.cosh( np.sqrt( argument ) )
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def fesc_of_ta_Bicone( ta , CCC , KKK , LLL ):
    """Bicone escape-fraction curve: the 1/cosh(sqrt(C * ta**K)) shape
    scaled by an extra amplitude L."""
    argument = CCC * ( ta ** KKK )
    return LLL * 1. / np.cosh( np.sqrt( argument ) )
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def RT_f_esc_Machine_Parameter( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr=None , Machine_Learning_Algorithm='Tree' ):
    """Escape fraction from machine-learned fits to the CURVE PARAMETERS.

    For each object, the trained models predict the parameters of the
    analytic f_esc(ta) curve from (V, logNH); the curve is then evaluated
    at the object's dust optical depth ta.

    For 'Bicone_X_Slab', objects inside and outside the bicone use
    separately trained models (including an extra amplitude 'LLL'),
    selected by the boolean Inside_Bicone_Arr.
    """
    logNH_Arr = np.atleast_1d( logNH_Arr )
    ta_Arr = np.atleast_1d( ta_Arr )
    V_Arr = np.atleast_1d( V_Arr )
    if Geometry in [ 'Thin_Shell' , 'Galactic_Wind' ] :
        # Feature matrix: one row per object, columns (V, logNH).
        Coor_matrix = np.zeros( len(V_Arr) * 2 ).reshape( len(V_Arr) , 2 )
        Coor_matrix[ : , 0 ] = V_Arr
        Coor_matrix[ : , 1 ] = logNH_Arr
        CCC_machine = load_machine_fesc( Machine_Learning_Algorithm , 'CCC' , Geometry )
        KKK_machine = load_machine_fesc( Machine_Learning_Algorithm , 'KKK' , Geometry )
        CCC_model_Arr = CCC_machine.predict( Coor_matrix )
        KKK_model_Arr = KKK_machine.predict( Coor_matrix )
        f_esc_Arr = fesc_of_ta_Thin_and_Wind( ta_Arr , CCC_model_Arr , KKK_model_Arr )
    if Geometry in [ 'Bicone_X_Slab' ] :
        assert not Inside_Bicone_Arr is None , 'Inside_Bicone_Arr give is None or none Inside_Bicone_Arr was given. If the geometry is Bicone_X_Slab it is necesary to give a Inside_Bicone_Arr'
        Inside_Bicone_Arr = np.atleast_1d( Inside_Bicone_Arr )
        f_esc_Arr = np.zeros( len(V_Arr) )
        ##################
        # Objects inside the bicone: models trained with INSIDE_BICONE=True.
        if sum( Inside_Bicone_Arr ) > 0 :
            Coor_matrix = np.zeros( sum( Inside_Bicone_Arr ) * 2 ).reshape( sum( Inside_Bicone_Arr ) , 2 )
            Coor_matrix[ : , 0 ] = V_Arr[ Inside_Bicone_Arr ]
            Coor_matrix[ : , 1 ] = logNH_Arr[ Inside_Bicone_Arr ]
            CCC_machine_in = load_machine_fesc( Machine_Learning_Algorithm , 'CCC' , Geometry , INSIDE_BICONE=True )
            KKK_machine_in = load_machine_fesc( Machine_Learning_Algorithm , 'KKK' , Geometry , INSIDE_BICONE=True )
            LLL_machine_in = load_machine_fesc( Machine_Learning_Algorithm , 'LLL' , Geometry , INSIDE_BICONE=True )
            CCC_model_in_Arr = CCC_machine_in.predict( Coor_matrix )
            KKK_model_in_Arr = KKK_machine_in.predict( Coor_matrix )
            LLL_model_in_Arr = LLL_machine_in.predict( Coor_matrix )
            f_esc_Arr[ Inside_Bicone_Arr ] = fesc_of_ta_Bicone( ta_Arr[ Inside_Bicone_Arr ] , CCC_model_in_Arr , KKK_model_in_Arr , LLL_model_in_Arr )
        ##################
        # Objects outside the bicone: models trained with INSIDE_BICONE=False.
        if sum( ~Inside_Bicone_Arr ) > 0 :
            Coor_matrix = np.zeros( sum( ~Inside_Bicone_Arr ) * 2 ).reshape( sum( ~Inside_Bicone_Arr ) , 2 )
            Coor_matrix[ : , 0 ] = V_Arr[ ~Inside_Bicone_Arr ]
            Coor_matrix[ : , 1 ] = logNH_Arr[ ~Inside_Bicone_Arr ]
            CCC_machine_out = load_machine_fesc( Machine_Learning_Algorithm , 'CCC' , Geometry , INSIDE_BICONE=False )
            KKK_machine_out = load_machine_fesc( Machine_Learning_Algorithm , 'KKK' , Geometry , INSIDE_BICONE=False )
            LLL_machine_out = load_machine_fesc( Machine_Learning_Algorithm , 'LLL' , Geometry , INSIDE_BICONE=False )
            CCC_model_out_Arr = CCC_machine_out.predict( Coor_matrix )
            KKK_model_out_Arr = KKK_machine_out.predict( Coor_matrix )
            LLL_model_out_Arr = LLL_machine_out.predict( Coor_matrix )
            f_esc_Arr[ ~Inside_Bicone_Arr ] = fesc_of_ta_Bicone( ta_Arr[ ~Inside_Bicone_Arr ] , CCC_model_out_Arr , KKK_model_out_Arr , LLL_model_out_Arr )
    return f_esc_Arr
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def RT_f_esc_Machine_Values( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr=None , Machine_Learning_Algorithm='Tree' ):
    """Escape fraction predicted DIRECTLY by a trained model.

    Unlike the 'Parameter' variant, the model here predicts f_esc itself
    from the features (V, logNH, log10(ta)). For 'Bicone_X_Slab',
    separate inside/outside models are used, selected by the boolean
    Inside_Bicone_Arr.
    """
    logNH_Arr = np.atleast_1d( logNH_Arr )
    ta_Arr = np.atleast_1d( ta_Arr )
    V_Arr = np.atleast_1d( V_Arr )
    if Geometry in [ 'Thin_Shell' , 'Galactic_Wind' ] :
        loaded_model = load_machine_fesc( Machine_Learning_Algorithm , 'f_esc' , Geometry )
        # Feature matrix: one row per object, columns (V, logNH, log10 ta).
        Coor_matrix = np.zeros( len(V_Arr) * 3 ).reshape( len(V_Arr) , 3 )
        Coor_matrix[ : , 0 ] = V_Arr
        Coor_matrix[ : , 1 ] = logNH_Arr
        Coor_matrix[ : , 2 ] = np.log10(ta_Arr)
        f_esc_Arr = loaded_model.predict( Coor_matrix )
    if Geometry in [ 'Bicone_X_Slab' ] :
        Inside_Bicone_Arr = np.atleast_1d( Inside_Bicone_Arr )
        f_esc_Arr = np.zeros( len(V_Arr) )
        ##################
        # Objects inside the bicone: model trained with INSIDE_BICONE=True.
        if sum( Inside_Bicone_Arr ) > 0 :
            loaded_model_inside = load_machine_fesc( Machine_Learning_Algorithm , 'f_esc' , Geometry , INSIDE_BICONE=True )
            Coor_matrix = np.zeros( sum( Inside_Bicone_Arr ) * 3 ).reshape( sum( Inside_Bicone_Arr ) , 3 )
            Coor_matrix[ : , 0 ] = V_Arr[ Inside_Bicone_Arr ]
            Coor_matrix[ : , 1 ] = logNH_Arr[ Inside_Bicone_Arr ]
            Coor_matrix[ : , 2 ] = np.log10(ta_Arr)[ Inside_Bicone_Arr ]
            f_esc_Arr[ Inside_Bicone_Arr ] = loaded_model_inside.predict( Coor_matrix )
        ##################
        # Objects outside the bicone: model trained with INSIDE_BICONE=False.
        if sum( ~Inside_Bicone_Arr ) > 0 :
            loaded_model_outside = load_machine_fesc( Machine_Learning_Algorithm , 'f_esc' , Geometry , INSIDE_BICONE=False )
            Coor_matrix = np.zeros( sum( ~Inside_Bicone_Arr ) * 3 ).reshape( sum( ~Inside_Bicone_Arr ) , 3 )
            Coor_matrix[ : , 0 ] = V_Arr[ ~Inside_Bicone_Arr ]
            Coor_matrix[ : , 1 ] = logNH_Arr[ ~Inside_Bicone_Arr ]
            Coor_matrix[ : , 2 ] = np.log10(ta_Arr)[ ~Inside_Bicone_Arr ]
            f_esc_Arr[ ~Inside_Bicone_Arr ] = loaded_model_outside.predict( Coor_matrix )
    return f_esc_Arr
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Linear_2D_interpolator( X_prob , Y_prob , X_grid , Y_grid , Field_in_grid ):
    """Bilinear interpolation of Field_in_grid at the point (X_prob, Y_prob).

    X_grid and Y_grid are assumed sorted ascending; the probe point must
    lie strictly above the first node of each axis (the cell is located
    with np.where(grid < prob)). Field_in_grid is indexed as
    [x_node, y_node, ...]; any trailing axes are combined element-wise.
    """
    ix = np.where( X_grid < X_prob )[0][-1]
    iy = np.where( Y_grid < Y_prob )[0][-1]
    # Fractional position of the probe inside its cell, in [0, 1).
    tx = ( X_prob - X_grid[ ix ] ) * 1. / ( X_grid[ ix + 1 ] - X_grid[ ix ] )
    ty = ( Y_prob - Y_grid[ iy ] ) * 1. / ( Y_grid[ iy + 1 ] - Y_grid[ iy ] )
    # Weighted sum over the four cell corners.
    value = 0.0
    for dx in ( 0 , 1 ):
        wx = tx if dx else 1. - tx
        for dy in ( 0 , 1 ):
            wy = ty if dy else 1. - ty
            value = value + wx * wy * Field_in_grid[ ix + dx , iy + dy ]
    return value
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Linear_3D_interpolator( X_prob , Y_prob , Z_prob , X_grid , Y_grid , Z_grid , Field_in_grid ):
    """Trilinear interpolation of Field_in_grid at (X_prob, Y_prob, Z_prob).

    Each grid axis is assumed sorted ascending and the probe point must
    lie strictly above the first node of each axis. Field_in_grid is
    indexed as [x_node, y_node, z_node].
    """
    ix = np.where( X_grid < X_prob )[0][-1]
    iy = np.where( Y_grid < Y_prob )[0][-1]
    iz = np.where( Z_grid < Z_prob )[0][-1]
    # Fractional position of the probe inside its cell, in [0, 1).
    tx = ( X_prob - X_grid[ ix ] ) * 1. / ( X_grid[ ix + 1 ] - X_grid[ ix ] )
    ty = ( Y_prob - Y_grid[ iy ] ) * 1. / ( Y_grid[ iy + 1 ] - Y_grid[ iy ] )
    tz = ( Z_prob - Z_grid[ iz ] ) * 1. / ( Z_grid[ iz + 1 ] - Z_grid[ iz ] )
    # Weighted sum over the eight cell corners.
    value = 0.0
    for dx in ( 0 , 1 ):
        wx = tx if dx else 1. - tx
        for dy in ( 0 , 1 ):
            wy = ty if dy else 1. - ty
            for dz in ( 0 , 1 ):
                wz = tz if dz else 1. - tz
                value = value + wx * wy * wz * Field_in_grid[ ix + dx , iy + dy , iz + dz ]
    return value
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def load_Grid_fesc( Geometry , MODE , INSIDE_BICONE=True ):
    """Load the precomputed escape-fraction grid dictionary.

    Geometry       : 'Thin_Shell', 'Galactic_Wind' or 'Bicone_X_Slab'.
    MODE           : 'Parameters' (curve-parameter grids) or 'values'
                     (raw f_esc grids).
    INSIDE_BICONE  : only used for the bicone geometry file names.

    Returns the dictionary stored in the packaged .npy file (used by the
    Interpolate_* helpers through its 'V_Arr', 'logNH_Arr', 'logta_Arr'
    and 'Grid' entries).
    """
    Geometry_Set = [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab' ]
    geo_code = [ 'Thin_Shell' , 'Wind' , 'Bicone_X_Slab' ]
    index = np.where( Geometry == np.array(Geometry_Set) )[0][0]
    filename_root = 'DATA/Dictonary_'+ geo_code[index] +'_Grid_f_esc_' + MODE
    if Geometry == 'Bicone_X_Slab':
        filename_root += '_Inside_Bicone_' + str(INSIDE_BICONE)
    this_dir, this_filename = os.path.split(__file__)
    filename = os.path.join( this_dir , filename_root + '.npy' )
    # allow_pickle is required because the stored object is a Python dict.
    loaded_model = np.load( filename , allow_pickle=True ).item()
    return loaded_model
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Interpolate_f_esc_Arrays_2D_grid( V_Arr , logNH_Arr , ta_Arr , Grid_Dictionary , Geometry ):
    """Interpolate the fitted curve parameters on the (V, logNH) grid and
    evaluate the escape fraction at each object's dust optical depth.

    The grid stores 2 parameters (CCC, KKK) per node for 'Thin_Shell' and
    'Galactic_Wind', and 3 (CCC, KKK, LLL) for 'Bicone_X_Slab'.
    """
    V_Arr_Grid = Grid_Dictionary[ 'V_Arr' ]
    logNH_Arr_Grid = Grid_Dictionary[ 'logNH_Arr' ]
    # Fix: removed the dead 'logta_Arr' lookup (never used here).
    Grid = Grid_Dictionary[ 'Grid' ]
    N_objects = len( V_Arr )
    CCC_Arr_evaluated = np.zeros( N_objects )
    KKK_Arr_evaluated = np.zeros( N_objects )
    ###################
    if Geometry in [ 'Thin_Shell' , 'Galactic_Wind' ] :
        for INDEX in range( 0 , N_objects ):
            CCC_Arr_evaluated[ INDEX ] , KKK_Arr_evaluated[ INDEX ] = Linear_2D_interpolator( V_Arr[INDEX] , logNH_Arr[INDEX] , V_Arr_Grid , logNH_Arr_Grid , Grid )
        f_esc_Arr = fesc_of_ta_Thin_and_Wind( ta_Arr , CCC_Arr_evaluated , KKK_Arr_evaluated )
    ###################
    if Geometry in [ 'Bicone_X_Slab' ] :
        LLL_Arr_evaluated = np.zeros( N_objects )
        for INDEX in range( 0 , N_objects ):
            CCC_Arr_evaluated[ INDEX ] , KKK_Arr_evaluated[ INDEX ] , LLL_Arr_evaluated[ INDEX ] = Linear_2D_interpolator( V_Arr[INDEX] , logNH_Arr[INDEX] , V_Arr_Grid , logNH_Arr_Grid , Grid )
        f_esc_Arr = fesc_of_ta_Bicone( ta_Arr , CCC_Arr_evaluated , KKK_Arr_evaluated , LLL_Arr_evaluated )
    return f_esc_Arr
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Interpolate_fesc_Arrays_3D_grid( V_Arr , logNH_Arr , ta_Arr , Grid_Dictionary ):
    """Trilinearly interpolate f_esc on the (V, logNH, log10 ta) grid,
    one value per object."""
    V_grid = Grid_Dictionary[ 'V_Arr' ]
    logNH_grid = Grid_Dictionary[ 'logNH_Arr' ]
    logta_grid = Grid_Dictionary[ 'logta_Arr' ]
    grid_values = Grid_Dictionary[ 'Grid' ]
    # The grid's third axis is log10(ta), so convert the inputs first.
    logta_Arr = np.log10( ta_Arr )
    return np.array( [
        Linear_3D_interpolator( v , nh , lta , V_grid , logNH_grid , logta_grid , grid_values )
        for v , nh , lta in zip( V_Arr , logNH_Arr , logta_Arr )
    ] )
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def RT_f_esc_Interpolation_Values( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr=None , Machine_Learning_Algorithm=None ):
    """Escape fraction by 3D interpolation of precomputed f_esc grids.

    For 'Bicone_X_Slab' the inside/outside grids are selected per object
    with the boolean Inside_Bicone_Arr (required for that geometry).
    Machine_Learning_Algorithm is accepted for interface compatibility
    and ignored.
    """
    logNH_Arr = np.atleast_1d( logNH_Arr )
    ta_Arr = np.atleast_1d( ta_Arr )
    V_Arr = np.atleast_1d( V_Arr )
    if Geometry in [ 'Thin_Shell' , 'Galactic_Wind' ] :
        DATA_DICTIONAY = load_Grid_fesc( Geometry , 'values' )
        f_esc_Arr = Interpolate_fesc_Arrays_3D_grid( V_Arr , logNH_Arr , ta_Arr , DATA_DICTIONAY )
    if Geometry in [ 'Bicone_X_Slab' ] :
        # Fix: the original silently skipped this branch when
        # Inside_Bicone_Arr was None and crashed with NameError at return.
        assert not Inside_Bicone_Arr is None , 'If the geometry is Bicone_X_Slab it is necesary to give a Inside_Bicone_Arr'
        Inside_Bicone_Arr = np.atleast_1d( Inside_Bicone_Arr )
        f_esc_Arr = np.zeros( len(logNH_Arr) )
        ##############
        if sum( Inside_Bicone_Arr ) > 0:
            DATA_DICTIONAY_in = load_Grid_fesc( Geometry , 'values' , INSIDE_BICONE=True )
            f_esc_Arr[ Inside_Bicone_Arr ] = Interpolate_fesc_Arrays_3D_grid( V_Arr[Inside_Bicone_Arr] , logNH_Arr[Inside_Bicone_Arr] , ta_Arr[Inside_Bicone_Arr] , DATA_DICTIONAY_in )
        ##############
        if sum( ~Inside_Bicone_Arr ) > 0:
            DATA_DICTIONAY_out = load_Grid_fesc( Geometry , 'values' , INSIDE_BICONE=False )
            f_esc_Arr[ ~Inside_Bicone_Arr ] = Interpolate_fesc_Arrays_3D_grid( V_Arr[~Inside_Bicone_Arr] , logNH_Arr[~Inside_Bicone_Arr] , ta_Arr[~Inside_Bicone_Arr] , DATA_DICTIONAY_out )
    return f_esc_Arr
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def RT_f_esc_Interpolation_Parameters( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr=None , Machine_Learning_Algorithm=None ):
    """Escape fraction by 2D interpolation of the fitted CURVE PARAMETERS.

    For 'Bicone_X_Slab' the inside/outside parameter grids are selected
    per object with the boolean Inside_Bicone_Arr (required for that
    geometry). Machine_Learning_Algorithm is accepted for interface
    compatibility and ignored.
    """
    logNH_Arr = np.atleast_1d( logNH_Arr )
    ta_Arr = np.atleast_1d( ta_Arr )
    V_Arr = np.atleast_1d( V_Arr )
    if Geometry in [ 'Thin_Shell' , 'Galactic_Wind' ] :
        DATA_DICTIONAY = load_Grid_fesc( Geometry , 'Parameters' )
        f_esc_Arr = Interpolate_f_esc_Arrays_2D_grid( V_Arr , logNH_Arr , ta_Arr , DATA_DICTIONAY , Geometry )
    if Geometry in [ 'Bicone_X_Slab' ] :
        # Fix: np.atleast_1d(None) produced an object array and '~' then
        # failed cryptically; fail loudly instead (consistent with the
        # other Bicone_X_Slab back-ends).
        assert not Inside_Bicone_Arr is None , 'If the geometry is Bicone_X_Slab it is necesary to give a Inside_Bicone_Arr'
        Inside_Bicone_Arr = np.atleast_1d( Inside_Bicone_Arr )
        f_esc_Arr = np.zeros( len(logNH_Arr) )
        ##############
        # Guard the empty masks so the grids are only loaded when needed
        # (consistent with RT_f_esc_Interpolation_Values).
        if sum( Inside_Bicone_Arr ) > 0:
            DATA_DICTIONAY_in = load_Grid_fesc( Geometry , 'Parameters' , INSIDE_BICONE=True )
            f_esc_Arr[ Inside_Bicone_Arr ] = Interpolate_f_esc_Arrays_2D_grid( V_Arr[Inside_Bicone_Arr] , logNH_Arr[Inside_Bicone_Arr] , ta_Arr[Inside_Bicone_Arr] , DATA_DICTIONAY_in , Geometry )
        ##############
        if sum( ~Inside_Bicone_Arr ) > 0:
            DATA_DICTIONAY_out = load_Grid_fesc( Geometry , 'Parameters' , INSIDE_BICONE=False )
            f_esc_Arr[ ~Inside_Bicone_Arr ] = Interpolate_f_esc_Arrays_2D_grid( V_Arr[~Inside_Bicone_Arr] , logNH_Arr[~Inside_Bicone_Arr] , ta_Arr[~Inside_Bicone_Arr] , DATA_DICTIONAY_out , Geometry )
    return f_esc_Arr
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def pre_treatment_f_esc( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr , MODE ):
    '''
    Sanitize the inputs of RT_f_esc before the escape fraction is computed.

    Non-finite (nan / +-inf) V-logNH-ta combinations are masked out, and
    the surviving values are clipped in place so that they fall strictly
    inside the edges of the pre-computed grids.

    Returns the cleaned arrays (finite entries only), the matching
    inside-bicone booleans and the boolean mask of finite entries in the
    original input order.
    '''
    V_Arr = np.atleast_1d( V_Arr )
    logNH_Arr = np.atleast_1d( logNH_Arr )
    ta_Arr = np.atleast_1d( ta_Arr )
    V_Arr = V_Arr.astype(float)
    logNH_Arr = logNH_Arr.astype(float)
    ta_Arr = ta_Arr.astype(float)
    # Keep only entries where all three parameters are finite.
    bool1 = np.isfinite( V_Arr )
    bool2 = np.isfinite( logNH_Arr )
    bool3 = np.isfinite( ta_Arr )
    mask_good = bool1 * bool2 * bool3
    assert sum( mask_good ) != 0 , 'All the V-logNH-ta combinations are np.nan, -np.inf or np.inf'
    V_Arr_used = V_Arr[ mask_good ]
    logNH_Arr_used = logNH_Arr[ mask_good ]
    ta_Arr_used = ta_Arr[ mask_good ]
    #bool4 = ( V_Arr_used <= 100 ) * ( logNH_Arr_used >= 20.5 )
    #V_Arr_used[ bool4 ] = 100.00001
    #============================================#
    if Geometry in [ 'Bicone_X_Slab' ] :
        # The bicone grid has a forbidden corner at low velocity AND high
        # column density; points there are moved to the nearest allowed edge.
        bool1 = V_Arr_used < 100.0
        bool2 = logNH_Arr_used >= 20.5
        #aux_V_arr = logNH_Arr_used*-40 + 920.
        aux_V_arr = logNH_Arr_used * ( -100/1.5 ) + ( 100 - ( -100/1.5 * 20.5 ) )
        bool_aux = V_Arr_used > aux_V_arr
        V_Arr_used[ bool1 * bool2 * bool_aux ] = 100.000001
        # NOTE(review): the line-profile version of this clip assigns
        # 20.499999 here instead of exactly 20.5 -- confirm which edge
        # value is intended.
        logNH_Arr_used[ bool1 * bool2 *~ bool_aux ] = 20.5
    #============================================#
    # Clip V and logNH (and ta in 'Raw' mode) to just inside the grid limits.
    bool5 = V_Arr_used <= 10.00
    V_Arr_used[ bool5 ] = 10.000001
    bool6 = V_Arr_used >= 1000
    V_Arr_used[ bool6 ] = 999.9999
    bool7 = logNH_Arr_used <= 17.0
    logNH_Arr_used[ bool7 ] = 17.0000001
    bool8 = logNH_Arr_used >= 22.0
    logNH_Arr_used[ bool8 ] = 21.9999
    if MODE=='Raw':
        # The raw escape-fraction grid only covers ta in [1e-2.5, 1e-0.25].
        bool9 = ta_Arr_used <= 10**(-2.5)
        ta_Arr_used[ bool9 ] = 10**(-2.499999)
        bool10 = ta_Arr_used >= 10**(-0.25)
        ta_Arr_used[ bool10 ] = 10**(-0.2500001)
    if Inside_Bicone_Arr is None : Inside_Bicone_Arr = np.ones( len(V_Arr) )
    Inside_Bicone_Arr = np.atleast_1d( Inside_Bicone_Arr )
    In_Bool_used = Inside_Bicone_Arr[ mask_good ]
    return V_Arr_used , logNH_Arr_used , ta_Arr_used , In_Bool_used , mask_good
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def RT_f_esc( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr=None , MODE='Parametrization' , Algorithm='Intrepolation' , Machine_Learning_Algorithm='Tree' ):
    '''
    Compute the Lyman alpha escape fraction for a set of outflow
    configurations.

    Parameters
    ----------
    Geometry : string
        Outflow geometry: 'Thin_Shell', 'Galactic_Wind' or 'Bicone_X_Slab'.
    V_Arr : 1-D sequence of float
        Expansion velocity of the outflow [km/s].
    logNH_Arr : 1-D sequence of float
        log10 of the outflow neutral hydrogen column density [cm**-2].
    ta_Arr : 1-D sequence of float
        Dust optical depth of the outflow.
    Inside_Bicone_Arr : optional 1-D sequence of bool
        Only meaningful for 'Bicone_X_Slab'; True marks face-on sight lines.
    MODE : optional string
        'Parametrization' (default), 'Raw' or 'Analytic'.
    Algorithm : optional string
        'Intrepolation' (default) or 'Machine_Learning'; ignored when
        MODE == 'Analytic'.
    Machine_Learning_Algorithm : optional string
        'Tree' (default), 'Forest' or 'KN'; used only by the
        machine-learning back-ends.

    Returns
    -------
    1-D numpy array with one escape fraction per configuration; entries
    whose inputs were not finite are returned as np.nan.
    '''
    assert MODE in [ 'Parametrization' , 'Raw' , 'Analytic'] , 'The requested mode ' + MODE + ' is not available. The modes supported are : Parametrization , Raw , Analytic'
    assert Algorithm in [ 'Intrepolation' , 'Machine_Learning' ] , 'The requested algorithm ' + Algorithm + ' is not available. The algorithms supported are : Intrepolation , Machine_Learning'
    assert Geometry in [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab' ] , 'The requested geoemtry ' + Geometry + ' is not available. The geometries supported are : Thin_Shell , Galactic_Wind , Bicone_X_Slab'
    # Select the back-end implementation from the (MODE, Algorithm) pair.
    if MODE == 'Analytic' :
        funtion_to_use = RT_f_esc_Analytic
    else :
        funtion_to_use = { ( 'Parametrization' , 'Intrepolation'    ) : RT_f_esc_Interpolation_Parameters ,
                           ( 'Parametrization' , 'Machine_Learning' ) : RT_f_esc_Machine_Parameter        ,
                           ( 'Raw'             , 'Intrepolation'    ) : RT_f_esc_Interpolation_Values     ,
                           ( 'Raw'             , 'Machine_Learning' ) : RT_f_esc_Machine_Values           }[ ( MODE , Algorithm ) ]
    V_Arr_used , logNH_Arr_used , ta_Arr_used , In_Bool_used , mask_good = pre_treatment_f_esc( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr , MODE )
    # Entries rejected by the pre-treatment stay np.nan in the output.
    f_esc_Arr = np.zeros( len( mask_good ) ) * np.nan
    f_esc_Arr[ mask_good ] = funtion_to_use( Geometry , V_Arr_used , logNH_Arr_used , ta_Arr_used , Inside_Bicone_Arr=In_Bool_used , Machine_Learning_Algorithm=Machine_Learning_Algorithm )
    return f_esc_Arr
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def define_RT_parameters():
    """Return (nu0, Dv): the Lya line-center frequency [Hz] and the
    thermal Doppler frequency width [Hz] for a T = 1e4 K gas."""
    temperature_in_1e4K = 1.                                # = 10000. / 1e4
    nu0 = 2.46777 * 1.e15                                   # Lya frequency [Hz]
    thermal_velocity = 12.85 * np.sqrt( temperature_in_1e4K )   # [km/s]
    Dv = thermal_velocity * nu0 * 1. / ( 3 * (10**5) )      # Doppler width [Hz]
    return nu0 , Dv
#==============================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def convert_x_into_lamda( x ):
    """Convert a frequency offset x (Doppler units) into wavelength [m]."""
    nu0 , Dv = define_RT_parameters()
    frequency = x * Dv + nu0
    return 3. * 1.e8 / frequency
def convert_lamda_into_x( lamda ):
    """Convert a wavelength [m] into a frequency offset x (Doppler units)."""
    nu0 , Dv = define_RT_parameters()
    frequency = 3. * 1.e8 / lamda
    return ( frequency - nu0 ) / Dv
#==============================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def load_Grid_Line( Geometry , INSIDE_BICONE=None ):
    '''
    Load the dictionary describing the pre-computed line-profile grid.

    Parameters
    ----------
    Geometry : string
        Outflow geometry: 'Thin_Shell', 'Galactic_Wind' or 'Bicone_X_Slab'.
    INSIDE_BICONE : optional boolean
        Only used when Geometry is 'Bicone_X_Slab'. True selects the
        face-on (inside bicone) grid, False the edge-on one.

    Returns
    -------
    loaded_model : Dictionary
        Grid description with entries:
            'V_Arr'     : expansion velocities used [km/s]
            'logNH_Arr' : log10 of the column densities [c.g.s.]
            'logta_Arr' : log10 of the dust optical depths
            'x_Arr'     : frequencies in Doppler units
            'Grid'      : RT MC output; Grid[i,j,k,:] is the line profile
                          sampled at 'x_Arr' for V_Arr[i], logNH_Arr[j]
                          and logta_Arr[k].
    '''
    assert Geometry in [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab' ] , 'The requested geoemtry ' + Geometry + ' is not available. The geometries supported are : Thin_Shell , Galactic_Wind , Bicone_X_Slab'
    # Map the public geometry name onto the file-name code used on disk.
    geometry_to_code = { 'Thin_Shell'    : 'Thin_Shell'    ,
                         'Galactic_Wind' : 'Wind'          ,
                         'Bicone_X_Slab' : 'Bicone_X_Slab' }
    filename = 'DATA/Dictonary_' + geometry_to_code[ Geometry ] + '_Grid_Lines'
    if Geometry == 'Bicone_X_Slab':
        filename += '_In_Bicone_' + str( INSIDE_BICONE )
    filename += '.npy'
    # The data files live next to this module, not in the working directory.
    this_dir , _ = os.path.split( __file__ )
    full_path = os.path.join( this_dir , filename )
    loaded_model = np.load( full_path , allow_pickle=True ).item()
    return loaded_model
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Interpolate_Lines_Arrays_3D_grid( V_Arr , logNH_Arr , logta_Arr , x_Arr , Grid_Dictionary ):
    # Interpolate one normalized line profile per (V, logNH, logta) triple.
    grid    = Grid_Dictionary['Grid']
    v_grid  = Grid_Dictionary['V_Arr']
    x_grid  = Grid_Dictionary['x_Arr']
    nh_grid = Grid_Dictionary['logNH_Arr']
    ta_grid = Grid_Dictionary['logta_Arr']
    n_lines = len( V_Arr )
    lines_Arr = np.zeros( ( n_lines , len( x_Arr ) ) )
    for idx in range( n_lines ):
        # Trilinear interpolation in the grid, then resample onto x_Arr.
        raw_line  = Linear_3D_interpolator( V_Arr[idx] , logNH_Arr[idx] , logta_Arr[idx] , v_grid , nh_grid , ta_grid , grid )
        resampled = np.interp( x_Arr , x_grid , raw_line , left=0.0 , right=0.0 )
        # Normalize the profile to unit area over x_Arr.
        area = np.trapz( resampled , x_Arr )
        lines_Arr[idx] = np.absolute( resampled * 1. / area )
    return lines_Arr
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Interpolate_Lines_Arrays_3D_grid_MCMC( V_Value , logNH_Value , logta_Value , x_Arr , Grid_Dictionary ):
    # Single-combination version of Interpolate_Lines_Arrays_3D_grid, kept
    # lightweight so it is cheap to call repeatedly inside an MCMC/PSO loop.
    Grid_Line = Grid_Dictionary['Grid']
    V_Arr_Grid = Grid_Dictionary['V_Arr']
    x_Arr_Grid = Grid_Dictionary['x_Arr']
    logNH_Arr_Grid = Grid_Dictionary['logNH_Arr']
    logta_Arr_Grid = Grid_Dictionary['logta_Arr']
    # Trilinear interpolation in (V, logNH, logta), then resample onto x_Arr;
    # values outside the tabulated x range are set to 0.
    aux_line = Linear_3D_interpolator( V_Value , logNH_Value , logta_Value , V_Arr_Grid , logNH_Arr_Grid , logta_Arr_Grid , Grid_Line )
    axu_line_1 = np.interp( x_Arr , x_Arr_Grid , aux_line , left=0.0 , right=0.0 )
    # Normalize the profile to unit area over x_Arr.
    Integral = np.trapz( axu_line_1 , x_Arr )
    axu_line_1 = np.absolute( axu_line_1 * 1. / Integral )
    return axu_line_1
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def pre_treatment_Line_profile_MCMC( Geometry , V_Value , logNH_Value , ta_Value ):
    """
    Clip a single (V, logNH, ta) combination to the edges of the
    line-profile grid before interpolating.

    Returns the (possibly clipped) values plus a flag that is truthy only
    when all three inputs were finite.
    """
    Bool_good = np.isfinite( V_Value ) * np.isfinite( logNH_Value ) * np.isfinite( ta_Value )
    if Geometry in [ 'Bicone_X_Slab' ]:
        # The bicone grid has a forbidden corner at low velocity and high
        # column density; push such points to the nearest allowed edge.
        if V_Value <= 100.0 and logNH_Value >= 20.5 :
            slope = -100/1.5
            border_V = logNH_Value * slope + ( 100 - ( slope * 20.5 ) )
            if V_Value > border_V :
                V_Value = 100.0001
            else :
                logNH_Value = 20.4999999
    # Clip every parameter to lie strictly inside the tabulated ranges.
    if V_Value <= 10.0 : V_Value = 10.000001
    if V_Value >= 1000.0 : V_Value = 999.999999
    if logNH_Value < 17.0 : logNH_Value = 17.000001
    if logNH_Value >= 22.0 : logNH_Value = 21.999999
    if ta_Value < 10**(-3.75 ) : ta_Value = 10**(-3.749999999)
    if ta_Value >= 10**(-0.125) : ta_Value = 10**(-0.125000001)
    return V_Value , logNH_Value , ta_Value , Bool_good
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def RT_Line_Profile_MCMC( Geometry , wavelength_Arr , V_Value , logNH_Value , ta_Value , DATA_LyaRT ):
    '''
    Return one and only one Lyman alpha line profile for a given outflow
    configuration. This function is intended for MCMC or PSO loops.

    Parameters
    ----------
    Geometry : string
        The outflow geometry to use: 'Thin_Shell', 'Galactic_Wind' or
        'Bicone_X_Slab'.
    wavelength_Arr : 1-D sequence of floats
        Wavelengths where the line profile is computed, in meters
        (i.e. angstroms * 1.e-10).
    V_Value : float
        Expansion velocity of the outflow [km/s].
    logNH_Value : float
        log10 of the outflow neutral hydrogen column density [cm**-2].
    ta_Value : float
        Dust optical depth of the outflow.
    DATA_LyaRT : Dictionary
        Grid description, e.g. DATA_LyaRT = load_Grid_Line( 'Thin_Shell' ).

    Returns
    -------
    line_Arr : 1-D sequence of float
        The Lyman alpha line profile; all-nan when the inputs were not
        finite.
    '''
    V_Value , logNH_Value , ta_Value , Bool_good = pre_treatment_Line_profile_MCMC( Geometry , V_Value , logNH_Value , ta_Value )
    # BUG FIX: x_Arr is needed in both branches. Previously it was only
    # computed inside the Bool_good branch, so non-finite inputs raised a
    # NameError instead of returning the documented all-nan profile.
    x_Arr = convert_lamda_into_x( wavelength_Arr )
    if Bool_good :
        logta_Value = np.log10( ta_Value )
        line_Arr = Interpolate_Lines_Arrays_3D_grid_MCMC( V_Value , logNH_Value , logta_Value , x_Arr , DATA_LyaRT )
    else :
        line_Arr = np.ones( len(x_Arr) ) * np.nan
    return line_Arr
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def pre_treatment_Line_profile( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr ):
    '''
    Sanitize the inputs of RT_Line_Profile before the grids are
    interpolated.

    Non-finite (nan / +-inf) V-logNH-ta combinations are masked out and
    the surviving values are clipped in place to the edges of the
    line-profile grid.

    Returns the cleaned arrays (finite entries only), the matching
    inside-bicone booleans and the boolean mask of finite entries in the
    original input order.
    '''
    V_Arr = np.atleast_1d( V_Arr )
    logNH_Arr = np.atleast_1d( logNH_Arr )
    ta_Arr = np.atleast_1d( ta_Arr )
    V_Arr = V_Arr.astype(float)
    logNH_Arr = logNH_Arr.astype(float)
    ta_Arr = ta_Arr.astype(float)
    # Keep only entries where all three parameters are finite.
    bool1 = np.isfinite( V_Arr )
    bool2 = np.isfinite( logNH_Arr )
    bool3 = np.isfinite( ta_Arr )
    mask_good = bool1 * bool2 * bool3
    assert sum( mask_good ) != 0 , 'All the V-logNH-ta combinations are np.nan, -np.inf or np.inf'
    V_Arr_used = V_Arr[ mask_good ]
    logNH_Arr_used = logNH_Arr[ mask_good ]
    ta_Arr_used = ta_Arr[ mask_good ]
    #============================================#
    # NOTE(review): the escape-fraction pre-treatment applies this corner
    # clip only to 'Bicone_X_Slab'; here 'Thin_Shell' is included as well.
    # Confirm the difference is intentional.
    if Geometry in ['Thin_Shell' , 'Bicone_X_Slab']:
        bool1 = V_Arr_used < 100.0
        bool2 = logNH_Arr_used >= 20.5
        aux_V_arr = logNH_Arr_used * ( -100/1.5 ) + ( 100 - ( -100/1.5 * 20.5 ) )
        bool_aux = V_Arr_used > aux_V_arr
        V_Arr_used[ bool1 * bool2 * bool_aux ] = 100.000001
        logNH_Arr_used[ bool1 * bool2 *~ bool_aux ] = 20.499999
    #============================================#
    # Clip every parameter to lie strictly inside the grid edges.
    bool5 = V_Arr_used <= 10
    V_Arr_used[ bool5 ] = 10.000001
    bool6 = V_Arr_used >= 1000
    V_Arr_used[ bool6 ] = 999.9999
    bool7 = logNH_Arr_used <= 17.0
    logNH_Arr_used[ bool7 ] = 17.0000001
    bool8 = logNH_Arr_used >= 22.0
    logNH_Arr_used[ bool8 ] = 21.9999
    bool9 = ta_Arr_used <= 10**(-3.75)
    ta_Arr_used[ bool9 ] = 10**(-3.74999999)
    bool10 = ta_Arr_used >= 10**(-0.125)
    ta_Arr_used[ bool10 ] = 10**(-0.125000000001)
    if Inside_Bicone_Arr is None : Inside_Bicone_Arr = np.ones( len(V_Arr) )
    else: Inside_Bicone_Arr = np.atleast_1d( Inside_Bicone_Arr )
    In_Bool_used = Inside_Bicone_Arr[ mask_good ]
    return V_Arr_used , logNH_Arr_used , ta_Arr_used , In_Bool_used , mask_good
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def RT_Line_Profile( Geometry , wavelength_Arr , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr=None ):
    '''
    Return the Lyman alpha line profile for a given outflow properties.
    Parameters
    ----------
    Geometry : string
        The outflow geometry to use: Options: 'Thins_Shell',
        'Galactic_Wind' , 'Bicone_X_Slab'.
    wavelength_Arr : 1-D sequence of floats
        Array with the wavelength vales where the line
        profile is computed. The units are meters, i.e.,
        amstrongs * 1.e-10.
    V_Arr : 1-D sequence of float
        Array with the expansion velocity of the outflow. The unit
        are km/s.
    logNH_Arr : 1-D sequence of float
        Array with the logarithim of the outflow neutral hydrogen
        column density. The units of the colum density are in c.g.s,
        i.e, cm**-2.
    ta_Arr : 1-D sequence of float
        Array with the dust optic depth of the outflow.
    Inside_Bicone_Arr : optional 1-D sequence of bool
        This is useless if the geometry is not Bicone_X_Slab.
        An Array with booleans, indicating if the bicone is face-on
        or edge-on. If True then the bicone is face-on. If false the
        bicone is edge-on. The probability of being face on is
        np.cos( np.pi/4 ).
    .. versionadded:: 0.0.3
    Returns
    -------
    lines_Arr : 2-D sequence of float
        The Lyman alpha line profiles. lines_Arr[i] is the line profile
        computed at the wavelengths wavelength_Arr for wich V_Arr[i] ,
        logNH_Arr[i] , ta_Arr[i] , Inside_Bicone_Arr[i].
    '''
    assert Geometry in [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab' ] , 'The requested geoemtry ' + Geometry + ' is not available. The geometries supported are : Thin_Shell , Galactic_Wind , Bicone_X_Slab'
    V_Arr = np.atleast_1d( V_Arr )
    logNH_Arr = np.atleast_1d( logNH_Arr )
    ta_Arr = np.atleast_1d( ta_Arr )
    # Work in frequency (Doppler units); rows stay nan for rejected inputs.
    x_Arr = convert_lamda_into_x( wavelength_Arr )
    lines_Arr = np.zeros( len(V_Arr) * len( x_Arr ) ).reshape( len(V_Arr) , len( x_Arr ) ) * np.nan
    V_Arr_used , logNH_Arr_used , ta_Arr_used , In_Bool_used , mask_good = pre_treatment_Line_profile( Geometry , V_Arr , logNH_Arr , ta_Arr , Inside_Bicone_Arr )
    logta_Arr_used = np.log10( ta_Arr_used )
    ##############################
    # Shell and wind geometries use a single grid.
    if Geometry in [ 'Thin_Shell' , 'Galactic_Wind' ] :
        DATA_LyaRT = load_Grid_Line( Geometry )
        tmp_lines_Arr = Interpolate_Lines_Arrays_3D_grid( V_Arr_used , logNH_Arr_used , logta_Arr_used , x_Arr , DATA_LyaRT )
    ##############################
    # The bicone geometry needs two grids: face-on and edge-on sight lines.
    if Geometry in [ 'Bicone_X_Slab' ] :
        assert not Inside_Bicone_Arr is None , 'Error. Human is dead. Mismatch. \nIf the goemetry is Bicone_X_Slab then it is compulsory to define Inside_Bicone_Arr when colling this function.'
        Inside_Bicone_Arr = np.atleast_1d( Inside_Bicone_Arr )
        tmp_lines_Arr = np.zeros( len( V_Arr_used ) * len( x_Arr ) ).reshape( len( V_Arr_used ) , len( x_Arr ) )
        DATA_LyaRT_in  = load_Grid_Line( Geometry , INSIDE_BICONE=True  )
        DATA_LyaRT_out = load_Grid_Line( Geometry , INSIDE_BICONE=False )
        lines_Arr_in  = Interpolate_Lines_Arrays_3D_grid( V_Arr_used[ In_Bool_used] , logNH_Arr_used[ In_Bool_used] , logta_Arr_used[ In_Bool_used] , x_Arr , DATA_LyaRT_in  )
        lines_Arr_out = Interpolate_Lines_Arrays_3D_grid( V_Arr_used[~In_Bool_used] , logNH_Arr_used[~In_Bool_used] , logta_Arr_used[~In_Bool_used] , x_Arr , DATA_LyaRT_out )
        tmp_lines_Arr[ In_Bool_used] = lines_Arr_in
        tmp_lines_Arr[~In_Bool_used] = lines_Arr_out
    lines_Arr[ mask_good ] = tmp_lines_Arr
    return lines_Arr
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Print_the_grid_edges():
    """Print a human-readable summary of the parameter ranges covered by
    the escape-fraction and line-profile grids.

    Uses function-call print syntax, which is valid in both Python 2 and
    Python 3 for these single-argument calls (the statement form was
    Python-2 only).
    """
    print('')
    print(' Hi,')
    print('')
    print(' The expanssion velocity V_exp and neutral hydrogen column density logNH are the same in the escape fraction and line profile grids. However, the optical depth of dust tau_a is different.')
    print('')
    print(' V_exp [ km/s ] = [ 0 , 10 , ... , 90 , 100 , 150 , 200 , ... , 950 , 1000 ]')
    print('')
    print(' Bicone_X_Slab :')
    print('')
    print(' For V_exp < 100 km/s the logNH [ cm**-2 ] = [ 17.0 , 17.25 , ... , 20.25 , 20.5 ]')
    print(' ')
    print(' For V_exp >= 100 km/s the logNH [ cm**-2 ] = [ 17.0 , 17.25 , ... , 21.75 , 22.0 ]')
    print('')
    print(' Thin_Shell and Galactic_Wind :')
    print('')
    print(' logNH [ cm**-2 ] = [ 17.0 , 17.25 , ... , 21.75 , 22.0 ]')
    print('')
    print(' ')
    print(' For the escape fraction : tau_a = [ -3. , -2. , -1.5 , -1.0 , -0.75 , -0.5 , -0.25 , -0.0 ]')
    print(' ')
    print(' For the line profile : tau_a = [ -0.125 , -0.25 , -0.375 , -0.5 , -0.625 , -0.75 , -0.875 , -1.0 , -1.125 , -1.25 , -1.375 , -1.5 , -1.75 , -2.0 , -2.25 , -2.5 , -2.75 , -3.0 , -3.25 , -3.5 , -3.75 ]')
    print('')
    print(' Have a nice day!')
    print(' El. PSY. CONGROO.')
    print('')
    return
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Test_1( ):
    '''
    Smoke-test every supported combination of geometry, mode and
    algorithm for both the escape fraction and the line profiles,
    downloading the grid data first if it is missing. Failures are
    reported on stdout rather than raised.
    '''
    print '\nChecking if all the files are found...',
    bool_files = Check_if_DATA_files_are_found()
    print 'Done!'
    if bool_files :
        print ' Every file was found. that is great!'
    if not bool_files :
        print ' Missing files.... Let us download them... ;)'
        Download_data()
    print '\n Now that we are sure that the data is downloaded in your machine...'
    print '\n Let us check every different configuration for computing the escape fraction and the line profiles.'
    Geometry_set = [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab' ]
    ML_codes_set = [ 'Tree' , 'Forest' , 'KN' ]
    MODE_set = [ 'Parametrization' , 'Raw' , 'Analytic' ]
    Algorithm_set = [ 'Intrepolation' , 'Machine_Learning' ]
    # First check that the escape fractions work.
    N_points = int( 1e4 )
    # Random draws spanning the grid ranges: V in [0, 1000] km/s,
    # logNH in [17, 22], log(tau_a) in [-4, 0.5].
    V_Arr = np.random.rand( N_points ) * 1000 + 0.0
    logNH_Arr = np.random.rand( N_points ) * 5 + 17.0
    logta_Arr = np.random.rand( N_points ) * 4.5 - 4.0
    In_Arr = np.random.rand( N_points ) > 0.5
    print '\nComputing', N_points , 'random configurations of escape fraction with each algorithms...\n'
    for Geo in Geometry_set:
        for Mod in MODE_set :
            if not Mod in [ 'Analytic' ]:
                for Algo in Algorithm_set:
                    if Algo in [ 'Intrepolation' , 'Machine_Learning' ]:
                        if Algo == 'Machine_Learning' :
                            for machine in ML_codes_set :
                                try:
                                    print ' Running : ' , Geo , Mod , Algo , machine ,
                                    fff = RT_f_esc( Geo , V_Arr , logNH_Arr , 10**logta_Arr , Inside_Bicone_Arr=In_Arr , MODE=Mod , Algorithm=Algo , Machine_Learning_Algorithm=machine)
                                    assert np.sum( np.isnan( fff ) ) == 0
                                    print '--> Success!!'
                                except:
                                    print '--> ERROR. HUMAN IS DEAD. MISMATCH!!'
                        if Algo != 'Machine_Learning' :
                            try:
                                print ' Running : ' , Geo , Mod , Algo ,
                                fff = RT_f_esc( Geo , V_Arr , logNH_Arr , 10**logta_Arr , Inside_Bicone_Arr=In_Arr , MODE=Mod , Algorithm=Algo )
                                assert np.sum( np.isnan( fff ) ) == 0
                                print '--> Success!!'
                            except:
                                print '--> ERROR. HUMAN IS DEAD. MISMATCH!!'
            if Mod in [ 'Analytic' ]:
                try:
                    print ' Running : ' , Geo , Mod ,
                    fff = RT_f_esc( Geo , V_Arr , logNH_Arr , 10**logta_Arr , MODE=Mod )
                    assert np.sum( np.isnan( fff ) ) == 0
                    print '--> Success!!'
                except:
                    print '--> ERROR. HUMAN IS DEAD. MISMATCH!!'
    # Then check the line profiles (fewer points: they are more expensive).
    N_points = int( 1e3 )
    print '\nComputing', N_points , 'random configurations of line profile with each algorithms...\n'
    V_Arr = np.random.rand( N_points ) * 1000 + 0
    logNH_Arr = np.random.rand( N_points ) * 5 + 17.0
    logta_Arr = np.random.rand( N_points ) * 5.5 - 4.75
    In_Arr = np.random.rand( N_points ) > 0.5
    wavelength_Arr = np.linspace( 1215.68 - 20 , 1215.68 + 20 , 1000 ) * 1e-10
    RUN_TEST_Lines = True
    if RUN_TEST_Lines :
        for Geo in Geometry_set:
            print ' Running : ' , Geo ,
            try:
                qq = RT_Line_Profile( Geo , wavelength_Arr , V_Arr , logNH_Arr , 10**logta_Arr , Inside_Bicone_Arr=In_Arr )
                assert np.sum( np.isnan( qq ) ) == 0
                print '--> Success!!'
            except:
                print '--> ERROR. HUMAN IS DEAD. MISMATCH!!'
    return
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Test_2( ):
    '''
    Produce a small demo: line profiles and escape fractions for a few
    expansion velocities in every geometry, plotted with matplotlib
    (pylab). Blocks on plt.show() at the end.
    '''
    from pylab import *
    print '\n Let us make some plots. This will show you just a glimpse of what LyaRT;Grid can do. Just wait for it...'
    # Plot some nice line profiles
    print '\n Plotting some line profiles...'
    wavelength_Arr = np.linspace( 1215.68 - 20 , 1215.68 + 20 , 1000 ) * 1e-10
    V_Arr = np.array( [ 10 , 50 , 100 , 200 , 300 ] )
    logNH_Arr = np.array( [ 20.0 ] * len( V_Arr ) )
    logta_Arr = np.array( [ -1. ] * len( V_Arr ) )
    # All sight lines face-on for the line-profile panels.
    Inside_Bicone_Arr = np.zeros( len(V_Arr) ) == 0
    cm = get_cmap( 'rainbow' )
    for geo in [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab' ]:
        qq = RT_Line_Profile( geo , wavelength_Arr , V_Arr , logNH_Arr , 10.**logta_Arr , Inside_Bicone_Arr=Inside_Bicone_Arr )
        figure()
        ax_ax = subplot(111)
        for i in range( 0 ,len( V_Arr ) ):
            ax_ax.plot( wavelength_Arr*1e10 , qq[i] , color=cm( i*1./( len(V_Arr) -1 ) ) , label=r'$\rm V_{exp} = '+ str(V_Arr[i]) +'km/s$ ' , lw=2 )
        texto = r'$\rm N_{H} = 10^{20} cm^{-2}$' + '\n' + r'$\rm \tau_{a} = 0.1$'
        ax_ax.text( .95 , 0.45 , texto , verticalalignment='top', horizontalalignment='right', transform=ax_ax.transAxes, fontsize=20 )
        ax_ax.set_title( r'$\rm Geometry = $' + geo , size=20 )
        ax_ax.set_ylabel( r'$\rm Flux [a.u.]$' , size=20 )
        ax_ax.set_xlabel( r'$\rm Wavelength [\AA]$' , size=20 )
        ax_ax.set_xlim( 1212.5 , 1222.5 )
        ax_ax.legend(loc=0)
    print '\n Plotting some escape fractions...'
    logta_Arr = np.linspace( -2 , 0.5 , 20 )
    logNH_Arr = [20.0] * len( logta_Arr )
    for geo in [ 'Thin_Shell' , 'Galactic_Wind' , 'Bicone_X_Slab' ] :
        figure()
        ax_ax = subplot(111)
        for i in range( 0 , len(V_Arr) ):
            V_Arr_tmp = [ V_Arr[i] ] * len( logta_Arr )
            # Solid curves: face-on sight lines.
            Inside_Bicone_Arr = np.zeros( len( logta_Arr ) ) == 0
            f_esc = RT_f_esc( geo , V_Arr_tmp , logNH_Arr , 10**logta_Arr , Inside_Bicone_Arr=Inside_Bicone_Arr)
            ax_ax.plot( logta_Arr , f_esc , color=cm( i*1./( len(V_Arr) -1 ) ) , label=r'$\rm V_{exp} = '+ str(V_Arr[i]) +'km/s$ ' , lw=2 )
            # Dashed curves: edge-on sight lines.
            Inside_Bicone_Arr = np.zeros( len( logta_Arr ) ) == 1
            f_esc = RT_f_esc( geo , V_Arr_tmp , logNH_Arr , 10**logta_Arr , Inside_Bicone_Arr=Inside_Bicone_Arr)
            ax_ax.semilogy( logta_Arr , f_esc , '--' , color=cm( i*1./( len(V_Arr) -1 ) ) , lw=2 )
        ax_ax.set_xlabel( r'$\rm \log \tau_a$' , size=20 )
        ax_ax.set_ylabel( r'$f_{\rm esc} ^{\rm Ly \alpha} $' , size=20 )
        texto = r'$\rm N_{H} = 10^{20} cm^{-2}$'
        ax_ax.text( .5 , 0.05 , texto , verticalalignment='bottom', horizontalalignment='left', transform=ax_ax.transAxes, fontsize=20 )
        ax_ax.set_title( r'$\rm Geometry = $' + geo , size=20 )
        legend( loc=0 )
    show()
    return
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
def Test_Installation( Make_Plots=True ):
    """Run the installation sanity checks; optionally show the demo plots."""
    import warnings
    # Silence noisy third-party warnings during the checks.
    warnings.filterwarnings( "ignore" )
    Test_1()
    if Make_Plots :
        Test_2()
    return
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
#====================================================================#
# Importing this module performs no work; the public entry points are
# RT_f_esc, RT_Line_Profile and the Test_* helpers.
if __name__ == '__main__':
    pass
| [
"sidgurung@cefca.es"
] | sidgurung@cefca.es |
7d727d63a6bed53b8120d60676ce73974e262685 | 2a7b38b5ffba0ed59e62a3ccc40420f9bb90952b | /shiptrack/apps.py | 264c943e6e20e9cd64eb2fc3b3a9cf0cbeeb0c16 | [] | no_license | tsyork/backoffice | fc2ec80b514c66aa169fec53bdb149763fe97fb1 | fb92335fc4af141db3d22907170629e022870c40 | refs/heads/master | 2020-03-17T22:36:32.554506 | 2018-05-18T23:51:55 | 2018-05-18T23:51:55 | 134,011,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | from django.apps import AppConfig
class ShiptrackConfig(AppConfig):
    """Django AppConfig for the ``shiptrack`` application."""
    name = 'shiptrack'
| [
"tsyork@gmail.com"
] | tsyork@gmail.com |
096919f23bdcda229ed1005472fe927f7b72a686 | 7947e1098705fd74a275fb7c5f518e6803699372 | /watchdocs.py | eeb255a437af8df8b2656576bca11759a9bf8425 | [] | no_license | akashmore/watchdocs | 6d9a7804b74e9cb73463aa97b914a35c0a9c312a | 3c765916daa1b1766c1cfcc294f1adfdac3bd1dc | refs/heads/master | 2021-04-15T13:36:00.315039 | 2018-05-02T05:41:29 | 2018-05-02T05:41:29 | 103,504,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,047 | py | from flask import *
from flask import Flask, render_template
import os, sys, json, nltk, gensim, pickle
from gensim import corpora, models, similarities
from flask import Flask, request, redirect, url_for
from werkzeug.utils import secure_filename
from pymongo import MongoClient
from flask import send_from_directory
from rake_nltk import Rake
import textrazor
# database connection
# NOTE(review): the connection parameters and upload path below are
# hard-coded for a single development machine -- they should come from
# configuration / environment variables.
client = MongoClient('localhost', 27017)
mydb = client['watchdoc']            # application database
collection = mydb['doccat']          # document/category collection
UPLOAD_FOLDER = 'C://Users//champ//Documents//watchdocgit//upload'
# UPLOAD_FOLDER='D:\\Study\\Final Year Sem 1\\Project\\watchdocs-master\\upload'
# start app
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# index
@app.route('/')
@app.route('/index')
def index():
    """Serve the landing page of the application."""
    template_name = 'index.html'
    return render_template(template_name)
# upload file
@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
    # Save the POSTed file into UPLOAD_FOLDER, retrain the Word2Vec model
    # every 20 uploads, then redirect to the categorization endpoint.
    if request.method == 'POST':
        # NOTE(review): 'file' shadows the Python 2 builtin of the same
        # name (used elsewhere in this module) -- rename when refactoring.
        file = request.files['file']
        filename = secure_filename(file.filename)
        # print(filename)
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        #for checking file count and tran model
        fileCount = len([name for name in os.listdir(app.config['UPLOAD_FOLDER']) if os.path.isfile(os.path.join(app.config['UPLOAD_FOLDER'], name))])
        if(fileCount%20==0):
            # Rebuild the Word2Vec model from the training corpus.
            # NOTE(review): this path is hard-coded and, because of the
            # "\U..." sequence, is only a valid string literal in Python 2.
            filelist = list()
            path = "C:\Users\champ\Documents\watchdocgit\Testdocuments"
            for file in os.listdir(path):
                filepath = path + '\\' + file
                f = open(filepath, 'r')
                filecontents = f.read()
                filecontents = filecontents.split()
                filelist.append(filecontents)
                f.close()
            model = gensim.models.Word2Vec(filelist, min_count=5, size=200, window=5, workers=4)
            model.save("modelfile")
        return redirect(url_for('categarize', filename=filename))
# categorize file
@app.route('/categarize/<filename>')
def categarize(filename):
    # Analyze an uploaded document with the TextRazor API: extract
    # entities (keywords), topics and news-code categories, persist them
    # in MongoDB and return the top category as JSON.
    finalcat = []
    finalscore = []
    # SECURITY(review): API key hard-coded in source -- move it to
    # configuration / an environment variable and rotate the key.
    textrazor.api_key = "f8656917eff9fdb7989aafbb22a8c8e1b74ebd076f1040c75de4dfcc"
    client = textrazor.TextRazor(extractors=["entities", "topics"])
    # client.set_cleanup_mode("cleanHTML")
    path = app.config['UPLOAD_FOLDER'] + '//' + filename
    client.set_classifiers(["textrazor_newscodes"])
    #input_file = file(path).read().decode("ISO-8859-1")
    input_file = file(path).read().decode("utf-8")
    r = Rake()
    r.extract_keywords_from_text(input_file)
    #print(r.get_ranked_phrases())
    # Keep the first 100 characters as a preview snippet.
    startLines = input_file[0:100]
    #print(startLines)
    # os.system("LSA.py")
    response = client.analyze(input_file)
    # Deduplicate entities, keeping the most relevant ones first.
    entities = list(response.entities())
    entities.sort(key=lambda x: x.relevance_score, reverse=True)
    seen = set()
    keywords = list()
    info = list()
    for entity in entities:
        if entity.id not in seen:
            #print (entity.id, entity.relevance_score, entity.confidence_score, entity.freebase_types)
            seen.add(entity.id)
            keywords.append(entity.id)
    mydb.keywords.insert({"keywords": keywords, "name": filename})
    print("--------------------------------------------")
    # Keep only reasonably confident topics.
    topiclist = list()
    for topic in response.topics():
        if topic.score > 0.3:
            #print (topic.label)
            topiclist.append(topic.label)
            mydb.topic.insert({"topic": topic.label})
    print("------------------------------------------------------")
    categorylist = list()
    try:
        # k/s always hold the FIRST category returned, which is what gets
        # reported and stored as the classification.
        for category in response.categories():
            alterLabel = (category.label).split(">")
            finalcat.append(alterLabel[-1])
            finalscore.append(category.score)
            k = finalcat[0]
            s = finalscore[0]
            print(category.label)
            # print(alterLabel[-1])
            # print category.score
            categorylist.append(alterLabel[-1])
            mydb.category.insert({"category": alterLabel[-1]})
            mydb.doccat.insert({"classified": k, "Document": filename, "Score": s, "startLines": startLines})
            mydb.record.insert(
                {"name": filename, "description": [{"keywords": keywords, "topic": topiclist, "category": categorylist}]})
            output = "Category : " + str(k)
        return jsonify(result=output)
    except:
        return jsonify(result="unable to categarize")
@app.route('/search', methods=['POST', 'GET'])
def search():
jsonResultDocuments = {}
docScore = {}
resultdocuments = {}
if request.method == 'POST':
keyword = request.form['searchkeyword']
keyword = keyword.lower()
model = gensim.models.Word2Vec.load("modelfile")
try:
resultlist = model.most_similar(keyword)
#print(resultlist)
numberofresults = 7
# print "Words similar to searched keyword are"
for i in range(numberofresults):
print resultlist[i][0]
for document in collection.find():
category = document['classified'].lower()
if (keyword in category or category in keyword):
docScore[document['Document']] = document['Score'], document['startLines']
# print(docScore)
for i in range(len(resultlist)):
if (resultlist[i][0] in category or category in resultlist[i][0]):
docScore[document['Document']] = document['Score'], document['startLines']
# print(docScore)
for key, value in sorted(docScore.iteritems(), key=lambda (k, v): (v, k), reverse=True):
resultdocuments[key] = value[1]
except:
print("keywordnot found")
jsonResultDocuments["files"] = resultdocuments
if len(resultdocuments)==0:
resultOutput = 0
else:
resultOutput = 1
return redirect(url_for('searchResult',filejson=json.dumps(jsonResultDocuments),is_result = resultOutput))
@app.route('/searchResult/<filejson>/<is_result>')
def searchResult(filejson,is_result):
# print(filejson)
responejson = json.loads(filejson)
fileArray = responejson["files"]
show_result = is_result
# print(fileArray)
return render_template('search.html', files=fileArray,show_div=int(show_result))
# for reading files
@app.route('/fileread', methods=['GET', 'POST'])
def fileReading():
if request.method == 'GET':
requestFile = request.args.get('filename')
return send_from_directory(app.config['UPLOAD_FOLDER'], requestFile)
#for donloading files
@app.route('/filedownload', methods=['GET', 'POST'])
def fileDownloading():
if request.method == 'GET':
downloadFile = request.args.get('filename')
path = app.config['UPLOAD_FOLDER'] + '//' + downloadFile
return send_from_directory(app.config['UPLOAD_FOLDER'], downloadFile)
# main function
if __name__ == '__main__':
app.run(debug=True)
| [
"akashmore1997.am85@gmail.com"
] | akashmore1997.am85@gmail.com |
da82acbcd117e45f112e60a7143e73c3ac089d57 | 01c169a4b24581197adbcb7878cd8633f5181263 | /myvenv/Lib/site-packages/userroles/__init__.py | 7bfdc29b1eaf2dd2f150040d1f8eae9db953990a | [] | no_license | jerinzam/printoProject | 663c7a8914b1932d4e355c75ecda721f52f8092a | beeccbee92ca66040f74e01569fc124689f519b6 | refs/heads/master | 2021-01-10T09:01:09.751113 | 2015-10-02T07:43:31 | 2015-10-02T07:43:31 | 43,541,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,656 | py | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import importlib
try:
unicode = unicode
except NameError:
# 'unicode' is undefined, must be Python 3
str = str
unicode = str
bytes = bytes
basestring = (str,bytes)
else:
# 'unicode' exists, must be Python 2
str = str
unicode = unicode
bytes = str
basestring = basestring
__version__ = '0.1.0'
_IMPORT_FAILED = "Could not import role profile '%s'"
_INCORRECT_ARGS = "USER_ROLES should be a list of strings and/or two-tuples"
def _import_class_from_string(class_path):
"""
Given a string like 'foo.bar.Baz', returns the class it refers to.
If the string is empty, return None, rather than raising an import error.
"""
if not class_path:
return None
module_path, class_name = class_path.rsplit('.', 1)
return getattr(importlib.import_module(module_path), class_name)
class Role(object):
"""
A single role, eg as returned by `roles.moderator`.
"""
def __init__(self, name):
self.name = name
def __unicode__(self):
return self.name
class Roles(object):
_roles_dict = None
@property
def roles_dict(self):
"""
Load list style config into dict of {role_name: role_class}
"""
if self._roles_dict is None:
self._roles_dict = {}
for item in self._config:
if isinstance(item, basestring):
# An item like 'manager'
self._roles_dict[item] = None
else:
# Anything else
raise ImproperlyConfigured(_INCORRECT_ARGS)
return self._roles_dict
@property
def choices(self):
"""
Return a list of two-tuples of role names, suitable for use as the
'choices' argument to a model field.
"""
return [(role, role) for role in self.roles_dict.keys()]
def __init__(self, config=None):
"""
By default the Roles object will be created using configuration from
the django settings file, but you can also set the configuration
explicitly, for example, when testing.
"""
self._config = config or getattr(settings, 'USER_ROLES', ())
def __getattr__(self, name):
"""
Handle custom properties for returning Role objects.
For example: `roles.moderator`
"""
if name in self.roles_dict.keys():
return Role(name=name)
else:
raise AttributeError("No such role exists '%s'" % name)
roles = Roles()
| [
"jerinzam@gmail.com"
] | jerinzam@gmail.com |
9c636a5cfb11bc65fdae5558924bb4c4ae5f6632 | fbabf6f4b411355c01289bf218b17f0a8628ee04 | /models/posenet.py | fd151a4cac3dbff45d5f57e1d14f66d67d57c588 | [
"BSD-3-Clause"
] | permissive | princeton-vl/pytorch_stacked_hourglass | 823978cb2a4a7c22a03fca67bf9fc0858cf03125 | ceedc14b9b8814ba641f4a00baa5d0af153588a9 | refs/heads/master | 2023-08-04T06:20:49.509962 | 2023-07-19T17:02:10 | 2023-07-19T17:02:10 | 207,642,604 | 455 | 110 | BSD-3-Clause | 2023-07-19T17:02:12 | 2019-09-10T19:10:52 | Python | UTF-8 | Python | false | false | 2,432 | py | import torch
from torch import nn
from models.layers import Conv, Hourglass, Pool, Residual
from task.loss import HeatmapLoss
class UnFlatten(nn.Module):
def forward(self, input):
return input.view(-1, 256, 4, 4)
class Merge(nn.Module):
def __init__(self, x_dim, y_dim):
super(Merge, self).__init__()
self.conv = Conv(x_dim, y_dim, 1, relu=False, bn=False)
def forward(self, x):
return self.conv(x)
class PoseNet(nn.Module):
def __init__(self, nstack, inp_dim, oup_dim, bn=False, increase=0, **kwargs):
super(PoseNet, self).__init__()
self.nstack = nstack
self.pre = nn.Sequential(
Conv(3, 64, 7, 2, bn=True, relu=True),
Residual(64, 128),
Pool(2, 2),
Residual(128, 128),
Residual(128, inp_dim)
)
self.hgs = nn.ModuleList( [
nn.Sequential(
Hourglass(4, inp_dim, bn, increase),
) for i in range(nstack)] )
self.features = nn.ModuleList( [
nn.Sequential(
Residual(inp_dim, inp_dim),
Conv(inp_dim, inp_dim, 1, bn=True, relu=True)
) for i in range(nstack)] )
self.outs = nn.ModuleList( [Conv(inp_dim, oup_dim, 1, relu=False, bn=False) for i in range(nstack)] )
self.merge_features = nn.ModuleList( [Merge(inp_dim, inp_dim) for i in range(nstack-1)] )
self.merge_preds = nn.ModuleList( [Merge(oup_dim, inp_dim) for i in range(nstack-1)] )
self.nstack = nstack
self.heatmapLoss = HeatmapLoss()
def forward(self, imgs):
## our posenet
x = imgs.permute(0, 3, 1, 2) #x of size 1,3,inpdim,inpdim
x = self.pre(x)
combined_hm_preds = []
for i in range(self.nstack):
hg = self.hgs[i](x)
feature = self.features[i](hg)
preds = self.outs[i](feature)
combined_hm_preds.append(preds)
if i < self.nstack - 1:
x = x + self.merge_preds[i](preds) + self.merge_features[i](feature)
return torch.stack(combined_hm_preds, 1)
def calc_loss(self, combined_hm_preds, heatmaps):
combined_loss = []
for i in range(self.nstack):
combined_loss.append(self.heatmapLoss(combined_hm_preds[0][:,i], heatmaps))
combined_loss = torch.stack(combined_loss, dim=1)
return combined_loss
| [
"cnris@vl-fb.eecs.umich.edu"
] | cnris@vl-fb.eecs.umich.edu |
e37ba6dd323fe1f352194c9517273785de8985bf | e0074687439148922b90e363d5d6b813edc3c7d0 | /CPFL.py | 1d904fd4ad0a5c3625262d278eaf9a179884fb51 | [] | no_license | beraldonico/Aquisicao-de-boletos-com-selenium | 047577f6596d7900e37e918e8f201f35d91de87c | f54cded24cc4da1090e0eeadfb202cc1f8adf409 | refs/heads/main | 2023-02-07T13:47:56.118530 | 2020-12-29T15:36:50 | 2020-12-29T15:36:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,844 | py | # Essencial to open browser
from selenium import webdriver
# Helps to find element
from selenium.webdriver.common.by import By
# Set wait rules
from selenium.webdriver.support.ui import WebDriverWait
# Set conditional rules
from selenium.webdriver.support import expected_conditions as EC
# Essencial to close terminal
import os
import signal
import time
import glob
# Essencial to control mouse and keyboard
from pynput.mouse import Button
from pynput.mouse import Controller
# Variables
URL = 'https://servicosonline.cpfl.com.br/agencia-webapp/#/login'
# Marcos
Email =
Senha =
# Set mouse automation
mouse = Controller()
# Load browser WebDriver ( Chrome or Firefox)
driver = webdriver.Chrome()
#driver = webdriver.Firefox()
# Set usable wait time
wait = WebDriverWait(driver, 10)
# Open browser on page
driver.get(URL)
# Complete "E-mail" information
# Select the "E-mail" box
Email_box = wait.until(EC.presence_of_element_located((By.ID, 'documentoEmail')))
# Fill the box
Email_box.send_keys(Email)
# Complete "Senha" information
# Select the "Senha" box
Senha_box = wait.until(EC.presence_of_element_located((By.ID, 'password')))
# Fill the box
Senha_box.send_keys(Senha)
# Select "Entrar" button
Entrar_button = wait.until(EC.element_to_be_clickable((By.XPATH, "//button[@translate = '@APP-LOGIN-BTN-ENTRAR']")))
# Click button
Entrar_button.click()
time.sleep(5)
driver.get('https://servicosonline.cpfl.com.br/agencia-webapp/#/historico-contas')
# Select "Selecionar Todas" checkbox
Todos_checkbox = wait.until(EC.element_to_be_clickable((By.XPATH, "//label[@translate = '@APP-HISTORICO-CONTAS-CHECKBOX-SELECIONAR-TODAS']")))
# Confirm selection
Todos_checkbox.click()
# Select "Salvar Conta" button
Salvar_button = wait.until(EC.element_to_be_clickable((By.ID, 'btnSalvarContasPDF')))
# clic button
Salvar_button.click() | [
"noreply@github.com"
] | noreply@github.com |
1a8d72d16bb59fdf20bee4bf93d582a162afbc26 | 24312d6308760df1e8e7f3e446e828bfd333576c | /preprocess_gist_files.py | 1c61e96de4c8ec676e55e75dc7fd28c029de1fbb | [] | no_license | liting12/EmotionDetection | 827fa459cb18460b380f802f4f3085bd58f8662d | 45a2dcae5f20835e71dc6548d5e7f459217682b7 | refs/heads/master | 2021-09-12T22:04:48.150014 | 2018-04-21T10:42:43 | 2018-04-21T10:42:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | with open("../tmp/filenames.txt", 'r') as f:
with open("../tmp/gist.txt", 'r') as g:
files = f.readlines();
gists = g.readlines();
with open("../gists.txt", 'w') as s:
for i in range(0, len(files)):
s.write(files[i].strip().split('/')[-1]+":"+str(gists[i])) | [
"amritsinghal97@gmail.com"
] | amritsinghal97@gmail.com |
48c0fa3c02b94ef7d4860dcf8193efc152f59b9e | f28ef7c72a56a2a732bee3e42506c96bb69edee8 | /old_scripts/stocks_data.py | f9a03cddc88b57d6fb4ab645a0f58a8230321f1b | [] | no_license | webclinic017/backtrader_stocks_api | cb92311a1069199e61acc547ec69941ba861d4e6 | e489724e7a30bb915657244bf12e55ad2f484832 | refs/heads/main | 2023-03-26T05:40:53.584824 | 2021-03-10T07:53:35 | 2021-03-10T07:53:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,442 | py | from fastquant import get_stock_data, backtest
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from datetime import date, timedelta
#array: [open, high, low, close, volume]
class ticker_data():
def __init__(self, ticker, date_range='null'):
self.name = ticker.upper()
# format date_range : ["2018-01-01", "2019-01-01"]
self.date_range = date_range
#self.period = period
self.data_np, self.data_pd = self.get_ticker_data()
self.highs, self.lows, self.open, self.close, self.volume = self.get_constants_from_data()
self.dates = self.get_dates()
self.med_price = self.get_med_price()
self.sma5 = self.get_slow_moving_average(p=5)
#self.sma5 = self.get_awesome_ossilator(p=5)
self.sma34 = self.get_slow_moving_average(p=34)
self.AO = self.get_awesome_oss()
self.jaw = self.calculate_alligator(13,8)#522
self.teeth = self.calculate_alligator(8,5)#519 perfect
self.lips = self.calculate_alligator(5,3)#517
def calculate_alligator(self,N, start):
#### broke but on the right track
# if start 8, shift array 8 left and last 8=0 and start iter
#med price has 1525, shift 8 group 13
#start at 13+8=23 to grab all
arr = []
length = len(self.med_price)
med = self.med_price
begin = N
#smma = sum(self.med_price[length - N:]) / N
#arr.append(smma)
for i in range(begin, length):
if i == begin:
smma = sum(med[i-N:i]) / N
arr.append(smma)
if i != begin:
prev_sum = arr[-1] * N
sma = sum(med[i - N:i]) / N
smma = ( prev_sum - arr[-1] + sma) / N
arr.append(smma)
# they all have diff sma periods, 13,8, 5 being smallest and limit, prepend N zeroes
print('pre',len(arr))
diff = N - start
for b in range(diff):
arr.insert(0,0)
for f in range(start):
arr.append(0)
return arr
def get_awesome_oss(self):
print(len(self.med_price))
#len med prices = 1525
ao = []
length = len(self.sma34)
for i in reversed(range(length)):
sma_diff = self.sma5[i] - self.sma34[i]
ao.append(sma_diff)
return ao[::-1]
def get_slow_moving_average(self, p):
sma_arrs = []
#reverse to capture newest date back 1525-0; 1525-30
length = len(self.med_price)
for i in reversed(range(p, length)):
period_arr = self.med_price[i-p:i]
sma = sum(period_arr)/p
sma_arrs.append(sma)
missing = length
while len(sma_arrs) < missing:
sma_arrs.append(0)
return sma_arrs[::-1]
'''for i in reversed(range(self.period)):#reverse range of 90
sma_arr = []
#start 90, so need 89,88,
for b in range(i, self.period - p ):
sma_arr.append(self.med_price[b])
if len(sma_arr) == p:
sma = sum(sma_arr) / p
arr.append(sma)
sma_arr = []
print('sma',sma)
return arr'''
def get_med_price(self):
med_prices = []
for i in range(len(self.lows)):
med = (self.highs[i] + self.lows[i]) /2
print('med_price', med)
med_prices.append(med)
return med_prices
def get_ticker_data(self):
if(self.name):
today = date.today()
yesterday = today - timedelta(days = 1)
try:
pd_data = get_stock_data(self.name, "2017-01-01", yesterday)
np_data = pd_data.values
except Exception as e:
print('get stock data error, query misformed line 20')
print(e)
return np_data, pd_data
def get_constants_from_data(self):
opens = []
close = []
high = []
low = []
volume = []
data = self.data_np
for i in range(len(data)):
opens.append(data[i][0])
high.append(data[i][1])
low.append(data[i][2])
close.append(data[i][3])
volume.append(data[i][4])
return high, low, opens, close, volume
def get_dates(self):
data = self.data_pd
dates = []
for i in range(len(data.index)):
dates.append(data.iloc[i].name)
return dates
if __name__ == '__main__':
ticker = ticker_data('tsla')
'''plt.bar(range(90), ticker.AO)
plt.plot(range(90), ticker.sma5)
plt.plot(range(90), ticker.sma34)
plt.plot(range(90), ticker.med_price[len(ticker.med_price)-90:] )
plt.show()
plt.plot(range(90), ticker.close[len(ticker.close)-90:] )
plt.plot(range(90), ticker.open[len(ticker.open)-90:] )
plt.plot(range(90), ticker.highs[len(ticker.highs)-90:] )
plt.plot(range(90), ticker.lows[len(ticker.lows)-90:] )
plt.show()
plt.plot(range(90), ticker.volume[len(ticker.volume)-90:] )
plt.show()'''
print('len', len(ticker.med_price))
plt.plot(ticker.sma34)
plt.plot(ticker.sma5)
plt.bar(range(len(ticker.AO)),ticker.AO)
plt.show()
plt.plot(ticker.jaw)
plt.plot(ticker.teeth)
plt.plot(ticker.lips)
plt.show() | [
"noreply@github.com"
] | noreply@github.com |
75ce3bc0628729fd0e6e556ca3a05bc9eb9a5ee2 | f86400e8dcf78ef9ebb9e291dcd8b2f102c18cda | /pwn.py | dfaf7f7ae179e428dfb27edc5eacd3d878675ab4 | [] | no_license | TimzG3/DDoSTool | 22765030be8661e06c6b2710decc5afa14e77d77 | 483956389a4a6337b38dcd5c90090d02f917fb80 | refs/heads/main | 2023-01-01T03:24:26.108923 | 2020-10-26T14:40:58 | 2020-10-26T14:40:58 | 307,401,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | # ~ DDoSTool ~
# Any question? HMU on https://mastodon.lol
from sys import argv
from os import system
def main():
system("ping %s -f" %argv[1])
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | noreply@github.com |
709bdcbe4ebff07af6549d6a424c06a4abae09ed | 585380c7caa6475572dc2ae86cb36b2f2a4c1520 | /python/977_Squares_of_a_Sorted_Array.py | 3c661912a9609dc76f7bc810bfeab669a830c614 | [
"MIT"
] | permissive | allenphilip93/leetcode | 1bd3453fe051666171802460af543f44835419fa | 5a52cd9b54553d68ee10cfe87044bed79ee5fb93 | refs/heads/master | 2023-05-11T20:05:28.481646 | 2023-03-22T01:31:55 | 2023-03-22T01:31:55 | 191,033,976 | 1 | 0 | MIT | 2019-06-09T17:11:19 | 2019-06-09T17:11:19 | null | UTF-8 | Python | false | false | 865 | py | class Solution(object):
# def sortedSquares(self, A):
# """
# :type A: List[int]
# :rtype: List[int]
# """
# # Directly sort
# return sorted(x * x for x in A)
def sortedSquares(self, A):
pos = 0
while pos < len(A) and A[pos] < 0:
pos += 1
# pos point to first positve
# npos point to larget negative
npos = pos - 1
res = []
while pos < len(A) and npos >= 0:
if A[npos] ** 2 < A[pos] ** 2:
res.append(A[npos] ** 2)
npos -= 1
else:
res.append(A[pos] ** 2)
pos +=1
while npos >= 0:
res.append(A[npos] ** 2)
npos -= 1
while pos < len(A):
res.append(A[pos] ** 2)
pos += 1
return res
| [
"qiyuan.gong@intel.com"
] | qiyuan.gong@intel.com |
c36579c307b89115c28ff265ee689405fca26c0d | 0c5b96d5b778d7f54854d03948bb8ef1336a0350 | /DocBank/lista/admin.py | 64cc46e14eb648c80810f6913dfa7a06e040cd55 | [] | no_license | GabrielEstevezReyes/PythonP | 3628463d3a6c11e93a8220c5e4dd4005d378d65a | 8df1f7c83a918ccfe4f2df649287d01305650fe1 | refs/heads/master | 2020-09-13T14:55:42.657986 | 2019-11-20T01:17:04 | 2019-11-20T01:17:04 | 222,822,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | from django.contrib import admin
from .models import lista
# Register your models here.
admin.site.register(lista) | [
"gabriel.axel25@gmail.com"
] | gabriel.axel25@gmail.com |
d93af998f22f0599ae05964e40bf4946e07934db | dd4d1a61ec680a86d4b569490bf2a898ea0d7557 | /appengine/findit/model/test/wf_swarming_task_test.py | f6921dfc809d6a8bcf5b6cc3326292e6c1424897 | [
"BSD-3-Clause"
] | permissive | mcgreevy/chromium-infra | f1a68914b47bcbe3cd8a424f43741dd74fedddf4 | 09064105713603f7bf75c772e8354800a1bfa256 | refs/heads/master | 2022-10-29T23:21:46.894543 | 2017-05-16T06:22:50 | 2017-05-16T06:22:50 | 91,423,078 | 1 | 1 | BSD-3-Clause | 2022-10-01T18:48:03 | 2017-05-16T06:23:34 | Python | UTF-8 | Python | false | false | 1,690 | py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from model.wf_swarming_task import WfSwarmingTask
class WfSwarmingTaskTest(unittest.TestCase):
def testClassifiedTests(self):
task = WfSwarmingTask.Create('m', 'b', 121, 'browser_tests')
task.tests_statuses = {
'TestSuite1.test1': {
'total_run': 2,
'SUCCESS': 2
},
'TestSuite1.test2': {
'total_run': 4,
'SUCCESS': 2,
'FAILURE': 2
},
'TestSuite1.test3': {
'total_run': 6,
'FAILURE': 6
},
'TestSuite1.test4': {
'total_run': 6,
'SKIPPED': 6
},
'TestSuite1.test5': {
'total_run': 6,
'UNKNOWN': 6
}
}
expected_classified_tests = {
'flaky_tests': ['TestSuite1.test2', 'TestSuite1.test1'],
'reliable_tests': ['TestSuite1.test3', 'TestSuite1.test4'],
'unknown_tests': ['TestSuite1.test5']
}
self.assertEqual(expected_classified_tests, task.classified_tests)
self.assertEqual(expected_classified_tests['reliable_tests'],
task.reliable_tests)
self.assertEqual(expected_classified_tests['flaky_tests'],
task.flaky_tests)
def testStepName(self):
master_name = 'm'
builder_name = 'b'
build_number = 123
expected_step_name = 's'
task = WfSwarmingTask.Create(
master_name, builder_name, build_number, expected_step_name)
self.assertEqual(expected_step_name, task.step_name) | [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
3766f9a5133652056ebd9b6b6bc0c4f68515983c | f2cb9b54e51e693e1a1f1c1b327b5b40038a8fbe | /src/bin/shipyard_airflow/tests/unit/plugins/test_deckhand_client_factory.py | 044f4cc7ae96a556bd1cc726789890d7c1abce2c | [
"Apache-2.0"
] | permissive | airshipit/shipyard | 869b0c6d331e5b2d1c15145aee73397184290900 | 81066ae98fe2afd3a9c8c5c8556e9438ac47d5a2 | refs/heads/master | 2023-08-31T11:46:13.662886 | 2023-07-01T06:42:55 | 2023-08-30T16:04:47 | 133,844,902 | 6 | 2 | Apache-2.0 | 2023-09-12T19:09:02 | 2018-05-17T17:07:36 | Python | UTF-8 | Python | false | false | 1,083 | py | # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from deckhand.client import client as deckhand_client
from shipyard_airflow.plugins.deckhand_client_factory import (
DeckhandClientFactory
)
def test_get_client():
"""Test the get_client functionality"""
cur_dir = os.path.dirname(__file__)
filename = os.path.join(cur_dir, 'test.conf')
client_factory = DeckhandClientFactory(filename)
client = client_factory.get_client()
assert isinstance(client, deckhand_client.Client)
| [
"bryan.strassner@gmail.com"
] | bryan.strassner@gmail.com |
1fba6cc1018e8180d4b1a9d7312f2d7735cb2952 | d4d492068212ea98ad54cf6558be5d087b9f159e | /code/modules/beam_search.py | 169bbfc405103a73a90287ad0ff2991317e21b2b | [] | no_license | amarazad/uctf | 9c0b1f700bfb1f946f5dabbe4cc91784c9d1452a | d087a28e5fb37784858c6406595a6b7df3598fc1 | refs/heads/master | 2022-01-09T13:09:53.759698 | 2019-05-09T12:14:06 | 2019-05-09T12:14:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,982 | py | from math import log
from numpy import array
from numpy import argmax
import torch
from torch.nn import functional
from torch.autograd import Variable
from Vocab import *
# beam search
def beam_search(data, k):
data = data.data.numpy()
sequences = [[list(), 1.0]]
# walk over each step in sequence
for row in data:
all_candidates = list()
# expand each current candidate
for i in range(len(sequences)):
seq, score = sequences[i]
for j in range(len(row)):
candidate = [seq + [j], score * -log(row[j])]
all_candidates.append(candidate)
# order all candidates by score
ordered = sorted(all_candidates, key=lambda tup:tup[1])
# select k best
sequences = ordered[:k]
return sequences
def beam_search_decoder(data, k, vocab):
print('Data ', data.size())
result = beam_search(data, k)
for r in result:
seq = r[0]
score = r[1]
for s in seq:
print(vocab.index2word(s))
print(seq)
print(score)
def test():
max_len = 5
vocab_file = 'data/vocab_mc5.txt'
vocab_src = Vocab('model_vocab')
vocab_src.load_vocab(vocab_file)
vocab_size = vocab_src.get_n_words
outputs = torch.randn(max_len, vocab_size)
outputs = functional.softmax(outputs)
beam_search_decoder(outputs, 3, vocab_src)
def main():
# define a sequence of 10 words over a vocab of 5 words
data = [[0.1, 0.2, 0.3, 0.4, 0.5],
[0.5, 0.4, 0.3, 0.2, 0.1],
[0.1, 0.2, 0.3, 0.4, 0.5],
[0.5, 0.4, 0.3, 0.2, 0.1],
[0.1, 0.2, 0.3, 0.4, 0.5],
[0.5, 0.4, 0.3, 0.2, 0.1],
[0.1, 0.2, 0.3, 0.4, 0.5],
[0.5, 0.4, 0.3, 0.2, 0.1],
[0.1, 0.2, 0.3, 0.4, 0.5],
[0.5, 0.4, 0.3, 0.2, 0.1]]
data = array(data)
# decode sequence
beam_search_decoder(data, 3)
if __name__ == '__main__':
test()
| [
"pajain06@in.ibm.com"
] | pajain06@in.ibm.com |
f2175851726ca0bd2de375f5dd60009f4fea1399 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /DaVinciDev_v38r1p1/Phys/StrippingArchive/python/StrippingArchive/Stripping15/StrippingBuToKX3872.py | bc8fd68b57bf0cc15f042076cea12929f4e982a4 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,316 | py | # $Id: StrippingBu2KX3872.py,v 1.2 2010-08-26 13:16:50 nmangiaf Exp $
__author__ = ['Jeremy Dickens']
__date__ = '21/02/2011'
__version__ = '$Revision: 1.2 $'
'''
Implements 3 lines: B+ -> K+ X3872, B+ -> K+ Psi(2S) and a looser B+ -> K+ JPsi pi+ pi+
'''
## Note this is just for testing the stripping on 25/02/2011
## Cuts may well change before the stripping is launched
from Gaudi.Configuration import *
from LHCbKernel.Configuration import *
from StrippingUtils.Utils import LineBuilder
Stripping_BuToKX3872_TestDictonary = {
'Prescale_BuToKX3872' : 1.0,
'Postscale_BuToKX3872' : 1.0,
'Prescale_BuToKX3872Loose' : 1.0,
'Postscale_BuToKX3872Loose' : 1.0,
'Prescale_BuToKPsi2S' : 1.0,
'Postscale_BuToKPsi2S' : 1.0,
'Prescale_BuToKPsi2SLoose' : 1.0,
'Postscale_BuToKPsi2SLoose' : 1.0,
# B cuts
'Bu_Comb_MassWindow' : 450.0,
'Bu_Comb_MassWindowLoose' : 500.0,
'Bu_MassWindow' : 400.0,
'Bu_MassWindowLoose' : 400.0,
'Bu_VertexCHI2' : 5.0,
'Bu_VertexCHI2Loose' : 7.0,
'Bu_IPCHI2' : 15.0,
'Bu_IPCHI2Loose' : 20.0,
'Bu_FlightCHI2' : 30.0,
'Bu_FlightCHI2Loose' : 20.0,
'Bu_DIRA' : 0.9995,
'Bu_DIRALoose' : 0.9995,
# X3872 / Psi(2S) cuts
'X3872_Comb_MassWindow' : 180.0,
'X3872_Comb_MassWindowLoose' : 220.0,
'X3872_MassWindow' : 150.0,
'X3872_MassWindowLoose' : 190.0,
'X3872_VertexCHI2' : 8.0,
'X3872_VertexCHI2Loose' : 10.0,
# Track cuts
'Track_CHI2nDOF' : 4.0,
# Kaon cuts
'Kaon_MinIPCHI2' : 4.5,
'Kaon_MinIPCHI2Loose' : 4.0,
# Pion cuts
'Pion_MinIPCHI2' : 4.5,
'Pion_MinIPCHI2Loose' : 4.0,
# JPsi cuts
'JPsi_MassWindow' : 70.0,
'JPsi_MassWindowLoose' : 70.0,
'JPsi_VertexCHI2' : 10.0,
'JPsi_VertexCHI2Loose' : 10.0,
# Muon cuts
'Muon_MinIPCHI2' : 1.5,
'Muon_MinIPCHI2Loose' : 1.5,
'Muon_PT' : 500.0,
'Muon_IsMuon' : True
}
class StrippingBu2KX3872Conf(LineBuilder):
"""
Configuration object for B+ -> K X(3872) lines
"""
__configuration_keys__ = (
'Prescale_BuToKX3872',
'Postscale_BuToKX3872',
'Prescale_BuToKX3872Loose',
'Postscale_BuToKX3872Loose',
'Prescale_BuToKPsi2S',
'Postscale_BuToKPsi2S',
'Prescale_BuToKPsi2SLoose',
'Postscale_BuToKPsi2SLoose',
# B cuts
'Bu_Comb_MassWindow',
'Bu_Comb_MassWindowLoose',
'Bu_MassWindow',
'Bu_MassWindowLoose',
'Bu_VertexCHI2',
'Bu_VertexCHI2Loose',
'Bu_IPCHI2',
'Bu_IPCHI2Loose',
'Bu_FlightCHI2',
'Bu_FlightCHI2Loose',
'Bu_DIRA',
'Bu_DIRALoose',
# X3872 / Psi(2S) cuts
'X3872_Comb_MassWindow',
'X3872_Comb_MassWindowLoose',
'X3872_MassWindow',
'X3872_MassWindowLoose',
'X3872_VertexCHI2',
'X3872_VertexCHI2Loose',
# Track cuts
'Track_CHI2nDOF',
# Kaon cuts
'Kaon_MinIPCHI2',
'Kaon_MinIPCHI2Loose',
# Pion cuts
'Pion_MinIPCHI2',
'Pion_MinIPCHI2Loose',
# JPsi cuts
'JPsi_MassWindow',
'JPsi_MassWindowLoose',
'JPsi_VertexCHI2',
'JPsi_VertexCHI2Loose',
# Muon cuts
'Muon_MinIPCHI2',
'Muon_MinIPCHI2Loose',
'Muon_PT',
'Muon_IsMuon'
)
def __init__(self, name, config):
'''The constructor of the configuration class.
Requires a configuration dictionary, config, which must provide all the settings for cuts which are not hard coded
'''
LineBuilder.__init__(self, name, config)
self.name = name
self.BuToKX3872LineName = self.name + "_BuToKX3872"
self.BuToKPsi2SLineName = self.name + "_BuToKPsi2S"
self.BuToKX3872LooseLineName = self.name + "_BuToKX3872Loose"
self.BuToKPsi2SLooseLineName = self.name + "_BuToKPsi2SLoose"
############################
## Define the cut strings ##
############################
###############
# Bu cuts ##
###############
self.BuCombCut = "(ADAMASS('B+') < %(Bu_Comb_MassWindow)s * MeV)" %config
self.BuCombLooseCut = "(ADAMASS('B+') < %(Bu_Comb_MassWindowLoose)s * MeV)" %config
self.BuCut = "(ADMASS('B+') < %(Bu_MassWindow)s * MeV) & (VFASPF(VCHI2/VDOF) < %(Bu_VertexCHI2)s) & (BPVIPCHI2() < %(Bu_IPCHI2)s) & (BPVDIRA> %(Bu_DIRA)s) & (BPVVDCHI2 > %(Bu_FlightCHI2)s)" %config
self.BuLooseCut = "(ADMASS('B+') < %(Bu_MassWindowLoose)s * MeV) & (VFASPF(VCHI2/VDOF) < %(Bu_VertexCHI2Loose)s) & (BPVIPCHI2() < %(Bu_IPCHI2Loose)s) & (BPVDIRA> %(Bu_DIRALoose)s) & (BPVVDCHI2 > %(Bu_FlightCHI2Loose)s)" %config
##############################
## X3872 and Psi(2S) cuts ##
##############################
self.X3872CombCut = "(ADAMASS('X_1(3872)') < %(X3872_Comb_MassWindow)s * MeV)" %config
self.X3872CombLooseCut = "(ADAMASS('X_1(3872)') < %(X3872_Comb_MassWindowLoose)s * MeV)" %config
self.Psi2SCombCut = "(ADAMASS('psi(2S)') < %(X3872_Comb_MassWindow)s * MeV)" %config
self.Psi2SCombLooseCut = "(ADAMASS('psi(2S)') < %(X3872_Comb_MassWindowLoose)s * MeV)" %config
ResonanceVertexCut = "(VFASPF(VCHI2/VDOF) < %(X3872_VertexCHI2)s)" %config
ResonanceVertexLooseCut = "(VFASPF(VCHI2/VDOF) < %(X3872_VertexCHI2Loose)s)" %config
self.X3872Cut = "(ADMASS('X_1(3872)') < %(X3872_MassWindow)s * MeV) & " %config + ResonanceVertexCut
self.X3872LooseCut = "(ADMASS('X_1(3872)') < %(X3872_MassWindowLoose)s * MeV) & " %config + ResonanceVertexLooseCut
self.Psi2SCut = "(ADMASS('psi(2S)') < %(X3872_MassWindow)s * MeV) & " %config + ResonanceVertexCut
self.Psi2SLooseCut = "(ADMASS('psi(2S)') < %(X3872_MassWindowLoose)s * MeV) & " %config + ResonanceVertexLooseCut
######################
## Track cuts ##
######################
TrackCut = "(TRCHI2DOF < %(Track_CHI2nDOF)s)" %config
self.KaonCut = TrackCut + " & (MIPCHI2DV(PRIMARY) > %(Kaon_MinIPCHI2)s)" %config
self.KaonLooseCut = TrackCut + " & (MIPCHI2DV(PRIMARY) > %(Kaon_MinIPCHI2Loose)s)" %config
self.PionCut = TrackCut + " & (MIPCHI2DV(PRIMARY) > %(Pion_MinIPCHI2)s)" %config
self.PionLooseCut = TrackCut + " & (MIPCHI2DV(PRIMARY) > %(Pion_MinIPCHI2Loose)s)" %config
MuonCut = TrackCut + " & (MIPCHI2DV(PRIMARY) > %(Muon_MinIPCHI2)s) & (PT > %(Muon_PT)s * MeV)" %config
if(config["Muon_IsMuon"]):
MuonCut += " & (ISMUON)"
MuonLooseCut = TrackCut + " & (MIPCHI2DV(PRIMARY) > %(Muon_MinIPCHI2Loose)s)" %config
##################
## Rho cuts ##
##################
self.RhoCuts = "(2 == NINTREE((ABSID=='pi+') & " + self.PionCut + "))"
######################
## J/psi cuts ##
######################
JPsiCut = "(ADMASS('J/psi(1S)') < %(JPsi_MassWindow)s * MeV) & (VFASPF(VCHI2/VDOF) < %(JPsi_VertexCHI2)s)" %config
JPsiLooseCut = "(ADMASS('J/psi(1S)') < %(JPsi_MassWindowLoose)s * MeV) & (VFASPF(VCHI2/VDOF) < %(JPsi_VertexCHI2Loose)s)" %config
self.JPsiCuts = JPsiCut + " & (2 == NINTREE((ABSID=='mu-') & " + MuonCut + "))"
self.JPsiLooseCuts = JPsiLooseCut + " & (2 == NINTREE((ABSID=='mu-') & " + MuonLooseCut + "))"
#########################
## Make the selections ##
#########################
## loose selections
Sel_JPsiLoose = self.__FilterSelectionJPsi__(self.name + "Loose", self.JPsiLooseCuts)
Sel_RhoLoose = self.__CreateSelectionRho__(self.name + "Loose", "", "ALL", self.PionCut)
Sel_X3872Loose = self.__CreateSelectionX3872__(self.BuToKX3872LooseLineName, self.X3872CombLooseCut, self.X3872LooseCut, [Sel_JPsiLoose, Sel_RhoLoose])
Sel_Psi2SLoose = self.__CreateSelectionX3872__(self.BuToKPsi2SLooseLineName, self.Psi2SCombLooseCut, self.Psi2SLooseCut, [Sel_JPsiLoose, Sel_RhoLoose])
Sel_KaonLoose = self.__FilterKaon__(self.name + "Loose", self.KaonLooseCut)
Sel_BuX3872KLoose = self.__CreateSelectionBu__(self.BuToKX3872LooseLineName, self.BuCombLooseCut, self.BuLooseCut, [Sel_X3872Loose, Sel_KaonLoose])
Sel_BuPsi2SKLoose = self.__CreateSelectionBu__(self.BuToKPsi2SLooseLineName, self.BuCombLooseCut, self.BuLooseCut, [Sel_Psi2SLoose, Sel_KaonLoose])
## tight selections
Sel_JPsi = self.__FilterSelectionJPsi__(self.name, self.JPsiCuts)
Sel_Rho = self.__FilterSelectionRho__(self.name, self.RhoCuts, [Sel_RhoLoose])
Sel_X3872 = self.__CreateSelectionX3872__(self.BuToKX3872LineName, self.X3872CombCut, self.X3872Cut, [Sel_JPsi, Sel_Rho])
Sel_Psi2S = self.__CreateSelectionX3872__(self.BuToKPsi2SLineName, self.Psi2SCombCut, self.Psi2SCut, [Sel_JPsi, Sel_Rho])
Sel_Kaon = self.__FilterKaon__(self.name, self.KaonCut)
Sel_BuX3872K = self.__CreateSelectionBu__(self.BuToKX3872LineName, self.BuCombCut, self.BuCut, [Sel_X3872, Sel_Kaon])
Sel_BuPsi2SK = self.__CreateSelectionBu__(self.BuToKPsi2SLineName, self.BuCombCut, self.BuCut, [Sel_Psi2S, Sel_Kaon])
###################################
## Construct the stripping lines ##
###################################
from StrippingConf.StrippingLine import StrippingLine
## --- B+ -> X3872 K+ loose line ---
Line_BuToX3872Loose_Name = self.BuToKX3872LooseLineName + "Line"
Line_BuToX3872Loose = StrippingLine( Line_BuToX3872Loose_Name,
prescale = config['Prescale_BuToKX3872Loose'],
postscale = config['Postscale_BuToKX3872Loose'],
selection = Sel_BuX3872KLoose)
self.registerLine(Line_BuToX3872Loose)
## --- B+ -> Psi2S K+ loose line ---
Line_BuToPsi2SLoose_Name = self.BuToKPsi2SLooseLineName + "Line"
Line_BuToPsi2SLoose = StrippingLine( Line_BuToPsi2SLoose_Name,
prescale = config['Prescale_BuToKPsi2SLoose'],
postscale = config['Postscale_BuToKPsi2SLoose'],
selection = Sel_BuPsi2SKLoose)
self.registerLine(Line_BuToPsi2SLoose)
## --- B+ -> X3872 K+ line ---
Line_BuToX3872_Name = self.BuToKX3872LineName + "Line"
Line_BuToX3872 = StrippingLine( Line_BuToX3872_Name,
prescale = config['Prescale_BuToKX3872'],
postscale = config['Postscale_BuToKX3872'],
selection = Sel_BuX3872K)
self.registerLine(Line_BuToX3872)
## --- B+ -> Psi2S K+ line ---
Line_BuToPsi2S_Name = self.BuToKPsi2SLineName + "Line"
Line_BuToPsi2S = StrippingLine( Line_BuToPsi2S_Name,
prescale = config['Prescale_BuToKPsi2S'],
postscale = config['Postscale_BuToKPsi2S'],
selection = Sel_BuPsi2SK)
self.registerLine(Line_BuToPsi2S)
self.printCuts()
def printCuts(self):
    '''Print the compiled cut values for all four stripping lines
    (tight/loose x X3872/Psi(2S)) so they can be checked in the log.
    Always returns True.'''
    print "-------------------------------------------"
    print "-- B+ -> K X3872 etc stripping line cuts --"
    print "-------------------------------------------"
    print " "
    # --- tight X3872 line ---
    print " --> B+ -> K X3872 line"
    print " --> Bu cut: ", self.BuCut
    print " --> Bu combination cut: ", self.BuCombCut
    print " --> X3872 cut: ", self.X3872Cut
    print " --> X3872 combination cut: ", self.X3872CombCut
    print " --> JPsi cuts: ", self.JPsiCuts
    print " --> Pion cut: ", self.PionCut
    print " --> Kaon cut: ", self.KaonCut
    print " "
    # --- loose X3872 line ---
    print " --> B+ -> K X3872 loose line"
    print " --> Bu cut: ", self.BuLooseCut
    print " --> Bu combination cut: ", self.BuCombLooseCut
    print " --> X3872 cut: ", self.X3872LooseCut
    print " --> X3872 combination cut: ", self.X3872CombLooseCut
    print " --> JPsi cuts: ", self.JPsiLooseCuts
    print " --> Pion cut: ", self.PionLooseCut
    print " --> Kaon cut: ", self.KaonLooseCut
    print " "
    # --- tight Psi(2S) line (labels still say "X3872"; cosmetic only) ---
    print " --> B+ -> K Psi(2S) line"
    print " --> Bu cut: ", self.BuCut
    print " --> Bu combination cut: ", self.BuCombCut
    print " --> X3872 cut: ", self.Psi2SCut
    print " --> X3872 combination cut: ", self.Psi2SCombCut
    print " --> JPsi cuts: ", self.JPsiCuts
    print " --> Pion cut: ", self.PionCut
    print " --> Kaon cut: ", self.KaonCut
    print " "
    # --- loose Psi(2S) line ---
    print " --> B+ -> K Psi(2S) loose line"
    print " --> Bu cut: ", self.BuLooseCut
    print " --> Bu combination cut: ", self.BuCombLooseCut
    print " --> X3872 cut: ", self.Psi2SLooseCut
    print " --> X3872 combination cut: ", self.Psi2SCombLooseCut
    print " --> JPsi cuts: ", self.JPsiLooseCuts
    print " --> Pion cut: ", self.PionLooseCut
    print " --> Kaon cut: ", self.KaonLooseCut
    return True
##########################################
## Create selections for StrippingLines ##
##########################################
#################
## Filter Kaon ##
#################
def __FilterKaon__(self, lName, KaonCut):
    '''
    Build a kaon filter Selection on top of StdLooseKaons, applying KaonCut.
    '''
    from StandardParticles import StdLooseKaons
    from GaudiConfUtils.ConfigurableGenerators import FilterDesktop
    from PhysSelPython.Wrappers import Selection
    kaon_filter = FilterDesktop()
    kaon_filter.Code = KaonCut
    return Selection("SelFilter_" + lName + "_Kaon",
                     Algorithm = kaon_filter,
                     RequiredSelections = [StdLooseKaons])
#########################
## Create rho -> pi pi ##
#########################
def __CreateSelectionRho__(self, lName, RhoCombCut, RhoCut, PionCut):
    '''
    Combine StdLoosePions into rho(770)0 -> pi+ pi- candidates.
    An empty RhoCombCut string means "no combination cut".
    '''
    from StandardParticles import StdLoosePions
    from GaudiConfUtils.ConfigurableGenerators import CombineParticles
    from PhysSelPython.Wrappers import Selection
    rho_builder = CombineParticles()
    rho_builder.DecayDescriptor = "rho(770)0 -> pi+ pi-"
    # Only set a combination cut when one was actually supplied.
    if len(RhoCombCut) > 0:
        rho_builder.CombinationCut = RhoCombCut
    rho_builder.MotherCut = RhoCut
    rho_builder.DaughtersCuts = { "pi+" : PionCut }
    return Selection("SelBuild_" + lName + "_Rho",
                     Algorithm = rho_builder,
                     RequiredSelections = [StdLoosePions])
def __FilterSelectionRho__(self, lName, RhoCuts, InputSelections):
    '''
    Refilter already-built rho candidates (e.g. the loose line's output).
    '''
    from GaudiConfUtils.ConfigurableGenerators import FilterDesktop
    from PhysSelPython.Wrappers import Selection
    rho_refilter = FilterDesktop()
    rho_refilter.Code = RhoCuts
    return Selection("SelFilter_" + lName + "_Rho",
                     Algorithm = rho_refilter,
                     RequiredSelections = InputSelections)
##########################
## Filter Jpsi -> mu mu ##
##########################
def __FilterSelectionJPsi__(self, lName, JPsiCuts):
    '''
    Filter J/psi(1S) -> mu+ mu- candidates taken from StdLooseJpsi2MuMu.
    '''
    from PhysSelPython.Wrappers import DataOnDemand, Selection
    from GaudiConfUtils.ConfigurableGenerators import FilterDesktop
    std_jpsi = DataOnDemand(Location = "Phys/StdLooseJpsi2MuMu/Particles")
    jpsi_filter = FilterDesktop()
    jpsi_filter.Code = JPsiCuts
    return Selection("SelFilter_" + lName + "_JPsi",
                     Algorithm = jpsi_filter,
                     RequiredSelections = [std_jpsi])
##############################
## Create X3872 -> JPsi rho ##
##############################
def __CreateSelectionX3872__(self, lName, CombCut, MotherCut, InputSelections):
    '''
    Build X_1(3872) -> J/psi(1S) rho(770)0 candidates.  Reused for the
    psi(2S) as well -- just pass different cuts.
    '''
    from GaudiConfUtils.ConfigurableGenerators import CombineParticles
    from PhysSelPython.Wrappers import Selection
    x3872_builder = CombineParticles()
    x3872_builder.DecayDescriptor = "X_1(3872) -> J/psi(1S) rho(770)0"
    x3872_builder.CombinationCut = CombCut
    x3872_builder.MotherCut = MotherCut
    return Selection("Sel_" + lName + "_X3872",
                     Algorithm = x3872_builder,
                     RequiredSelections = InputSelections)
###########################
## Create B+ -> X3872 K+ ##
###########################
def __CreateSelectionBu__(self, lName, CombCut, MotherCut, InputSelections):
    '''
    Build B+ -> K+ X3872 candidates (both charge conjugates).
    '''
    from GaudiConfUtils.ConfigurableGenerators import CombineParticles
    from PhysSelPython.Wrappers import Selection
    bu_builder = CombineParticles()
    bu_builder.DecayDescriptors = ["B+ -> X_1(3872) K+", "B- -> X_1(3872) K-"]
    bu_builder.CombinationCut = CombCut
    bu_builder.MotherCut = MotherCut
    return Selection("Sel_" + lName + "_Bu",
                     Algorithm = bu_builder,
                     RequiredSelections = InputSelections)
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
609a381377e705ec17bc60c43a5a10c592dfe9d8 | f969c54dd7b549aef24a3437709ced875e716d0a | /S12/tsai_repo/data_loader/example.py | 39779878c7067353a7e5d7afeddf171b427b68b1 | [] | no_license | prarthananbhat/tsai | d5bd1d2ee107ef6ef751ecaf863d1bc21d9391fa | 3b29555bb62eac9910430d7672cda052f59ae568 | refs/heads/master | 2021-01-03T12:12:55.503306 | 2020-05-28T07:13:33 | 2020-05-28T07:13:33 | 240,070,831 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,771 | py | import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from torchvision import transforms, utils
from data_loader.custom_dataset import imageMaskDataset
from data_loader.utils import show_images
from data_loader.custom_transform import Rescale
from data_loader.custom_transform import ToTensor
# Demo 1: load the raw image/mask dataset and plot the first few samples.
# NOTE(review): paths are hard-coded Windows paths -- adjust before running.
mask_dataset = imageMaskDataset(csv_file="D:/Projects/theschoolofai/datasets/custom_dataset/data.csv",
                                root_dir = "D:/Projects/theschoolofai/datasets/custom_dataset/")
fig = plt.figure()
fig.suptitle("Sample of Raw images")
for i in range(len(mask_dataset)):
    sample=mask_dataset[i]
    print(i,sample['image'].shape,sample['target_image'].shape)
    # Subplot indices are 1-based.
    j=i+1
    ax = plt.subplot(3,2,j)
    plt.tight_layout()
    ax.set_title("Sample {}".format(i))
    ax.axis('off')
    show_images(**sample)
    # print(i)
    # Only show the first four samples.
    if i==3:
        fig.show()
        break
# Demo 2: apply a single Rescale and a composed transform to one sample
# and plot the results side by side.
# Compose transforms
scale = Rescale((256,256))
composed = transforms.Compose([Rescale(256)])
fig = plt.figure()
fig.suptitle("Showing transformation on a sample")
sample = mask_dataset[0]
print(sample.keys())
for i, trsfm in enumerate([scale,composed]):
    trsansformed_sample = trsfm(sample)
    ax = plt.subplot(1,2,i+1)
    plt.tight_layout()
    ax.set_title(type(trsfm).__name__)
    show_images(**trsansformed_sample)
fig.show()
#Iterating through the dataset
custom_transform = transforms.Compose([Rescale(256),ToTensor()])
# NOTE(review): chained assignment rebinds mask_dataset to the transformed
# dataset as well -- the raw dataset from above is no longer reachable.
transformed_dataset = mask_dataset = imageMaskDataset(csv_file="D:/Projects/theschoolofai/datasets/custom_dataset/data.csv",
                                root_dir = "D:/Projects/theschoolofai/datasets/custom_dataset/",
                                transform = custom_transform)
dataloader = DataLoader(transformed_dataset,batch_size=4,shuffle=True)
#Helper function to show a batch
def show_masked_data_batch(sample_batched):
    """Show a batch's input images (top) and target masks (bottom).

    sample_batched: (images, targets) pair of 4-D tensors (B, C, H, W)
    as produced by the DataLoader above.
    """
    images_batch, target_images_batch = sample_batched[0], sample_batched[1]
    # Arrange each batch into a single (C, H, W') grid image.
    # (Removed unused batch_size/im_size/grid_border_size locals and
    # commented-out debug code from the original.)
    grid_image = utils.make_grid(images_batch)
    grid_target_image = utils.make_grid(target_images_batch)
    # Convert to HWC numpy arrays and stack inputs above targets.
    inputs_hwc = grid_image.numpy().transpose(1, 2, 0)
    targets_hwc = grid_target_image.numpy().transpose(1, 2, 0)
    plt.imshow(np.vstack((inputs_hwc, targets_hwc)))
# Demo 3: iterate the DataLoader and visualise the first batch.
for i_batch, sample_batched in enumerate(dataloader):
    print(i_batch, sample_batched[0].size(), sample_batched[1].size())
    # observe 4th batch and stop.
    # NOTE(review): the comment says "4th batch" but the condition stops
    # at the first batch (i_batch == 0).
    if i_batch == 0:
        fig = plt.figure()
        fig.suptitle("Transformed Images")
        show_masked_data_batch(sample_batched)
        plt.axis('off')
        plt.ioff()
        fig.show()
        break
"prarthana.bhat@progress.com"
] | prarthana.bhat@progress.com |
b2d9c40f151b806bd9ee4736251b38554a83456f | 02c08bc50fb73d08f8f321d6ec76a0f0c51904cc | /security.py | f6bde07f29dcad405e96430b111a07b990a000fa | [] | no_license | AmritaNeha19/my-rest-api | 29c36713630c06592eb9228eead55a47bc3cd681 | 0feb5e43237dbfe3a7b7a6d9b55c4856dc1816eb | refs/heads/master | 2023-06-25T23:08:31.011693 | 2021-07-25T11:05:10 | 2021-07-25T11:05:10 | 389,323,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | from models.user import UserModel
def authenticate(username, password):
    """JWT auth handler: return the matching user, or None on failure.

    Looks the user up by username and compares the stored password.
    NOTE(review): plain-text password comparison -- confirm hashing is
    handled elsewhere.
    """
    user = UserModel.find_by_username(username)
    if not user:
        return None
    if user.password != password:
        return None
    return user
def identity(payload):
    """JWT identity handler: resolve the decoded token payload to a user."""
    user_id = payload['identity']
    return UserModel.find_by_userid(user_id)
| [
"amritaneha19@gmail.com"
] | amritaneha19@gmail.com |
a24df8ce78ac686f3b1e2b8425cc73319caabd34 | 0f24e87c9035a2203de61c8dc67be332cedf4c97 | /browser/fields.py | 63d70485a6d16c324bf708ad38382f4e26697d35 | [] | no_license | timleslie/gattini | 05a2e6cc806cce9253c8a8d46e8b66e84161ee0e | 5b3f10859d306bfd35205ad2014f6d9a91c4b094 | refs/heads/master | 2021-01-18T16:32:30.735454 | 2012-06-08T08:36:15 | 2012-06-08T08:36:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,555 | py | """
This module contains classes which represent database fields and
their derived values for display in the Gattini Data Explore.
"""
import numpy as N
from pylab import log10, pi
from matplotlib.dates import num2date, date2num
from db.query import per_image_fields, per_star_fields
def get_x_data(result, field):
    """
    Pull the column named by a "table.field" string out of a GDB result
    recarray.  Time columns are converted to matplotlib date numbers so
    they can be plotted on a numeric axis.
    """
    column = field.split(".")[1]
    if column == 'time':
        return date2num(result[column])
    return result[column]
class Field:
    """
    Base class for GDE fields.  Subclasses override value() to compute
    derived quantities from database query results.
    """
    def __init__(self, label):
        """
        Store the label used to annotate plot axes in the GDE.
        """
        self.label = label

    def get_fields(self):
        """
        Database fields that must be queried to compute this field.
        """
        return self.fields

    def value(self, data, image=None):
        """
        Compute this field's values from a recarray of GDB data.
        Subclasses must implement this.
        """
        raise NotImplementedError

    def str_value(self, data):
        """
        The computed value, rendered as a string.
        """
        return str(self.value(data))

    def __str__(self):
        """
        A field's string form is its label.
        """
        return self.label

    def __eq__(self, other):
        """
        Fields compare equal by label (also works against plain strings).
        """
        return self.label == other
class SimpleField(Field):
    """
    The plainest field: a single database column returned as-is
    (via get_x_data, so time columns still become date numbers).
    """
    def __init__(self, field):
        # The field name doubles as the plot label.
        self.fields = [field]
        Field.__init__(self, field)

    def value(self, data, image=None):
        (column,) = self.fields
        return get_x_data(data, column)
class MedianField(Field):
    """
    Per-image "30% level": the pixel at the 30th-percentile position of
    row 600, columns 320:1280, of each image.
    NOTE(review): that is only a true percentile if the row slice is
    sorted -- confirm against the image pipeline.
    """
    def __init__(self):
        Field.__init__(self, "30% Level")
        # Computed from the image pixels directly; no DB columns needed.
        self.fields = []

    def value(self, data, images=None):
        """Return the 30% level for each image in *images*.

        Fixes vs. original: removed a leftover Python-2 debug
        ``print thirty`` statement (a syntax error under Python 3) and
        dead commented-out code; the loop no longer shadows the *data*
        parameter.
        """
        levels = []
        for image in images:
            row = image[600][320:1280]
            levels.append(row[int(0.3 * len(row))])
        return levels
class RadianField(SimpleField):
    """
    A field stored in radians (zenith distances); its string form is
    converted to degrees for easy reading.
    """
    def str_value(self, data):
        """
        Return the value converted to degrees, as a string.
        """
        degrees = 180 * SimpleField.value(self, data) / pi
        return str(degrees)
class TimeField(SimpleField):
    """
    A time field; the string form is the full calendar date rather than
    the raw matplotlib date number.
    """
    def str_value(self, data):
        date_number = SimpleField.value(self, data)
        return str(num2date(date_number))
class ElevationField(SimpleField):
    """
    Elevation in degrees, derived from a zenith-distance column stored
    in radians (elevation = 90 - zd).
    """
    def __init__(self, field, label):
        # Wire up the single source column, then override the label.
        SimpleField.__init__(self, field)
        Field.__init__(self, label)

    def value(self, data, image=None):
        zenith_radians = get_x_data(data, self.fields[0])
        return 90 - 180 * zenith_radians / N.pi
class ProductField(Field):
    """
    The elementwise product of two database fields.
    """
    def __init__(self, field_pair):
        """
        field_pair: a (field_a, field_b) pair of "table.field" names.
        The original signature used Python-2-only tuple-parameter
        unpacking; unpacking inside the body is call-compatible and
        parses under both Python 2 and 3.
        """
        field_a, field_b = field_pair
        Field.__init__(self, "%s * %s" % (field_a, field_b))
        self.fields = (field_a, field_b)

    def value(self, data, image=None):
        return get_x_data(data, self.fields[0]) * get_x_data(data, self.fields[1])
class SkyBrightness(Field):
    # Derived field combining the astrometric sky level, photometric zero
    # point and camera pixel size into a sky brightness estimate.
    def __init__(self):
        Field.__init__(self, "Sky Brightness")
        self.fields = ["cam.name", "astrom.sky", "astrom.zmag",
                       "header.exposure", "imstat.mean", "header.temp"]
    def value(self, data, image=None):
        cam = data['name']
        # An array result means a multi-row query; take the first camera name.
        if type(cam) == N.recarray:
            cam = cam[0]
        # Per-camera pixel size -- TODO confirm units (looks like arcsec/pix).
        pix_size = {"sky": N.sqrt(202000/4), "sbc": 11.3}[cam]
        ex = data['exposure']
        # Clamp the sky level above zero so log10 below stays finite.
        sky = data['sky'].clip(0.0001, data['sky'].max())
        mean = data['mean']  # NOTE(review): fetched but unused
        pix_mag = 2.5*log10(pix_size**2)
        temp = data['temp'].copy()
        # NOTE(review): indentation reconstructed -- for array results the
        # temperature is snapped to the two calibrated set points (-40/-30)
        # and a temperature/exposure-dependent dark offset is applied;
        # confirm this nesting against the original file.
        if type(cam) == N.recarray:
            temp[temp < -35] = -40
            temp[temp >= -35] = -30
            offset = ex.copy()
            offset[temp == -30] = 2.25*ex[temp == -30]
            offset[temp == -40] = 0.59*ex[temp == -40]
        else:
            offset = 0
        # Camera-specific constant bias term.
        if cam == "sbc":
            offset = 68
        else:
            offset += 77.1
        # Convert the offset-corrected sky counts to magnitudes per pixel area.
        return -2.5*log10(sky - offset) + data['zmag'] + pix_mag
class StarBrightness(Field):
    # Calibrated stellar magnitude: instrumental aperture magnitude (mag3)
    # plus the frame zero point, normalised by exposure time.
    def __init__(self, label="Star Brightness"):
        Field.__init__(self, label)
        self.fields = ["astrom.zmag", "phot.mag3", "header.exposure"]
    def value(self, data, image=None):
        # NOTE(review): assumes mag3 is stored against a 25 mag instrumental
        # zero point -- confirm against the photometry pipeline.
        return data['zmag'] + data['mag3'] - 25 -2.5*log10(data['exposure'])
class StarBrightnessError(StarBrightness):
    # Residual between the measured star brightness and the catalogue
    # visual magnitude (vmag).
    def __init__(self):
        StarBrightness.__init__(self, "Star Brightness Error")
        # Needs the catalogue magnitude in addition to the parent's fields.
        self.fields.append("phot.vmag")
    def value(self, data, image=None):
        return StarBrightness.value(self, data) - data['vmag']
def make_field(field):
    """
    Wrap a database field name in the appropriate Field subclass:
    TimeField for time columns, RadianField for zenith distances,
    and SimpleField for everything else.
    """
    if "time" in field:
        return TimeField(field)
    elif "zd" in field:
        return RadianField(field)
    else:
        return SimpleField(field)
# Build the GDE's field menus: wrap every raw DB column in a Field object
# and append the derived fields defined above.  Note this rebinds the
# imported per_image_fields / per_star_fields name lists.
per_image_fields = [make_field(field) for field in per_image_fields] + \
    [SkyBrightness(), ElevationField("header.sunzd", "Sun Elevation"),
     ElevationField("header.moonzd", "Moon Elevation")]
per_star_fields = [make_field(field) for field in per_star_fields] + \
    [StarBrightness(), StarBrightnessError()]
| [
"timl@breakawayconsulting.com.au"
] | timl@breakawayconsulting.com.au |
3f6b96bd37777af22a45a424ab153ca53b208f80 | abf1916a72b4a308858aa7a00e8882ca68fe9b54 | /accounts/models.py | 17406dc4f216f34327002efefb20bf04d50dc2c7 | [] | no_license | impratap/Newspaper_in_Django | c54cc2cf2e031a0b3aa94c63b85428249b2830f3 | 6a0a800b1fea1d1e8b5f1d7c5e1719e0c625142d | refs/heads/main | 2023-08-12T08:24:39.348976 | 2021-09-29T18:05:42 | 2021-09-29T18:05:42 | 388,561,489 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | from django.contrib.auth.models import AbstractUser
from django.db import models
class CustomUser(AbstractUser):
age = models.PositiveIntegerField(null=True, blank=True)
| [
"ankitpratapsingh333@gmail.com"
] | ankitpratapsingh333@gmail.com |
c9d98c8c4d7a53b3a1e1c19aa8a54ebf068e5adb | eed5bd36f4c287d92db4855a2c71aa7aee1f58a9 | /qfin/assets/spot.py | 555fdd231cc7c1a6e5531420235e11ed58b0bf8c | [] | no_license | vitaliakogut/hedging_cc | 7d5a070187e98dbc84d360171bcba62d985036c9 | 8876f37412755b89c67ebffd629186807871b614 | refs/heads/master | 2023-08-27T23:46:46.607876 | 2021-11-11T15:54:15 | 2021-11-11T15:54:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | from qfin.assets import Asset
class Spot(Asset):
    """Spot asset: generates underlying price paths from an initial price."""

    def __init__(self, s0, rate, *args, **kwargs):
        """
        s0   -- initial spot price
        rate -- rate passed to the model's path generator
        Remaining arguments are forwarded to Asset.
        """
        super().__init__(*args, **kwargs)
        self.s0 = s0
        self.rate = rate

    @property
    def asset_name(self):
        # Plain string literal; the original used an f-string with no
        # placeholders.
        return "SPOT"

    def generate(self):
        """Fill self.paths via the attached model's paths() generator.

        Fails fast (assert) if the model does not implement paths().
        """
        msg = f"Generation of the paths for the model {self.model.name} is not implemented."
        assert hasattr(self.model, 'paths'), msg
        self.model.paths(self.paths, self.period, self.s0, self.rate, self.npaths)
"lilimatic@Lilis-MacBook-Pro.local"
] | lilimatic@Lilis-MacBook-Pro.local |
3a74d29ca2e60ada74bce7805174b4c8f218a0c1 | 7e686824108f22f095a89860b235cc1267e6d32f | /test/functional/feature_rbf.py | 74e53a3b57c55646d78fff059439d3f3de393dc2 | [
"MIT"
] | permissive | alleck/Splendid | 2aace2cf675233c3c435c4eab4aedf8b32f23347 | 8ea29bda381628f954d1699a38a70c3ae3506ed9 | refs/heads/main | 2023-03-20T11:20:13.567687 | 2021-02-22T21:56:34 | 2021-02-22T21:56:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,951 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017-2020 The Splendid Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RBF code."""
from test_framework.test_framework import SplendidTestFramework
from test_framework.util import satoshi_round, assert_raises_rpc_error, assert_equal, Decimal
from test_framework.script import CScript
from test_framework.mininode import COIN, CTransaction, CTxIn, COutPoint, CTxOut
MAX_REPLACEMENT_LIMIT = 100
def tx_to_hex(tx):
    """Serialize a transaction object and return it as a hex string."""
    raw = tx.serialize()
    return raw.hex()
def make_utxo(node, amount, confirmed=True, script_pub_key=CScript([1])):
    """Create a txout with a given amount and scriptPubKey

    Mines coins as needed.

    confirmed - txouts created will be confirmed in the blockchain;
    unconfirmed otherwise.

    Returns the COutPoint of the created output.
    NOTE(review): the CScript default is a mutable shared instance; all
    callers using the default share the same object.
    """
    fee = 1 * COIN
    # Mine until the wallet can fund amount + fee.
    while node.getbalance() < satoshi_round((amount + fee) / COIN):
        node.generate(100)
    new_addr = node.getnewaddress()
    txid = node.sendtoaddress(new_addr, satoshi_round((amount + fee) / COIN))
    tx1 = node.getrawtransaction(txid, 1)
    txid = int(txid, 16)
    # Locate the output that pays our fresh address.
    i = None
    for i, txout in enumerate(tx1['vout']):
        if txout['scriptPubKey']['addresses'] == [new_addr]:
            break
    assert i is not None
    # Spend that output into a txout with the requested amount/script.
    tx2 = CTransaction()
    tx2.vin = [CTxIn(COutPoint(txid, i))]
    tx2.vout = [CTxOut(amount, script_pub_key)]
    tx2.rehash()
    signed_tx = node.signrawtransaction(tx_to_hex(tx2))
    txid = node.sendrawtransaction(signed_tx['hex'], True)
    # If requested, ensure txouts are confirmed.
    if confirmed:
        mempool_size = len(node.getrawmempool())
        while mempool_size > 0:
            node.generate(1)
            new_size = len(node.getrawmempool())
            # Error out if we have something stuck in the mempool, as this
            # would likely be a bug.
            assert (new_size < mempool_size)
            mempool_size = new_size
    return COutPoint(int(txid, 16), 0)
class ReplaceByFeeTest(SplendidTestFramework):
def set_test_params(self):
    # Two nodes: node0 accepts replacements (default) with relaxed
    # ancestor/descendant package limits so the chain/tree tests can build
    # long unconfirmed chains; node1 runs -mempoolreplacement=0 to test
    # the opt-out behaviour.
    self.num_nodes = 2
    self.extra_args = [["-maxorphantx=1000",
                        "-whitelist=127.0.0.1",
                        "-limitancestorcount=50",
                        "-limitancestorsize=101",
                        "-limitdescendantcount=200",
                        "-limitdescendantsize=101"],
                       ["-mempoolreplacement=0"]]
def run_test(self):
    """Set up an initial UTXO, then run every RBF sub-test in sequence."""
    # Leave IBD
    self.nodes[0].generate(1)
    make_utxo(self.nodes[0], 1 * COIN)
    # Ensure nodes are synced
    self.sync_all()
    # Table-driven dispatch: (log message, sub-test) pairs, executed in order.
    subtests = [
        ("Running test simple doublespend...", self.test_simple_doublespend),
        ("Running test doublespend chain...", self.test_doublespend_chain),
        ("Running test doublespend tree...", self.test_doublespend_tree),
        ("Running test replacement feeperkb...", self.test_replacement_fee_per_kb),
        ("Running test spends of conflicting outputs...", self.test_spends_of_conflicting_outputs),
        ("Running test new unconfirmed inputs...", self.test_new_unconfirmed_inputs),
        ("Running test too many replacements...", self.test_too_many_replacements),
        ("Running test opt-in...", self.test_opt_in),
        ("Running test RPC...", self.test_rpc),
        ("Running test prioritised transactions...", self.test_prioritised_transactions),
    ]
    for message, subtest in subtests:
        self.log.info(message)
        subtest()
    self.log.info("All Tests Passed")
def test_simple_doublespend(self):
    """Simple doublespend: a same-fee replacement is rejected, a
    higher-fee replacement is accepted on node0 and ignored on node1
    (which runs with replacement disabled)."""
    tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
    # make_utxo may have generated a bunch of blocks, so we need to sync
    # before we can spend the coins generated, or else the resulting
    # transactions might not be accepted by our peers.
    self.sync_all()
    tx1a = CTransaction()
    tx1a.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
    tx1a.vout = [CTxOut(1 * COIN, CScript([b'a']))]
    tx1a_hex = tx_to_hex(tx1a)
    tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
    self.sync_all()
    # Should fail because we haven't changed the fee
    tx1b = CTransaction()
    tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
    tx1b.vout = [CTxOut(1 * COIN, CScript([b'b']))]
    tx1b_hex = tx_to_hex(tx1b)
    # This will raise an exception due to insufficient fee
    assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
    # This will raise an exception due to transaction replacement being disabled
    assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True)
    # Extra 0.1 SPL fee
    tx1b = CTransaction()
    tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
    tx1b.vout = [CTxOut(int(0.9 * COIN), CScript([b'b']))]
    tx1b_hex = tx_to_hex(tx1b)
    # Replacement still disabled even with "enough fee"
    assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True)
    # Works when enabled
    tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
    mempool = self.nodes[0].getrawmempool()
    assert (tx1a_txid not in mempool)
    assert (tx1b_txid in mempool)
    assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
    # Second node is running mempoolreplacement=0, will not replace originally-seen txn
    mempool = self.nodes[1].getrawmempool()
    assert tx1a_txid in mempool
    assert tx1b_txid not in mempool
def test_doublespend_chain(self):
    """Doublespend of a long chain: the replacement must out-pay the sum
    of all replaced descendants' fees, and on success the whole chain is
    evicted."""
    initial_n_value = 5000 * COIN
    tx0_outpoint = make_utxo(self.nodes[0], initial_n_value)
    prevout = tx0_outpoint
    remaining_value = initial_n_value
    chain_txids = []
    # Build a chain of unconfirmed spends, each paying a 100-coin fee.
    while remaining_value > 1000 * COIN:
        remaining_value -= 100 * COIN
        tx = CTransaction()
        tx.vin = [CTxIn(prevout, n_sequence=0)]
        tx.vout = [CTxOut(remaining_value, CScript([1]))]
        tx_hex = tx_to_hex(tx)
        txid = self.nodes[0].sendrawtransaction(tx_hex, True)
        chain_txids.append(txid)
        prevout = COutPoint(int(txid, 16), 0)
    # Whether the double-spend is allowed is evaluated by including all
    # child fees - 40 SPL - so this attempt is rejected.
    dbl_tx = CTransaction()
    dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
    dbl_tx.vout = [CTxOut(initial_n_value - 30 * COIN, CScript([1]))]
    dbl_tx_hex = tx_to_hex(dbl_tx)
    # This will raise an exception due to insufficient fee
    assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
    # Accepted with sufficient fee
    dbl_tx = CTransaction()
    dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
    dbl_tx.vout = [CTxOut(1 * COIN, CScript([1]))]
    dbl_tx_hex = tx_to_hex(dbl_tx)
    self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
    # Every member of the replaced chain must have been evicted.
    mempool = self.nodes[0].getrawmempool()
    for doublespent_txid in chain_txids:
        assert (doublespent_txid not in mempool)
def test_doublespend_tree(self):
    """Doublespend of a big tree of transactions: replacement must pay
    for all evicted descendants, and evicting more than
    MAX_REPLACEMENT_LIMIT transactions is rejected outright."""
    initial_n_value = 50 * COIN
    tx0_outpoint = make_utxo(self.nodes[0], initial_n_value)

    def branch(prevout, initial_value, max_txs, tree_width=5, fee_val=0.0001 * COIN, _total_txs=None):
        # Recursive generator: fan each output into tree_width children,
        # submitting every tx to node0 and yielding it, until max_txs
        # transactions have been created or outputs become dust-sized.
        # _total_txs is a shared one-element counter across the recursion.
        if _total_txs is None:
            _total_txs = [0]
        if _total_txs[0] >= max_txs:
            return
        txout_value = (initial_value - fee_val) // tree_width
        if txout_value < fee_val:
            return
        vout = [CTxOut(txout_value, CScript([i + 1]))
                for i in range(tree_width)]
        tx_data = CTransaction()
        tx_data.vin = [CTxIn(prevout, n_sequence=0)]
        tx_data.vout = vout
        tx_hex = tx_to_hex(tx_data)
        assert (len(tx_data.serialize()) < 100000)
        txid = self.nodes[0].sendrawtransaction(tx_hex, True)
        yield tx_data
        _total_txs[0] += 1
        txid = int(txid, 16)
        for i, _ in enumerate(tx_data.vout):
            for x in branch(COutPoint(txid, i), txout_value,
                            max_txs,
                            tree_width=tree_width, fee_val=fee_val,
                            _total_txs=_total_txs):
                yield x

    fee = int(0.0001 * COIN)
    n = MAX_REPLACEMENT_LIMIT
    tree_txs = list(branch(tx0_outpoint, initial_n_value, n, fee_val=fee))
    assert_equal(len(tree_txs), n)
    # Attempt double-spend, will fail because too little fee paid
    dbl_tx = CTransaction()
    dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
    dbl_tx.vout = [CTxOut(initial_n_value - fee * n, CScript([1]))]
    dbl_tx_hex = tx_to_hex(dbl_tx)
    # This will raise an exception due to insufficient fee
    assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
    # 1 SPL fee is enough
    dbl_tx = CTransaction()
    dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
    dbl_tx.vout = [CTxOut(initial_n_value - fee * n - 1 * COIN, CScript([1]))]
    dbl_tx_hex = tx_to_hex(dbl_tx)
    self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
    mempool = self.nodes[0].getrawmempool()
    for tx in tree_txs:
        tx.rehash()
        assert (tx.hash not in mempool)
    # Try again, but with more total transactions than the "max txs
    # double-spent at once" anti-DoS limit.
    for n in (MAX_REPLACEMENT_LIMIT + 1, MAX_REPLACEMENT_LIMIT * 2):
        fee = int(0.0001 * COIN)
        tx0_outpoint = make_utxo(self.nodes[0], initial_n_value)
        tree_txs = list(branch(tx0_outpoint, initial_n_value, n, fee_val=fee))
        assert_equal(len(tree_txs), n)
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
        dbl_tx.vout = [CTxOut(initial_n_value - 2 * fee * n, CScript([1]))]
        dbl_tx_hex = tx_to_hex(dbl_tx)
        # This will raise an exception
        assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
        # The tree must be untouched after the rejected replacement.
        for tx in tree_txs:
            tx.rehash()
            self.nodes[0].getrawtransaction(tx.hash)
def test_replacement_fee_per_kb(self):
    """Replacement requires fee-per-KB to be higher: a replacement with a
    higher absolute fee but a much larger size (lower feerate) is
    rejected."""
    tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
    tx1a = CTransaction()
    tx1a.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
    tx1a.vout = [CTxOut(1 * COIN, CScript([b'a']))]
    tx1a_hex = tx_to_hex(tx1a)
    self.nodes[0].sendrawtransaction(tx1a_hex, True)
    # Higher fee, but the fee per KB is much lower, so the replacement is
    # rejected.
    tx1b = CTransaction()
    tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
    # Bloat the tx to ~1 MB with a huge scriptPubKey push.
    tx1b.vout = [CTxOut(int(0.001 * COIN), CScript([b'a' * 999000]))]
    tx1b_hex = tx_to_hex(tx1b)
    # This will raise an exception due to insufficient fee
    assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
def test_spends_of_conflicting_outputs(self):
    """Replacements that spend conflicting tx outputs are rejected, both
    when the replacement directly spends an output of the tx it replaces
    and when it spends an output of a descendant."""
    utxo1 = make_utxo(self.nodes[0], int(1.2 * COIN))
    utxo2 = make_utxo(self.nodes[0], 3 * COIN)
    tx1a = CTransaction()
    tx1a.vin = [CTxIn(utxo1, n_sequence=0)]
    tx1a.vout = [CTxOut(int(1.1 * COIN), CScript([b'a']))]
    tx1a_hex = tx_to_hex(tx1a)
    tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
    tx1a_txid = int(tx1a_txid, 16)
    # Direct spend an output of the transaction we're replacing.
    tx2 = CTransaction()
    tx2.vin = [CTxIn(utxo1, n_sequence=0), CTxIn(utxo2, n_sequence=0)]
    tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), n_sequence=0))
    tx2.vout = tx1a.vout
    tx2_hex = tx_to_hex(tx2)
    # This will raise an exception
    assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)
    # Spend tx1a's output to test the indirect case.
    tx1b = CTransaction()
    tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), n_sequence=0)]
    tx1b.vout = [CTxOut(1 * COIN, CScript([b'a']))]
    tx1b_hex = tx_to_hex(tx1b)
    tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
    tx1b_txid = int(tx1b_txid, 16)
    tx2 = CTransaction()
    tx2.vin = [CTxIn(utxo1, n_sequence=0), CTxIn(utxo2, n_sequence=0),
               CTxIn(COutPoint(tx1b_txid, 0))]
    tx2.vout = tx1a.vout
    tx2_hex = tx_to_hex(tx2)
    # This will raise an exception
    assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)
def test_new_unconfirmed_inputs(self):
    """Replacements that add new unconfirmed inputs are rejected."""
    confirmed_utxo = make_utxo(self.nodes[0], int(1.1 * COIN))
    # Second output left unconfirmed on purpose.
    unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1 * COIN), False)
    tx1 = CTransaction()
    tx1.vin = [CTxIn(confirmed_utxo)]
    tx1.vout = [CTxOut(1 * COIN, CScript([b'a']))]
    tx1_hex = tx_to_hex(tx1)
    self.nodes[0].sendrawtransaction(tx1_hex, True)
    # The replacement adds the unconfirmed input, which is not allowed.
    tx2 = CTransaction()
    tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
    tx2.vout = tx1.vout
    tx2_hex = tx_to_hex(tx2)
    # This will raise an exception
    assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, True)
def test_too_many_replacements(self):
    """Replacements that evict too many transactions are rejected."""
    # Try directly replacing more than MAX_REPLACEMENT_LIMIT
    # transactions
    # Start by creating a single transaction with many outputs
    initial_n_value = 10 * COIN
    utxo = make_utxo(self.nodes[0], initial_n_value)
    fee = int(0.0001 * COIN)
    split_value = int((initial_n_value - fee) / (MAX_REPLACEMENT_LIMIT + 1))
    outputs = []
    for i in range(MAX_REPLACEMENT_LIMIT + 1):
        outputs.append(CTxOut(split_value, CScript([1])))
    splitting_tx = CTransaction()
    splitting_tx.vin = [CTxIn(utxo, n_sequence=0)]
    splitting_tx.vout = outputs
    splitting_tx_hex = tx_to_hex(splitting_tx)
    txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
    txid = int(txid, 16)
    # Now spend each of those outputs individually
    for i in range(MAX_REPLACEMENT_LIMIT + 1):
        tx_i = CTransaction()
        tx_i.vin = [CTxIn(COutPoint(txid, i), n_sequence=0)]
        tx_i.vout = [CTxOut(split_value - fee, CScript([b'a']))]
        tx_i_hex = tx_to_hex(tx_i)
        self.nodes[0].sendrawtransaction(tx_i_hex, True)
    # Now create doublespend of the whole lot; should fail.
    # Need a big enough fee to cover all spending transactions and have
    # a higher fee rate
    double_spend_value = (split_value - 100 * fee) * (MAX_REPLACEMENT_LIMIT + 1)
    inputs = []
    for i in range(MAX_REPLACEMENT_LIMIT + 1):
        inputs.append(CTxIn(COutPoint(txid, i), n_sequence=0))
    double_tx = CTransaction()
    double_tx.vin = inputs
    double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
    double_tx_hex = tx_to_hex(double_tx)
    # This will raise an exception
    assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, True)
    # If we remove an input, it should pass
    double_tx = CTransaction()
    double_tx.vin = inputs[0:-1]
    double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
    double_tx_hex = tx_to_hex(double_tx)
    self.nodes[0].sendrawtransaction(double_tx_hex, True)
def test_opt_in(self):
    """Replacing should only work if orig tx opted in"""
    tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
    # Create a non-opting in transaction
    # (0xffffffff is the final sequence number: no replace-by-fee signal)
    tx1a = CTransaction()
    tx1a.vin = [CTxIn(tx0_outpoint, n_sequence=0xffffffff)]
    tx1a.vout = [CTxOut(1 * COIN, CScript([b'a']))]
    tx1a_hex = tx_to_hex(tx1a)
    tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
    # Shouldn't be able to double-spend
    tx1b = CTransaction()
    tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
    tx1b.vout = [CTxOut(int(0.9 * COIN), CScript([b'b']))]
    tx1b_hex = tx_to_hex(tx1b)
    # This will raise an exception
    assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, True)
    tx1_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
    # Create a different non-opting in transaction
    # (0xfffffffe is still above the opt-in threshold)
    tx2a = CTransaction()
    tx2a.vin = [CTxIn(tx1_outpoint, n_sequence=0xfffffffe)]
    tx2a.vout = [CTxOut(1 * COIN, CScript([b'a']))]
    tx2a_hex = tx_to_hex(tx2a)
    tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
    # Still shouldn't be able to double-spend
    tx2b = CTransaction()
    tx2b.vin = [CTxIn(tx1_outpoint, n_sequence=0)]
    tx2b.vout = [CTxOut(int(0.9 * COIN), CScript([b'b']))]
    tx2b_hex = tx_to_hex(tx2b)
    # This will raise an exception
    assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, True)
    # Now create a new transaction that spends from tx1a and tx2a
    # opt-in on one of the inputs
    # Transaction should be replaceable on either input
    tx1a_txid = int(tx1a_txid, 16)
    tx2a_txid = int(tx2a_txid, 16)
    tx3a = CTransaction()
    tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), n_sequence=0xffffffff),
                CTxIn(COutPoint(tx2a_txid, 0), n_sequence=0xfffffffd)]
    tx3a.vout = [CTxOut(int(0.9 * COIN), CScript([b'c'])), CTxOut(int(0.9 * COIN), CScript([b'd']))]
    tx3a_hex = tx_to_hex(tx3a)
    self.nodes[0].sendrawtransaction(tx3a_hex, True)
    tx3b = CTransaction()
    tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), n_sequence=0)]
    tx3b.vout = [CTxOut(int(0.5 * COIN), CScript([b'e']))]
    tx3b_hex = tx_to_hex(tx3b)
    tx3c = CTransaction()
    tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), n_sequence=0)]
    tx3c.vout = [CTxOut(int(0.5 * COIN), CScript([b'f']))]
    tx3c_hex = tx_to_hex(tx3c)
    self.nodes[0].sendrawtransaction(tx3b_hex, True)
    # If tx3b was accepted, tx3c won't look like a replacement,
    # but make sure it is accepted anyway
    self.nodes[0].sendrawtransaction(tx3c_hex, True)
def test_prioritised_transactions(self):
    """Fee deltas set via prioritisetransaction must be honoured by the
    replacement logic."""
    # Ensure that fee deltas used via prioritisetransaction are
    # correctly used by replacement logic
    # 1. Check that feeperkb uses modified fees
    tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
    tx1a = CTransaction()
    tx1a.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
    tx1a.vout = [CTxOut(1 * COIN, CScript([b'a']))]
    tx1a_hex = tx_to_hex(tx1a)
    tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
    # Higher fee, but the actual fee per KB is much lower.
    tx1b = CTransaction()
    tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
    tx1b.vout = [CTxOut(int(0.001 * COIN), CScript([b'a' * 740000]))]
    tx1b_hex = tx_to_hex(tx1b)
    # Verify tx1b cannot replace tx1a.
    assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
    # Use prioritisetransaction to set tx1a's fee to 0.
    self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1 * COIN))
    # Now tx1b should be able to replace tx1a
    tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
    assert (tx1b_txid in self.nodes[0].getrawmempool())
    # 2. Check that absolute fee checks use modified fee.
    tx1_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
    tx2a = CTransaction()
    tx2a.vin = [CTxIn(tx1_outpoint, n_sequence=0)]
    tx2a.vout = [CTxOut(1 * COIN, CScript([b'a']))]
    tx2a_hex = tx_to_hex(tx2a)
    self.nodes[0].sendrawtransaction(tx2a_hex, True)
    # Lower fee, but we'll prioritise it
    tx2b = CTransaction()
    tx2b.vin = [CTxIn(tx1_outpoint, n_sequence=0)]
    tx2b.vout = [CTxOut(int(1.01 * COIN), CScript([b'a']))]
    tx2b.rehash()
    tx2b_hex = tx_to_hex(tx2b)
    # Verify tx2b cannot replace tx2a.
    assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, True)
    # Now prioritise tx2b to have a higher modified fee
    self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1 * COIN))
    # tx2b should now be accepted
    tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
    assert (tx2b_txid in self.nodes[0].getrawmempool())
def test_rpc(self):
    """createrawtransaction/fundrawtransaction must encode the
    `replaceable` flag in the input sequence numbers."""
    us0 = self.nodes[0].listunspent()[0]
    ins = [us0]
    outs = {self.nodes[0].getnewaddress(): Decimal(1.0000000)}
    rawtx0 = self.nodes[0].createrawtransaction(ins, outs, 0, True)
    rawtx1 = self.nodes[0].createrawtransaction(ins, outs, 0, False)
    json0 = self.nodes[0].decoderawtransaction(rawtx0)
    json1 = self.nodes[0].decoderawtransaction(rawtx1)
    # 4294967293 == 0xfffffffd (replaceable); 4294967295 == 0xffffffff (final)
    assert_equal(json0["vin"][0]["sequence"], 4294967293)
    assert_equal(json1["vin"][0]["sequence"], 4294967295)
    rawtx2 = self.nodes[0].createrawtransaction([], outs)
    f_raw_tx2a = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": True})
    f_raw_tx2b = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": False})
    json0 = self.nodes[0].decoderawtransaction(f_raw_tx2a['hex'])
    json1 = self.nodes[0].decoderawtransaction(f_raw_tx2b['hex'])
    # 4294967294 == 0xfffffffe (non-replaceable, but not final either)
    assert_equal(json0["vin"][0]["sequence"], 4294967293)
    assert_equal(json1["vin"][0]["sequence"], 4294967294)
if __name__ == '__main__':
    # Entry point: run the whole replace-by-fee test suite.
    ReplaceByFeeTest().main()
| [
"79376856+SplendidProject@users.noreply.github.com"
] | 79376856+SplendidProject@users.noreply.github.com |
06ea7004e6548c99ae12598d02b6772fe46d7dec | 417ab6024a95e97b4d2236c67e28d00e6d1defc0 | /python/fetch/s58589/video.py | ed22519cf6a9c389fddd2e76eb4a290ff89c4b7b | [] | no_license | zeus911/myconf | 11139069948f7c46f760ca0a8f1bd84df5ec4275 | 6dc7a6761ab820d6e97a33a55a8963f7835dbf34 | refs/heads/master | 2020-04-18T02:16:09.560219 | 2019-01-22T18:15:08 | 2019-01-22T18:15:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,392 | py | #!/usr/bin python
# -*- coding: utf-8 -*-
from baseparse import *
from urlparse import urlparse
from common import common
from urllib import unquote
import time
from fetch.profile import *
class VideoParse(BaseParse):
    """Scrapes video channels and video entries from the s58589 site and
    stores them through the db/db_ops layer.

    Module globals used: `baseurl`, `maxVideoPage`, `header`, `regVideo`,
    `regVideo2` (imported from fetch.profile) -- TODO confirm against that
    module.  Python 2 code (print statements).
    """

    def __init__(self):
        pass

    def run(self):
        """Scrape the channel list, persist it, then walk every listing
        page of every channel."""
        dbVPN = db.DbVPN()
        ops = db_ops.DbOps(dbVPN)
        chs = self.videoChannel()
        for item in chs:
            ops.inertVideoChannel(item)
        print 's58589 video -- channel ok;,len=', len(chs)
        dbVPN.commit()
        dbVPN.close()
        # Page URLs follow the pattern <base>-pg-<n>.html, except page 1
        # which is the channel URL itself.
        for item in chs:
            for i in range(1, maxVideoPage):
                url = item['url']
                if i != 1:
                    url = "%s%s%s" % (item['url'].replace(".html", "-pg-"), i, ".html")
                print url
                self.videoParse(item['channel'], url)
                print '解析完成 ', item['channel'], ' ---', i, '页'
                # Be gentle with the remote server.
                time.sleep(1)

    def videoChannel(self):
        """Build one channel record (dict) per link in the site header."""
        ahrefs = self.header()
        channelList = []
        for ahref in ahrefs:
            obj = {}
            obj['name'] = ahref.text
            obj['url'] = ahref.get('href')
            obj['baseurl'] = baseurl
            obj['updateTime'] = datetime.datetime.now()
            obj['pic'] = ''
            obj['rate'] = 1.2
            # Channel key: host part of the base URL plus the link text.
            obj['channel'] = baseurl.replace("http://", "").replace("https://", "") + ahref.text
            obj['showType'] = 3
            obj['channelType'] = 'webview'
            channelList.append(obj)
        return channelList

    def videoParse(self, channel, url):
        """Scrape one listing page: resolve each entry's media URL and
        persist the collected records."""
        dataList = []
        soup = self.fetchUrl(url)
        metas = soup.findAll("li", {"class": "yun yun-large border-gray"})
        for meta in metas:
            obj = {}
            ahref = meta.first("a")
            mp4Url = self.parseDomVideo(ahref.get("href"))
            if mp4Url == None:
                # No playable media found on the detail page; skip the entry.
                print '没有mp4 文件:', ahref.get("href")
                continue
            obj['url'] = mp4Url
            obj['pic'] = meta.first('img').get("data-original")
            # Strip site spam suffixes from the title.
            obj['name'] = ahref.get("title").replace(",快播,大香蕉", "").replace("_chunk_1,快播云资源", "").replace("成人影院", "")
            videourl = urlparse(obj['url'])
            obj['path'] = videourl.path
            obj['updateTime'] = datetime.datetime.now()
            obj['channel'] = channel
            # Entries without a direct m3u8/mp4 URL must be played in a webview.
            if mp4Url.count("m3u8") == 0 and mp4Url.count("mp4") == 0:
                obj['videoType'] = "webview"
            else:
                obj['videoType'] = "normal"
            obj['baseurl'] = baseurl
            print obj['name'], obj['videoType'], obj['url'], obj['pic']
            dataList.append(obj)
        dbVPN = db.DbVPN()
        ops = db_ops.DbOps(dbVPN)
        for obj in dataList:
            ops.inertVideo(obj, obj['videoType'], baseurl)
        print 's58589 video --解析完毕 ; channel =', channel, '; len=', len(dataList), url
        dbVPN.commit()
        dbVPN.close()

    def parseDomVideo(self, url):
        """Follow the detail page to the player page and extract the media
        URL via the regVideo/regVideo2 patterns; return None on failure."""
        try:
            soup = self.fetchUrl(url, header)
            div = soup.first("div", {'class': 'playlist jsplist clearfix'})
            if div != None:
                ahref = div.first('a')
                if ahref != None:
                    soup = self.fetchUrl(ahref.get('href'), header)
                    play_video = soup.first('div', {'class': 'video-info fn-left'})
                    if play_video != None:
                        script = play_video.first('script')
                        if script != None:
                            # The player script embeds URL-quoted, escaped
                            # comma-separated fields; unescape then scan each.
                            text = unquote(script.text.replace("\"", "").replace("\/", "/"))
                            texts = text.split(",")
                            for item in texts:
                                match = regVideo.search(item)
                                if match != None:
                                    videoUrl = match.group(1)
                                    return "%s%s%s" % ("http", videoUrl, 'm3u8')
                                match = regVideo2.search(item)
                                if match != None:
                                    videoUrl = match.group(1)
                                    return videoUrl
            print '没找到mp4'
            return None
        except Exception as e:
            print common.format_exception(e)
            return None
def videoParse(queue):
    # Module-level entry point: hand a parser instance back through the
    # caller's queue (used by the multi-process fetch driver).
    queue.put(VideoParse())
| [
"liguoqing19861028@163.com"
] | liguoqing19861028@163.com |
bca9b0d70711b54dbd8d5e7a546d4d8f9f3c7bc8 | 2e883bd0291df0529175a2782ae91c738ed94796 | /Beginner_Student/passingreference.py | 62f748ef5f9b5766a763f8b7ab9665667a5613fa | [] | no_license | jpquinn62/ATBS-Python | 1b560f71cd44cad479c397874b2521c688a38c1e | 66ea20050fe6a3366673062d0f0daf981688d445 | refs/heads/master | 2021-01-10T09:25:57.165214 | 2016-03-27T18:35:07 | 2016-03-27T18:35:07 | 52,700,795 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | def eggs(someParameter):
someParameter.append('Hello')
# Lists are passed by reference: eggs() appends to this same list object,
# so the print below shows the mutated value.
spam = [1,2,3]
eggs(spam)
print(spam)
| [
"quinn.jay@gmail.com"
] | quinn.jay@gmail.com |
395c456b453400fa986c84776abccd917bbdcf1b | 7c82bb206839fccc28e8bc0de06deeaaf3db5960 | /MachineLearning/src/ch3/01_20170403/csv-read2.py | 8b78483ccec76e69ec1d1fbf388c1f69ec41fd67 | [] | no_license | tsuki1646/Python | 7624c7ef3fd17556b3fb2a9fa8ee9017c2081f18 | bc596d2c938cffcc98947f4eca2a6de5704cfce0 | refs/heads/master | 2020-03-27T07:25:26.075076 | 2017-05-27T01:30:02 | 2017-05-27T01:30:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | import csv, codecs
# Read a CSV file encoded in Shift-JIS
filename = "list-sjis.csv"
fp = codecs.open(filename, "r", encoding="shift-jis")
# Read one row at a time and print the second and third columns
# NOTE(review): fp is never closed; acceptable for a one-shot script.
reader = csv.reader(fp, delimiter=",", quotechar='"')
for c in reader:
    print(c[1], c[2])
| [
"y.inagaki53@gmail.com"
] | y.inagaki53@gmail.com |
b31415e72940e35d0dea7aa1ae8c0f093b2613db | c84427345c08aa965e6323160dd81735251678bb | /p3dx_transceiver/axis_camera/nodes/axis_ptz.py | 40a54ae1500fc866085655dd56e92f191a0df391 | [] | no_license | dhrodriguezg/P3DX_Platform | 3cbaa1e2be20427a7e391381baf8f26f47d6384e | e5aef3d7fd7593a122cef6df36697cffebaf8998 | refs/heads/master | 2021-01-21T08:54:38.274352 | 2017-08-31T04:57:10 | 2017-08-31T04:57:10 | 101,960,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,447 | py | #!/usr/bin/env python
#
# Basic PTZ node, based on documentation here:
# http://www.axis.com/files/manuals/vapix_ptz_45621_en_1112.pdf
#
import threading
import urllib
import urllib2
import base64
import httplib, urllib
import rospy
from axis_camera.msg import Axis
from std_msgs.msg import Bool
from std_msgs.msg import Int32
import math
from dynamic_reconfigure.server import Server
from axis_camera.cfg import PTZConfig
class StateThread(threading.Thread):
    '''This class handles the publication of the positional state of the camera
    to a ROS message'''
    # Python 2 code (urllib2, old except syntax).

    def __init__(self, axis):
        threading.Thread.__init__(self)
        self.axis = axis
        # Daemon thread: does not block process exit.
        self.daemon = True

    def run(self):
        # Poll the pending command at 100 Hz and translate it into an
        # HTTP request to the camera.
        r = rospy.Rate(100)
        self.msg = Axis()
        while True:
            self.changeCameraPosition()
            r.sleep()

    def changeCameraPosition(self):
        # Map the integer command (set by the /camera_ptz subscriber)
        # to the camera's move keyword.
        self.movement = ''
        if self.axis.command == 0:
            self.movement = 'home'
        if self.axis.command == 1:
            self.movement = 'up'
        if self.axis.command == 2:
            self.movement = 'down'
        if self.axis.command == 3:
            self.movement = 'left'
        if self.axis.command == 4:
            self.movement = 'right'
        self.url = 'http://%s/cgi-bin/viewer/camctrl.cgi?move=%s' % (self.axis.hostname, self.movement)
        # command == -1 means "nothing pending"; anything >= 0 is sent once.
        if self.axis.command > -1:
            # create a password manager
            rospy.logwarn(self.url)
            # Hard-coded admin:admin basic-auth credentials.
            chimpConfig = {
                "headers": {
                    "Content-Type": "application/json",
                    "Authorization": "Basic " + base64.encodestring("admin:admin").replace('\n', '')
                },
                "url": self.url}
            datas = None
            self.url = urllib2.Request(chimpConfig["url"], datas, chimpConfig["headers"])
            try:
                # Reset the command before and after the request so it is
                # only issued once even if urlopen raises.
                self.axis.command = -1
                self.fp = urllib2.urlopen(self.url)
                self.axis.command = -1
            except urllib2.URLError, e:
                rospy.logwarn('Error opening URL %s' % (self.url) + 'Possible timeout. Looping until camera appears')
class AxisPTZ:
    '''This class creates a node to manage the PTZ functions of an Axis PTZ
    camera'''

    def __init__(self, hostname, username, password, flip, speed_control):
        self.hostname = hostname
        self.username = username
        self.password = password
        # -1 == no command pending; values 0..4 are consumed by StateThread.
        self.command = -1
        self.st = None
        self.sub_command = rospy.Subscriber("/camera_ptz", Int32, self.cameraPositionCallback, queue_size=1)
        # Start the worker thread that forwards commands to the camera.
        self.st = StateThread(self)
        self.st.start()

    def cameraPositionCallback(self, msg):
        # Latch the most recent command for the state thread to pick up.
        self.command = msg.data
def main():
    """Initialize the ROS node, resolve parameters and spin."""
    rospy.init_node("axis_twist")
    arg_defaults = {
        'hostname': '192.168.0.90',
        'username': 'ros',
        'password': '',
        'flip': False,  # things get weird if flip=true
        'speed_control': False
    }
    args = {}
    # go through all arguments
    for name in arg_defaults:
        full_param_name = rospy.search_param(name)
        # make sure argument was found (https://github.com/ros/ros_comm/issues/253)
        if full_param_name == None:
            args[name] = arg_defaults[name]
        else:
            args[name] = rospy.get_param(full_param_name, arg_defaults[name])
    # create new PTZ object and start dynamic_reconfigure server
    my_ptz = AxisPTZ(**args)
    rospy.spin()
if __name__ == "__main__":
    # Entry point when executed as a ROS node script.
    main()
| [
"dh.rodriguezg@gmail.com"
] | dh.rodriguezg@gmail.com |
71e1a5eda5f89ca8545135e293807b109c44eec7 | cb741a3d5b7c634e00f64771c37d27ca9b59c5c9 | /PycharmProjects/pythonProject1/Test02.py | 33f64edcb5246916afaa16037cc7235e7ca0d9ee | [] | no_license | aleksandra925/projekt_appium | c3e50f96788e4c8ab5bbfaffce81601b0fc16125 | c597fa604e149105014709badadf621dba03524e | refs/heads/main | 2023-06-07T13:07:14.063478 | 2021-07-03T23:11:53 | 2021-07-03T23:11:53 | 382,714,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,772 | py | import unittest
import os
from appium import webdriver
from time import sleep
# Resolve a path relative to this file's directory to an absolute path.
PATH = lambda p: os.path.abspath(
    os.path.join(os.path.dirname(__file__), p)
)
class Test1Appium(unittest.TestCase):
    """Appium UI test for the ContactManager sample app running on a
    Genymotion Cloud Android device."""

    def setUp(self):
        # Desired capabilities describing the app under test and the device.
        desired_caps = {}
        desired_caps['app'] = PATH('ContactManager.apk')
        desired_caps['platformName'] = 'Android'
        desired_caps['deviceName'] = 'Genymotion Cloud'
        desired_caps['udid'] = 'localhost:10000'  # fill in if it were not constant
        desired_caps['appPackages'] = 'com.example.android.contactmanager'
        desired_caps['appActivity'] = 'com.example.android.contactmanager.ContactManager'
        # connect to the Appium server
        self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
        self.driver.implicitly_wait(2)

    def tearDown(self):
        # Close the session after each test.
        self.driver.quit()

    def testFormApp(self):
        # Fill in the "add contact" form and verify the entered values.
        self.driver.is_app_installed('com.example.android.contactmanager')
        self.driver.find_element_by_id('com.example.android.contactmanager:id/addContactButton').click()
        textfields = self.driver.find_elements_by_class_name('android.widget.EditText')
        textfields[0].send_keys('Tomek z Warszawy')
        textfields[1].send_keys('222333444')
        textfields[2].send_keys('tom@wsbwawa.pl')
        sleep(2)
        # assertions
        self.assertEqual('Tomek z Warszawy', textfields[0].text)
        self.assertEqual('222333444', textfields[1].text)
        self.assertEqual('tom@wsbwawa.pl', textfields[2].text)
        # teaching prints
        print(textfields[0])
        print(textfields[0].text)
if __name__ == '__main__':
    # Bug fix: the guard previously compared __name__ to 'main', which the
    # interpreter never sets, so the suite was never executed when running
    # this file directly; direct execution sets __name__ to '__main__'.
    suite = unittest.TestLoader().loadTestsFromTestCase(Test1Appium)
    unittest.TextTestRunner(verbosity=2).run(suite)
"aleksandra925@users.noreply.github.com"
] | aleksandra925@users.noreply.github.com |
ddc286841bea7b56b520bff0925ecac10bed0530 | b7aa9d9dfa1f98ccd779e5ea64f8fd1d96a16cdf | /2020.2021/07_KNF_DPLL_SAT/Marko/cnf.py | 6f1134cd257adc52ee1bb00f3a584b86d340d933 | [] | no_license | matfvi/vi | b628983e31332b5a79047ce20203716c6ec4ce1f | d20ec947b359d84418d6aa05afa91eadecde3879 | refs/heads/master | 2023-05-25T14:02:33.791302 | 2023-05-20T16:20:39 | 2023-05-20T16:20:39 | 122,360,903 | 46 | 79 | null | 2020-06-01T20:12:11 | 2018-02-21T16:22:20 | HTML | UTF-8 | Python | false | false | 1,100 | py |
class CNF:
    """A CNF formula: a list of clauses over named propositional variables.

    Literals are strings; a leading '-' marks negation (e.g. '-x').
    Variables are numbered 1..n in order of first appearance so the
    formula can be exported in DIMACS format.
    """

    def __init__(self):
        self.clauses = []
        # Bidirectional mapping between 1-based DIMACS numbers and names.
        self.number_to_var_name = {}
        self.var_name_to_number = {}

    def add_clause(self, clause):
        """Append a clause (iterable of literal strings), assigning numbers
        to any variables not seen before."""
        for literal in clause:
            var_name = literal.strip('-')
            if var_name not in self.var_name_to_number:
                var_number = len(self.var_name_to_number) + 1
                self.var_name_to_number[var_name] = var_number
                self.number_to_var_name[var_number] = var_name
        self.clauses.append(clause)

    def dimacs(self):
        """Return the formula serialized in DIMACS CNF format."""
        result = f'p cnf {len(self.number_to_var_name)} {len(self.clauses)}\n'
        for clause in self.clauses:
            for literal in clause:
                var_name = literal.strip('-')
                if literal[0] == '-':
                    result += '-'
                result += f'{self.var_name_to_number[var_name]} '
            result += '0\n'
        return result

    def get_var_name(self, number: int):
        """Return the variable name for a DIMACS number.

        Bug fix: the original indexed `self.vars`, an attribute that is
        never defined; the mapping lives in `self.number_to_var_name`.
        """
        return self.number_to_var_name[number]

    def get_var_number(self, name: str):
        """Return the DIMACS number assigned to variable `name`."""
        return self.var_name_to_number[name]
"spaskeasm@gmail.com"
] | spaskeasm@gmail.com |
d1bb3ed4301c2d4059d9318b9cec14819f0c700d | 473415cb4ee038cd2056a4502e7b25fb443e1518 | /src/olympe/_private/pomp_loop_thread.py | 7a84f4bbcdc72a25efebe6272f938fbb44e0df2f | [
"BSD-3-Clause"
] | permissive | wahyurahmaniar/olympe | 4b5eac08139da2aa060c13fe719c8a1fdbcd6974 | 0cf65bc971024b6cf03ad1c0a662aea452761534 | refs/heads/master | 2020-09-11T17:13:53.369812 | 2019-09-26T11:50:13 | 2019-09-26T11:58:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,314 | py | # -*- coding: UTF-8 -*-
# Copyright (C) 2019 Parrot Drones SAS
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Parrot Company nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# PARROT COMPANY BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import unicode_literals
from future.builtins import str
import concurrent.futures
import ctypes
import logging
import olympe_deps as od
import select
import threading
import traceback
try:
from itertools import ifilter as filter
except ImportError:
# python3
pass
# Module-level logger used to surface errors from the futures machinery below.
logger = logging.getLogger("concurrent.futures")
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
def func():
    # NOTE(review): appears to be leftover example/debug code; nothing in
    # this module calls it -- confirm before removing.
    logger.warning('Something happened!')
class Future(concurrent.futures.Future):
    """
    A chainable Future class

    Each future is tied to a PompLoopThread (`loop`) that tracks pending
    future ids so they can be drained during cleanup.
    """

    def __init__(self, loop):
        super(Future, self).__init__()
        self._loop = loop

    def _register(self):
        # Track this future on its loop until it completes.
        self._loop._register_future(id(self))
        self.add_done_callback(lambda _: self._loop._unregister_future(id(self)))

    def __del__(self):
        # Best effort: the done-callback normally unregisters already.
        self._loop._unregister_future(id(self), ignore_error=True)

    def set_from(self, source):
        """Copy the outcome (cancellation, exception or result) of `source`
        into this future, if it is not already settled."""
        if source.cancelled():
            self.cancel()
            return
        if self.done():
            return
        if not self.set_running_or_notify_cancel():
            return
        exception = source.exception()
        if exception is not None:
            self.set_exception(exception)
        else:
            result = source.result()
            self.set_result(result)

    def chain(self, next_):
        """Propagate this future's outcome into `next_` once it completes."""
        self.add_done_callback(lambda _: next_.set_from(self))

    def then(self, fn, deferred=False):
        """Return a new future resolving to fn(self.result()).

        `fn` runs on the loop thread: deferred via run_later when requested,
        via run_async when called from another thread, or inline when the
        current thread already is the loop.
        """
        result = Future(self._loop)
        result._register()
        def callback(_):
            try:
                if deferred:
                    temp = self._loop.run_later(fn, self.result())
                    temp.chain(result)
                elif not threading.current_thread() is self._loop:
                    temp = self._loop.run_async(fn, self.result())
                    temp.chain(result)
                else:
                    result.set_result(fn(self.result()))
            except Exception as e:
                result.set_exception(e)
            except:
                # Non-Exception (e.g. cancellation) -> cancel the chained future.
                result.cancel()
        self.add_done_callback(callback)
        return result

    def result_or_cancel(self, timeout=None):
        """Like result(), but cancel this future if waiting fails
        (e.g. on timeout) before re-raising."""
        try:
            return self.result(timeout=timeout)
        except:
            self.cancel()
            raise
class PompLoopThread(threading.Thread):
    """
    Class running a pomp loop in a pomp thread.
    It performs all calls to pomp and arsdk-ng within the loop (except init and destruction)
    """

    def __init__(self, logging):
        self.logging = logging
        self.running = True
        # Timeout (ms) of one pomp_loop_wait_and_process iteration.
        self.pomptimeout_ms = 100
        # Tasks posted from other threads (processed after a wake-up event).
        self.async_pomp_task = list()
        # Tasks deferred to the next loop iteration.
        self.deferred_pomp_task = list()
        # Event used to interrupt the loop wait from other threads.
        self.wakeup_evt = od.pomp_evt_new()
        # Registered pomp events, their C callback wrappers and userdata,
        # all keyed by id() of the event object.
        self.pomp_events = dict()
        self.pomp_event_callbacks = dict()
        self.pomp_loop = None
        self.pomp_timers = {}
        self.pomp_timer_callbacks = {}
        self.userdata = dict()
        # Keep ctypes userdata pointers alive while the event is attached.
        self.c_userdata = dict()
        self.cleanup_functions = []
        # ids of Futures still pending (see _register_future).
        self.futures = []
        self._create_pomp_loop()
        super(PompLoopThread, self).__init__()

    def destroy(self):
        """Stop the thread and release all loop resources."""
        # stop the thread
        self.stop()
        # remove all fds from the loop
        self._destroy_pomp_loop_fds()
        # remove all timers from the loop
        self._destroy_pomp_loop_timers()
        # destroy the loop
        self._destroy_pomp_loop()

    def stop(self):
        """
        Stop thread to manage commands send to the drone
        """
        self.running = False
        # Wake the loop and join, unless we are already on the loop thread.
        if threading.current_thread().ident != self.ident:
            self._wake_up()
            self.join()

    def run_async(self, func, *args, **kwargs):
        """
        Fills in a list with the function to be executed in the pomp thread
        and wakes up the pomp thread.
        """
        future = Future(self)
        future._register()
        self.async_pomp_task.append((future, func, args, kwargs))
        self._wake_up()
        return future

    def run_later(self, func, *args, **kwargs):
        """
        Fills in a list with the function to be executed later in the pomp thread
        """
        future = Future(self)
        future._register()
        self.deferred_pomp_task.append((future, func, args, kwargs))
        return future

    def _wake_up_event_cb(self, pomp_evt, _userdata):
        """
        Callback received when a pomp_evt is triggered.
        """
        # the pomp_evt is acknowledged by libpomp

    def _run_task_list(self, task_list):
        """
        execute all pending functions located in the task list
        this is done in the order the list has been filled in
        """
        while len(task_list):
            future, f, args, kwargs = task_list.pop(0)
            try:
                ret = f(*args, **kwargs)
            except Exception as e:
                traceback.print_exc()
                self._unregister_future(future, ignore_error=True)
                future.set_exception(e)
                continue
            # If the task itself returned a future, forward its outcome;
            # otherwise resolve immediately with the returned value.
            if not isinstance(ret, concurrent.futures.Future):
                future.set_result(ret)
            else:
                ret.chain(future)

    def run(self):
        """
        Thread's main loop
        """
        self._add_event_to_loop(
            self.wakeup_evt, lambda *args: self._wake_up_event_cb(*args))
        # We have to monitor the main thread exit. This is the simplest way to
        # let the main thread handle the signals while still being able to
        # perform some cleanup before the process exit. If we don't monitor the
        # main thread, this thread will hang the process when the process
        # receive SIGINT (or any other non fatal signal).
        main_thread = next(filter(
            lambda t: t.name == "MainThread",
            threading.enumerate()
        ))
        try:
            while self.running and main_thread.is_alive():
                try:
                    self._wait_and_process()
                except RuntimeError as e:
                    self.logging.logE('Exception caught: %s.' % e)
                self._run_task_list(self.async_pomp_task)
                self._run_task_list(self.deferred_pomp_task)
        finally:
            # Perform some cleanup before this thread dies
            self._cleanup()
            self.destroy()

    def _wait_and_process(self):
        od.pomp_loop_wait_and_process(self.pomp_loop, self.pomptimeout_ms)

    def _wake_up(self):
        if self.wakeup_evt:
            od.pomp_evt_signal(self.wakeup_evt)

    def add_event_to_loop(self, *args, **kwds):
        """
        Add a pomp event to the loop
        """
        self.run_async(self._add_event_to_loop, *args, **kwds)

    def _add_event_to_loop(self, pomp_evt, cb, userdata=None):
        evt_id = id(pomp_evt)
        self.pomp_events[evt_id] = pomp_evt
        # Keep a reference to the ctypes callback so it is not collected
        # while libpomp still holds the pointer.
        self.pomp_event_callbacks[evt_id] = od.pomp_evt_cb_t(cb)
        self.userdata[evt_id] = userdata
        userdata = ctypes.cast(ctypes.pointer(ctypes.py_object(userdata)), ctypes.c_void_p)
        self.c_userdata[evt_id] = userdata
        res = od.pomp_evt_attach_to_loop(
            pomp_evt,
            self.pomp_loop,
            self.pomp_event_callbacks[evt_id],
            userdata
        )
        if res != 0:
            raise RuntimeError('Cannot add eventfd to pomp loop')

    def remove_event_from_loop(self, *args, **kwds):
        """
        Remove a pomp event from the loop
        """
        self.run_later(self._remove_event_from_loop, *args, **kwds)

    def _remove_event_from_loop(self, pomp_evt):
        evt_id = id(pomp_evt)
        self.userdata.pop(evt_id, None)
        self.c_userdata.pop(evt_id, None)
        self.pomp_event_callbacks.pop(evt_id, None)
        if self.pomp_events.pop(evt_id, None) is not None:
            if od.pomp_evt_detach_from_loop(pomp_evt, self.pomp_loop) != 0:
                self.logging.logE('Cannot remove event "%s" from pomp loop' % evt_id)

    def _destroy_pomp_loop_fds(self):
        # Copy the values: _remove_event_from_loop mutates self.pomp_events.
        evts = list(self.pomp_events.values())[:]
        for evt in evts:
            self._remove_event_from_loop(evt)

    def _create_pomp_loop(self):
        self.logging.logI('Creating pomp loop')
        self.pomp_loop = od.pomp_loop_new()
        if self.pomp_loop is None:
            raise RuntimeError('Cannot create pomp loop')

    def _destroy_pomp_loop(self):
        # Returns True on success; False if libpomp reported an error.
        if self.pomp_loop is not None:
            res = od.pomp_loop_destroy(self.pomp_loop)
            if res != 0:
                self.logging.logE(
                    "Error while destroying pomp loop: {}".format(res))
                return False
            else:
                self.logging.logI("Pomp loop has been destroyed")
                self.pomp_loop = None
                return True

    def create_timer(self, callback):
        """Create a pomp timer bound to this loop and return it."""
        self.logging.logI('Creating pomp timer')
        # Keep the ctypes wrapper referenced for the timer's lifetime.
        pomp_callback = od.pomp_timer_cb_t(
            lambda *args: callback(*args))
        pomp_timer = od.pomp_timer_new(
            self.pomp_loop, pomp_callback, None)
        if pomp_timer is None:
            raise RuntimeError('Unable to create pomp timer')
        self.pomp_timers[id(pomp_timer)] = pomp_timer
        self.pomp_timer_callbacks[id(pomp_timer)] = pomp_callback
        return pomp_timer

    def set_timer(self, pomp_timer, delay, period):
        """Arm the timer with the given delay and period; True on success."""
        res = od.pomp_timer_set_periodic(pomp_timer, delay, period)
        return res == 0

    def clear_timer(self, pomp_timer):
        """Disarm the timer; True on success."""
        res = od.pomp_timer_clear(pomp_timer)
        return res == 0

    def destroy_timer(self, pomp_timer):
        """Destroy a timer previously created with create_timer."""
        if id(pomp_timer) not in self.pomp_timers:
            return False
        res = od.pomp_timer_destroy(pomp_timer)
        if res != 0:
            self.logging.logE(
                "Error while destroying pomp loop timer: {}".format(res))
            return False
        else:
            del self.pomp_timers[id(pomp_timer)]
            del self.pomp_timer_callbacks[id(pomp_timer)]
            self.logging.logI("Pomp loop timer has been destroyed")
            return True

    def _destroy_pomp_loop_timers(self):
        # Copy the values: destroy_timer mutates self.pomp_timers.
        pomp_timers = list(self.pomp_timers.values())[:]
        for pomp_timer in pomp_timers:
            self.destroy_timer(pomp_timer)

    def register_cleanup(self, fn):
        """Register a function to run (LIFO) when the loop shuts down."""
        self.cleanup_functions.append(fn)

    def unregister_cleanup(self, fn, ignore_error=False):
        try:
            self.cleanup_functions.remove(fn)
        except ValueError:
            if not ignore_error:
                raise

    def _cleanup(self):
        # Execute cleanup functions
        for cleanup in reversed(self.cleanup_functions):
            try:
                cleanup()
            except Exception as e:
                self.logging.logE("Error in cleanup function {}".format(str(e)))
        self.cleanup_functions = []
        # Execute asynchronous cleanup actions
        # Keep pumping the loop until all tasks/futures drain, with a cap
        # of ~31 iterations (about 3s at the 100 ms loop timeout).
        count = 0
        while self.async_pomp_task or self.deferred_pomp_task or self.futures:
            self._wait_and_process()
            self._run_task_list(self.async_pomp_task)
            self._run_task_list(self.deferred_pomp_task)
            if count > 30:
                self.logging.logE('Deferred cleanup action are still pending after 3s')
                break
            count += 1
        self.async_pomp_task = []
        self.deferred_pomp_task = []
        self.futures = []

    def _register_future(self, f):
        # `f` is the id() of a Future (see Future._register).
        self.futures.append(f)

    def _unregister_future(self, f, ignore_error=False):
        try:
            self.futures.remove(f)
        except ValueError:
            if not ignore_error:
                raise
| [
"nicolas.dessart@parrot.com"
] | nicolas.dessart@parrot.com |
ef0dd796c9cb6f41e96663d4f8b3787ddfcb2c4b | 4314a813434328d3b92bb002fefe329acfdf5b7c | /session3/hw/ccif.py | 20c4d2922be23ce3b9c798aed3e08306b336b43c | [] | no_license | dotiendat2301/DATD4E11 | 0430264ecbc75be6dd83648c5be2924eedf3ed3d | 75ec9ad27ad770ee97e73de238b9dc6e8824ea3f | refs/heads/master | 2022-08-29T06:14:38.326752 | 2020-05-23T14:19:52 | 2020-05-23T14:19:52 | 264,968,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | c=int(input("Enter The Temprature In Cencius :"))
# Fahrenheit = 32 + 1.8 * Celsius; `c` is the integer read from the user above.
f = 32 + c*1.8
print(c, "(C)=", f, "(F)")
"dotiendat2301@gmail.com"
] | dotiendat2301@gmail.com |
e89fd12c00e917aec1603917c8b784f4c0856fc4 | 2c271619e27d516dd9e6a19c32130e6fcc500d60 | /linearbag.py | 26c256ccab66a42d4074043ca37ef0396cfac72c | [] | no_license | RevolF/dataStructAndAlgosInPython | 5780db89457b114b4a3f0d7cc69940184b0b12a6 | d69c0f04de279b85fee558e2ebb8cea77753a138 | refs/heads/master | 2021-01-18T06:42:53.470643 | 2017-04-19T05:44:40 | 2017-04-19T05:44:40 | 84,284,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 27 16:01:01 2017
@author: thor
"""
#==============================================================================
# use __str__ and __repr__ for print class
#==============================================================================
class Bag:
    """An unordered container that may hold duplicate items."""

    def __init__(self):
        self._theItems = list()

    def __len__(self):
        return len(self._theItems)

    def __contains__(self, item):
        return item in self._theItems

    def __str__(self):
        return str(self._theItems)

    def __repr__(self):
        return str(self)

    def add(self, item):
        """Add `item` to the bag."""
        self._theItems.append(item)

    def remove(self, item):
        """Remove and return one occurrence of `item`.

        Raises AssertionError if the item is not present.
        """
        assert item in self._theItems, 'the item must in the bag'
        ndx = self._theItems.index(item)
        return self._theItems.pop(ndx)

    def __iter__(self):
        # Bug fix: __iter__ previously declared a spurious `item` parameter,
        # so iter(bag) and `for x in bag` raised TypeError. The iterator
        # protocol requires __iter__(self) only.
        return _BagIterator(self._theItems)
class _BagIterator:
def __init__(self,theList):
self._bagItems=theList
self._curItem=0
def __iter__(self):
return self
def __next__(self):
if self._curItem<len(self._bagItems):
item=self._bagItems[self._curItem]
self._curItem+=1
return item
else:
raise StopIteration
def test():
    """Demonstrate manual use of the bag's iterator protocol.

    Bug fix: the original used the Python-2-only statement `print item`,
    which is a SyntaxError under Python 3; `print(item)` behaves the same
    on both interpreters for a single argument.
    """
    myBag = Bag()
    myBag.add(1)
    iterator = myBag.__iter__()
    while True:
        try:
            item = iterator.__next__()
            print(item)
        except StopIteration:
            break
| [
"929042297@qq.com"
] | 929042297@qq.com |
46704702b85011345fc39dacbe1433db96bfee18 | 34932f68b9878081748d96f267bd7a8359c24ffc | /code/derivatives.py | 4acdd9ae7c4a81771d706b2786c1eb10623caf02 | [] | no_license | rossfadely/wfc3psf | 388160cd692d77e4db24668a924f12004099d572 | b0ac9fd1ed993f250cd1923d6a4ca16dd7f42a70 | refs/heads/master | 2020-06-04T08:54:15.044796 | 2014-12-15T20:40:38 | 2014-12-15T20:40:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,859 | py | import multiprocessing
import numpy as np
from patch_fitting import eval_nll, make_background, evaluate
from generation import render_psfs
def get_derivatives(data, dq, shifts, psf_model, old_nlls, fit_parms, masks,
                    parms):
    """
    Calculate the derivatives of the objective (in patch_fitting)
    with respect to the psf model.

    Returns (derivatives, old_reg): the per-pixel gradient (data term
    averaged over contributing patches plus the regularization gradient)
    and the regularization value of the current model.
    """
    # derivative of regularization term
    old_reg, reg_term = reg(psf_model, parms)
    # calculate derivative of nll term
    pool = multiprocessing.Pool(parms.Nthreads)
    mapfn = pool.map
    # Forward-differencing step per psf pixel.
    steps = psf_model.copy() * parms.h
    argslist = [None] * parms.Ndata
    for i in range(parms.Ndata):
        argslist[i] = (data[i], shifts[None, i], psf_model, old_nlls[i],
                       fit_parms[i], masks[i], steps, parms)
    results = list(mapfn(one_datum_nll_diff, [args for args in argslist]))
    # Average over the patches that actually contributed (all-zero results
    # come from patches discarded inside one_datum_nll_diff).
    Neff = 0
    derivatives = np.zeros_like(psf_model)
    for i in range(parms.Ndata):
        derivatives += results[i]
        if np.any(results[i][0] != 0.0):
            Neff += 1
    if Neff == 0:
        derivatives = np.zeros_like(psf_model)
    else:
        derivatives /= Neff
    # NOTE(review): with no contributing patches the returned gradient is
    # the regularization term alone -- confirm this is intended.
    derivatives += reg_term
    # tidy up
    pool.close()
    pool.terminate()
    pool.join()
    return derivatives, old_reg
def reg(psf_model, parms):
    """
    Compute the regularization term and its derivative wrt the psf.

    The regularization is a quadratic smoothness prior: for each pixel,
    the squared differences with its (up to four) immediate neighbors,
    scaled by `parms.eps`.

    Parameters
    ----------
    psf_model : ndarray
        Current 2D psf model.
    parms : object
        Must provide `eps`; `eps is None` disables regularization.

    Returns
    -------
    r : ndarray
        Per-pixel regularization values, same shape as `psf_model`.
    d : ndarray
        Per-pixel derivative of the regularization, same shape.
    """
    eps = parms.eps
    if (eps is None):
        # Bug fix: this branch used to return a single array, but callers
        # unpack two values (`old_reg, reg_term = reg(...)`), which would
        # raise ValueError (or silently mis-unpack a 2-row psf).  Return
        # a (value, derivative) pair of zeros instead.
        return np.zeros_like(psf_model), np.zeros_like(psf_model)
    psf_shape = psf_model.shape
    d = np.zeros_like(psf_model)
    r = np.zeros_like(psf_model)
    for i in range(psf_shape[0]):
        for j in range(psf_shape[1]):
            # accumulate squared differences with the 4-neighborhood,
            # skipping neighbors that fall outside the model.
            if i > 0:
                r[i, j] += (psf_model[i, j] - psf_model[i - 1, j]) ** 2.
                d[i, j] += 2. * (psf_model[i, j] - psf_model[i - 1, j])
            if j > 0:
                r[i, j] += (psf_model[i, j] - psf_model[i, j - 1]) ** 2.
                d[i, j] += 2. * (psf_model[i, j] - psf_model[i, j - 1])
            if i < psf_shape[0] - 1:
                r[i, j] += (psf_model[i, j] - psf_model[i + 1, j]) ** 2.
                d[i, j] += 2. * (psf_model[i, j] - psf_model[i + 1, j])
            if j < psf_shape[1] - 1:
                r[i, j] += (psf_model[i, j] - psf_model[i, j + 1]) ** 2.
                d[i, j] += 2. * (psf_model[i, j] - psf_model[i, j + 1])
    r *= eps
    d *= eps
    return r, d
def regularization_derivative(psf_model, parms):
    """
    Compute derivative of regularization wrt the psf.

    Uses forward finite differences: each psf pixel is perturbed by
    `parms.h * psf_model[i, j]` and the resulting change in the local
    regularization is measured, one worker task per pixel.

    Returns a tuple ``(derivative, old_reg)`` where `derivative` has
    shape `parms.psf_model_shape` and `old_reg` is the per-pixel
    regularization at the unperturbed model.
    """
    # old regularization
    old_reg = local_regularization((psf_model, parms, None))
    # Map to the processes
    pool = multiprocessing.Pool(parms.Nthreads)
    mapfn = pool.map
    # compute perturbed reg
    # NOTE(review): `hs` is zero wherever the psf pixel is zero, which
    # would make the division at the end produce inf/nan -- presumably
    # the psf model is strictly positive here; confirm.
    hs = parms.h * psf_model.copy()
    argslist = [None] * parms.psf_model_shape[0] * parms.psf_model_shape[1]
    for i in range(parms.psf_model_shape[0]):
        for j in range(parms.psf_model_shape[1]):
            # flatten (i, j) into a row-major task index
            idx = i * parms.psf_model_shape[1] + j
            tmp_psf = psf_model.copy()
            tmp_psf[i, j] += hs[i, j]
            argslist[idx] = (tmp_psf, parms, (i, j))
    new_reg = np.array((mapfn(local_regularization,
                              [args for args in argslist])))
    new_reg = new_reg.reshape(parms.psf_model_shape)
    # tidy up
    pool.close()
    pool.terminate()
    pool.join()
    # forward-difference estimate of d(reg)/d(psf)
    return (new_reg - old_reg) / hs, old_reg
def one_datum_nll_diff((datum, shift, psf_model, old_nll, fitparms, mask,
                        steps, parms)):
    """
    Calculate the derivative for a single datum using forward differencing.

    Takes a single tuple argument (Python 2 style) so it can be mapped
    with multiprocessing.Pool.map.  Returns an array shaped like
    `psf_model` holding d(nll)/d(psf pixel) for this patch, or all zeros
    if the patch has too few unmasked pixels.
    """
    # if not enough good pixels, discard patch
    min_pixels = np.ceil(parms.min_frac * datum.size)
    if datum[mask].size < min_pixels:
        return np.zeros_like(psf_model)
    # background model
    if parms.background == 'linear':
        # NOTE(review): `psf` is not defined yet on this branch, so this
        # block raises NameError if ever reached -- confirm the 'linear'
        # background mode is unused, or fix before enabling it.  The
        # duplicated ones row in `A` also looks suspect.
        N = np.sqrt(psf_model.size).astype(np.int)
        x, y = np.meshgrid(range(N), range(N))
        A = np.vstack((np.ones_like(psf), np.ones_like(psf),
                       x.ravel(), y.ravel())).T
        bkg = make_background(datum, A, fitparms, parms.background)
    elif parms.background == None:
        bkg = 0.0
    else:
        # constant background: the last fitted parameter
        bkg = fitparms[-1]
    # calculate the difference in nll, tweaking each psf parm.
    # NOTE(review): this shadows the `steps` argument with an identical
    # recomputation (the caller passes psf_model.copy() * parms.h).
    steps = parms.h * psf_model
    deriv = np.zeros_like(psf_model)
    for i in range(parms.psf_model_shape[0]):
        for j in range(parms.psf_model_shape[1]):
            temp_psf = psf_model.copy()
            temp_psf[i, j] += steps[i, j]
            # re-render the shifted psf for this patch and measure the
            # change in negative log-likelihood on the unmasked pixels
            psf = render_psfs(temp_psf, shift, parms.patch_shape,
                              parms.psf_grid)[0]
            model = fitparms[0] * psf + bkg
            diff = eval_nll(datum[mask], model[mask], parms) - old_nll[mask]
            deriv[i, j] = np.sum(diff) / steps[i, j]
    return deriv
def local_regularization((psf_model, parms, idx)):
    """
    Calculate the local regularization for each pixel.

    Takes a single tuple argument (Python 2 style) for use with
    multiprocessing.Pool.map.  If `idx` is None the regularization is
    computed for every pixel (returns an array shaped like `psf_model`);
    otherwise only for the single pixel at `idx` (returns a scalar).
    The penalty is `eps` times the summed squared differences with the
    nearest neighbors along each axis, with out-of-range neighbor
    indices clamped to the boundary.
    """
    eps = parms.eps
    # `gamma` is only used by the commented-out l2 term below
    gamma = parms.gamma
    if (eps is None):
        if idx is None:
            return np.zeros_like(psf_model)
        else:
            return 0.0
    # offsets selecting the previous/next neighbor along an axis
    pm = np.array([-1, 1])
    psf_shape = psf_model.shape
    reg = np.zeros_like(psf_model)
    if idx is None:
        # axis 0
        idx = np.arange(psf_shape[0])
        ind = idx[:, None] + pm[None, :]
        ind[ind == -1] = 0 # boundary foo
        ind[ind == psf_shape[0]] = psf_shape[0] - 1 # boundary foo
        for i in range(psf_shape[1]):
            diff = psf_model[ind, i] - psf_model[idx, i][:, None]
            reg[:, i] += eps * np.sum(diff ** 2., axis=1)
        # axis 1
        idx = np.arange(psf_shape[1])
        ind = idx[:, None] + pm[None, :]
        ind[ind == -1] = 0 # boundary foo
        ind[ind == psf_shape[1]] = psf_shape[1] - 1 # boundary foo
        for i in range(psf_shape[0]):
            diff = psf_model[i, ind] - psf_model[i, idx][:, None]
            reg[i, :] += eps * np.sum(diff ** 2., axis=1)
        # l2 norm
        #reg += gamma * psf_model ** 2.
        # floor
        #reg += 1.e-1 / (1. + np.exp((psf_model - 4e-5) * 2.e5))
    else:
        idx = np.array(idx)
        value = psf_model[idx[0], idx[1]]
        # axis 0
        ind = idx[:, None] + pm[None, :]
        ind[ind == -1] = 0 # lower edge case
        ind[ind == psf_shape[0]] = psf_shape[0] - 1 # upper edge case
        diff = psf_model[ind[0], idx[1]] - value
        reg = eps * np.sum(diff ** 2.)
        # axis 1
        ind = idx[:, None] + pm[None, :]
        ind[ind == -1] = 0 # lower edge case
        ind[ind == psf_shape[1]] = psf_shape[1] - 1 # upper edge case
        diff = psf_model[idx[0], ind[1]] - value
        reg += eps * np.sum(diff ** 2.)
        # l2 norm
        #reg += gamma * value ** 2.
        # floor
        #reg += 1.e-1 / (1. + np.exp((value - 4e-5) * 2.e5) )
    return reg
| [
"rossfadely@gmail.com"
] | rossfadely@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.