| blob_id (string, 40) | directory_id (string, 40) | path (string, 2-616) | content_id (string, 40) | detected_licenses (list, 0-69 items) | license_type (2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, 2-10.3M) | authors (list, 1 item) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
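Each row that follows holds one source file (the `content` cell) together with its repository and GitHub metadata. As a reading aid, a minimal sketch of filtering rows with this schema is given below; the file name `rows.parquet`, the pandas/pyarrow tooling, and the specific filters are illustrative assumptions, not something the table itself specifies.

```python
# Hedged sketch: assumes the rows above are available locally as "rows.parquet"
# (a hypothetical file name) and that pandas with a parquet engine is installed.
import pandas as pd

df = pd.read_parquet("rows.parquet")

# Keep permissively licensed, non-vendored, non-generated files under 100 kB.
mask = (
    (df["license_type"] == "permissive")
    & ~df["is_vendor"]
    & ~df["is_generated"]
    & (df["length_bytes"] < 100_000)
)
for _, row in df[mask].iterrows():
    print(row["repo_name"], row["path"], row["length_bytes"])
```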
10f0e4d1efac1de782eb9787d553aaa17c0cf94c
|
2976c5e9c534ace3e25674ace113ed3d920cc05c
|
/progress_bar/tests/test_progress_bar.py
|
084f7792e05ff5156a81e9757bd5cc0406e94a1d
|
[] |
no_license
|
nstanger/process_podcast
|
cc1d5d3813dc002b52a2cf169bf2d378bfb83952
|
16761c7dc62b036decedd67c24529d198b9d2a85
|
refs/heads/master
| 2021-08-28T15:47:49.280134
| 2021-08-23T10:30:30
| 2021-08-23T10:30:30
| 68,779,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,909
|
py
|
from contextlib import contextmanager
from io import StringIO
import sys
import unittest
from progress_bar import ProgressBar
@contextmanager
def captured_output():
"""Capture stdout and stderr so we can assert against them."""
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
class ProgressBarTestCase(unittest.TestCase):
"""Test the ProgressBar class."""
DEFAULT_VALUE = 0
DEFAULT_MAX_VALUE = 100
DEFAULT_PRINT_WIDTH = 50
DEFAULT_NEWLINE = "\r"
def setUp(self):
"""Set up for test."""
self.bar = ProgressBar(quiet=True)
def tearDown(self):
"""Clean up after test."""
self.bar = None
def test_defaults(self):
"""Test default values are correct."""
test_data = (
(self.bar.value, self.DEFAULT_VALUE,
"default value = {v}".format(v=self.DEFAULT_VALUE)),
(self.bar.initial_value, self.DEFAULT_VALUE,
"default initial value = {v}".format(v=self.DEFAULT_VALUE)),
(self.bar.max_value, self.DEFAULT_MAX_VALUE,
"default max value = {v}".format(v=self.DEFAULT_MAX_VALUE)),
(self.bar.print_width, self.DEFAULT_PRINT_WIDTH,
"default print width = {v}".format(v=self.DEFAULT_PRINT_WIDTH)),
(self.bar.newline, self.DEFAULT_NEWLINE,
"default newline = {v}".format(v=self.DEFAULT_NEWLINE)),
)
for actual, expected, description in test_data:
with self.subTest(msg=description):
self.assertEqual(actual, expected)
def test_set_and_reset(self):
"""Test setting and resetting the current value."""
self.bar.set(value=50)
self.assertEqual(self.bar.value, 50)
self.bar.reset()
self.assertEqual(self.bar.value, 0)
    def test_update_and_draw(self):
"""Test updating and drawing the progress bar."""
self.bar.reset()
self.bar.quiet = False
for i in range(self.bar.initial_value, self.bar.max_value + 1):
percent = int(i * 100 / self.bar.max_value)
dots = int(i * self.bar.print_width / self.bar.max_value)
expected_bar = "{nl}[{c}{nc}] {p}% ".format(
c="".join(["+"] * dots),
nc="".join(["."] * (self.bar.print_width - dots)),
p=percent, nl=self.bar.newline)
with captured_output() as (out, _):
self.bar.update(i)
with self.subTest(msg="value = {v}".format(v=i)):
self.assertEqual(self.bar.value, i)
with self.subTest(msg="output = {v}".format(v=expected_bar)):
self.assertEqual(out.getvalue(), expected_bar)
|
[
"nigel.stanger@otago.ac.nz"
] |
nigel.stanger@otago.ac.nz
|
5abbfb92bebe2926a9fbaeed80d3c343edc5ecd4
|
9be74fed44caef7bd9a8913b872911564cbdd5f2
|
/BaseDeDatos/Semana4/call_terceraui.py
|
7414cc1d7696b8bfbd8453d90f70ae0328333a78
|
[] |
no_license
|
Genshzkan/ToulouseLautrec
|
c19e8572620728d69857233ec2e2dfa23f63a007
|
33ff93bfadf65d0ae13e4c6e6c61c9fcfd22c3bc
|
refs/heads/master
| 2022-10-10T18:21:01.061019
| 2020-06-14T15:15:24
| 2020-06-14T15:15:24
| 260,724,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,642
|
py
|
import sqlite3, sys
from PyQt5.QtWidgets import QDialog, QApplication
from sqlite3 import Error
from terceraui import *
tabledefinition=""
class MyForm(QDialog):
def __init__(self):
super().__init__()
self.ui = Ui_Dialog()
self.ui.setupUi(self)
self.ui.pushButtonCreateTable.clicked.connect(self.createTable)
self.ui.pushButtonAddColumn.clicked.connect(self.addColumns)
self.show()
def addColumns(self):
global tabledefinition
if tabledefinition=="":
tabledefinition="CREATE TABLE IF NOT EXISTS "+ self.ui.lineEditTableName.text()+" ("+ self.ui.lineEditColumnName.text()+" "+ self.ui.comboBox.itemText(self.ui.comboBox.currentIndex())
else:
tabledefinition+=","+self.ui.lineEditColumnName.text()+" "+ self.ui.comboBox.itemText(self.ui.comboBox.currentIndex())
self.ui.lineEditColumnName.setText("")
self.ui.lineEditColumnName.setFocus()
def createTable(self):
global tabledefinition
try:
conn = sqlite3.connect(self.ui.lineEditDBName.text()+".db")
self.ui.labelResponse.setText("Database is connected")
c = conn.cursor()
tabledefinition+=");"
c.execute(tabledefinition)
self.ui.labelResponse.setText("Table is successfully created")
except Error as e:
self.ui.labelResponse.setText("Error in creating table")
finally:
conn.close()
if __name__=="__main__":
app = QApplication(sys.argv)
w = MyForm()
w.show()
sys.exit(app.exec_())
|
[
"noreply@github.com"
] |
Genshzkan.noreply@github.com
|
28ec052e9c58a50f9db14275c3fe505405877f48
|
dd098f8a93f787e38676283679bb39a290ba28b4
|
/samples/openapi3/client/3_0_3_unit_test/python-experimental/test/test_models/test_anyof.py
|
197c3449a9e49196e0d0dc3b0844ab50910bddba
|
[
"Apache-2.0"
] |
permissive
|
InfoSec812/openapi-generator
|
727c0235d3bad9b85ac12068808f844287af6003
|
e0c72702c3d5dae2a627a2926f0cddeedca61e32
|
refs/heads/master
| 2022-10-22T00:31:33.318867
| 2022-08-20T14:10:31
| 2022-08-20T14:10:31
| 152,479,633
| 1
| 0
|
Apache-2.0
| 2023-09-04T23:34:09
| 2018-10-10T19:38:43
|
Java
|
UTF-8
|
Python
| false
| false
| 1,385
|
py
|
# coding: utf-8
"""
openapi 3.0.3 sample spec
sample spec for testing openapi functionality, built from json schema tests for draft6 # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by: https://openapi-generator.tech
"""
import unittest
import unit_test_api
from unit_test_api.model.anyof import Anyof
from unit_test_api import configuration
class TestAnyof(unittest.TestCase):
"""Anyof unit test stubs"""
_configuration = configuration.Configuration()
def test_second_anyof_valid_passes(self):
# second anyOf valid
Anyof._from_openapi_data(
2.5,
_configuration=self._configuration
)
def test_neither_anyof_valid_fails(self):
# neither anyOf valid
with self.assertRaises((unit_test_api.ApiValueError, unit_test_api.ApiTypeError)):
Anyof._from_openapi_data(
1.5,
_configuration=self._configuration
)
def test_both_anyof_valid_passes(self):
# both anyOf valid
Anyof._from_openapi_data(
3,
_configuration=self._configuration
)
def test_first_anyof_valid_passes(self):
# first anyOf valid
Anyof._from_openapi_data(
1,
_configuration=self._configuration
)
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
InfoSec812.noreply@github.com
|
35197901f27c1fbf3371fa2e0dd9b5bb490cac04
|
43c653032caee6e440c4a62ca953a43159872652
|
/bin/scripts/entities/counter.py
|
e43048d5d3fdd457815e8834532009cd107424bd
|
[] |
no_license
|
KentMadsen/Private-Scraper
|
a39b23c552b86dba9629374be4500d1ec853d1a4
|
2230c0e2b79cc99389a344069a44265068dff029
|
refs/heads/master
| 2020-08-07T13:04:51.232139
| 2019-10-08T14:01:05
| 2019-10-08T14:01:05
| 213,461,958
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 755
|
py
|
zero = 0
movement_size = 1
class Counter:
global zero, movement_size
""" """
def __init__(self):
self.value = zero
self.movement = movement_size
def increase(self, variable_value):
self.value = self.value + variable_value
def increment(self):
self.increase(self.movement)
def decrease(self, variable_value):
self.value = self.value - variable_value
def decrement(self):
self.decrease(self.movement)
def reset(self):
self.set_value(zero)
def is_zero(self):
return self.get_value() == zero
def get_value(self):
return self.value
def set_value(self, variable_value):
self.value = variable_value
return self.value
|
[
"20760795+KentMadsen@users.noreply.github.com"
] |
20760795+KentMadsen@users.noreply.github.com
|
c6c2dc5894b4ae4053e691e30c01a290561dc8d3
|
228b514d129506adffb354314d8ce1b2c382aabb
|
/core/migrations/0004_auto_20211020_1615.py
|
3d5f3b7c2ce2e7bcf269cee07677f94e7a24842d
|
[] |
no_license
|
sannjayy/chat-support-django
|
1bab78d28380d3e86db61187af086408d85e6878
|
7473632d6534afab22dad1b3b7c17a5afda0983f
|
refs/heads/master
| 2023-08-31T09:53:08.316121
| 2021-10-27T12:36:05
| 2021-10-27T12:36:05
| 418,448,166
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
# Generated by Django 3.2.8 on 2021-10-20 16:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20211019_1601'),
]
operations = [
migrations.AddField(
model_name='user',
name='mobile',
field=models.CharField(blank=True, max_length=15, null=True, verbose_name='Mobile number'),
),
migrations.AddField(
model_name='user',
name='whatsapp',
field=models.CharField(blank=True, max_length=15, null=True, verbose_name='Whatsapp number'),
),
]
|
[
"znasofficial@gmail.com"
] |
znasofficial@gmail.com
|
6fd20aa2da6073c474f234458c001826634eb8e6
|
356a3a5484f6e508b071fcad318ca3c28eff8589
|
/examples/example_depth.py
|
3aedc0dc3d84cb09d106474b828b166a5e366d5b
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
HighLordGame/FaceEngine
|
0fe20d5f3a7480912627266c1fb219ca1e30fbbb
|
7315e413b03a2234017b20a1fbe85b567397e81f
|
refs/heads/master
| 2023-06-07T22:04:23.351402
| 2021-06-25T04:19:45
| 2021-06-25T04:19:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,897
|
py
|
import sys
import argparse
import cv2
import numpy as np
#Parse arguments and load additional modules
parser = argparse.ArgumentParser(description = 'Luna SDK realsense depth demo')
group = parser.add_argument_group('Required arguments')
group.add_argument('--data', required = True, help = 'absolute path to Luna SDK data directory')
group.add_argument('--bindPath', required = True, help = 'absolute path to Luna SDK bindings directory')
group.add_argument('--rsbindPath', required = True, help = 'absolute path to realsense bindings directory')
args = parser.parse_args()
print("Arguments are: data: {} bindPath: {} rsbindPath: {}".format(args.data, args.bindPath, args.rsbindPath))
sys.path += (args.bindPath, args.rsbindPath)
import FaceEngine as fe
import pyrealsense2 as rs
if __name__ == "__main__":
#create Face Engine root object
root = fe.createFaceEngine(args.data)
#check license
license = root.getLicense()
if not root.activateLicense(license, args.data + "/license.conf"):
print("Failed to activate license!")
exit(-1)
detector = root.createDetector(fe.FACE_DET_V1)
estimator = root.createDepthEstimator()
warper = root.createWarper()
# Configure streams and start the pipeline
pipe = rs.pipeline()
pipeProfile = pipe.start()
device = pipeProfile.get_device()
print(device)
    # Since RealSense depth sensors report pixel values in hardware-specific units,
    # we have to convert them to millimeters for the depth estimator to work properly
depthSensor = device.first_depth_sensor()
depthScale = depthSensor.get_depth_scale() * 1000
print("Depth scale is {}".format(depthScale))
#For warper to work properly, we need to align both stream viewports.
align_to = rs.stream.color
depthToColorAlignment = rs.align(align_to)
#create window
cv2.namedWindow('Realsense depth example', cv2.WINDOW_AUTOSIZE)
# main processing loop
try:
while True:
# Get frameset of color and depth
frames = pipe.wait_for_frames()
# Align the depth frame to color frame
alignedFrames = depthToColorAlignment.process(frames)
# Get aligned frames
depthFrame = alignedFrames.get_depth_frame()
rgbFrame = alignedFrames.get_color_frame()
# Validate that both frames are valid
if not depthFrame or not rgbFrame:
continue
            # convert depth map to millimeters
depthFrame = np.asanyarray(depthFrame.get_data())
depthFrame = (depthFrame * depthScale).astype(np.int16)
rgbFrame = np.asanyarray(rgbFrame.get_data())
#convert incoming frames to SDK images
depthImage = fe.Image()
depthImage.setData(depthFrame, fe.FormatType.R16)
rgbImage = fe.Image()
rgbImage.setData(rgbFrame, fe.FormatType.R8G8B8)
#perform detection
err, face = detector.detectOne(rgbImage, rgbImage.getRect(), fe.DetectionType(fe.dt5Landmarks))
#prepare cv image for visualisation
cvRGBImage = cv2.cvtColor(rgbFrame, cv2.COLOR_BGR2RGB)
if(err.isError):
print("Failed to detect!")
else:
if(face.isValid() and face.landmarks5_opt.isValid()):
#warp depth image
transformation = warper.createTransformation(face.detection, face.landmarks5_opt.value())
warpResult, warpImage = warper.warp(depthImage, transformation)
if warpResult.isError:
print("Failed to warp image!")
#perform depth liveness estimation
error, output = estimator.estimate(warpImage)
if(error.isOk):
color = (0, 255, 0) if output.isReal else (0, 0, 255)
box = face.detection.rect
#draw bbox
cv2.rectangle(cvRGBImage,
(int(box.x), int(box.y)),
(int(box.x + box.width), int(box.y + box.height)),
color, 2)
print(output)
cv2.imshow('Realsense depth example', cvRGBImage)
key = cv2.waitKey(1)
# Press esc or 'q' to close the image window
if key & 0xFF == ord('q') or key == 27:
cv2.destroyAllWindows()
break
finally:
pipe.stop()
|
[
"a.balashov@visionlabs.ru"
] |
a.balashov@visionlabs.ru
|
2e3d23bb4aa493a361704b390ed48ae2ea9b0542
|
0ccfa5a1144faab1377fa9dd74867155edf013d4
|
/BP.py
|
597e358608a8903afed15ca9f53f9aad94a7b8a2
|
[] |
no_license
|
jarangol/ML
|
e8b6559d8870587bc1c98330b19714040fba6d4e
|
7f5a26de1ad69d9768603603b72c70273a40f7a2
|
refs/heads/master
| 2020-04-21T22:40:57.874888
| 2019-04-07T00:33:36
| 2019-04-07T00:33:36
| 169,920,643
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,985
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[16]:
import numpy as np
class ANN():
def __init__(self, nn_structure, eta):
self.nn_structure = nn_structure
self.eta = eta
def set_weights(self,weights,b):
self.W = weights
self.b = b
def f(self,x):
return 1 / (1 + np.exp(-x))
def f_deriv(self,x):
return self.f(x) * (1 - self.f(x))
def feed_forward(self,x):
h = {1:x}
z = {}
for l in range(1,len(self.W)+1):
if l == 1:
node_in = x
else:
node_in = h[l]
z[l+1] = self.W[l].dot(node_in) + self.b[l]
h[l+1] = self.f(z[l+1])
return h, z
def init_tri_values(self):
tri_W = {}
tri_b = {}
for l in range(1,len(self.nn_structure)):
tri_W[l] = np.zeros((self.nn_structure[l], self.nn_structure[l-1]))
tri_b[l] = np.zeros((self.nn_structure[l],))
return tri_W, tri_b
def calculate_out_layer_delta(self,y,h_out,z_out):
return -(y-h_out)*self.f_deriv(z_out)
def calculate_hidden_delta(self,delta_plus_1,w_l,z_l):
return np.dot(np.transpose(w_l),delta_plus_1) * self.f_deriv(z_l)
def fit(self, X, y, iter_num= 10000):
print("Entrenando..")
cnt = 0
m = len(y)
avg_cost_func = []
while cnt < iter_num:
if cnt%1000 == 0:
print('Iteration {} of {}'.format(cnt, iter_num))
tri_W, tri_b = self.init_tri_values()
avg_cost = 0
for i in range(len(y)):
delta = {}
h, z = self.feed_forward(X[i,:])
for l in range(len(self.nn_structure), 0, -1):
if l == len(self.nn_structure):
delta[l] = self.calculate_out_layer_delta(y[i, :], h[l], z[l])
avg_cost += np.linalg.norm((y[i,:]-h[l]))
else:
if l > 1:
delta[l] = self.calculate_hidden_delta(delta[l+1], self.W[l], z[l])
tri_W[l] += np.dot(delta[l+1][:,np.newaxis], np.transpose(h[l][:,np.newaxis]))
tri_b[l] += delta[l+1]
for l in range(len(self.nn_structure) - 1, 0, -1):
self.W[l] += -self.eta * (1.0/m * tri_W[l])
                self.b[l] += -self.eta * (1.0/m * tri_b[l])
avg_cost = 1.0/m * avg_cost
avg_cost_func.append(avg_cost)
cnt += 1
return self.W, self.b
def predict(self, X):
m = X.shape[0]
y = np.zeros(X.shape)
for i in range(m):
h, z = self.feed_forward(X[i, :])
y[i] = h[len(self.nn_structure)]
print(y)
return y
# In[17]:
w1 = np.array([[.15, .2], [.25, .3]])
w2 = np.array([[.4, .45], [.5, .55]])
b1 = np.array([.35, .35])
b2 = np.array([.6, .6])
W = {1:w1, 2:w2}
b = {1:b1, 2:b2}
X = np.array([[.05, .1]])
y = np.array([[.01, .99]])
# In[18]:
nn_structure = [2,2,2]
model = ANN(nn_structure, .5)
model.set_weights(W,b)
y_pred = model.predict(X)
model.fit(X, y)
y_pred2 = model.predict(X)
|
[
"jarangol@eafit.edu.co"
] |
jarangol@eafit.edu.co
|
801500a3039b52130e88ad991a245baa74a5c88f
|
f78daddf7d83d1f6b9f13ac973cdb33d40717839
|
/examples/petrol/conanfile.py
|
a09ae6e5cb24d5171ebd32986be6ddfa9eac35de
|
[
"Apache-2.0"
] |
permissive
|
xtuml/masl
|
fe43c7131d0aea919f24136aaf1686162629f70f
|
80d07b41573bdad9c191e2d7d6a18ee59c3cf5c7
|
refs/heads/master
| 2023-08-31T07:12:58.662239
| 2023-08-22T18:28:17
| 2023-08-22T18:31:28
| 71,495,752
| 6
| 13
| null | 2023-09-12T14:39:41
| 2016-10-20T19:14:06
|
Java
|
UTF-8
|
Python
| false
| false
| 1,084
|
py
|
# ----------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# Classification: UK OFFICIAL
# ----------------------------------------------------------------------------
import conan
class ConanFile(conan.ConanFile):
name = "masl_examples_petrol"
version = "0.1"
user = 'xtuml'
channel = 'stable'
python_requires = 'masl_conan/[>=0.1]@xtuml/stable'
python_requires_extend = 'masl_conan.MaslConanHelper'
|
[
"levi@roxsoftware.com"
] |
levi@roxsoftware.com
|
fcfe732e166431bbb85b5f073a245161e166f9a1
|
8c24d0ac619a2e83e574bb0bf9172b21bc6a06a6
|
/Dialog/WaferInkDialog.py
|
70428730352cd67d0e29894783d338ce9db72402
|
[] |
no_license
|
qiujian3328103/pySortMap
|
7142e02158a7fbb080c7132b59511511423af5bf
|
c013ba6bea2e16c286401261a3c15622871a0aee
|
refs/heads/master
| 2023-09-04T10:56:12.908632
| 2023-08-21T05:34:08
| 2023-08-21T05:34:08
| 363,678,584
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,783
|
py
|
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtGui import QColor, QPixmap, QPalette
from PyQt5.QtWidgets import QDialog, QApplication, QColorDialog
from UI import Ui_Ink_Dialog
class ShowInkDialog(QDialog, Ui_Ink_Dialog):
ink_signal = pyqtSignal(dict)
close_ink_dialog_signal = pyqtSignal(bool)
def __init__(self):
super(QDialog, self).__init__()
Ui_Ink_Dialog.__init__(self)
self.setupUi(self)
        # initial color for the polygon tool
self.toolButton_pick_poly_edge_color.setText("")
self.toolButton_pick_poly_edge_color.setStyleSheet("background-color: {}".format(QColor('blue').name()))
self.toolButton_pick_poly_point_color.setText("")
self.toolButton_pick_poly_point_color.setStyleSheet("background-color: {}".format(QColor('green').name()))
self.toolButton_pick_poly_click_color.setText("")
self.toolButton_pick_poly_click_color.setStyleSheet("background-color: {}".format(QColor('red').name()))
self.ink_selection = {
"auto_ink": False,
"click_die_ink": self.toolButton_click.isChecked(),
"drag_ink": self.toolButton_drag.isChecked(),
"polygon_ink": self.toolButton_polygon.isChecked(),
"update_polygon_ink": False,
"redraw_polygon": False,
"ink_shape": self.comboBox_ink_shape.currentText(),
"nearest_die_number": self.spinBox_ink_number.value(),
"window_close": False,
"polygon_props":{
"polygon_color": self.toolButton_pick_poly_edge_color.palette().color(QPalette.Background),
"polygon_radius": self.spinBox_polygon_radius.value(),
"polygon_width": self.spinBox_polygon_width_size.value(),
"grip_number": self.spinBox_polygon_num_points.value(),
"grip_ellipse_size": self.spinBox_point_size.value(),
"grip_ellipse_color": self.toolButton_pick_poly_point_color.palette().color(QPalette.Background),
"polygon_hover_color": self.toolButton_pick_poly_click_color.palette().color(QPalette.Background)
}
}
self.toolButton_click.clicked.connect(self.emitClickInkSignal)
self.toolButton_drag.clicked.connect(self.emitDragInkSignal)
self.toolButton_polygon.clicked.connect(self.emitPolygonInkSignal)
self.toolButton_reset.clicked.connect(self.emitClickInkSignal)
self.toolButton_auto_ink.clicked.connect(self.emitClickInkSignal)
self.toolButton_pick_poly_edge_color.clicked.connect(lambda : self.clickButtonPickColor(self.toolButton_pick_poly_edge_color))
self.toolButton_pick_poly_point_color.clicked.connect(lambda : self.clickButtonPickColor(self.toolButton_pick_poly_point_color))
self.toolButton_pick_poly_click_color.clicked.connect(lambda: self.pickDoubleClickPolygonColor(self.toolButton_pick_poly_click_color))
self.spinBox_polygon_num_points.valueChanged.connect(self.emitReDrawPolygonSignal)
self.spinBox_polygon_radius.valueChanged.connect(self.emitReDrawPolygonSignal)
self.spinBox_point_size.valueChanged.connect(self.emitReDrawPolygonSignal)
self.spinBox_polygon_width_size.valueChanged.connect(self.emitPolygonLineWidthChangeSignal)
self.spinBox_ink_number.valueChanged.connect(self.changeInkNearestDieNumber)
self.comboBox_ink_shape.currentTextChanged.connect(self.changeInkDieShape)
def emitClickInkSignal(self):
"""
change the tool states on toolButton
:return:
"""
        # click mode and drag mode are mutually exclusive; only one can be active at a time
# print(self.toolButton_click.isChecked())
#
if self.toolButton_click.isChecked():
self.toolButton_drag.setChecked(False)
self.toolButton_polygon.setChecked(False)
self.ink_selection.update({
"click_die_ink": self.toolButton_click.isChecked(),
"drag_ink": self.toolButton_drag.isChecked(),
"polygon_ink": self.toolButton_polygon.isChecked(),
"update_polygon_ink": False,
"redraw_polygon": False,
})
self.ink_signal.emit(self.ink_selection)
def emitDragInkSignal(self):
"""
        Change the tool state to drag mode
:return:
"""
if self.toolButton_drag.isChecked():
self.toolButton_click.setChecked(False)
self.toolButton_polygon.setChecked(False)
self.ink_selection.update({
"click_die_ink": self.toolButton_click.isChecked(),
"drag_ink": self.toolButton_drag.isChecked(),
"polygon_ink": self.toolButton_polygon.isChecked(),
"update_polygon_ink": False,
"redraw_polygon": False
})
self.ink_signal.emit(self.ink_selection)
def emitPolygonInkSignal(self):
"""
emit the PolygonInk
:return:
"""
if self.toolButton_polygon.isChecked():
self.toolButton_click.setChecked(True)
self.toolButton_drag.setChecked(False)
self.ink_selection.update({
"polygon_ink": self.toolButton_polygon.isChecked(),
"click_die_ink": self.toolButton_click.isChecked(),
"update_polygon_ink": False,
"redraw_polygon": False,
"polygon_props": {
"polygon_color": self.toolButton_pick_poly_edge_color.palette().color(QPalette.Background),
"polygon_radius": self.spinBox_polygon_radius.value(),
"polygon_width": self.spinBox_polygon_width_size.value(),
"grip_number": self.spinBox_polygon_num_points.value(),
"grip_ellipse_size": self.spinBox_point_size.value(),
"grip_ellipse_color": self.toolButton_pick_poly_point_color.palette().color(QPalette.Background),
"polygon_hover_color": self.toolButton_pick_poly_click_color.palette().color(QPalette.Background),
}
})
self.ink_signal.emit(self.ink_selection)
def clickButtonPickColor(self, Button):
"""
click the pick color button
:return:
"""
color = QColorDialog.getColor()
if color.isValid():
self.die_edge_color = color
Button.setStyleSheet("background-color: {}".format(color.name()))
self.ink_selection.update({
"update_polygon_ink": True,
"redraw_polygon": False,
"polygon_props": {
"polygon_color": self.toolButton_pick_poly_edge_color.palette().color(QPalette.Background),
"polygon_radius": self.spinBox_polygon_radius.value(),
"polygon_width": self.spinBox_polygon_width_size.value(),
"grip_number": self.spinBox_polygon_num_points.value(),
"grip_ellipse_size": self.spinBox_point_size.value(),
"grip_ellipse_color": self.toolButton_pick_poly_point_color.palette().color(QPalette.Background),
"polygon_hover_color": self.toolButton_pick_poly_click_color.palette().color(QPalette.Background),
}
})
self.ink_signal.emit(self.ink_selection)
def emitPolygonLineWidthChangeSignal(self):
"""
emit signal when the value changed
:return:
"""
self.ink_selection.update({
"update_polygon_ink": True,
"redraw_polygon": False,
"polygon_props": {
"polygon_color": self.toolButton_pick_poly_edge_color.palette().color(QPalette.Background),
"polygon_radius": self.spinBox_polygon_radius.value(),
"polygon_width": self.spinBox_polygon_width_size.value(),
"grip_number": self.spinBox_polygon_num_points.value(),
"grip_ellipse_size": self.spinBox_point_size.value(),
"grip_ellipse_color": self.toolButton_pick_poly_point_color.palette().color(QPalette.Background),
"polygon_hover_color": self.toolButton_pick_poly_click_color.palette().color(QPalette.Background),
}
})
self.ink_signal.emit(self.ink_selection)
def emitReDrawPolygonSignal(self):
"""
todo: find better way to change in GripItem instead of current method redo the plot for polygon
:return:
"""
self.ink_selection.update({
"update_polygon_ink": False,
"redraw_polygon": True,
"polygon_props": {
"polygon_color": self.toolButton_pick_poly_edge_color.palette().color(QPalette.Background),
"polygon_radius": self.spinBox_polygon_radius.value(),
"polygon_width": self.spinBox_polygon_width_size.value(),
"grip_number": self.spinBox_polygon_num_points.value(),
"grip_ellipse_size": self.spinBox_point_size.value(),
"grip_ellipse_color": self.toolButton_pick_poly_point_color.palette().color(QPalette.Background),
"polygon_hover_color": self.toolButton_pick_poly_click_color.palette().color(QPalette.Background),
}
})
self.ink_signal.emit(self.ink_selection)
def pickDoubleClickPolygonColor(self, Button):
color = QColorDialog.getColor()
if color.isValid():
self.die_edge_color = color
Button.setStyleSheet("background-color: {}".format(color.name()))
self.ink_selection.update({
"update_polygon_ink": False,
"redraw_polygon": True,
"polygon_props": {
"polygon_color": self.toolButton_pick_poly_edge_color.palette().color(QPalette.Background),
"polygon_radius": self.spinBox_polygon_radius.value(),
"polygon_width": self.spinBox_polygon_width_size.value(),
"grip_number": self.spinBox_polygon_num_points.value(),
"grip_ellipse_size": self.spinBox_point_size.value(),
"grip_ellipse_color": self.toolButton_pick_poly_point_color.palette().color(QPalette.Background),
"polygon_hover_color": self.toolButton_pick_poly_click_color.palette().color(QPalette.Background),
}
})
self.ink_signal.emit(self.ink_selection)
def changeInkDieShape(self):
"""
        Currently four modes for the ink die shape:
        around
        cross
        corner
        all
:return:
"""
try:
self.comboBox_ink_shape.currentText()
pixmap = QPixmap()
pixmap.load(":/mainwindowIcon/icons8_automatic.ico")
pixmap = pixmap.scaledToWidth(96)
self.label_type.setPixmap(pixmap)
except Exception as e:
print(e)
self.ink_selection.update({
"ink_shape": self.comboBox_ink_shape.currentText()
})
self.ink_signal.emit(self.ink_selection)
def changeInkNearestDieNumber(self):
"""
        Change the number of inked dies
:return:
"""
self.ink_selection.update({
"nearest_die_number":self.spinBox_ink_number.value()
})
self.ink_signal.emit(self.ink_selection)
# try:
# name = self.comboBox_inkoff_shape_type.currentText()
# pixmap = QPixmap()
# pixmap.load(":/newPrefix/{}.ico".format(name))
# pixmap = pixmap.scaledToWidth(96)
# self.label_inkoff_display.setPixmap(pixmap)
# self.statusBar().showMessage("Ink Search Algorithm Changed To {}".format(name))
# self.statusBar().setStyleSheet("QStatusBar{color:#008000;}")
# except Exception as e:
# print(e)
# self.statusBar().showMessage("Cannot find the shape icon!")
# self.statusBar().setStyleSheet("QStatusBar{color:#E74C3C;}")
#
# self.graphicsView.search_shape = self.comboBox_inkoff_shape_type.currentText()
# self.graphicsView.searchNearestN = int(self.comboBox_inkoff_number.currentText())
def closeEvent(self, event):
"""
close event signal emit
:param event:
:return:
"""
# close = QMessageBox()
# close.setText("?")
# close.setStandardButtons(QMessageBox.Yes | QMessageBox.Cancel)
# close = close.exec()
# self.close_ink.emit(True)
# self.ink_selection = {
# "window_close": True
# }
# self.ink_signal.emit(self.ink_selection)
# event.accept()
self.close_ink_dialog_signal.emit(True)
self.hide()
if __name__ == "__main__":
import sys
app = QApplication(sys.argv)
Dialog = ShowInkDialog()
Dialog.show()
sys.exit(app.exec_())
|
[
"Ethan.qiujian@gmail.com"
] |
Ethan.qiujian@gmail.com
|
9d84cac07d4408083b05d8d295ccfd35d8e5d30a
|
bec582ead54db4e903d792850c8cbc869879769b
|
/lib/TransParser.py
|
ed4f3ccd66edd236c87cbb3f9e2c26d3636bb691
|
[] |
no_license
|
MattDiesel/staticpress
|
1c787d02feca83267bf622b2573b68f7083bb366
|
3b2c53549f2653e22582b8771655589efebd4a98
|
refs/heads/master
| 2021-01-10T19:43:27.011386
| 2012-09-18T20:34:14
| 2012-09-18T20:34:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,714
|
py
|
from html.parser import HTMLParser
class TransParser(HTMLParser):
def __init__(self, strict = False, reps = None, outs = None, sc = True):
self.rep = reps
self.outStream = outs
self.stripComment = sc
self.rep.parser = self
HTMLParser.__init__(self, strict)
def feedf(self, file):
f = open(file, "rt", encoding="utf-8")
self.feed(f.read(None))
f.close()
def attrs(self, attr):
if (len(attr) > 0):
return ' ' + ' '.join(['{}="{}"'.format(a, v) for (a, v) in attr.items()])
else:
return ''
def out(self, s):
print(s, sep='', end='', file=self.outStream)
def handle_startendtag(self, tag, attrs):
attrs = dict(attrs)
r = None
if (self.rep != None):
r = self.rep.transf(tag, attrs)
if ((r == None) and self.rep and (':' in tag)):
r = self.rep.get(tag, attrs)
if (r != None):
self.out(r)
else:
self.out('<{}{} />'.format(tag, self.attrs(attrs)))
def handle_pi(self, proc):
exec(proc.strip('?').strip())
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
r = None
if (self.rep != None):
r = self.rep.transf(tag, attrs)
if ((r == None) and self.rep and (':' in tag)):
r = self.rep.get(tag, attrs)
if (r != None):
self.out(r)
else:
self.out("<{}{}>".format(tag, self.attrs(attrs)))
def handle_endtag(self, tag):
self.out("</{}>".format(tag))
def handle_data(self, data):
self.out(data)
def handle_decl(self, data):
self.out("<!{}>".format(data))
def handle_comment(self, data):
if (not self.stripComment):
self.out("<!-- {} //-->".format(data.strip('-!<>/ ')))
def handle_entityref(self, name):
self.out('&{};'.format(name))
def handle_charref(self, name):
        self.out('&#{};'.format(name))  # numeric character references round-trip as &#NNN;
|
[
"M@ttDiesel.co.uk"
] |
M@ttDiesel.co.uk
|
0bbd2de8dc824f52d6a0f42c25382c0215c06147
|
7813653f41a440f5666b5bd4f96e71b5c8856b41
|
/main.py
|
bc32db66dd05654158a252af0318e50eaea3f4eb
|
[] |
no_license
|
evidencebp/commit-classification
|
e404e1abff60fa648ed151092fa856094276c13b
|
d8cdf9908db494281b1c236a0549b5ad52b4b28e
|
refs/heads/master
| 2023-08-19T04:49:42.652647
| 2023-07-30T10:58:16
| 2023-07-30T10:58:16
| 253,520,268
| 14
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
py
|
"""
The repository is mainly stateless since this is a language library
commit_type_model is a python program that decompose enitities and generate a regular experssion.
One can use them in python program.
For use in BigQuery, the file also genertae function.
The current functions are in the bq_*.sql scripts.
If one moodifes the commit_type_model, recration and redployment of the scripts is needed.
The confusion matrix is a utility for performance evaluation.
For the evaluation of the given classifiers use linguistic_model_performance that will evalute them on the
labled samples in the data directory
"""
|
[
"idanamit@gmail.com"
] |
idanamit@gmail.com
|
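The docstring in the main.py row above notes that the generated regular expressions can be used directly in a Python program. A purely illustrative, hedged sketch follows; the pattern, function name, and example messages are hypothetical stand-ins, not the expressions actually produced by commit_type_model.

```python
# Hypothetical sketch: the pattern below is a stand-in, not a pattern generated
# by commit_type_model.
import re

CORRECTIVE_RE = re.compile(r"\b(fix(es|ed)?|bug|error|fail(ure|s)?)\b", re.IGNORECASE)

def is_corrective(commit_message: str) -> bool:
    """Return True if the commit message matches the stand-in corrective pattern."""
    return bool(CORRECTIVE_RE.search(commit_message))

print(is_corrective("Fix off-by-one error in parser"))   # True
print(is_corrective("Add dark mode to settings page"))   # False
```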
0c0b4c57534821da8d86289ea7b2c0e905648ed2
|
e8a6154683f27dd5143a29bc9333b829e5e12b38
|
/CAP7/7-10.py
|
9727c9dcea1ffde03a921ddf3d4bc45232a24625
|
[] |
no_license
|
Rodrigodebarros17/Livropython
|
78512091ee9cedfb498e34dde8765fb4b2fee608
|
9a5b4bd8083d31c291ca1b53acbc7b3b07d53510
|
refs/heads/master
| 2021-05-19T10:28:25.741553
| 2020-10-02T16:13:45
| 2020-10-02T16:13:45
| 251,652,669
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
# Tic-tac-toe program (Jogo da Velha)
matriz=[["-","-","-"],["-","-","-"],["-","-","-"]]
for e in matriz:
    print(e)
while True:
    print("Jogador 1, digite a coluna e a ")
|
[
"rodrigodebarros17@gmail.com"
] |
rodrigodebarros17@gmail.com
|
3a2fcef79c162226a9627026894a0b0845627c8a
|
6ec0ed313be8805eb50b7241dd508c27d3540f7e
|
/one_hot_y.py
|
e7acf05434fe8a7d8b2ef138cb9e22b184ef6109
|
[] |
no_license
|
LYHHYR/Text-Classification
|
d0d50a13ce4047176b2e7a677572207d6a6b7c32
|
ac531c98938bd33128193a4036bb4464752fbec6
|
refs/heads/master
| 2020-06-07T00:51:49.447613
| 2019-08-15T03:07:38
| 2019-08-15T03:07:38
| 192,887,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,016
|
py
|
import numpy as np
def one_hot(X, y):
"""
    When y belongs to {1, 2, ..., n} (n>2), transform y into a one-hot encoded matrix
"""
n_samples, n_features = X.shape
y_classes = set(y)
n_classes = len(y_classes)
one_hot = np.zeros((n_samples, n_classes))
    # y should be shifted to start at 0 so it can serve as a column index into the one-hot matrix
y_position = y - np.min(y)
one_hot[np.arange(n_samples), y_position.T] = 1
# np.arange(n_samples): generate an array([0, 1, 2, 3,..., (n_sample - 1)])
return one_hot
######################################
# Testing functions below #
######################################
def test_one_hot():
np.random.seed(20190606)
X = np.random.rand(100, 10)
y = np.random.randint(1, 5, 100)
# the range of y is {1,2,3,4}
y_one_hot = one_hot(X, y)
print ("\n === TEST RESULT === ")
print("\n the result of one hot function is")
print (y, y_one_hot)
if __name__ == "__main__":
test_one_hot()
|
[
"noreply@github.com"
] |
LYHHYR.noreply@github.com
|
6796f3024c4ba730e5ec237035170e0320b726cc
|
133a1604e67507fdc5b3396303d84c98f1222eaf
|
/thanos/bin/pip
|
f95161d98d14c0efb3d9ac1654ffdda914cd589a
|
[] |
no_license
|
junniepat/Python-Car-Rental-
|
fb72e8aad97d8e5a8530d392d4b4ba6722844d78
|
0e197df7f10b1caf344df70b1361f88f3e865137
|
refs/heads/master
| 2020-12-05T03:15:58.457401
| 2020-01-05T23:38:49
| 2020-01-05T23:38:49
| 231,994,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
#!/Users/bobby/Desktop/Car_Rental_Project/CarRental/thanos/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"emmanuel.amadi@lloydant.com"
] |
emmanuel.amadi@lloydant.com
|
|
cb2df88f9154e5ab553526285f461976f28e27a3
|
50400174010e6f49fca3120b94223be32e5ca02e
|
/common/tests.py
|
0b76d3ffb01915965b92612f0e33cfcc67438484
|
[] |
no_license
|
whut2962575697/webs
|
7ed6dcb3c879ef57f08cee436be80f1b64e579b7
|
b463c1a0e937ace6ef99812117998e742b05bfe5
|
refs/heads/master
| 2020-04-04T13:49:43.615315
| 2018-11-15T12:25:26
| 2018-11-15T12:25:26
| 155,976,743
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,313
|
py
|
# -*- encoding:utf-8 -*-
import arcpy
arcpy.env.workspace = ""
polygons = arcpy.FromWKT("POLYGON((116.13982 29.88508,116.133298 29.865047,116.140065 29.853345,116.136074 29.841912,116.141487 29.833255,116.139141 29.827136,116.094168 29.804491,116.05429 29.765514,115.969441 29.730778,115.951961 29.7266,115.901597 29.733592,115.848585 29.751062,115.769124 29.800163,115.713168 29.842946,115.689668 29.85718,115.674007 29.857044,115.613694 29.842301,115.586868 29.840578,115.5252 29.843555,115.478962 29.856092,115.459597 29.869491,115.436349 29.905496,115.418339 29.915956,115.414394 29.928174,115.41487 29.950444,115.38586 29.975737,115.368123 30.006744,115.33548 30.036738,115.330484 30.045083,115.328576 30.078342,115.30855 30.117856,115.27418 30.161666,115.23118 30.208822,115.214144 30.218919,115.195419 30.222019,115.173966 30.221622,115.148019 30.211853,115.12642 30.215717,115.103114 30.227566,115.086667 30.242818,115.085262 30.267359,115.096426 30.298311,115.09247 30.330363,115.104584 30.354133,115.094606 30.369622,115.083706 30.363208,115.071347 30.385562,115.055222 30.399363,115.040581 30.405584,115.018059 30.411633,114.957343 30.410559,114.899734 30.41612,114.879463 30.426017,114.853994 30.448418,114.842412 30.468337,114.838641 30.484741,114.84026 30.520581,114.845446 30.546981,114.844419 30.566417,114.815363 30.595449,114.811554 30.606528,114.794198 30.619363,114.802522 30.625508,114.850438 30.631565,114.858127 30.637346,114.85142 30.643258,114.862842 30.639819,114.864011 30.642663,114.860203 30.649033,114.852272 30.647867,114.852101 30.653853,114.835283 30.675505,114.830507 30.676892,114.825561 30.669529,114.819804 30.666575,114.816514 30.668242,114.811099 30.680345,114.817529 30.692339,114.802808 30.705428,114.790826 30.727486,114.810885 30.729886,114.822966 30.736901,114.847936 30.744705,114.877126 30.76586,114.8828 30.776263,114.882513 30.798587,114.88531 30.802074,114.894284 30.802577,114.921991 30.790449,114.939121 30.802092,114.945921 30.792376,114.959368 30.782336,114.963342 30.786666,114.96319 30.795321,114.960114 30.799798,114.951639 30.802635,114.950814 30.808121,114.969669 30.813316,114.97633 30.818195,114.979415 30.813385,114.987561 30.821945,114.989159 30.814371,114.994905 30.815386,114.998204 30.830591,115.007347 30.836425,114.998341 30.851483,115.00346 30.862551,115.025286 30.869018,115.028789 30.862642,115.037593 30.861618,115.035839 30.854631,115.04188 30.850942,115.045785 30.851971,115.05348 30.862421,115.056052 30.858734,115.061764 30.862101,115.059112 30.869897,115.085775 30.888963,115.085336 30.895442,115.066106 30.900638,115.061758 30.910838,115.042152 30.915286,115.027965 30.931268,115.018792 30.92898,115.001472 30.943353,114.992939 30.961009,114.998714 30.973105,114.992991 30.974555,114.988736 30.980992,114.995281 30.990673,114.99451 31.003842,114.989741 31.003911,114.990181 30.998608,114.986967 30.998621,114.99071 31.02373,114.982844 31.033432,114.976799 31.027439,114.983139 31.012355,114.967394 30.992596,114.961789 30.993082,114.955782 31.00162,114.945048 31.003443,114.942708 31.008364,114.934727 31.001685,114.926412 31.020488,114.911053 31.011414,114.888873 31.012258,114.867955 30.998404,114.862745 30.991306,114.880109 30.979102,114.882972 30.971623,114.871909 30.963025,114.866862 30.950256,114.846928 30.956291,114.82872 30.951981,114.831069 30.957608,114.820999 30.962728,114.817705 30.973194,114.810046 30.97664,114.815296 30.982364,114.811771 30.998215,114.809814 31.000226,114.802814 30.99584,114.802259 31.000993,114.791246 31.001181,114.788892 30.991263,114.781402 30.985945,114.778763 30.971567,114.775575 
30.969779,114.768857 30.969822,114.771886 30.975968,114.76959 30.986108,114.775284 30.989111,114.767173 30.990936,114.767066 30.995398,114.745638 30.992904,114.743519 30.988484,114.726931 30.983487,114.724253 30.987747,114.721543 30.986954,114.722417 30.982951,114.715469 30.984287,114.7085 30.976746,114.704021 30.964551,114.704513 30.955779,114.713819 30.948315,114.710907 30.9441,114.689074 30.95457,114.672481 30.94021,114.664655 30.938172,114.653631 30.942485,114.647617 30.933368,114.641731 30.931858,114.639359 30.934332,114.630028 30.929696,114.618815 30.932664,114.618511 30.944029,114.614296 30.944745,114.608447 30.964198,114.582758 30.975708,114.577497 30.984266,114.567496 30.985505,114.559948 30.998378,114.549434 30.997934,114.534128 31.017041,114.515958 31.020291,114.505379 31.042626,114.510063 31.051115,114.520831 31.056739,114.520952 31.069604,114.539429 31.100064,114.533888 31.113969,114.539931 31.122675,114.532992 31.128099,114.531053 31.14123,114.524885 31.149985,114.498782 31.15543,114.494142 31.163872,114.476393 31.176468,114.466775 31.174727,114.464416 31.189197,114.47743 31.190161,114.478975 31.195288,114.455993 31.202376,114.458803 31.209721,114.448437 31.213001,114.44016 31.211763,114.42829 31.218499,114.411223 31.235328,114.407847 31.255088,114.419227 31.267522,114.445372 31.313759,114.446905 31.330576,114.442649 31.344421,114.448519 31.348893,114.442387 31.353385,114.432527 31.354634,114.427524 31.362635,114.434872 31.371223,114.434845 31.384979,114.443762 31.389819,114.454063 31.389234,114.458361 31.403237,114.454526 31.414458,114.456414 31.420686,114.466922 31.416097,114.465453 31.413397,114.469619 31.410747,114.484346 31.411855,114.4868 31.415353,114.487982 31.425088,114.481745 31.427521,114.480298 31.433099,114.484099 31.45201,114.493771 31.464832,114.491864 31.472315,114.500945 31.473035,114.506337 31.464248,114.518121 31.468019,114.511651 31.49118,114.504449 31.503365,114.510851 31.52703,114.516487 31.530359,114.519307 31.542013,114.526502 31.546997,114.526771 31.564185,114.53279 31.555897,114.549104 31.554274,114.55665 31.558135,114.564285 31.574856,114.570682 31.56126,114.579807 31.55948,114.582312 31.565415,114.600178 31.566877,114.603141 31.579745,114.617239 31.58251,114.62747 31.590941,114.651282 31.585372,114.651766 31.5791,114.661541 31.575266,114.680052 31.548028,114.706204 31.549034,114.702325 31.529738,114.723111 31.531185,114.727399 31.522276,114.746222 31.531099,114.75576 31.52621,114.761336 31.532734,114.777091 31.524576,114.790754 31.530455,114.791239 31.523687,114.779621 31.517887,114.78021 31.508141,114.789299 31.502798,114.788838 31.490142,114.797717 31.484076,114.819982 31.484091,114.82887 31.477643,114.838684 31.463435,114.844162 31.464421,114.851908 31.471893,114.856639 31.482785,114.863192 31.486108,114.879829 31.484074,114.882654 31.476262,114.891734 31.47434,114.899298 31.479691,114.914412 31.481529,114.929275 31.488296,114.931868 31.48322,114.945258 31.475494,114.952793 31.489237,114.967484 31.501594,114.978556 31.506294,114.984929 31.502176,114.988015 31.485051,114.997985 31.485086,114.999473 31.477295,115.00308 31.477081,115.007129 31.49153,115.016765 31.494657,115.010039 31.502629,115.00923 31.509013,115.016027 31.5132,115.029032 31.536241,115.03494 31.534701,115.034802 31.529257,115.043322 31.519877,115.055471 31.525102,115.068594 31.518233,115.101459 31.5178,115.101209 31.527958,115.115479 31.533358,115.119238 31.540576,115.109011 31.548142,115.115812 31.559081,115.10863 31.564921,115.107938 31.573351,115.134895 31.591423,115.127197 
31.604829,115.149195 31.612496,115.175791 31.611138,115.182675 31.60627,115.185719 31.587897,115.199406 31.578925,115.199109 31.57197,115.214844 31.572164,115.219599 31.561897,115.242319 31.565029,115.236272 31.542118,115.224075 31.522845,115.22649 31.510471,115.219119 31.49993,115.224538 31.476021,115.214123 31.453465,115.226173 31.43265,115.23831 31.433767,115.258035 31.42754,115.264992 31.412741,115.256552 31.400027,115.266247 31.395099,115.286959 31.405837,115.30062 31.402019,115.306333 31.390535,115.315443 31.389411,115.321471 31.39062,115.342141 31.40745,115.359597 31.404823,115.37775 31.409837,115.397032 31.398586,115.396406 31.390501,115.388797 31.381075,115.380874 31.377444,115.386836 31.36816,115.386777 31.362071,115.377385 31.355558,115.405442 31.348323,115.412118 31.343414,115.413555 31.348049,115.42653 31.356402,115.429582 31.352893,115.438153 31.355854,115.447926 31.352476,115.454686 31.322173,115.461217 31.319593,115.466916 31.326425,115.468671 31.323056,115.462004 31.288572,115.472822 31.282094,115.475243 31.27438,115.480474 31.270321,115.496113 31.272547,115.507352 31.267582,115.515038 31.27479,115.525659 31.269228,115.546605 31.235028,115.542372 31.202524,115.550979 31.19715,115.553844 31.188476,115.562147 31.179989,115.562634 31.169448,115.579972 31.160191,115.590623 31.147888,115.607905 31.1626,115.610421 31.173625,115.622233 31.181662,115.623023 31.186983,115.641341 31.189969,115.650631 31.213164,115.668037 31.217624,115.696735 31.208499,115.704971 31.209038,115.717648 31.20368,115.718586 31.188251,115.737536 31.181124,115.741155 31.160704,115.752689 31.157327,115.76592 31.141429,115.777535 31.112688,115.780448 31.111422,115.788921 31.116356,115.794354 31.12673,115.801647 31.131465,115.826943 31.129911,115.843701 31.132265,115.858299 31.143827,115.860093 31.149592,115.886448 31.152017,115.892995 31.139171,115.902002 31.134273,115.895242 31.123793,115.895955 31.111487,115.900378 31.103587,115.908972 31.097068,115.915874 31.09731,115.928161 31.091494,115.929075 31.083006,115.951301 31.070989,115.951618 31.062845,115.945969 31.051767,115.94851 31.046625,115.95869 31.048855,115.972275 31.043132,115.988653 31.046029,116.003491 31.038258,116.009739 31.039272,116.017746 31.02034,116.027375 31.019824,116.035744 31.014776,116.04488 31.021074,116.063983 31.018804,116.065558 31.000886,116.077162 30.962304,116.047004 30.964629,116.039827 30.957333,116.029978 30.959531,116.015379 30.955626,115.995908 30.940526,115.981332 30.936904,115.975923 30.925561,115.959439 30.918586,115.954485 30.909704,115.941312 30.909209,115.934947 30.893816,115.92591 30.891918,115.91952 30.894103,115.894965 30.885388,115.881032 30.873533,115.871126 30.869971,115.869591 30.861065,115.857126 30.849658,115.852721 30.840682,115.854492 30.833435,115.869031 30.821363,115.868285 30.81373,115.872309 30.806709,115.863476 30.792441,115.865825 30.788719,115.877422 30.786234,115.875672 30.781676,115.857662 30.771593,115.857064 30.762417,115.85402 30.761352,115.840658 30.762252,115.82886 30.756294,115.816104 30.765256,115.792347 30.763,115.771688 30.715526,115.772657 30.700282,115.767745 30.694612,115.768171 30.690951,115.777794 30.683605,115.780638 30.674777,115.801175 30.665835,115.804294 30.656048,115.817896 30.64497,115.820385 30.630732,115.813087 30.621712,115.824007 30.604475,115.829461 30.602055,115.833386 30.611926,115.854061 30.607463,115.862673 30.595031,115.880346 30.588913,115.884593 30.583824,115.880954 30.574867,115.888666 30.564918,115.89181 30.549389,115.904693 30.543706,115.916036 30.525154,115.926697 
30.523412,115.915055 30.512749,115.903385 30.488223,115.906101 30.472615,115.899297 30.465305,115.900062 30.458558,115.922408 30.448996,115.927994 30.43481,115.951524 30.431067,115.94572 30.424197,115.93647 30.426842,115.929185 30.424607,115.917671 30.407157,115.906027 30.403756,115.890743 30.387713,115.90073 30.363726,115.9078 30.357794,115.912463 30.347674,115.91972 30.346279,115.924055 30.339604,115.92052 30.323124,115.912347 30.314241,115.935972 30.307002,115.945403 30.316165,115.948881 30.312347,115.958854 30.312169,115.965677 30.30641,115.979242 30.304494,115.994949 30.292735,115.990897 30.283856,115.997086 30.277172,115.992685 30.27178,115.994357 30.267901,116.00258 30.261562,116.016766 30.259885,116.021479 30.251972,116.019792 30.24688,116.0345 30.24611,116.035058 30.239861,116.049434 30.232436,116.054099 30.225438,116.063322 30.225327,116.071754 30.211239,116.067526 30.207052,116.072542 30.200057,116.062027 30.192428,116.061776 30.188938,116.067774 30.159359,116.076815 30.138369,116.094534 30.131912,116.090472 30.120182,116.095002 30.110338,116.087375 30.096647,116.09333 30.083696,116.08326 30.071534,116.085095 30.061653,116.098449 30.037232,116.08855 30.031404,116.090631 30.024616,116.084122 30.000224,116.086296 29.986582,116.07991 29.971681,116.132426 29.90812,116.133564 29.890932,116.13982 29.88508),(115.136881 30.223666,115.156066 30.243421,115.152526 30.248852,115.141529 30.253336,115.136949 30.249692,115.121486 30.263801,115.118886 30.251386,115.100492 30.240335,115.124195 30.223945,115.136881 30.223666))")
arcpy.CopyFeatures_management(polygons, 'wuhan.shp')
|
[
"2962575697@whut.edu.cn"
] |
2962575697@whut.edu.cn
|
7f9523279e54933a282f53790b3106e4f5825bdf
|
9fea90e6756eeb1f67c71fbb9b893fbf8e3821b1
|
/catalogue/settings.py
|
a6bfb1f6963d57c8a0556f7431fd95d896649a73
|
[] |
no_license
|
a-romald/scrapy-mysqlclient
|
169c88fa6993edb3d7070e79636b0f5d90ead467
|
86c1773e4e6c6036fb604e86cbd165ec0eea348f
|
refs/heads/master
| 2021-04-28T19:36:31.185284
| 2018-02-17T23:52:09
| 2018-02-17T23:52:09
| 121,901,104
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,204
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for catalogue project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'catalogue'
SPIDER_MODULES = ['catalogue.spiders']
NEWSPIDER_MODULE = 'catalogue.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'catalogue (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'catalogue.middlewares.CatalogueSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'catalogue.middlewares.CatalogueDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
FEED_EXPORT_ENCODING = 'utf-8'
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'scrapy.pipelines.images.ImagesPipeline': 1,
'catalogue.pipelines.MySQLStorePipeline': 300,
}
IMAGES_STORE = 'images'
|
[
"a_rhoma@mail.ru"
] |
a_rhoma@mail.ru
|
537ad2da2c4e4aa5f8bbe056a3e25e3ef68ca0a9
|
23b9611fa4a04df65323f7aadb76c91a9423b2e0
|
/smoketest/systemconfiguration/sysAbout.py
|
c9ac5752c93a854c6742c9c0ec5d417084634acf
|
[] |
no_license
|
da3m0n/ctrguiautotest
|
b22b8a7cc64a4178534b85c7ea364c4fe994e910
|
ed5a55baa4d31ee8f26c616c1ba1a132b7b715f5
|
refs/heads/master
| 2020-05-17T15:42:41.367186
| 2018-11-14T22:03:56
| 2018-11-14T22:03:56
| 21,019,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,537
|
py
|
import sys, os
import time
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from smoketest.TestHelper import TestHelper
from smoketest.TestLog import TestLog
from smoketest.mylib.IsolatedLoginHandler import IsolatedLoginHandler
from smoketest.mylib.utils import Utils
def main():
log_dir = Utils.log_dir()
about = SystemAbout(IsolatedLoginHandler())
test_log = TestLog('System About', log_dir)
about.run_system_about(Utils.create_driver(sys.argv[2]), test_log)
test_log.close()
class SystemAbout(object):
def __init__(self, login_manager):
self.login_manager = login_manager
def run_system_about(self, driver, test_log):
gui_lib = Utils(driver)
self.login_manager.login()
test_log.start('System About')
test_helper = TestHelper(test_log, driver)
driver.switch_to_default_content()
# WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, "menu_node_7_tree")))
# driver.find_element_by_id("menu_node_7_tree").click()
# time.sleep(2)
# WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, "menu_node_10")))
gui_lib.click_element("top_menu_help")
gui_lib.click_element("help_about")
original_window_handle = driver.current_window_handle
driver.switch_to_window(driver.window_handles[1])
# about = driver.find_element(By.XPATH, "//body/fieldset/legend").text
# assert about == "About", ("Expected About but got ", about)
title = 'Aviat Networks Converged Transport Router'
# webTitle = driver.find_element_by_xpath('//body/div/div/h3').text
# assert title == webTitle, ('Expected ', title, ' but got ', webTitle)
#
# # WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, 'licenses')))
# time.sleep(2)
licenses = gui_lib.find_element('licenses')
# driver.execute_script("document.getElementById('licenses').innerHTML=\"\";")
        test_helper.assert_true(len(licenses.text) > 0, 'Expected Licenses text length to be > 0',
                                'Checking Licenses text not empty')
driver.close()
driver.switch_to_window(original_window_handle)
time.sleep(2)
self.login_manager.logout()
if __name__ == "__main__":
main()
|
[
"rowan.naude@aviatnet.com"
] |
rowan.naude@aviatnet.com
|
fad7775f9731907181f1e05c218b3bd67efc38a3
|
a68d32aeae18bbedf81dc759fbf1eb8d24e0b70b
|
/happiness_project/settings.py
|
286e6b05c417f15108ffab6fe1134e1e7d8881a3
|
[] |
no_license
|
fernandocmelo/django-test
|
94679580b9184a5199c31777707f22c553903432
|
60826ffd016eb7601074638415f89f937616cae5
|
refs/heads/master
| 2020-03-29T03:59:16.761764
| 2018-09-26T01:23:23
| 2018-09-26T01:23:23
| 149,510,092
| 0
| 0
| null | 2018-09-19T20:44:26
| 2018-09-19T20:44:26
| null |
UTF-8
|
Python
| false
| false
| 3,686
|
py
|
"""
Django settings for happiness_project project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'goqsn30ib2w9$3-@#g7t4iost8cnrlnvsv!#(8%t=@x3v1dig$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'happiness_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'happiness_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'happiness_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Toronto'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# Login and Logout URLs
LOGIN_REDIRECT_URL = "index"
LOGOUT_REDIRECT_URL = "index"
AUTH_PROFILE_MODULE = 'happiness_app.UserProfile'
|
[
"grandifalcao@gmail.com"
] |
grandifalcao@gmail.com
|
8092ddd1f080a38810e69f276d3975ec42790fd9
|
e91fd1071b91bfae76d293c8b9b85d4afa1286f7
|
/utilidades/__init__.py
|
e49bfceab9edfd4a8fb393638c594497d22fdbe8
|
[] |
no_license
|
MiguelRubioS2P/CodeBreaker
|
2d540981d18ce9b5f0aefb859318f7f8db07fed6
|
5f6ebd73b8e1876e700c6b938fa64e061b80a298
|
refs/heads/master
| 2020-12-15T00:00:52.124787
| 2020-01-20T19:03:17
| 2020-01-20T19:03:17
| 234,919,693
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38
|
py
|
import utilidades
import JuegoPalabras
|
[
"miguelrubio@paucasesnovescifp.cat"
] |
miguelrubio@paucasesnovescifp.cat
|
a904290ec8ed97238dff5bff3c599df824611c11
|
02e23da0431623db86c8138bda350a1d526d4185
|
/Archivos Python Documentos/Graficas/.history/ejecutable_20200216215432.py
|
dfa14620e68d6ef4a0639d6914525ed6612644cf
|
[] |
no_license
|
Jaamunozr/Archivos-python
|
d9996d3d10ff8429cd1b4c2b396016a3a5482889
|
1f0af9ba08f12ac27e111fcceed49bbcf3b39657
|
refs/heads/master
| 2022-08-05T14:49:45.178561
| 2022-07-13T13:44:39
| 2022-07-13T13:44:39
| 244,073,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,272
|
py
|
import pylab as pl
import numpy as np
# Create a figure, originally 8x6 inches at 80 dots per inch (modified here to 16x8)
pl.figure(figsize=(16, 8), dpi=100)
# Create a new subplot in a 1x1 grid (a grid with two plots could also be used)
pl.subplot(1, 1, 1)
# Generate the data for sine and cosine (from about -2pi to 2pi)
X = np.linspace(-2.1*np.pi, 2.1*np.pi, 256, endpoint=True)  # 256 is the number of samples in the interval
C, S = np.cos(X), np.sin(X)
# Plot the cosine function with a solid blue line 1 pixel wide
pl.plot(X, C, color="blue", linewidth=1.0, linestyle="-")
# Plot the sine function with a solid green line 1 pixel wide
pl.plot(X, S, color="green", linewidth=1.0, linestyle="-")
# Set the x-axis limits
pl.xlim(-8.0, 8.0)
# Ticks on x (how many tick values are shown on the axis)
pl.xticks(np.linspace(-8, 8, 17, endpoint=True))
# Set the y-axis limits
pl.ylim(-1.0, 1.0)
# Ticks on y (how many tick values are shown on the axis)
pl.yticks(np.linspace(-1, 1, 5, endpoint=True))
'''Another option for specifying the tick values to print
pl.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi])
pl.yticks([-1, 0, +1]) '''
# ADD AXIS LINES AND REMOVE THE SURROUNDING BOX:
'''
ax = pl.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',0))
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data',0))'''
# Save the figure at 72 dots per inch
# savefig("exercice_2.png", dpi=72)
# Set the margins between the plotted curves and the plot edges
#pl.xlim(X.min() * 1.1, X.max() * 1.1)
pl.ylim(C.min() * 1.1, C.max() * 1.1)
# ADD A LEGEND
pl.plot(X, C, color="blue", linewidth=2.5, linestyle="-", label="Coseno")
pl.plot(X, S, color="red", linewidth=2.5, linestyle="-", label="Seno")
pl.legend(loc='upper left')
# ADD AN ANNOTATION AT A KNOWN POINT
t = 2 * np.pi / 3
# For cosine ------------------------------------------------------------------------------------
pl.plot([t, t], [0, np.cos(t)], color='blue', linewidth=2.5, linestyle="--")
pl.scatter([t, ], [np.cos(t), ], 350, color='blue')
pl.annotate(r'$cos(\frac{2\pi}{3})=-\frac{1}{2}$',  # TEXT TO PRINT
            xy=(t, np.cos(t)), xycoords='data',  # REFERENCE COORDINATES FOR THE ARROW AND TEXT
            xytext=(-90, -50), textcoords='offset points', fontsize=16,  # TEXT POSITION
            arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))  # ARROW DIRECTION
# For sine --------------------------------------------------------------------------------------
pl.plot([t, t],[0, np.sin(t)], color='red', linewidth=2.5, linestyle="--")
pl.scatter([t, ],[np.sin(t), ], 50, color='red')
pl.annotate(r'$sin(\frac{2\pi}{3})=\frac{\sqrt{3}}{2}$',
            xy=(t, np.sin(t)), xycoords='data',
            xytext=(+10, +30), textcoords='offset points', fontsize=16,
            arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))  # ARROW DIRECTION
# Show the result on screen (pause for 10 seconds)
pl.pause(10)
|
[
"jaamunozr@gmail.com"
] |
jaamunozr@gmail.com
|
cdec8f2f5cee0435ef81c841599dda2f2479e791
|
18db4071110091026eece33676a87cdc8da53a3a
|
/func/constants.py
|
7881b87d7f18f4cf8774b942171ea7b32de4d72f
|
[] |
no_license
|
gengd/my_shell
|
57e859116db65c6ba001eb5e08658bab28aa77ac
|
7d84166539625c547febf1b22326a2df73afcab2
|
refs/heads/master
| 2021-07-04T09:15:19.868551
| 2017-09-26T08:57:17
| 2017-09-26T08:57:17
| 104,856,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 132
|
py
|
import os
SHELL_STATUS_STOP = 0
SHELL_STATUS_RUN = 1
HISTORY_PATH = os.path.expanduser('~') + os.sep + '.shiyanlou_shell_history'
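# --- Hypothetical usage sketch (not part of the original module) ---
# Shows how a simple read-eval loop might use these constants; the command
# handling below is a stub, not the real shell's dispatcher.
def _demo_shell_loop(commands):
    status = SHELL_STATUS_RUN
    for line in commands:                      # stand-in for reading user input
        with open(HISTORY_PATH, 'a') as history:
            history.write(line + os.linesep)   # record every command in history
        if line.strip() == 'exit':
            status = SHELL_STATUS_STOP
        if status == SHELL_STATUS_STOP:
            break
    return status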
|
[
"Daniel.Geng@emc.com"
] |
Daniel.Geng@emc.com
|
1b0db2cf114ec7808c322fa1e8df8a933b35ea6f
|
7ebb62d990d530e5a97cea3bf7891edda5881b00
|
/youtube-dl-master/youtube_dl/jsinterp.py
|
ae5bca2e643f1ec4d719e8e895b0a655a98a151a
|
[
"Unlicense",
"LicenseRef-scancode-public-domain"
] |
permissive
|
dj168/Hackathon1
|
fb3f537320189844f9c6514c0b70a5d432426dd1
|
36339458edea2a207e2c7b67ad4cfd1acc3ba569
|
refs/heads/master
| 2021-01-24T11:00:43.937962
| 2014-08-16T20:05:19
| 2014-08-16T20:05:19
| 21,998,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,487
|
py
|
from __future__ import unicode_literals
import re
from .utils import (
ExtractorError,
)
class JSInterpreter(object):
def __init__(self, code):
self.code = code
self._functions = {}
self._objects = {}
def interpret_statement(self, stmt, local_vars, allow_recursion=20):
if allow_recursion < 0:
raise ExtractorError('Recursion limit reached')
if stmt.startswith('var '):
stmt = stmt[len('var '):]
ass_m = re.match(r'^(?P<out>[a-z]+)(?:\[(?P<index>[^\]]+)\])?' +
r'=(?P<expr>.*)$', stmt)
if ass_m:
if ass_m.groupdict().get('index'):
def assign(val):
lvar = local_vars[ass_m.group('out')]
idx = self.interpret_expression(
ass_m.group('index'), local_vars, allow_recursion)
assert isinstance(idx, int)
lvar[idx] = val
return val
expr = ass_m.group('expr')
else:
def assign(val):
local_vars[ass_m.group('out')] = val
return val
expr = ass_m.group('expr')
elif stmt.startswith('return '):
assign = lambda v: v
expr = stmt[len('return '):]
else:
raise ExtractorError(
'Cannot determine left side of statement in %r' % stmt)
v = self.interpret_expression(expr, local_vars, allow_recursion)
return assign(v)
def interpret_expression(self, expr, local_vars, allow_recursion):
if expr.isdigit():
return int(expr)
if expr.isalpha():
return local_vars[expr]
m = re.match(r'^(?P<in>[a-z]+)\.(?P<member>.*)$', expr)
if m:
member = m.group('member')
variable = m.group('in')
if variable not in local_vars:
if variable not in self._objects:
self._objects[variable] = self.extract_object(variable)
obj = self._objects[variable]
key, args = member.split('(', 1)
args = args.strip(')')
argvals = [int(v) if v.isdigit() else local_vars[v]
for v in args.split(',')]
return obj[key](argvals)
val = local_vars[variable]
if member == 'split("")':
return list(val)
if member == 'join("")':
return ''.join(val)
if member == 'length':
return len(val)
if member == 'reverse()':
return val[::-1]
slice_m = re.match(r'slice\((?P<idx>.*)\)', member)
if slice_m:
idx = self.interpret_expression(
slice_m.group('idx'), local_vars, allow_recursion - 1)
return val[idx:]
m = re.match(
r'^(?P<in>[a-z]+)\[(?P<idx>.+)\]$', expr)
if m:
val = local_vars[m.group('in')]
idx = self.interpret_expression(
m.group('idx'), local_vars, allow_recursion - 1)
return val[idx]
m = re.match(r'^(?P<a>.+?)(?P<op>[%])(?P<b>.+?)$', expr)
if m:
a = self.interpret_expression(
m.group('a'), local_vars, allow_recursion)
b = self.interpret_expression(
m.group('b'), local_vars, allow_recursion)
return a % b
m = re.match(
r'^(?P<func>[a-zA-Z$]+)\((?P<args>[a-z0-9,]+)\)$', expr)
if m:
fname = m.group('func')
if fname not in self._functions:
self._functions[fname] = self.extract_function(fname)
argvals = [int(v) if v.isdigit() else local_vars[v]
for v in m.group('args').split(',')]
return self._functions[fname](argvals)
raise ExtractorError('Unsupported JS expression %r' % expr)
def extract_object(self, objname):
obj = {}
obj_m = re.search(
(r'(?:var\s+)?%s\s*=\s*\{' % re.escape(objname)) +
r'\s*(?P<fields>([a-zA-Z$]+\s*:\s*function\(.*?\)\s*\{.*?\})*)' +
r'\}\s*;',
self.code)
fields = obj_m.group('fields')
# Currently, it only supports function definitions
fields_m = re.finditer(
r'(?P<key>[a-zA-Z$]+)\s*:\s*function'
r'\((?P<args>[a-z,]+)\){(?P<code>[^}]+)}',
fields)
for f in fields_m:
argnames = f.group('args').split(',')
obj[f.group('key')] = self.build_function(argnames, f.group('code'))
return obj
def extract_function(self, funcname):
func_m = re.search(
(r'(?:function %s|[{;]%s\s*=\s*function)' % (
re.escape(funcname), re.escape(funcname))) +
r'\((?P<args>[a-z,]+)\){(?P<code>[^}]+)}',
self.code)
if func_m is None:
raise ExtractorError('Could not find JS function %r' % funcname)
argnames = func_m.group('args').split(',')
return self.build_function(argnames, func_m.group('code'))
def build_function(self, argnames, code):
def resf(args):
local_vars = dict(zip(argnames, args))
for stmt in code.split(';'):
res = self.interpret_statement(stmt, local_vars)
return res
return resf
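# --- Hypothetical usage sketch (not part of youtube-dl itself) ---
# JSInterpreter evaluates small helper functions such as the ones found in
# player scripts, e.g.:
#
#     jsi = JSInterpreter('function f(a,b){return a%b}')
#     f = jsi.extract_function('f')
#     f([10, 3])   # -> 1 ('%' is interpreted with Python's modulo)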
|
[
"huderrick@berkeley.edu"
] |
huderrick@berkeley.edu
|
08cb23a06a7856db4ecb22d88ec90a611deba95b
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/man/case/life/big_government.py
|
7fba7f61642853f57bfca0dad6bb4279f36648e4
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
#! /usr/bin/env python
def fact_or_eye(str_arg):
ask_new_woman(str_arg)
print('world_or_last_life')
def ask_new_woman(str_arg):
print(str_arg)
if __name__ == '__main__':
fact_or_eye('long_child_or_few_place')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
0a1d557a8ad4bb66d5e06b1c0dbaa7b77111316c
|
2e2f334d5ad1e37294e691e71c2cf0513a533e19
|
/loop4.py
|
b57cfb5fcdcf3bdb1d3fdf8fcebd644961c67765
|
[] |
no_license
|
vishnubalaji1998/guvi2
|
142afccbdb35037ae465ccad73527228d6c9229c
|
496cfa35f126020c9e3a71c994d70d0803f5affd
|
refs/heads/master
| 2020-11-29T22:15:52.385856
| 2019-12-26T08:45:09
| 2019-12-26T08:45:09
| 230,228,224
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39
|
py
|
i=0
while i<101:
print(i)
i+=2
|
[
"noreply@github.com"
] |
vishnubalaji1998.noreply@github.com
|
04cead922b1938277bca7947212824fa8027daed
|
f61da218587b48026fb91cb6bcf6c06ece9c8f2c
|
/Matrix - Nine Submatrices.py
|
7054cb0faefdc5fb68b7128143c7a91b551f1d36
|
[] |
no_license
|
DANUSHRAJ/Skillrack-Daily-Challenge-And-Daily-Test
|
ec1b7dafab8be603f3eaa193316c895bb7dbc997
|
f32da952e0d01a015cc9f9d01a67c5c7de0b20fa
|
refs/heads/main
| 2023-05-21T00:33:36.896465
| 2021-06-04T03:58:04
| 2021-06-04T03:58:04
| 317,618,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,141
|
py
|
Matrix - Nine Submatrices
The program must accept an integer matrix of size N*N as the input.
The program must divide the given matrix into 9 submatrices based
on the following conditions.
- The program must divide the given matrix into four submatrices of
equal size.
- Then the program must divide the top-right submatrix horizontally
into two submatrices of equal size.
- Then the program must divide the bottom-left submatrix vertically
into two submatrices of equal size.
- Then the program must divide the bottom-right submatrix into four
submatrices of equal size.
Finally, the program must print the sum of integers in each submatrix
as the output.
Note: The value of N is always a multiple of 4.
Boundary Condition(s):
4 <= N <= 100
0 <= Matrix element value <= 1000
Input Format:
The first line contains N.
The next N lines, each contains N integers separated by a space.
Output Format:
The first line contains 9 integers representing the sum of integers in the
9 submatrices.
Example Input/Output 1:
Input:
4
10 20 55 65
40 30 92 82
11 23 34 24
21 43 74 94
Output:
100 120 174 32 66 34 24 74 94
Explanation:
The 1st submatrix (2*2) is given below.
10 20
40 30
The 2nd submatrix (1*2) is given below.
55 65
The 3rd submatrix (1*2) is given below.
92 82
The 4th submatrix (2*1) is given below.
11
21
The 5th submatrix (2*1) is given below.
23
43
The 6th, 7th, 8th and 9th submatrices (1*1) are given below.
34 24 74 94
Example Input/Output 2:
Input:
8
7 2 3 0 3 4 2 7
4 7 9 4 3 4 6 8
5 3 3 0 3 6 2 6
0 4 5 0 1 3 4 6
7 5 8 8 9 2 6 7
0 8 0 9 7 8 6 6
6 9 8 4 2 1 8 4
8 9 9 4 1 2 2 4
Output:
56 37 31 52 50 26 25 6 18
SOLUTION:
def fun(l, a, b, c, d):
    su = 0
    for k in range(a, b):
        for h in range(c, d):
            su += l[k][h]
    print(su, end=' ')
x = int(input())
l = [list(map(int, input().split())) for _ in range(x)]
fun(l, 0, x//2, 0, x//2)
fun(l, 0, (x//2)//2, x//2, x)
fun(l, (x//2)//2, x//2, x//2, x)
fun(l, x//2, x, 0, (x//2)//2)
fun(l, x//2, x, (x//2)//2, x//2)
fun(l, x//2, x-(x//4), x//2, x-(x//4))
fun(l, x//2, x-(x//4), x-(x//4), x)
fun(l, x-(x//4), x, x//2, x-(x//4))
fun(l, x-(x//4), x, x-(x//4), x)
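# Region map used by the nine calls above, with h = x//2 and q = x//4
# (a reading aid, not part of the original solution):
#   call 1: rows [0, h),  cols [0, h)   -> top-left quarter
#   call 2: rows [0, q),  cols [h, x)   -> upper strip of the top-right quarter
#   call 3: rows [q, h),  cols [h, x)   -> lower strip of the top-right quarter
#   call 4: rows [h, x),  cols [0, q)   -> left strip of the bottom-left quarter
#   call 5: rows [h, x),  cols [q, h)   -> right strip of the bottom-left quarter
#   calls 6-9: the four q*q quadrants of the bottom-right quarter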
|
[
"noreply@github.com"
] |
DANUSHRAJ.noreply@github.com
|
09d021b1d8dfe86996bf792304540f403d3b766f
|
ac5934409435751a338af98cb9141c1dd76b56e9
|
/pet/votes/migrations/0001_initial.py
|
d205dfa208e8d564edaef2b798357fc80a5133f0
|
[] |
no_license
|
jfarriagada/pets
|
667f5b61f5c9445fe5e93b49ab0b090111874283
|
f2571f2ba7158484f30e9d9395daf9127282ad94
|
refs/heads/master
| 2021-06-25T13:20:59.126903
| 2019-10-09T00:36:53
| 2019-10-09T00:36:53
| 213,784,034
| 0
| 0
| null | 2021-06-10T22:04:23
| 2019-10-09T00:32:21
|
Python
|
UTF-8
|
Python
| false
| false
| 818
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-10-08 22:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Pet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nickname', models.CharField(max_length=100)),
('name', models.CharField(max_length=100)),
('photo', models.ImageField(blank=True, null=True, upload_to='static/img/pets/')),
('votes', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], default=1)),
],
),
]
|
[
"fainnovex@innovex.cl"
] |
fainnovex@innovex.cl
|
c776c118e859ee46295315dd953878803a4fe286
|
29728c3b5584ffcbff26646f6f4fc794b5be51ac
|
/gogoedu/migrations/0001_initial.py
|
c54a889250bef98ec21abffcfc7b9b93d37be413
|
[] |
no_license
|
tuantuan98/hn-naitei18-python-els
|
d6c966189ff6f8bdcaa484daafe06f962b8507ae
|
6674edbbf270bb95fda9c764ddabe41dccf4b2ef
|
refs/heads/master
| 2022-12-17T20:14:10.306934
| 2020-09-15T01:37:36
| 2020-09-15T02:12:21
| 288,126,316
| 0
| 0
| null | 2020-08-17T08:33:41
| 2020-08-17T08:33:41
| null |
UTF-8
|
Python
| false
| false
| 6,730
|
py
|
# Generated by Django 3.0.9 on 2020-08-18 03:15
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='myUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('avatar', models.CharField(max_length=255)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Catagory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Lesson',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('catagory', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='gogoedu.Catagory')),
],
),
migrations.CreateModel(
name='Test',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_num', models.IntegerField()),
('time', models.TimeField()),
('lesson', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='gogoedu.Lesson')),
],
),
migrations.CreateModel(
name='Word',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('word', models.CharField(max_length=255)),
('mean', models.CharField(blank=True, max_length=255, null=True)),
('type', models.CharField(max_length=255)),
('catagory', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='gogoedu.Catagory')),
('lesson', models.ManyToManyField(to='gogoedu.Lesson')),
],
),
migrations.CreateModel(
name='User_word',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('memoried', models.BooleanField(default=True)),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
('word', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='gogoedu.Word')),
],
),
migrations.CreateModel(
name='User_test',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('correct_answer_num', models.IntegerField()),
('test', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='gogoedu.Test')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=255)),
('test', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='gogoedu.Test')),
],
),
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=255)),
('is_true', models.BooleanField(default=False)),
('question', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='gogoedu.Question')),
],
),
]
|
[
"dang.hoang.tuan@sun-asterisk.com"
] |
dang.hoang.tuan@sun-asterisk.com
|
662e84a750cca07fce5f1cf16fec0a383a1efbd9
|
1e2afd391efc662363098011dcb500b8279656b6
|
/meiduo_mall/meiduo_mall/apps/goods/constants.py
|
3a22e91ff594fab09d549ec21fa37dd93b5a6023
|
[] |
no_license
|
baixin999/mymd
|
c5269e70ebbb6fe26338a18f210dd7e0fc1fdfe0
|
267d5d18ba03d30b7255d4ee1f820b20cad6ed2c
|
refs/heads/master
| 2020-07-07T00:17:24.354799
| 2019-08-25T13:29:03
| 2019-08-25T13:29:03
| 203,183,555
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 63
|
py
|
# Number of items to display per page on the list page
SKU_LIST_PER_PAGE = 5
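# --- Hypothetical usage sketch (not part of the original module) ---
# A list view would typically feed this constant to Django's paginator;
# sku_queryset and page_num below are placeholder names:
#
#     from django.core.paginator import Paginator
#     paginator = Paginator(sku_queryset, SKU_LIST_PER_PAGE)
#     page_skus = paginator.page(page_num)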
|
[
"324559937@qq.com"
] |
324559937@qq.com
|
9b4933a763e3a74f5645ce3216ced5248e67092d
|
0487f110c61453ffd1df3446ebdbfe525f03079a
|
/hiseek/clustering/regionizer.py
|
0b62f81f60b9354b23067fd8dc06ce49e89cae14
|
[
"BSD-2-Clause"
] |
permissive
|
droftware/Medusa
|
76024f6076dae8bfcd85ed08cc15681391606a65
|
a22f1b252bd752a4ab6e6d4f6245c5c90991dcb8
|
refs/heads/master
| 2021-03-16T10:18:28.894072
| 2019-11-15T14:43:15
| 2019-11-15T14:43:15
| 96,506,839
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,487
|
py
|
import numpy as np
from numpy import linalg as LA
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
file_name = 'id_3.obstruction'
visibility_map = np.loadtxt(file_name)
visibility_map = visibility_map.astype('int')
num_rows = visibility_map.shape[0]
num_cols = visibility_map.shape[1]
n = num_rows * num_cols
# Creating the weight matrix of similarity graph
W = np.zeros((n, n))
print('Creating W matrix')
for i in range(num_rows):
for j in range(num_cols):
print('')
if visibility_map[i, j] != -1:
print('Value of',i,j,' is:', visibility_map[i, j])
right = i+1, j
down = i, j+1
node1 = (i * num_cols) + j
if right[0] < num_rows and right[1] < num_cols and visibility_map[right[0], right[1]] != -1:
print('Evaluating right edge')
node2 = (right[0] * num_cols) + right[1]
print('Value of', right[0], right[1], 'is:', visibility_map[right[0], right[1]])
val = abs(visibility_map[i, j] - visibility_map[right[0], right[1]]) * 1.0
if val == 0:
print('right val is 0')
print(visibility_map[i, j], visibility_map[right[0], right[1]])
val = 1
W[node1, node2] = 1/val
print('Right: Assigning val', W[node1, node2], 'to:', i,j,':',right)
if down[0] < num_rows and down[1] < num_cols and visibility_map[down[0], down[1]] != -1:
print('Evaluating down edge')
node2 = (down[0] * num_cols) + down[1]
print('Value of', down[0], down[1], 'is:', visibility_map[down[0], down[1]])
val = abs(visibility_map[i, j] - visibility_map[down[0], down[1]]) * 1.0
if val == 0:
print('Down val is 0')
print(visibility_map[i, j], visibility_map[down[0], down[1]])
val = 1
W[node1, node2] = 1/val
print('Down: Assigning val', W[node1, node2], 'to:', i,j, down)
print('Saving W matrix')
np.savetxt('W.mtx', W)
# Computing the D matrix
D = np.zeros((n, n))
print('Creating D matrix')
# D is the degree matrix: D[i, i] holds the total weight of the edges incident
# to node i. W only stores right/down edges, so symmetrise it before summing.
W = np.maximum(W, W.T)
for i in range(n):
    D[i, i] = W[i, :].sum()
print('Saving D matrix')
np.savetxt('D.mtx', D)
# Computing the graph laplacian matrix
print('Computing L matrix')
L = np.subtract(D, W)
print('Saving L matrix')
np.savetxt('L.mtx', L)
print('Computing eigen vectors and vals')
eigen_vals, eigen_vecs = LA.eig(L)
print('Saving eigen vector and vals')
np.savetxt('eigen_vals.mtx', eigen_vals)
np.savetxt('eigen_vecs.mtx', eigen_vecs)
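# --- Hypothetical next step (not part of the original script) ---
# Spectral clustering uses the eigenvectors belonging to the smallest
# eigenvalues of L as an embedding. A minimal two-way split with the Fiedler
# vector (second-smallest eigenvalue) could look like this; 'labels' is a
# made-up name:
#
#     order = np.argsort(eigen_vals.real)
#     fiedler = eigen_vecs[:, order[1]].real
#     labels = (fiedler > 0).astype(int).reshape(num_rows, num_cols)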
|
[
"akshat.tandon@research.iiit.ac.in"
] |
akshat.tandon@research.iiit.ac.in
|
954acd060cc73756929785875add890248c8f565
|
3967a651350ef70dd84f61b50b21082b0a493af8
|
/scripts/graph2.py
|
94e3756936a08117c88d9e3556baa51bdb702b0e
|
[] |
no_license
|
goyalnikhil02/tensosrflowexamples
|
8b73766cb366b13ffcbeadb41fe3fedbecdbc23c
|
6dec8ddf2cb1ce638613d38182cd6fa3d8320373
|
refs/heads/master
| 2020-05-18T07:51:02.983946
| 2019-05-12T06:52:34
| 2019-05-12T06:52:34
| 184,278,063
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 586
|
py
|
import tensorflow as tf
import numpy as np
n_features = 10  # number of input features (attributes) per sample
n_dense_neuron = 3  # number of neurons in the dense layer
#weight
x=tf.placeholder(tf.float32,(None,n_features))
#print(x)
#matrix data
W = tf.Variable(tf.random_normal([n_features,n_dense_neuron]))
#print(W)
c = tf.Variable(tf.ones([n_dense_neuron]))
#print(c)
xW = tf.matmul(x,W)
#print(xW)
z = tf.add(xW,c)
a=tf.sigmoid(z)
init=tf.global_variables_initializer()
with tf.Session() as session:
session.run(init)
layer_out=session.run(a,feed_dict={x:np.random.random([1,n_features])})
print(layer_out)
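# --- Equivalent NumPy sketch (not part of the original example) ---
# The graph above is a single dense layer: a = sigmoid(x.W + c).
# The same forward pass written directly with NumPy, for comparison:
x_np = np.random.random([1, n_features])
W_np = np.random.randn(n_features, n_dense_neuron)
c_np = np.ones(n_dense_neuron)
a_np = 1.0 / (1.0 + np.exp(-(x_np.dot(W_np) + c_np)))  # sigmoid(x.W + c)
print(a_np)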
|
[
"G521784@mph.morpho.com"
] |
G521784@mph.morpho.com
|
93f64826229da5dd4cba501017b9f57d118a48e5
|
339456d4f86b19bc5f44b8ba986944306e509e11
|
/python-methods/main-dani.py
|
cc9dd8a2e93e405204ce942bab6e12b1577dc296
|
[] |
no_license
|
PepitoAndCo/Blip
|
d0567b64e7d9f78bd0219cad3dd4590a62f5bfc1
|
e756a2701d5e2ca1771f5d9bdf9f2b90ecc286c2
|
refs/heads/master
| 2021-06-16T20:56:09.448638
| 2017-05-06T08:46:28
| 2017-05-06T08:46:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 769
|
py
|
import tbContacts
import re
logBook = tbContacts.loadcsv('sample.csv')
# print('all',logBook.getAll())
# print('get 16473839095',logBook.get("6473839095"))
# print('get all but 16473839095',logBook.getAllbut("16473839095"))
# print('numbers',logBook.numbers())
textList = logBook.get("6472349313")
print(textList)
a = []
for txt in textList:
    txt = textList[0]
    words = txt.split(' ')
    s = 0
    for word in words:
        print('word =', word)
# print(' > ',(re.findall(r'[a-z]|[A-Z]',word)))
# print(' > ',len(re.findall(r'[a-z]|[A-Z]',word)))
# print(' > ',len(re.findall(r'[a-z]|[A-Z]',word))>0);
# print(' > ',1*(len(re.findall(r'[a-z]|[A-Z]',word))>0));
# s=s+int(1*(len(re.findall(r'[a-z]|[A-Z]',word))>0));
# print('s =',s);
# print('final s = ',s)
# print(a)
|
[
"gaspard.witrand@epitech.eu"
] |
gaspard.witrand@epitech.eu
|
aea61614eb4da3c08f9b3da9a334abbd9b207bc3
|
1b275405f78a6fbbff53a84cfa8fa2354ba19289
|
/mitushchapchap/url.py
|
e99d2411744a33eed18b02c6c79f16ee4d4e0dbf
|
[] |
no_license
|
marianjoroge/mitumbachap
|
662002835071d0d949f59d04775d426f5411f74d
|
e62f2c88a26e9ea5173732eb4e6d8950f0f88177
|
refs/heads/master
| 2020-04-24T17:50:20.190935
| 2019-02-23T03:14:51
| 2019-02-23T03:14:51
| 172,160,945
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.post_list, name='post_list'),
path('post-list/', views.mitushchapchapListView.as_view(), name='post-list')
]
|
[
"njorogemaria8@gmail.com"
] |
njorogemaria8@gmail.com
|
b0d36ff01b81621a8a30b4260aee51ff0b7fd312
|
0ac1df08e2cb2a089e912e3237209e0fb683504a
|
/edgy/workflow/transition.py
|
d44537e832efd938ea119ed0b1b40d23812ce52a
|
[] |
no_license
|
python-edgy/workflow
|
ee8654b5cd3931f26dc6c4c519bc865cba1864ca
|
b27edaa7a80bf7cd40d5a26df114058f3795dacd
|
refs/heads/master
| 2020-12-11T20:24:36.461621
| 2016-07-22T09:26:47
| 2016-07-22T09:26:47
| 51,644,998
| 1
| 0
| null | 2016-08-17T14:18:02
| 2016-02-13T12:28:06
|
Python
|
UTF-8
|
Python
| false
| false
| 3,284
|
py
|
# -*- coding: utf-8 -*-
"""
The smallest atom of ``edgy.workflow`` is a ``Transition``, which basically is a regular python
callable with additional metadata to make the system aware of when it can be applied.
"""
from edgy.workflow.constants import WILDCARD
from edgy.workflow.utils import issequence
class Transition(object):
"""
Defines when and how to go from one state to another, eventually applying a user-defined
side-effect while being applied.
Example::
>>> t = Transition(name='sleep', source='awake', target='asleep')
>>> class Person(object):
... state = 'awake'
>>> me = Person()
>>> t(me)
>>> me.state
'asleep'
This class can also be used as a decorator::
>>> @Transition(source='asleep', target='awake')
    ... def wakeup(self, subject):
... print('HEY!')
>>> wakeup(me)
>>> me.state
'awake'
A special wildcard source can make transitions work from any state. Just specify "*" as a
transition source and you'll be able to transition from any state.
"""
# Tracks each time a Transition instance is created. Used to retain order.
creation_counter = 0
# Transition handler. If absent, the transition is considered as "partial", and should be called with a handler
# callable to be complete.
handler = None
def __init__(self, handler=None, name=None, source=None, target=None):
self.source = tuple(source if issequence(source) else (source,))
self.target = target
self._name = name
# Increase the creation counter, and save our local copy.
self.creation_counter = Transition.creation_counter
Transition.creation_counter += 1
if handler:
self.handler = handler or self.handler
def __call__(self, *args, **kwargs):
if self.handler:
return self.__call_complete(*args, **kwargs)
return self.__call_partial(*args, **kwargs)
def __call_partial(self, handler):
self.handler = handler
return self
def __call_complete(self, subject, *args, **kwargs):
if not WILDCARD in self.source and not subject.state in self.source:
raise RuntimeError(
'This transition cannot be executed on a subject in "{}" state, authorized source '
'states are {}.'.format(subject.state,
', '.join(['"{}"'.format(state) for state in self.source]))
)
try:
retval = self.handler(self, subject, *args, **kwargs)
subject.state = self.target
except Exception as e:
raise
return retval
@property
def __name__(self):
if self._name:
return self._name
if self.handler:
return self.handler.__name__
return 'partial'
# Alias that can be used in django templates, for example.
name = __name__
def __repr__(self):
return '<{}.{} object "{}" ({} to {}) at {}>'.format(
type(self).__module__,
type(self).__name__,
self.__name__,
'/'.join(self.source),
self.target,
hex(id(self)),
)
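# --- Minimal usage sketch (not part of the library) ---
# The decorator form attaches a handler; the resulting Transition is then
# applied to a subject whose current state is one of the allowed sources.
# 'person' is any object with a 'state' attribute, used here as a placeholder:
#
#     @Transition(source='asleep', target='awake')
#     def wakeup(transition, subject):
#         print('HEY!')
#
#     wakeup(person)   # runs the handler, then sets person.state = 'awake'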
|
[
"romain@dorgueil.net"
] |
romain@dorgueil.net
|
4807df81987b6ae2a41ea2af20450135a6d02107
|
b414178038ebb3006660301e4c7741c5adc1abe0
|
/web/app/app/forms.py
|
ab3d7c8578957628c5529ae0444bfb4b2c2a123d
|
[] |
no_license
|
KatyaShchuplova/openvpn-as-gui
|
9525cc759ab9d03cce83b65a5420fd69060faea5
|
fd8eb3dc2b400f47cc1decc0e94458584a484fd4
|
refs/heads/master
| 2021-05-15T21:51:57.538485
| 2018-03-10T12:09:46
| 2018-03-10T12:09:46
| 106,600,777
| 0
| 1
| null | 2018-03-10T12:09:48
| 2017-10-11T19:37:40
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,498
|
py
|
from app import app, models, db
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, RadioField, SelectField
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.validators import InputRequired, Email, Length
# User login form
class LoginForm(FlaskForm):
username = StringField('Username', validators=[InputRequired(), Length(min=4, max=15)])
password = PasswordField('Password', validators=[InputRequired(), Length(min=4, max=15)])
# Registration form for new users
class RegisterForm(FlaskForm):
username = StringField('Username', validators=[InputRequired(), Length(min=4, max=15)])
email = StringField('Email', validators=[InputRequired(), Email(message='Invalid email'), Length(max=50)])
password = PasswordField('Password', validators=[InputRequired(), Length(min=4, max=15)])
# Key generation form
class RegisterKeyForm(FlaskForm):
unique_name = StringField('UniqueName', validators=[InputRequired(), Length(max=50)])
days = RadioField('Days', choices=[('90', '90 days'), ('180', '180 days'), ('360', '360 days')], default='180')
def choice_query():
return models.Key.query
# Key deletion (deactivation) form
class DeactivationKeyForm(FlaskForm):
    deactivation_key = QuerySelectField(query_factory=choice_query, allow_blank=True, get_label="uniqueName")
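# --- Hypothetical usage sketch (not part of the original module) ---
# A Flask view would typically instantiate and validate one of these forms:
#
#     form = LoginForm()
#     if form.validate_on_submit():
#         username = form.username.data
#         password = form.password.data
#         ...  # look the user up and verify the password hash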
|
[
"e.shchuplova@gmail.com"
] |
e.shchuplova@gmail.com
|
78ab8469e9d3cb049c2360ccbb087a9236a83ec7
|
a1a3fc3511d3e2e29909411163bafd8932f87426
|
/tests/extension/dataflow_/regionadd_filter_enable/dataflow_regionadd_filter_enable.py
|
bef3f6dbb5ee6c07fe7b89b1e8dc44cb193f5f69
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
yongfu-li/veriloggen
|
25316c6f1a024669e7cb87f3491a1d3592356ea9
|
a6230da3350c6e4bb54e10a46ac855c24c27f17f
|
refs/heads/master
| 2021-01-23T11:50:43.050607
| 2017-09-04T08:30:06
| 2017-09-04T08:30:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,378
|
py
|
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
import veriloggen.dataflow as dataflow
def mkMain():
    # input variable
x = dataflow.Variable('xdata', valid='xvalid', ready='xready')
reset = dataflow.Variable(
'resetdata', valid='resetvalid', ready='resetready', width=1)
enable = dataflow.Variable(
'enabledata', valid='enablevalid', ready='enableready', width=1)
# dataflow definition
z, v = dataflow.RegionAdd(
x * x, 4, initval=0, enable=enable, reset=reset, filter=True)
# set output attribute
z.output('zdata', valid='zvalid', ready='zready')
v.output('vdata', valid='vvalid', ready='vready')
df = dataflow.Dataflow(z, v)
m = df.to_module('main')
# df.draw_graph()
return m
def mkTest(numports=8):
m = Module('test')
# target instance
main = mkMain()
params = m.copy_params(main)
ports = m.copy_sim_ports(main)
clk = ports['CLK']
rst = ports['RST']
xdata = ports['xdata']
xvalid = ports['xvalid']
xready = ports['xready']
resetdata = ports['resetdata']
resetvalid = ports['resetvalid']
resetready = ports['resetready']
enabledata = ports['enabledata']
enablevalid = ports['enablevalid']
enableready = ports['enableready']
zdata = ports['zdata']
zvalid = ports['zvalid']
zready = ports['zready']
vdata = ports['vdata']
vvalid = ports['vvalid']
vready = ports['vready']
uut = m.Instance(main, 'uut',
params=m.connect_params(main),
ports=m.connect_ports(main))
reset_done = m.Reg('reset_done', initval=0)
reset_stmt = []
reset_stmt.append(reset_done(0))
reset_stmt.append(xdata(0))
reset_stmt.append(xvalid(0))
reset_stmt.append(enabledata(0))
reset_stmt.append(enablevalid(0))
reset_stmt.append(resetdata(0))
reset_stmt.append(resetvalid(0))
reset_stmt.append(zready(0))
simulation.setup_waveform(m, uut)
simulation.setup_clock(m, clk, hperiod=5)
init = simulation.setup_reset(m, rst, reset_stmt, period=100)
nclk = simulation.next_clock
init.add(
Delay(1000),
reset_done(1),
nclk(clk),
Delay(10000),
Systask('finish'),
)
def send(name, data, valid, ready, step=1, waitnum=10, send_size=20):
fsm = FSM(m, name + 'fsm', clk, rst)
count = m.TmpReg(32, initval=0)
fsm.add(valid(0))
fsm.goto_next(cond=reset_done)
for _ in range(waitnum):
fsm.goto_next()
fsm.add(valid(1))
fsm.goto_next()
fsm.add(data(data + step), cond=ready)
fsm.add(count.inc(), cond=ready)
fsm.add(valid(0), cond=AndList(count == 5, ready))
fsm.goto_next(cond=AndList(count == 5, ready))
for _ in range(waitnum):
fsm.goto_next()
fsm.add(valid(1))
fsm.add(data(data + step), cond=ready)
fsm.add(count.inc(), cond=ready)
fsm.add(valid(0), cond=AndList(count == send_size, ready))
fsm.goto_next(cond=AndList(count == send_size, ready))
fsm.make_always()
def receive(name, data, valid, ready, waitnum=10):
fsm = FSM(m, name + 'fsm', clk, rst)
fsm.add(ready(0))
fsm.goto_next(cond=reset_done)
fsm.goto_next()
yinit = fsm.current
fsm.add(ready(1), cond=valid)
fsm.goto_next(cond=valid)
for i in range(waitnum):
fsm.add(ready(0))
fsm.goto_next()
fsm.goto(yinit)
fsm.make_always()
send('x', xdata, xvalid, xready, waitnum=10, send_size=100)
receive('z', zdata, Ands(zvalid, vvalid), zready, waitnum=5)
receive('v', vdata, Ands(zvalid, vvalid), vready, waitnum=5)
# enable port
enable_fsm = FSM(m, 'enable', clk, rst)
enable_count = m.Reg('enable_count', 32, initval=0)
enable_fsm.goto_next(cond=reset_done)
enable_fsm_init = enable_fsm.current
enable_fsm.add(enablevalid(1)) # always High
enable_fsm.add(enable_count.inc(), cond=AndList(enablevalid, enableready))
enable_fsm.add(enabledata(1), cond=AndList(
enablevalid, enableready, enable_count == 2))
enable_fsm.goto_next(cond=AndList(
enablevalid, enableready, enable_count == 2))
enable_fsm.add(enabledata(0), cond=AndList(enablevalid, enableready))
enable_fsm.add(enable_count(0))
enable_fsm.goto(enable_fsm_init, cond=AndList(enablevalid, enableready))
enable_fsm.make_always()
# reset port
reset_fsm = FSM(m, 'reset', clk, rst)
reset_count = m.Reg('reset_count', 32, initval=0)
reset_fsm.goto_next(cond=reset_done)
reset_fsm_init = reset_fsm.current
reset_fsm.add(resetvalid(1)) # always High
reset_fsm.add(reset_count.inc(), cond=AndList(resetvalid, resetready))
#reset_fsm.add( resetdata(1), cond=AndList(resetvalid, resetready, reset_count==2) )
reset_fsm.add(resetdata(0), cond=AndList(
resetvalid, resetready, reset_count == 2))
reset_fsm.goto_next(cond=AndList(resetvalid, resetready, reset_count == 2))
reset_fsm.add(resetdata(0), cond=AndList(resetvalid, resetready))
reset_fsm.add(reset_count(0))
reset_fsm.goto(reset_fsm_init, cond=AndList(resetvalid, resetready))
reset_fsm.make_always()
m.Always(Posedge(clk))(
If(reset_done)(
If(AndList(xvalid, xready))(
Systask('display', 'xdata=%d', xdata)
),
If(AndList(zvalid, zready))(
Systask('display', 'zdata=%d', zdata)
),
If(AndList(vvalid, vready))(
Systask('display', 'vdata=%d', vdata)
)
)
)
return m
if __name__ == '__main__':
test = mkTest()
verilog = test.to_verilog('tmp.v')
print(verilog)
# run simulator (Icarus Verilog)
sim = simulation.Simulator(test)
rslt = sim.run() # display=False
#rslt = sim.run(display=True)
print(rslt)
# launch waveform viewer (GTKwave)
# sim.view_waveform() # background=False
# sim.view_waveform(background=True)
|
[
"shta.ky1018@gmail.com"
] |
shta.ky1018@gmail.com
|
8053d37ecac9ad02b42e97ea9b0bbbbb3dd4a690
|
a871507b02337a12e4ad2abe900ce5250146d0de
|
/pachong.py
|
9ae727f1572d88abc7872572c4c635f3b343dcbc
|
[] |
no_license
|
onlinesen/gfxtest
|
3263a7039ade21e44025939ad6a0ec5eb8c1c308
|
8ba7ba858d0a0ae5ce108047b3b857ba69424f2b
|
refs/heads/master
| 2020-03-28T05:39:13.115627
| 2018-09-07T07:45:02
| 2018-09-07T07:45:02
| 147,789,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,821
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2, urllib
import cookielib
rooturl=r"http://osgroup.jstinno.com:8082/encrypt/key/encrypt"
url = r'http://ttms.tinno.com/auth/login?next=%2F'
url1 = r'http://ttms.tinno.com/tools/test-tools-version/24/'
import requests
session = requests.Session()
session.headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}
session.get(url)
r = session.get('http://ttms.tinno.com/tools/test-tools-version/24/')
login_data = {
'username': 'lin.shen',
'password': 'lin.shen',
'bg_color': 'true'
}
session.post(url, data=login_data)
res = session.get(url1)
filename = 'cookie.txt'
# Create a MozillaCookieJar instance to hold the cookies, then write them to a file
cookie = cookielib.MozillaCookieJar(filename)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
postdata = urllib.urlencode({
'username': 'lin.shen',
'password': 'lin.shen',
'bg_color': 'true'
})
# URL used to log in to the system
loginUrl =url
# Simulate the login and keep the resulting cookies in the jar
result = opener.open(loginUrl,postdata)
# Save the cookies to cookie.txt
cookie.save(ignore_discard=True, ignore_expires=True)
# Use the saved cookies to request another URL (the results query page)
gradeUrl = url1
# Request the results query page
result = opener.open(gradeUrl)
#print result.read()
import requests
import json
data={"keyString":"45GUAIW44LTOEE7H"}
r=requests.post(rooturl,data=data) # Some POST requests also need a headers section; none is added here
c= r.json().get("encryptString")
print type(c.encode())
print (r.json().get("encryptString")),"encryptString" in (r.content),r.content
|
[
"lin.shen@tinno.com"
] |
lin.shen@tinno.com
|
cac5fc6ab5693b393eca31f726948adb1534be15
|
cc6ab068e4b0374d593e89010a91493cacb3b6be
|
/Practice_Exercises/007_no_idea_MED.py
|
4bb81fef821ab19df92f7988387e9bd024f2a136
|
[] |
no_license
|
EugeneMondkar/hackerrank-code
|
7671a09ca33046800afdd91fe0f1ee098b6eba2b
|
7578fc278f8caf275e37815ee1e0f3ce7af6e03b
|
refs/heads/main
| 2023-08-24T00:17:25.457984
| 2021-10-02T23:00:02
| 2021-10-02T23:00:02
| 409,016,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 507
|
py
|
# Problem Link: https://www.hackerrank.com/challenges/no-idea/problem
n,m = tuple(map(int, input().split()))
arr = list(map(int, input().split()))
set_a = set(map(int, input().split()))
set_b = set(map(int, input().split()))
happiness_score = 0
# My Original Solution
# for x in arr:
# if x in set_a:
# happiness_score += 1
# if x in set_b:
# happiness_score -= 1
# Alternative Solution
happiness_score = sum([(x in set_a) - (x in set_b) for x in arr])
print(happiness_score)
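# Why the one-liner works: Python booleans are integers, so for each x
# (x in set_a) - (x in set_b) is 1 if x is liked, -1 if disliked, 0 otherwise.
# e.g. with set_a = {1, 2} and set_b = {3}:
#   (2 in set_a) - (2 in set_b)  ->  1 - 0 ==  1
#   (3 in set_a) - (3 in set_b)  ->  0 - 1 == -1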
|
[
"yatinm@gmail.com"
] |
yatinm@gmail.com
|
66b3bc9dc0d840c47e8a9166f415c12b9fb029aa
|
62f26a0417e3e2ba72a0b0736a6ae82c66b06719
|
/Solo-Yasser Lachachi/Toolbox.py
|
05eaf9575a6f89fbf6560c80d59b598e2c882807
|
[] |
no_license
|
EgyAlg/Projet
|
288d62b114a1f80e7968d181aeeddf63478a77a0
|
97ce93ee22854fd5c4dd09f1f0ed36052a0ee323
|
refs/heads/master
| 2021-01-11T20:32:10.238104
| 2017-05-01T20:07:59
| 2017-05-01T20:07:59
| 79,136,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,211
|
py
|
from soccersimulator import Strategy
from soccersimulator import SoccerTeam, Simulation, Player
from soccersimulator import SimuGUI, show_state, show_simu
from soccersimulator import Vector2D, SoccerState, SoccerAction
from soccersimulator.settings import *
import math
class Toolbox(object):
def __init__(self,state,id_team,id_player):
self.state = state
self.id_team = id_team
self.id_player = id_player
self.key = (id_team, id_player)
#POSITIONS
def my_position(self):
return self.state.player_state(self.key[0],self.key[1]).position
def my_positionX(self):
return self.state.player_state(self.key[0],self.key[1]).position.x
def my_positionY(self):
return self.state.player_state(self.key[0],self.key[1]).position.y
def position_player1(self):
return self.state.player_state(self.id_team,0).position
def position_player2(self):
return self.state.player_state(self.id_team,1).position
def ball_position(self):
return self.state.ball.position
def ball_positionX(self):
return self.state.ball.position.x
def ball_positionY(self):
return self.state.ball.position.y
def position_but_adv(self):
if (self.id_team == 1):
return Vector2D(GAME_WIDTH,GAME_HEIGHT/2)
else :
return Vector2D(0,GAME_HEIGHT/2)
def getX_position_but_adv(self):
if (self.id_team == 1):
return GAME_WIDTH
else :
return 0
def position_mon_but(self):
if (self.id_team == 1):
return Vector2D(0,GAME_HEIGHT/2)
else :
return Vector2D(GAME_WIDTH,GAME_HEIGHT/2)
def distanceMonBut(self):
return (self.ball_prediction()-self.position_mon_but()).norm
def distanceAuButAdv(self):
return (self.ball_prediction()-self.position_but_adv()).norm
def distanceAuBallon(self):
return (self.ball_prediction()-self.my_position()).norm
    # MOVEMENTS
def fonceur(self,me):
return me.aller(me.ball_prediction())+me.shoot(me.position_but_adv())
def ball_vitesse(self):
return self.state.ball.vitesse
def ball_prediction(self):
        if (self.ball_vitesse().norm > 2):
return self.ball_position() + self.ball_vitesse()*10
else:
return self.ball_position()
def aller(self,p):
return SoccerAction(p-self.my_position(),Vector2D())
def get_position_def(self):
if (self.id_team == 1):
return self.aller(Vector2D(7,GAME_HEIGHT/2))
else :
return self.aller(Vector2D(GAME_WIDTH-7,GAME_HEIGHT/2))
#ACTIONS
def shoot(self,p):
return SoccerAction(Vector2D(), p-self.my_position())
def mini_shoot(self, p):
return SoccerAction(Vector2D(),(p-self.my_position())*0.040)
def passe(self):
if (self.id_player == 0):
return SoccerAction(Vector2D(),(self.position_player2()-self.my_position())*0.125)
else :
return SoccerAction(Vector2D(),(self.position_player1()-self.my_position())*0.125)
def laisse(self):
if (self.id_player == 0):
if(self.distanceAuBallon()>(self.ball_prediction()-self.position_player2()).norm):
return self.trace()
else :
return self.aller(self.ball_prediction())
else:
if(self.distanceAuBallon()>(self.ball_prediction()-self.position_player1()).norm):
return self.trace()
else :
return self.aller(self.ball_prediction())
def trace(self):
if (self.id_team == 1):
return self.aller(Vector2D(self.ball_positionX()+30,self.my_positionY()))
else:
return self.aller(Vector2D(self.ball_positionX()-30,self.my_positionY()))
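# --- Hypothetical usage sketch (not part of this file) ---
# A strategy would typically rebuild a Toolbox from the match state on every
# tick and combine its helpers. The class and method names below follow the
# usual soccersimulator Strategy interface and are an assumption, not project
# code:
#
#     class FonceurStrategy(Strategy):
#         def __init__(self):
#             super(FonceurStrategy, self).__init__("Fonceur")
#         def compute_strategy(self, state, id_team, id_player):
#             me = Toolbox(state, id_team, id_player)
#             return me.fonceur(me)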
|
[
"yasser_lachachi@hotmail.fr"
] |
yasser_lachachi@hotmail.fr
|
507a2d4956f10161437960274d64c27c47843770
|
50921ecf3b98151180e70c8e86a0cc374393fdff
|
/examples/demo_beholder.py
|
067c6dc7c959b8a9996424707fa892db1d51693c
|
[
"MIT"
] |
permissive
|
lanpa/tensorboardX
|
bd7ca7bd256c5cff628eef53ab3f2b6ee8f65aeb
|
092fcb08f295a483ecd4808407d4c5c39b7a369f
|
refs/heads/master
| 2023-09-06T09:53:03.203099
| 2023-09-03T09:26:33
| 2023-09-03T09:26:33
| 94,218,531
| 5,884
| 814
|
MIT
| 2023-09-03T09:39:59
| 2017-06-13T13:54:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,783
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple MNIST classifier to demonstrate features of Beholder.
Based on tensorflow/examples/tutorials/mnist/mnist_with_summaries.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorboardX.beholder as beholder_lib
import time
from collections import namedtuple
LOG_DIRECTORY = '/tmp/beholder-demo'
tensor_and_name = namedtuple('tensor_and_name', 'tensor, name')
def beholder_pytorch():
for i in range(1000):
fake_param = [tensor_and_name(np.random.randn(128, 768, 3), 'test' + str(i))
for i in range(5)]
arrays = [tensor_and_name(np.random.randn(128, 768, 3), 'test' + str(i))
for i in range(5)]
beholder = beholder_lib.Beholder(logdir=LOG_DIRECTORY)
beholder.update(
trainable=fake_param,
arrays=arrays,
frame=np.random.randn(128, 128),
)
time.sleep(0.1)
print(i)
if __name__ == '__main__':
import os
if not os.path.exists(LOG_DIRECTORY):
os.makedirs(LOG_DIRECTORY)
print(LOG_DIRECTORY)
beholder_pytorch()
|
[
"huang.dexter@gmail.com"
] |
huang.dexter@gmail.com
|
87cc2abd3c13a2d90fd462c57af819701673b894
|
7c5f9f4a237669d9acc4b59711ac1cf91ba71f26
|
/torch/_dynamo/guards.py
|
c16c48515857ab7f942a58f2c98d68360e317e40
|
[
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
colesbury/pytorch
|
4924df03a4aaf88b51ff9d2c5572ac35a30f26be
|
d70f9c7888dc488304010a554d8b56a505919bda
|
refs/heads/master
| 2023-08-27T22:11:15.257393
| 2023-03-29T20:17:06
| 2023-03-29T20:17:10
| 79,366,698
| 1
| 0
| null | 2018-08-09T13:54:22
| 2017-01-18T17:47:38
|
Python
|
UTF-8
|
Python
| false
| false
| 33,502
|
py
|
import builtins
import collections
import logging
import math
import os
import re
import types
import weakref
from inspect import currentframe, getframeinfo
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union
from weakref import ReferenceType
import torch
from torch._guards import (
DuplicateInputs,
Guard,
GuardBuilderBase,
GuardEnvExpr,
GuardSource,
Source,
)
from torch.fx.experimental.symbolic_shapes import SYMPY_INTERP
from . import config, convert_frame, mutation_guard
from .eval_frame import set_guard_error_hook, set_guard_fail_hook
from .exc import unimplemented
from .types import GuardedCode, GuardFail, GuardFn # noqa: F401
from .utils import (
dict_const_keys,
dict_const_keys_repr,
dict_param_key_ids,
guard_failures,
HAS_NUMPY,
istype,
np,
orig_code_map,
rename_implicit,
tensor_always_has_static_shape,
tensor_static_reason_to_message,
tuple_iterator_getitem,
tuple_iterator_len,
)
log = logging.getLogger(__name__)
TensorGuards = torch._C._dynamo.guards.TensorGuards
check_obj_id = torch._C._dynamo.guards.check_obj_id
check_type_id = torch._C._dynamo.guards.check_type_id
CLOSURE_VARS = collections.OrderedDict(
[
("___check_type_id", check_type_id),
("___check_obj_id", check_obj_id),
("___is_grad_enabled", torch.is_grad_enabled),
("___odict_getitem", collections.OrderedDict.__getitem__),
("___dict_param_key_ids", dict_param_key_ids),
("___dict_const_keys", dict_const_keys),
("___tuple_iterator_len", tuple_iterator_len),
("___tuple_iterator_getitem", tuple_iterator_getitem),
("__math_isnan", math.isnan),
("inf", float("inf")),
]
)
def strip_function_call(name):
"""
"___odict_getitem(a, 1)" => "a"
"""
m = re.search(r"([a-z0-9_]+)\(([^(),]+)[^()]*\)", name)
if m and m.group(1) != "slice":
return strip_function_call(m.group(2))
return strip_getattr_getitem(name)
def strip_getattr_getitem(name):
"""
"a[1]" => "a"
"a.foo" => "a"
"""
return re.split(r"[.\[]", name)[0]
class GuardBuilder(GuardBuilderBase):
def __init__(
self,
id_ref: Callable[[Type[object]], str],
source_ref: Callable[[Source], str],
scope: Optional[Dict[str, object]],
check_fn_manager: "CheckFunctionManager",
renames=True,
):
self.id_ref = id_ref
self.source_ref = source_ref
if scope:
if renames:
scope = {rename_implicit(k): v for k, v in scope.items()}
else:
scope = dict()
self.scope: Dict[str, object] = scope
self.scope["__builtins__"] = builtins.__dict__.copy()
for (
name,
package_module,
) in torch.package.package_importer._package_imported_modules.items():
name = name.replace(">", "_").replace("<", "_").replace(".", "_dot_")
# Write the package module into the scope so that we can import it
self.scope["__builtins__"][name] = package_module # type: ignore[index]
# Write the demangled name to the scope so that we can use it
self.scope[name] = package_module
self.argnames: List[str] = []
# Code is python expression strings generated for each guard
self.code: List[str] = []
# shape_env_code is only used by local_builder and is used for
# shape env code. This exists only because we need to make sure
# shape env guards get run after tensor match guards (since the
# tensor match guards make sure we actually have tensors)
self.shape_env_code: List[str] = []
# [Note - On Eager Tensor Guards]
# Most of the time, we generate Python code in a guard to directly
# check various properties. However, tensors are a bit special;
# it is too slow to check their properties one-by-one in Python.
# Instead, there is a C++ function TensorGuards.check which takes
# all of the tensor arguments and checks them all against compile-time
# examples entirely in C++. Thus, every time we process a
# TENSOR_MATCH guard, we just add another entry to
# tensor_check_names/tensor_check_examples, saying "for this local,
# check it against this example", and it all ends up getting
# swept up into a single call to ___check_tensors. Invariant:
# len(tensor_check_names) == len(tensor_check_examples).
self.tensor_check_names: List[str] = []
self.tensor_check_examples: List[torch.Tensor] = []
self.check_fn_manager: CheckFunctionManager = check_fn_manager
# Warning: use this with care! This lets you access what the current
# value of the value you are guarding on is. You probably don't want
# to actually durably save this value though (because it's specific
# to this frame!) Instead, you should be reading out some property
# (like its type) which is what you permanently install into the
# guard code.
def get(self, name: str) -> Any:
return eval(name, self.scope, CLOSURE_VARS)
# Registers the usage of the source name referenced by the
# string (or stored in the Guard) as being guarded upon. It's important
# to call this before generating some code that makes use of 'guard',
# because without this call, we won't actually bind the variable
# you reference in the actual guard closure (oops!)
def arg_ref(self, guard: Union[str, Guard]) -> str:
name: str
if isinstance(guard, str):
name = guard
else:
name = guard.name
base = strip_getattr_getitem(strip_function_call(name))
if base not in self.argnames:
if re.match(r"^\d+$", base):
log.warning(f"invalid var name: {guard}")
self.argnames.append(base)
return name
def TYPE_MATCH(self, guard: Guard):
# ___check_type_id is same as `id(type(x)) == y`
t = type(self.get(guard.name))
obj_id = self.id_ref(t)
code = f"___check_type_id({self.arg_ref(guard)}, {obj_id})"
self._produce_guard_code(guard, [code])
def BOOL_FALSE(self, guard: Guard):
# Guard on the runtime value being 'False',
# can be faster than seemingly equivalent checks like DICT_KEYS for empty dict
#
# WARNING: this guard is not safe to use generally. It only works if the runtime
# value is of a type that supports bool(), and some types e.g. Tensor do not.
# Only use this guard in cases you can guarantee the runtime type will be friendly.
# (e.g. Specialized NNModule with mutation protection via setattr)
#
# Why not simply check the runtime type inside this guard? It's slow enough to defeat
# the purpose of using this guard, which itself is supposed to be a faster alternative
# to DICT_KEYS.
ref = self.arg_ref(guard)
code = f"not {ref}"
self._produce_guard_code(guard, [code])
def ID_MATCH(self, guard: Guard):
# ___check_obj_id is same as `id(x) == y`
m = re.match(r"^type\((.+)\)$", guard.name)
if m:
# optional optimization to produce cleaner/faster guard code
return self.TYPE_MATCH(
Guard(m.group(1), guard.source, GuardBuilder.TYPE_MATCH)
)
code = f"___check_obj_id({self.arg_ref(guard)}, {self.id_ref(self.get(guard.name))})"
self._produce_guard_code(guard, [code])
def NAME_MATCH(self, guard: Guard):
obj = self.get(guard.name)
code = f"{self.arg_ref(guard)}.__name__ == {obj.__name__}"
self._produce_guard_code(guard, [code])
def HASATTR(self, guard: Guard):
m = re.match(r"^(.*)[.]([a-zA-Z0-9_]+)$", guard.name)
assert m, f"invalid hasattr check {guard.name}"
base, attr = m.group(1, 2)
ref = self.arg_ref(base)
val = hasattr(self.get(base), attr)
code = None
if val:
code = f"hasattr({ref}, {attr!r})"
else:
code = f"not hasattr({ref}, {attr!r})"
self._produce_guard_code(guard, [code], provided_guarded_object=self.get(base))
def EQUALS_MATCH(self, guard: Guard):
ref = self.arg_ref(guard)
val = self.get(guard.name)
t = type(val)
np_types = (
(
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.float16,
np.float32,
np.float64,
)
if HAS_NUMPY
else ()
)
assert istype(
val,
(
int,
float,
bool,
type(None),
str,
type,
list,
tuple,
set,
slice,
frozenset,
range,
torch.Size,
torch.device,
torch.dtype,
)
+ np_types,
), t.__name__
if istype(val, (torch.device, torch.dtype)):
# TODO(jansel): is this slow? perhaps optimize it
code = [f"str({ref}) == {str(val)!r}"]
self._produce_guard_code(guard, code)
return
# Special case for nan because float("nan") == float("nan") evaluates to False
if istype(val, float) and math.isnan(val):
code = list()
code.append(f"___check_type_id({ref}, {self.id_ref(t)})")
code.append(f"__math_isnan({ref})")
self._produce_guard_code(guard, code)
return
code = list()
# If matching equality against list/tuple, we must also check that
# the internal types match. (TODO: what about nested lists?)
if istype(val, (list, tuple)):
# NB: LIST_LENGTH takes care of the outer __check_type_id test
self.LIST_LENGTH(guard)
for idx, elem in enumerate(val):
code.append(
f"___check_type_id({ref}[{idx}], {self.id_ref(type(elem))})"
)
else:
# Add type check to prevent equality check between tensor and non-tensor.
code.append(f"___check_type_id({ref}, {self.id_ref(t)})")
if istype(val, torch.Size):
val = tuple(val)
# TODO: It feels like it would be better to just implement our own
# equality test in C that handles all of the necessary type checking
# and NaN tests
code.append(f"{ref} == {val!r}")
self._produce_guard_code(guard, code)
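# Illustrative sketch (not from the original source): guarding a local
# `t = (1, 2.5)` via EQUALS_MATCH would emit roughly
#   ___check_type_id(t, <id of tuple>)    (via LIST_LENGTH)
#   len(t) == 2                           (via LIST_LENGTH)
#   ___check_type_id(t[0], <id of int>)
#   ___check_type_id(t[1], <id of float>)
#   t == (1, 2.5)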
def CONSTANT_MATCH(self, guard: Guard):
val = self.get(guard.name)
if istype(val, (bool, type(None))):
self.ID_MATCH(guard)
else:
self.EQUALS_MATCH(guard)
def NN_MODULE(self, guard: Guard):
self.ID_MATCH(guard)
ref = self.arg_ref(guard)
val = self.get(guard.name)
def setup_guard():
assert istype(val.training, bool)
self.code.append(f"{ref}.training == {val.training}")
if hasattr(val, "training"):
# There are cases where a monkeypatched object has a guard made between __new__ and __init__
setup_guard()
else:
unimplemented(f"Guard setup for uninitialized class {type(val)}")
def FUNCTION_MATCH(self, guard: Guard):
"""things like torch.add and user defined functions"""
if guard.is_local():
return self.ID_MATCH(guard)
def BUILTIN_MATCH(self, guard: Guard):
return self.FUNCTION_MATCH(guard)
def PYMODULE_MATCH(self, guard: Guard):
return self.FUNCTION_MATCH(guard)
def LIST_LENGTH(self, guard):
ref = self.arg_ref(guard)
value = self.get(guard.name)
t = type(value)
code = list()
code.append(f"___check_type_id({ref}, {self.id_ref(t)})")
code.append(f"len({ref}) == {len(value)}")
self._produce_guard_code(guard, code)
def TUPLE_ITERATOR_LEN(self, guard):
ref = self.arg_ref(guard)
value = self.get(guard.name)
t = type(value)
code = list()
code.append(f"___check_type_id({ref}, {self.id_ref(t)})")
code.append(f"___tuple_iterator_len({ref}) == {tuple_iterator_len(value)}")
self._produce_guard_code(guard, code)
def DICT_KEYS(self, guard):
ref = self.arg_ref(guard)
value = self.get(guard.name)
t = type(value)
code = list()
code.append(f"___check_type_id({ref}, {self.id_ref(t)})")
param_key_ids = set(dict_param_key_ids(value))
const_keys = set(dict_const_keys(value))
const_keys_repr = dict_const_keys_repr(const_keys)
if param_key_ids:
code.append(f"___dict_param_key_ids({ref}) == {param_key_ids!r}")
code.append(f"___dict_const_keys({ref}) == {const_keys_repr}")
else:
code.append(f"set({ref}.keys()) == {const_keys_repr}")
self._produce_guard_code(guard, code)
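# Illustrative sketch (not from the original source): for a plain dict local
# `d = {"a": 1, "b": 2}` with no Parameter keys, the emitted parts are along
# the lines of
#   ___check_type_id(d, <id of dict>)
#   set(d.keys()) == {'a', 'b'}
# while dicts with Parameter keys go through the ___dict_param_key_ids /
# ___dict_const_keys comparison instead.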
def WEAKREF_ALIVE(self, guard):
self._produce_guard_code(guard, [f"{self.arg_ref(guard)} is not None"])
def NN_MODULE_PARAM_NAMES(self, guard):
ref = self.arg_ref(guard)
value = self.get(guard.name)
t = type(value)
keys = {k for k, v in value.named_parameters()}
code = list()
code.append(f"___check_type_id({ref}, {self.id_ref(t)})")
code.append(f"{{k for k, v in {ref}.named_parameters()}} == {keys!r}")
self._produce_guard_code(guard, code)
def ODICT_KEYS(self, guard):
"""OrderedDict keys match"""
ref = self.arg_ref(guard)
value = self.get(guard.name)
t = type(value)
code = list()
code.append(f"___check_type_id({ref}, {self.id_ref(t)})")
code.append(f"str({ref}.keys()) == {str(value.keys())!r}")
self._produce_guard_code(guard, code)
def OBJECT_MUTATION(self, guard: Guard):
mutation_guard.watch(self.get(guard.name), self.check_fn_manager)
def GRAD_MODE(self, guard: Guard):
"""Guard on the initial grad state"""
assert guard.name == ""
assert guard.source is GuardSource.GLOBAL
code = None
if convert_frame.initial_grad_state:
code = "___is_grad_enabled()"
else:
code = "not ___is_grad_enabled()"
self._produce_guard_code(guard, [code])
def SHAPE_ENV(self, guard: Guard):
# Let's handle ShapeEnv guards. To do this, we will resolve
# shape variables to sources from tracked_fakes. This must happen after
# tensor checks.
assert guard.name == ""
output_graph = self.check_fn_manager.output_graph
# NB: self.output_graph can be None in the debug_nops tests
fs = output_graph.tracked_fakes
constraint_inputs = [a.constraint_dims for a in fs]
guards = output_graph.shape_env.produce_guards(
[a.fake for a in fs],
[a.source for a in fs],
constraint_inputs=constraint_inputs,
source_ref=self.source_ref,
)
for shape_guard in guards:
self._produce_guard_code(guard, [shape_guard], shape_env=True)
def TENSOR_MATCH(self, guard: Guard):
if guard.is_nn_module():
self.ID_MATCH(guard)
else:
value = self.get(guard.name)
assert isinstance(value, torch.Tensor)
tensor_name = self.arg_ref(guard)
# [Note - On Export Tensor Guards]
#
# In eager mode, tensor guards are evaluated through C++, in guards.cpp
# see [Note - On Eager Tensor Guards] for more info.
#
# In export mode, we instead maintain parallel logic between C++ and python
# here, with an exception of checking the dispatch key - with the idea that a dispatch key
# is an entirely runtime notion that would make no sense to keep in an exported graph.
#
# Now, this idea is okay, but to paraphrase @ezyang, this mental model is sufficient for now, although
# not entirely true.
# For example, suppose one of the input tensors had the negative dispatch key.
# You should end up with a graph that is specialized for tensors that have a negative dispatch key.
# If you allow a Tensor that does NOT have this bit set, you will accidentally run it "as if" it were negated.
# Now, the negative key only shows up for complex numbers, and most likely the export target doesn't
# support this feature at all, but the point stands that :some: tensor state only shows up on dispatch key.
# TODO(voz): Either populate a dispatch_key check into the guards, or error on users passing in an unsupported
# subset of keys during export.
#
# The list of tensor fields and calls we care about can be found in `terms` below.
# TODO(voz): We are missing storage offset in all our tensor guards?
code: List[str] = list()
if self.check_fn_manager.output_graph.export:
self.TYPE_MATCH(guard)
terms = [
"dtype",
"device.type",
"device.index",
"requires_grad",
"ndimension()",
]
if not config.dynamic_shapes:
terms.append("stride()")
# We need to do this to avoid the torch.Size type in guards
code.append(f"{tensor_name}.shape == {tuple(value.shape)}")
for term in terms:
real_value = self.get(tensor_name + "." + term)
code.append(f"{tensor_name}.{term} == {real_value}")
else:
self.tensor_check_names.append(tensor_name)
self.tensor_check_examples.append(value)
# A frame is valid for reuse with dynamic dimensions if the new dynamic dimensions are a
# strict subset of the old.
#
# The logic here is as follows:
#
# Every mark_dynamic directive is a user-knows-best command, which can incur a raise at tracing
# time if we find guards that run counter to the user directive.
# If compiling a frame with explicit dynamic dims X could cause an exception, we MUST NOT skip compiling.
#
# If the frame is compiled with any marked dynamic indices, let's call that set of indices X.
# When we evaluated inputs against the guards, given the same tensor with potentially new dynamic indices,
# let's call that set Y.
#
# When X is a strict subset of Y, the potential new raises introduced during compilation are a strict subset
# of the raises we
# could have encountered. The frame compiled under Y is safe to reuse with X.
# When X is not a strict subset of Y, the non-overlapping new elements of X may cause new raises, and the
# frame is no longer fit for reuse.
#
# This is the case because any newly introduced mark_dynamic directives have a chance of
# raising, failing compilation. Any existing mark_dynamic indices that we lost are safe to lose
# as all it means is that we have gotten rid of a user directive which could incur a raise at compile time.
# In the case of when there is no Y, that is, there are no dynamic indices marked at all, the frame is safe
# to reuse
# as an empty set is a safe degeneration - that is, a strictly static tensor is always valid for a frame
# compiled with that same
# tensor + more onerous user directives.
assert guard.source is not None
static, reason = tensor_always_has_static_shape(value, is_tensor=True)
if not static:
if hasattr(value, "_dynamo_dynamic_indices"):
code.append(
f"({tensor_name}._dynamo_dynamic_indices.issubset({value._dynamo_dynamic_indices})) if hasattr({tensor_name}, '_dynamo_dynamic_indices') else True" # noqa: B950
)
# In the case of us not having any dynamic dimension indices, we compiled the frame with no chance of
# raising for this specific tensor - and any inputs with more dynamic user directives specified must be recompiled.
else:
code.append(
f"hasattr({tensor_name}, '_dynamo_dynamic_indices') == False"
)
else:
assert not hasattr(
value, "_dynamo_dynamic_indices"
), f"Illegal Unreachable state, guard accumulation for dynamic tensor that should have been static. Initial static message: {tensor_static_reason_to_message(reason)}" # noqa: B950
if len(code) > 0:
self._produce_guard_code(guard, code)
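# Illustrative sketch (not from the original source): if the frame was traced
# with `x` marked dynamic on dims {0, 1}, the emitted guard has the form
#   (x._dynamo_dynamic_indices.issubset({0, 1})) if hasattr(x, '_dynamo_dynamic_indices') else True
# so a later call marking only dim 0 (or nothing at all) reuses the frame,
# while a call marking a new dim such as 2 fails the guard and forces
# recompilation.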
# A util that appends guarded code, or, in the case of export, adds data onto guards
def _produce_guard_code(
self, guard, code_list, provided_guarded_object=None, shape_env=False
):
# WARNING: It is important that cur_frame/caller do NOT stay in
# the current frame, because they will keep things live longer
# than they should. See TestMisc.test_release_module_memory
cur_frame = currentframe()
assert cur_frame is not None
caller = cur_frame.f_back
del cur_frame
assert caller is not None
func_name = getframeinfo(caller)[2]
del caller
# We use func_name for export, so might as well get a nice defensive check out of it
assert func_name in dir(
self.__class__
), f"_produce_guard_code must be called from inside GuardedCode. Called from {func_name}"
if shape_env:
self.shape_env_code.extend(code_list)
else:
self.code.extend(code_list)
# Not all guards have names, some can be installed globally (see asserts on HAS_GRAD)
if provided_guarded_object is None:
name_valid = guard.name is not None and guard.name != ""
guarded_object = self.get(guard.name) if name_valid else None
else:
guarded_object = provided_guarded_object
guarded_object_type = (
weakref.ref(type(guarded_object)) if guarded_object is not None else None
)
obj_ref = None
if hasattr(guarded_object.__class__, "__weakref__"):
obj_ref = weakref.ref(guarded_object)
guard.set_export_info(
func_name,
guarded_object_type,
code_list,
obj_ref,
)
# NB: Naively, you'd expect this to only be a function that produces
# the callable that constitutes the guard. However, there is some
# delicate handling for invalidating this check function when the
# locals/globals get invalidated, so there's some extra state
# we have to hold in this manager class.
#
# TODO: this object has reference cycle with itself, via check_fn which
# references back to CheckFunction via ___guarded_code in closure_vars.
# Ideally, there shouldn't be any ref cycle so that guards are
# promptly disposed of.
class CheckFunctionManager:
def __init__(
self,
output_graph=None,
f_locals: Optional[Dict[str, object]] = None,
f_globals: Optional[Dict[str, object]] = None,
guard_fail_fn: Optional[Callable[[Tuple[str, str]], None]] = None,
):
guards = output_graph.guards if output_graph else None
self.valid = True
self._weakrefs: List["ReferenceType[object]"] = []
self._seen_ids: Set[int] = set()
self.output_graph = output_graph
# Note: right overrides left
def combine_scopes(left, right):
if left is None:
return right
if right is None:
return left
return {**left, **right}
def source_ref(source):
guard_source = source.guard_source()
if guard_source is GuardSource.CONSTANT:
# No need to track constants
return source.name()
builder = guard_source.select(w_local(), w_global())
assert builder is not None
return builder.arg_ref(source.name())
local_builder = GuardBuilder(
self.id_ref,
source_ref,
combine_scopes(f_globals, f_locals),
self,
renames=True,
)
global_builder = GuardBuilder(
self.id_ref, source_ref, f_globals, self, renames=False
)
# source_ref can cause a cycle, make sure we break it with weakref
w_local = weakref.ref(local_builder)
w_global = weakref.ref(global_builder)
for guard in sorted(guards or [], key=Guard.sort_key):
if (
not config.guard_nn_modules
and guard.is_nn_module()
# Default func args must be guarded on.
# TODO: we could make use of 'DefaultsSource' and offer a .guard.is_defaults() API
and "__defaults__" not in guard.name
and "__kwdefaults__" not in guard.name
and (config.skip_nnmodule_hook_guards or "hooks" not in guard.name)
):
continue
guard.create(local_builder, global_builder)
self.check_fn = self.compile_check_fn(
local_builder, global_builder, guards, guard_fail_fn
)
self._seen_ids.clear()
def compile_check_fn(
self, local_builder, global_builder, guards_out, guard_fail_fn
):
assert not (set(local_builder.argnames) & set(global_builder.argnames))
# see parallel handling of ".0" / "___implicit0" in _eval_frame.c
largs = [a for a in local_builder.scope.keys() if a == "___implicit0"]
largs += [a for a in local_builder.argnames if a != "___implicit0"]
largs += ["**___kwargs_ignored"]
args = ",".join(largs)
code_parts = (
["___guarded_code.valid"] + local_builder.code + global_builder.code
)
# TODO(whc) maybe only the 'check_tensors' one is ambiguous? if so we can be less general..
verbose_code_parts = (
["___guarded_code.valid"] + local_builder.code + global_builder.code
)
tensor_check_names = (
local_builder.tensor_check_names + global_builder.tensor_check_names
)
check_tensors_fn = None
check_tensors_verbose_fn = None
if tensor_check_names:
assert (
not self.output_graph.export
), "Illegal to set tensor_check_names in export."
tensor_check_examples = (
local_builder.tensor_check_examples
+ global_builder.tensor_check_examples
)
tensor_guards = TensorGuards(
*tensor_check_examples, dynamic_shapes=config.dynamic_shapes
)
check_tensors_fn = tensor_guards.check
check_tensors_verbose_fn = tensor_guards.check_verbose
code_parts.append(f"___check_tensors({', '.join(tensor_check_names)})")
verbose_args = ", ".join(
tensor_check_names + ["tensor_check_names=tensor_check_names"]
)
verbose_code_parts.append(f"___check_tensors_verbose({verbose_args})")
aotautograd_guards: List[GuardEnvExpr] = (
self.output_graph.tracing_context.guards_context.aotautograd_guards
if self.output_graph
else []
)
for guard in aotautograd_guards:
if isinstance(guard, DuplicateInputs):
source_a = guard.input_source_a
source_b = guard.input_source_b
code_part = f"{source_a.name()} is {source_b.name()}"
code_parts.append(code_part)
verbose_code_parts.append(code_part)
else:
raise RuntimeError(f"Unknown GuardEnvExpr: {guard}")
code_parts.extend(local_builder.shape_env_code)
verbose_code_parts.extend(local_builder.shape_env_code)
assert not global_builder.shape_env_code
code = " and ".join(unique(code_parts))
closure_vars = collections.OrderedDict(
[
("___guarded_code", self),
("___check_tensors", check_tensors_fn),
("___check_tensors_verbose", check_tensors_verbose_fn),
("tensor_check_names", tensor_check_names),
]
+ list(SYMPY_INTERP.items())
)
closure_vars.update(CLOSURE_VARS)
py_code = f"""\
def ___make_guard_fn({','.join(closure_vars.keys())}):
return lambda {args}: {code}
"""
if os.environ.get("TORCHDYNAMO_PRINT_GUARDS", None) == "1":
print("GUARDS", code)
set_guard_fail_hook(guard_fail_hook)
out: Dict[str, Any] = dict()
# print("RUNNING PY CODE", py_code)
exec(py_code, global_builder.scope, out)
guard_fn = out["___make_guard_fn"](*closure_vars.values())
guard_fn.closure_vars = closure_vars
# TODO(whc) maybe '.code_parts' was only kept around for the guard callback? so we don't need both
guard_fn.args = largs
guard_fn.code_parts = code_parts
guard_fn.verbose_code_parts = verbose_code_parts
guard_fn.global_scope = global_builder.scope
guard_fn.guard_fail_fn = guard_fail_fn
return guard_fn
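# Illustrative sketch (not from the original source): for a single guarded
# local `x` with a type check and a tensor check, the exec'd source built
# above looks roughly like
#
#     def ___make_guard_fn(___guarded_code, ___check_tensors, ...):
#         return lambda x, **___kwargs_ignored: ___guarded_code.valid and ___check_type_id(x, 94229525557792) and ___check_tensors(x)
#
# where the id literal is a made-up placeholder and the real parameter list
# is the full closure_vars mapping.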
def invalidate(self, ref):
# A weakref is no longer valid, self.check_fn should return false
self.valid = False
def id_ref(self, obj):
"""add a weakref, return the id"""
try:
if id(obj) not in self._seen_ids:
self._weakrefs.append(weakref.ref(obj, self.invalidate))
self._seen_ids.add(id(obj))
except TypeError:
pass # cannot weakref bool object
return id(obj)
stashed_first_fail_reason = None
def guard_fail_hook(
guard_fn: GuardFn,
code: types.CodeType,
f_locals: Dict[str, object],
index: int,
last: bool,
) -> None:
"""
called whenever a guard fails.
"""
first = index == 0
global stashed_first_fail_reason
# Don't waste time computing the fail reason for guards we aren't going to report out.
if not guard_fn.guard_fail_fn and not (first or last):
return
scope = {rename_implicit(k): v for k, v in f_locals.items()}
scope.update(guard_fn.closure_vars)
reason = None
for part in guard_fn.verbose_code_parts:
fail_reason = eval(part, guard_fn.global_scope, scope)
# TODO(whc) hacky for now as not every 'part' in guard_fn.verbose_code_parts
# is updated to return a string explaining the failure.
if isinstance(fail_reason, str):
reason = fail_reason
break
elif isinstance(fail_reason, bool) and not fail_reason:
reason = part
break
if first:
stashed_first_fail_reason = reason
if not last:
return
# Technically, we're failing our last guard, which is our oldest guard due to the
# eval_frame.c logic that moves newest frames to head, but for logging purposes
# it's more useful to see the 'first' failure (if we never got a hit) since it's
# likely not yet been logged as a failure reason in a case of repeating failures.
assert stashed_first_fail_reason
guard_failures[orig_code_map[code]].append(stashed_first_fail_reason)
stashed_first_fail_reason = None
# TODO should we GuardFail our stashed_first_fail_reason too?
try:
if guard_fn.guard_fail_fn is not None:
guard_fn.guard_fail_fn(
GuardFail(reason or "unknown reason", orig_code_map[code])
)
except Exception as e:
log.error(
"Failure in guard_fail_fn callback - raising here will cause a NULL Error on guard eval",
exc_info=True,
)
def guard_error_hook(
guard_fn: GuardFn,
code: types.CodeType,
f_locals: Dict[str, object],
index: int,
last: bool,
):
print(
f"ERROR RUNNING GUARDS {code.co_name} {code.co_filename}:{code.co_firstlineno}"
)
# TODO: If we passed in the exception here, we could get a precise
# column number of which subexpression failed. But that would also
# require us to have the TRUE code that was eval'ed, not a shoddy
# reconstruction (like is done here)
print("lambda " + ", ".join(guard_fn.args) + ":")
print(" ", " and\n ".join(guard_fn.code_parts))
set_guard_error_hook(guard_error_hook)
def unique(seq):
seen = set()
for x in seq:
if x not in seen:
yield x
seen.add(x)
|
[
"pytorchmergebot@users.noreply.github.com"
] |
pytorchmergebot@users.noreply.github.com
|
12b0bbe1c7d0171b65f4472d8f2656afe5089091
|
4fdb272e69f1114d1c7fc35281cc4f7366dc1169
|
/AI_Challenger_2018-master/Baselines/autonomous_driving_perception208_baseline/segmentation/vis.py
|
160b161efdce7a51f967621879567ee47175d0e9
|
[
"Apache-2.0"
] |
permissive
|
DarkT2014/mydeeplearning
|
b7d004f94f707b327075702174e74d45f6a48f6a
|
7707492a4df320e35a59113c00edaaf39b684c3a
|
refs/heads/master
| 2022-10-27T18:29:48.574175
| 2019-09-23T02:56:15
| 2019-09-23T02:56:15
| 203,542,891
| 0
| 1
| null | 2022-10-06T06:27:09
| 2019-08-21T08:41:07
|
Python
|
UTF-8
|
Python
| false
| false
| 13,017
|
py
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Segmentation results visualization on a given set of images.
See model.py for more details and usage.
"""
import math
import os.path
import time
import numpy as np
import tensorflow as tf
from deeplab import common
from deeplab import model
from deeplab.datasets import segmentation_dataset
from deeplab.utils import input_generator
from deeplab.utils import save_annotation
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
slim = tf.contrib.slim
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('master', '', 'BNS name of the tensorflow server')
# Settings for log directories.
flags.DEFINE_string('vis_logdir', './vis_res', 'Where to write the event logs.')
flags.DEFINE_string('checkpoint_dir', './train', 'Directory of model checkpoints.')
# Settings for visualizing the model.
flags.DEFINE_integer('vis_batch_size', 1,
'The number of images in each batch during evaluation.')
flags.DEFINE_multi_integer('vis_crop_size', [960, 1280],
'Crop size [height, width] for visualization.')
flags.DEFINE_integer('eval_interval_secs', 60 * 5,
'How often (in seconds) to run evaluation.')
# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or
# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note
# one could use different atrous_rates/output_stride during training/evaluation.
flags.DEFINE_multi_integer('atrous_rates', None,
'Atrous rates for atrous spatial pyramid pooling.')
flags.DEFINE_integer('output_stride', 16,
'The ratio of input to output spatial resolution.')
# Change to [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] for multi-scale test.
flags.DEFINE_multi_float('eval_scales', [1.0],
'The scales to resize images for evaluation.')
# Change to True for adding flipped images during test.
flags.DEFINE_bool('add_flipped_images', False,
'Add flipped images for evaluation or not.')
# Dataset settings.
flags.DEFINE_string('dataset', 'bdd100k',
'Name of the segmentation dataset.')
flags.DEFINE_string('vis_split', 'val',
'Which split of the dataset is used for visualizing results.')
flags.DEFINE_string('dataset_dir', './tfrecord', 'Where the dataset resides.')
flags.DEFINE_enum('colormap_type', 'pascal', ['pascal', 'cityscapes'],
'Visualization colormap type.')
flags.DEFINE_boolean('also_save_raw_predictions', False,
'Also save raw predictions.')
flags.DEFINE_integer('max_number_of_iterations', 0,
'Maximum number of visualization iterations. Will loop '
'indefinitely upon nonpositive values.')
# The folder where semantic segmentation predictions are saved.
_SEMANTIC_PREDICTION_SAVE_FOLDER = 'segmentation_results'
# The folder where raw semantic segmentation predictions are saved.
_RAW_SEMANTIC_PREDICTION_SAVE_FOLDER = 'raw_segmentation_results'
# The format to save image.
_IMAGE_FORMAT = '%06d_image'
# The format to save prediction
_PREDICTION_FORMAT = '%06d_prediction'
# To evaluate Cityscapes results on the evaluation server, the labels used
# during training should be mapped to the labels for evaluation.
_CITYSCAPES_TRAIN_ID_TO_EVAL_ID = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 31, 32, 33]
def _convert_train_id_to_eval_id(prediction, train_id_to_eval_id):
"""Converts the predicted label for evaluation.
There are cases where the training labels are not equal to the evaluation
labels. This function is used to perform the conversion so that we could
evaluate the results on the evaluation server.
Args:
prediction: Semantic segmentation prediction.
train_id_to_eval_id: A list mapping from train id to evaluation id.
Returns:
Semantic segmentation prediction whose labels have been changed.
"""
converted_prediction = prediction.copy()
for train_id, eval_id in enumerate(train_id_to_eval_id):
converted_prediction[prediction == train_id] = eval_id
return converted_prediction
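# Illustrative example (not part of the original file): with the Cityscapes
# mapping above, a pixel predicted as train id 0 is rewritten to eval id 7
# (road) and train id 1 to eval id 8 (sidewalk), which is what the evaluation
# server expects.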
def _process_batch(sess, original_images, semantic_predictions, image_names,
image_heights, image_widths, image_id_offset, save_dir,
raw_save_dir, train_id_to_eval_id=None):
"""Evaluates one single batch qualitatively.
Args:
sess: TensorFlow session.
original_images: One batch of original images.
semantic_predictions: One batch of semantic segmentation predictions.
image_names: Image names.
image_heights: Image heights.
image_widths: Image widths.
image_id_offset: Image id offset for indexing images.
save_dir: The directory where the predictions will be saved.
raw_save_dir: The directory where the raw predictions will be saved.
train_id_to_eval_id: A list mapping from train id to eval id.
"""
(original_images,
semantic_predictions,
image_names,
image_heights,
image_widths) = sess.run([original_images, semantic_predictions,
image_names, image_heights, image_widths])
num_image = semantic_predictions.shape[0]
for i in range(num_image):
image_height = np.squeeze(image_heights[i])
image_width = np.squeeze(image_widths[i])
original_image = np.squeeze(original_images[i])
semantic_prediction = np.squeeze(semantic_predictions[i])
crop_semantic_prediction = semantic_prediction[:image_height, :image_width]
# Save image.
save_annotation.save_annotation(
original_image, save_dir, _IMAGE_FORMAT % (image_id_offset + i),
add_colormap=False)
# Save prediction.
save_annotation.save_annotation(
crop_semantic_prediction, save_dir,
_PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True,
colormap_type=FLAGS.colormap_type)
if FLAGS.also_save_raw_predictions:
image_filename = os.path.basename(image_names[i])
if train_id_to_eval_id is not None:
crop_semantic_prediction = _convert_train_id_to_eval_id(
crop_semantic_prediction,
train_id_to_eval_id)
save_annotation.save_annotation(
crop_semantic_prediction, raw_save_dir, image_filename,
add_colormap=False)
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
# Get dataset-dependent information.
dataset = segmentation_dataset.get_dataset(
FLAGS.dataset, FLAGS.vis_split, dataset_dir=FLAGS.dataset_dir)
train_id_to_eval_id = None
if dataset.name == segmentation_dataset.get_cityscapes_dataset_name():
tf.logging.info('Cityscapes requires converting train_id to eval_id.')
train_id_to_eval_id = _CITYSCAPES_TRAIN_ID_TO_EVAL_ID
# Prepare for visualization.
tf.gfile.MakeDirs(FLAGS.vis_logdir)
save_dir = os.path.join(FLAGS.vis_logdir, _SEMANTIC_PREDICTION_SAVE_FOLDER)
tf.gfile.MakeDirs(save_dir)
raw_save_dir = os.path.join(
FLAGS.vis_logdir, _RAW_SEMANTIC_PREDICTION_SAVE_FOLDER)
tf.gfile.MakeDirs(raw_save_dir)
tf.logging.info('Visualizing on %s set', FLAGS.vis_split)
g = tf.Graph()
with g.as_default():
samples = input_generator.get(dataset,
FLAGS.vis_crop_size,
FLAGS.vis_batch_size,
min_resize_value=FLAGS.min_resize_value,
max_resize_value=FLAGS.max_resize_value,
resize_factor=FLAGS.resize_factor,
dataset_split=FLAGS.vis_split,
is_training=False,
model_variant=FLAGS.model_variant)
model_options = common.ModelOptions(
outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_classes},
crop_size=FLAGS.vis_crop_size,
atrous_rates=FLAGS.atrous_rates,
output_stride=FLAGS.output_stride)
if tuple(FLAGS.eval_scales) == (1.0,):
tf.logging.info('Performing single-scale test.')
predictions = model.predict_labels(
samples[common.IMAGE],
model_options=model_options,
image_pyramid=FLAGS.image_pyramid)
else:
tf.logging.info('Performing multi-scale test.')
predictions = model.predict_labels_multi_scale(
samples[common.IMAGE],
model_options=model_options,
eval_scales=FLAGS.eval_scales,
add_flipped_images=FLAGS.add_flipped_images)
predictions = predictions[common.OUTPUT_TYPE]
if FLAGS.min_resize_value and FLAGS.max_resize_value:
# Only support batch_size = 1, since we assume the dimensions of original
# image after tf.squeeze is [height, width, 3].
assert FLAGS.vis_batch_size == 1
# Reverse the resizing and padding operations performed in preprocessing.
# First, we slice the valid regions (i.e., remove padded region) and then
# we resize the predictions back.
original_image = tf.squeeze(samples[common.ORIGINAL_IMAGE])
original_image_shape = tf.shape(original_image)
predictions = tf.slice(
predictions,
[0, 0, 0],
[1, original_image_shape[0], original_image_shape[1]])
resized_shape = tf.to_int32([tf.squeeze(samples[common.HEIGHT]),
tf.squeeze(samples[common.WIDTH])])
predictions = tf.squeeze(
tf.image.resize_images(tf.expand_dims(predictions, 3),
resized_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True), 3)
tf.train.get_or_create_global_step()
saver = tf.train.Saver(slim.get_variables_to_restore())
sv = tf.train.Supervisor(graph=g,
logdir=FLAGS.vis_logdir,
init_op=tf.global_variables_initializer(),
summary_op=None,
summary_writer=None,
global_step=None,
saver=saver)
num_batches = int(math.ceil(
dataset.num_samples / float(FLAGS.vis_batch_size)))
last_checkpoint = None
# Loop to visualize the results when new checkpoint is created.
num_iters = 0
while (FLAGS.max_number_of_iterations <= 0 or
num_iters < FLAGS.max_number_of_iterations):
num_iters += 1
last_checkpoint = slim.evaluation.wait_for_new_checkpoint(
FLAGS.checkpoint_dir, last_checkpoint)
start = time.time()
tf.logging.info(
'Starting visualization at ' + time.strftime('%Y-%m-%d-%H:%M:%S',
time.gmtime()))
tf.logging.info('Visualizing with model %s', last_checkpoint)
with sv.managed_session(FLAGS.master,
start_standard_services=False) as sess:
sv.start_queue_runners(sess)
sv.saver.restore(sess, last_checkpoint)
image_id_offset = 0
for batch in range(num_batches):
tf.logging.info('Visualizing batch %d / %d', batch + 1, num_batches)
_process_batch(sess=sess,
original_images=samples[common.ORIGINAL_IMAGE],
semantic_predictions=predictions,
image_names=samples[common.IMAGE_NAME],
image_heights=samples[common.HEIGHT],
image_widths=samples[common.WIDTH],
image_id_offset=image_id_offset,
save_dir=save_dir,
raw_save_dir=raw_save_dir,
train_id_to_eval_id=train_id_to_eval_id)
image_id_offset += FLAGS.vis_batch_size
tf.logging.info(
'Finished visualization at ' + time.strftime('%Y-%m-%d-%H:%M:%S',
time.gmtime()))
time_to_next_eval = start + FLAGS.eval_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
if __name__ == '__main__':
flags.mark_flag_as_required('checkpoint_dir')
flags.mark_flag_as_required('vis_logdir')
flags.mark_flag_as_required('dataset_dir')
tf.app.run()
|
[
"lincheng@linchengdeMacBook-Pro.local"
] |
lincheng@linchengdeMacBook-Pro.local
|
d236ab80a1798bb92f400c21f53470b7b4d79c24
|
fdffd3f8ad31ffd917b1df4199ff5d88df80b420
|
/Chapter_08/matplotlib_learning.py
|
f2b0806f79708322e36af13779f60ecd5eb0b416
|
[] |
no_license
|
LelandYan/data_analysis
|
83c0cefa1b0783a8d3d13050092b2ab085cd859e
|
9482c4667ecac189545f40b9f5bad3c495d48068
|
refs/heads/master
| 2020-04-17T04:25:47.975087
| 2019-02-12T07:40:37
| 2019-02-12T07:40:37
| 166,229,621
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,529
|
py
|
import matplotlib.pyplot as plt
from numpy.random import randn
import numpy as np
# fig = plt.figure()
# ax1 = fig.add_subplot(2, 2, 1)
# ax2 = fig.add_subplot(2, 2, 2)
# ax3 = fig.add_subplot(2, 2, 3)
# plt.plot(randn(50).cumsum(), 'k--')
# _ = ax1.hist(randn(100), bins=20, color='k', alpha=0.3)
# ax2.scatter(np.arange(30), np.arange(30) + 3 * randn(30))
# Adjust the spacing around the subplots
# fig, axes = plt.subplots(2, 2, sharex=True, sharey=True)
# for i in range(2):
# for j in range(2):
# axes[i, j].hist(randn(500), bins=50, color='k', alpha=0.5)
# plt.subplots_adjust(wspace=0, hspace=0)
# plt.plot(randn(30).cumsum(), 'ko--')
# plt.plot(randn(30).cumsum(), color='k', linestyle='dashed', marker='o')
# data = randn(30).cumsum()
# # plt.plot(data, 'k--', label='Default')
# plt.plot(data, 'k-', drawstyle='steps-post', label='steps-post')
# plt.legend(loc='best')
# plt.xlim()
# plt.savefig('figpath.svg')
# plt.show()
# from io import BytesIO
# buffer = BytesIO()
# plt.savefig(buffer)
# plot_data = buffer.getvalue()
# ax = fig.add_subplot(1, 1, 1)
# ax.plot(randn(1000).cumsum())
# plt.show()
# from datetime import datetime
# import pandas as pd
#
# fig = plt.figure()
# ax = fig.add_subplot(1, 1, 1)
#
# data = pd.read_csv('spx.csv', index_col=0, parse_dates=True)
# spx = data['SPX']
# spx.plot(ax=ax, style='k--',alpha=0.3)
# crisis_data = [
# (datetime(2007, 10, 11), 'Peak of bull market'),
# (datetime(2008, 3, 12), 'Bear Stearns Fails'),
# (datetime(2008, 9, 15), 'Lehman Bankruptcy')
# ]
# for date, label in crisis_data:
# ax.annotate(label, xy=(date, spx.asof(date) + 50),
# xytext=(date, spx.asof(date) + 200),
# arrowprops=dict(facecolor='black'),
# horizontalalignment='left', verticalalignment='top')
# ax.set_xlim(['1/1/2007', '1/1/2011'])
# ax.set_ylim([600, 1800])
# ax.set_title("Important dates in 2008-2009 financial crisis")
# plt.show()
# ax.savefig('figpath.svg')
# matplotlib配置
# plt.rc('figure', figsize=(10, 10))
from pandas import DataFrame, Series
# Plotting functions in pandas
# Line plots
# s = Series(np.random.randn(10).cumsum(), index=np.arange(0, 100, 10))
# s.plot()
# plt.show()
# df = DataFrame(np.random.randn(10, 4).cumsum(0), columns=['A', 'B', 'C', 'D'], index=np.arange(0, 100, 10))
# df.plot()
# plt.show()
# Bar charts: kind='bar'/'barh'; the Series/DataFrame index supplies the X/Y tick labels
# fig, axes = plt.subplots(2, 1)
# data = Series(np.random.rand(16), index=list('abcdefghijklmnop'))
# data.plot(kind='bar', ax=axes[0], color='k', alpha=0.7)
# data.plot(kind='barh', ax=axes[1], color='k', alpha=0.7)
# plt.show()
import pandas as pd
# df = DataFrame(np.random.rand(6, 4),
# index=['one', 'two', 'three', 'four', 'five', 'six'],
# columns=pd.Index(['A', 'B', 'C', 'D'], names='Genus'))
# df.plot(kind='bar')
# df.plot(kind='barh', stacked=True, alpha=0.5)
# plt.show()
# tips = pd.read_csv('tips.csv')
# party_counts = pd.crosstab(tips.day,tips.size)
# print(party_counts.ix[:,2:5])
# Histograms and density plots
# tips = pd.read_csv('tips.csv')
# tips['tip_pct'] = tips['tip'] / tips['total_bill']
# tips['tip_pct'].hist(bins=20)
# tips['tip_pct'].plot(kind='kde')
# plt.show()
# comp1 = np.random.normal(0, 1, size=200)
# comp2 = np.random.normal(10, 2, size=200)
# values = Series(np.concatenate([comp1,comp2]))
# values.hist(bins=100,alpha=0.3,color='k',normed=True)
# values.plot(kind='kde',style='k--')
# plt.show()
# Scatter plots
# macro = pd.read_csv('macrodata.csv')
# # data = macro[['cpi', 'm1', 'tbilrate', 'unemp']]
# # # print(data[-5:])
# # trans_data = np.log(data).diff().dropna()
# # # print(trans_data[-5:])
# # plt.scatter(trans_data['m1'],trans_data['unemp'])
# # plt.title('Changes in log')
# # pd.scatter_matrix(trans_data,diagonal='kde',color='k',alpha=0.3)
# # plt.show()
# Plotting map data
data = pd.read_csv('Haiti.csv')
# Remove erroneous records (bad coordinates or missing category)
data = data[(data.LATITUDE > 18) & (data.LATITUDE < 20) & (data.LONGITUDE > -75) & (data.LONGITUDE < -70) & (
data.CATEGORY.notnull())]
def to_cat_list(catstr):
stripped = (x.strip() for x in catstr.split(','))
return [x for x in stripped if x]
def get_all_categories(cat_series):
cat_sets = (set(to_cat_list(x)) for x in cat_series)
return sorted(set.union(*cat_sets))
def get_english(cat):
code, names = cat.split('.')
if '|' in names:
names = names.split('|')[1]
return code, names.strip()
print(get_english('2. Urgences logistiques | Vital Lines'))
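# Illustrative example (not part of the original notes): to_cat_list splits a
# raw CATEGORY string on commas and strips whitespace, e.g.
# to_cat_list('1. Food, 2. Water') -> ['1. Food', '2. Water'], and
# get_all_categories unions those per-row category lists across the column.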
|
[
"2721599586@qq.com"
] |
2721599586@qq.com
|
60dd612b880ef07f9e7cccbc338222d0d7dc4514
|
bdaf2715c970f1863356897c2d86722aa42dc067
|
/hash-table/0016-3sum-closest.py
|
3296052fcb473d1697ec792c7716dc54aa6e8137
|
[] |
no_license
|
chengchaoyang/my-leetcode-record
|
e32ec43d46c8b5cf249a372ff7eeb25363e0ba92
|
04bf208aec4245de199cadf7cc7936038dffcb5b
|
refs/heads/master
| 2021-05-19T06:48:28.808531
| 2020-06-10T10:42:41
| 2020-06-10T10:42:41
| 251,571,986
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,470
|
py
|
"""
Given an array nums of n integers and a target value target, find the three integers in nums whose sum is closest to target, and return the sum of those three integers. You may assume that each input has exactly one solution.
For example, given the array nums = [-1, 2, 1, -4] and target = 1,
the sum closest to target is 2 (-1 + 2 + 1 = 2).
Source: LeetCode
Link: https://leetcode-cn.com/problems/3sum-closest
Copyright belongs to LeetCode. Commercial reprints require official authorization; non-commercial reprints must credit the source.
"""
from typing import List
class Solution:
def threeSumClosest(self, nums: List[int], target: int) -> int:
if len(nums) < 3:
return []
# Initialization
diff = float('inf')
nums.sort()
for index in range(len(nums) - 2):
if index > 0 and nums[index] == nums[index - 1]:
continue
l = index + 1
r = len(nums) - 1
while l < r:
s = nums[index] + nums[l] + nums[r]
if abs(s - target) < diff:
diff = abs(s - target)
res = s
if s > target:
r -= 1
elif s < target:
l += 1
else:
# If the sum already equals target it is necessarily the closest, so per the problem statement return it directly
return target
return res
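# Example from the problem statement: Solution().threeSumClosest([-1, 2, 1, -4], 1) returns 2.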
|
[
"526879530@qq.com"
] |
526879530@qq.com
|
0036a3aa6a27f4fb7809ec127059fbbf09413654
|
7b4682d56541c863ca05f84737ce9296f425e339
|
/query_parser/spark/spark.py
|
49ebfb27ab745891013b1636606d496666ae9013
|
[] |
no_license
|
dav009/truthgraph
|
f3387788c0158aae1c50f1d3da8e674fce47325d
|
e8e2ff40353bb1a1e13f6465d74656a78fa3f52d
|
refs/heads/master
| 2016-09-16T05:26:23.871595
| 2013-07-10T19:05:44
| 2013-07-10T19:05:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,142
|
py
|
# Copyright (c) 1998-2002 John Aycock
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__version__ = 'SPARK-0.7 (pre-alpha-7)'
import re
import sys
import string
def _namelist(instance):
namelist, namedict, classlist = [], {}, [instance.__class__]
for c in classlist:
for b in c.__bases__:
classlist.append(b)
for name in c.__dict__.keys():
if not namedict.has_key(name):
namelist.append(name)
namedict[name] = 1
return namelist
class GenericScanner:
def __init__(self, flags=0):
pattern = self.reflect()
self.re = re.compile(pattern, re.VERBOSE|flags)
self.index2func = {}
for name, number in self.re.groupindex.items():
self.index2func[number-1] = getattr(self, 't_' + name)
def makeRE(self, name):
doc = getattr(self, name).__doc__
rv = '(?P<%s>%s)' % (name[2:], doc)
return rv
def reflect(self):
rv = []
for name in _namelist(self):
if name[:2] == 't_' and name != 't_default':
rv.append(self.makeRE(name))
rv.append(self.makeRE('t_default'))
return string.join(rv, '|')
def error(self, s, pos):
print "Lexical error at position %s" % pos
raise SystemExit
def position(self, newpos=None):
oldpos = self.pos
if newpos is not None:
self.pos = newpos
return self.string, oldpos
def tokenize(self, s):
self.string = s
self.pos = 0
n = len(s)
while self.pos < n:
m = self.re.match(s, self.pos)
if m is None:
self.error(s, self.pos)
groups = m.groups()
self.pos = m.end()
for i in range(len(groups)):
if groups[i] is not None and self.index2func.has_key(i):
self.index2func[i](groups[i])
def t_default(self, s):
r'( . | \n )+'
print "Specification error: unmatched input"
print s
raise SystemExit
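# Illustrative sketch (not part of the original module): a concrete scanner
# defines t_* methods whose docstrings are the token regexes, e.g.
#
#     class WordScanner(GenericScanner):
#         def t_word(self, s):
#             r' [a-z]+ '
#             print "word:", s
#
# reflect() then assembles the master pattern
# '(?P<word> [a-z]+ )|(?P<default>( . | \n )+)' (compiled with re.VERBOSE),
# and tokenize() dispatches every match to the corresponding t_* handler.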
#
# Extracted from GenericParser and made global so that [un]picking works.
#
class _State:
def __init__(self, stateno, items):
self.T, self.complete, self.items = [], [], items
self.stateno = stateno
class GenericParser:
#
# An Earley parser, as per J. Earley, "An Efficient Context-Free
# Parsing Algorithm", CACM 13(2), pp. 94-102. Also J. C. Earley,
# "An Efficient Context-Free Parsing Algorithm", Ph.D. thesis,
# Carnegie-Mellon University, August 1968. New formulation of
# the parser according to J. Aycock, "Practical Earley Parsing
# and the SPARK Toolkit", Ph.D. thesis, University of Victoria,
# 2001, and J. Aycock and R. N. Horspool, "Practical Earley
# Parsing", unpublished paper, 2001.
#
def __init__(self, start):
self.rules = {}
self.rule2func = {}
self.rule2name = {}
self.collectRules()
self.augment(start)
self.ruleschanged = 1
_NULLABLE = '\e_'
_START = 'START'
_BOF = '|-'
#
# When pickling, take the time to generate the full state machine;
# some information is then extraneous, too. Unfortunately we
# can't save the rule2func map.
#
def __getstate__(self):
if self.ruleschanged:
#
# XXX - duplicated from parse()
#
self.computeNull()
self.newrules = {}
self.new2old = {}
self.makeNewRules()
self.ruleschanged = 0
self.edges, self.cores = {}, {}
self.states = { 0: self.makeState0() }
self.makeState(0, self._BOF)
#
# XXX - should find a better way to do this..
#
changes = 1
while changes:
changes = 0
for k, v in self.edges.items():
if v is None:
state, sym = k
if self.states.has_key(state):
self.goto(state, sym)
changes = 1
rv = self.__dict__.copy()
for s in self.states.values():
del s.items
del rv['rule2func']
del rv['nullable']
del rv['cores']
return rv
def __setstate__(self, D):
self.rules = {}
self.rule2func = {}
self.rule2name = {}
self.collectRules()
start = D['rules'][self._START][0][1][1] # Blech.
self.augment(start)
D['rule2func'] = self.rule2func
D['makeSet'] = self.makeSet_fast
self.__dict__ = D
#
# A hook for GenericASTBuilder and GenericASTMatcher. Mess
# thee not with this; nor shall thee toucheth the _preprocess
# argument to addRule.
#
def preprocess(self, rule, func): return rule, func
def addRule(self, doc, func, _preprocess=1):
fn = func
rules = string.split(doc)
index = []
for i in range(len(rules)):
if rules[i] == '::=':
index.append(i-1)
index.append(len(rules))
for i in range(len(index)-1):
lhs = rules[index[i]]
rhs = rules[index[i]+2:index[i+1]]
rule = (lhs, tuple(rhs))
if _preprocess:
rule, fn = self.preprocess(rule, func)
if self.rules.has_key(lhs):
self.rules[lhs].append(rule)
else:
self.rules[lhs] = [ rule ]
self.rule2func[rule] = fn
self.rule2name[rule] = func.__name__[2:]
self.ruleschanged = 1
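# Illustrative sketch (not part of the original module): grammar rules are
# normally declared through p_* method docstrings, e.g.
#
#     def p_expr(self, args):
#         ' expr ::= expr + term '
#
# collectRules() feeds that docstring to addRule(), which records the rule
# ('expr', ('expr', '+', 'term')) and maps it back to the p_expr method.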
def collectRules(self):
for name in _namelist(self):
if name[:2] == 'p_':
func = getattr(self, name)
doc = func.__doc__
self.addRule(doc, func)
def augment(self, start):
rule = '%s ::= %s %s' % (self._START, self._BOF, start)
self.addRule(rule, lambda args: args[1], 0)
def computeNull(self):
self.nullable = {}
tbd = []
for rulelist in self.rules.values():
lhs = rulelist[0][0]
self.nullable[lhs] = 0
for rule in rulelist:
rhs = rule[1]
if len(rhs) == 0:
self.nullable[lhs] = 1
continue
#
# We only need to consider rules which
# consist entirely of nonterminal symbols.
# This should be a savings on typical
# grammars.
#
for sym in rhs:
if not self.rules.has_key(sym):
break
else:
tbd.append(rule)
changes = 1
while changes:
changes = 0
for lhs, rhs in tbd:
if self.nullable[lhs]:
continue
for sym in rhs:
if not self.nullable[sym]:
break
else:
self.nullable[lhs] = 1
changes = 1
def makeState0(self):
s0 = _State(0, [])
for rule in self.newrules[self._START]:
s0.items.append((rule, 0))
return s0
def finalState(self, tokens):
#
# Yuck.
#
if len(self.newrules[self._START]) == 2 and len(tokens) == 0:
return 1
start = self.rules[self._START][0][1][1]
return self.goto(1, start)
def makeNewRules(self):
worklist = []
for rulelist in self.rules.values():
for rule in rulelist:
worklist.append((rule, 0, 1, rule))
for rule, i, candidate, oldrule in worklist:
lhs, rhs = rule
n = len(rhs)
while i < n:
sym = rhs[i]
if not self.rules.has_key(sym) or \
not self.nullable[sym]:
candidate = 0
i = i + 1
continue
newrhs = list(rhs)
newrhs[i] = self._NULLABLE+sym
newrule = (lhs, tuple(newrhs))
worklist.append((newrule, i+1,
candidate, oldrule))
candidate = 0
i = i + 1
else:
if candidate:
lhs = self._NULLABLE+lhs
rule = (lhs, rhs)
if self.newrules.has_key(lhs):
self.newrules[lhs].append(rule)
else:
self.newrules[lhs] = [ rule ]
self.new2old[rule] = oldrule
def typestring(self, token):
return None
def error(self, token):
print "Syntax error at or near `%s' token" % token
raise SystemExit
def parse(self, tokens):
sets = [ [(1,0), (2,0)] ]
self.links = {}
if self.ruleschanged:
self.computeNull()
self.newrules = {}
self.new2old = {}
self.makeNewRules()
self.ruleschanged = 0
self.edges, self.cores = {}, {}
self.states = { 0: self.makeState0() }
self.makeState(0, self._BOF)
for i in xrange(len(tokens)):
sets.append([])
if sets[i] == []:
break
self.makeSet(tokens[i], sets, i)
else:
sets.append([])
self.makeSet(None, sets, len(tokens))
# _dump(tokens, sets, self.states)
finalitem = (self.finalState(tokens), 0)
if finalitem not in sets[-2]:
if len(tokens) > 0:
self.error(tokens[i-1])
else:
self.error(None)
return self.buildTree(self._START, finalitem,
tokens, len(sets)-2)
def isnullable(self, sym):
#
# For symbols in G_e only. If we weren't supporting 1.5,
# could just use sym.startswith().
#
return self._NULLABLE == sym[0:len(self._NULLABLE)]
def skip(self, (lhs, rhs), pos=0):
n = len(rhs)
while pos < n:
if not self.isnullable(rhs[pos]):
break
pos = pos + 1
return pos
def makeState(self, state, sym):
assert sym is not None
#
# Compute \epsilon-kernel state's core and see if
# it exists already.
#
kitems = []
for rule, pos in self.states[state].items:
lhs, rhs = rule
if rhs[pos:pos+1] == (sym,):
kitems.append((rule, self.skip(rule, pos+1)))
core = kitems
core.sort()
tcore = tuple(core)
if self.cores.has_key(tcore):
return self.cores[tcore]
#
# Nope, doesn't exist. Compute it and the associated
# \epsilon-nonkernel state together; we'll need it right away.
#
k = self.cores[tcore] = len(self.states)
K, NK = _State(k, kitems), _State(k+1, [])
self.states[k] = K
predicted = {}
edges = self.edges
rules = self.newrules
for X in K, NK:
worklist = X.items
for item in worklist:
rule, pos = item
lhs, rhs = rule
if pos == len(rhs):
X.complete.append(rule)
continue
nextSym = rhs[pos]
key = (X.stateno, nextSym)
if not rules.has_key(nextSym):
if not edges.has_key(key):
edges[key] = None
X.T.append(nextSym)
else:
edges[key] = None
if not predicted.has_key(nextSym):
predicted[nextSym] = 1
for prule in rules[nextSym]:
ppos = self.skip(prule)
new = (prule, ppos)
NK.items.append(new)
#
# Problem: we know K needs generating, but we
# don't yet know about NK. Can't commit anything
# regarding NK to self.edges until we're sure. Should
# we delay committing on both K and NK to avoid this
# hacky code? This creates other problems..
#
if X is K:
edges = {}
if NK.items == []:
return k
#
# Check for \epsilon-nonkernel's core. Unfortunately we
# need to know the entire set of predicted nonterminals
# to do this without accidentally duplicating states.
#
core = predicted.keys()
core.sort()
tcore = tuple(core)
if self.cores.has_key(tcore):
self.edges[(k, None)] = self.cores[tcore]
return k
nk = self.cores[tcore] = self.edges[(k, None)] = NK.stateno
self.edges.update(edges)
self.states[nk] = NK
return k
def goto(self, state, sym):
key = (state, sym)
if not self.edges.has_key(key):
#
# No transitions from state on sym.
#
return None
rv = self.edges[key]
if rv is None:
#
# Target state isn't generated yet. Remedy this.
#
rv = self.makeState(state, sym)
self.edges[key] = rv
return rv
def gotoT(self, state, t):
return [self.goto(state, t)]
def gotoST(self, state, st):
rv = []
for t in self.states[state].T:
if st == t:
rv.append(self.goto(state, t))
return rv
def add(self, set, item, i=None, predecessor=None, causal=None):
if predecessor is None:
if item not in set:
set.append(item)
else:
key = (item, i)
if item not in set:
self.links[key] = []
set.append(item)
self.links[key].append((predecessor, causal))
def makeSet(self, token, sets, i):
cur, next = sets[i], sets[i+1]
ttype = token is not None and self.typestring(token) or None
if ttype is not None:
fn, arg = self.gotoT, ttype
else:
fn, arg = self.gotoST, token
for item in cur:
ptr = (item, i)
state, parent = item
add = fn(state, arg)
for k in add:
if k is not None:
self.add(next, (k, parent), i+1, ptr)
nk = self.goto(k, None)
if nk is not None:
self.add(next, (nk, i+1))
if parent == i:
continue
for rule in self.states[state].complete:
lhs, rhs = rule
for pitem in sets[parent]:
pstate, pparent = pitem
k = self.goto(pstate, lhs)
if k is not None:
why = (item, i, rule)
pptr = (pitem, parent)
self.add(cur, (k, pparent),
i, pptr, why)
nk = self.goto(k, None)
if nk is not None:
self.add(cur, (nk, i))
def makeSet_fast(self, token, sets, i):
#
# Call *only* when the entire state machine has been built!
# It relies on self.edges being filled in completely, and
# then duplicates and inlines code to boost speed at the
# cost of extreme ugliness.
#
cur, next = sets[i], sets[i+1]
ttype = token is not None and self.typestring(token) or None
for item in cur:
ptr = (item, i)
state, parent = item
if ttype is not None:
k = self.edges.get((state, ttype), None)
if k is not None:
#self.add(next, (k, parent), i+1, ptr)
#INLINED --v
new = (k, parent)
key = (new, i+1)
if new not in next:
self.links[key] = []
next.append(new)
self.links[key].append((ptr, None))
#INLINED --^
#nk = self.goto(k, None)
nk = self.edges.get((k, None), None)
if nk is not None:
#self.add(next, (nk, i+1))
#INLINED --v
new = (nk, i+1)
if new not in next:
next.append(new)
#INLINED --^
else:
add = self.gotoST(state, token)
for k in add:
if k is not None:
self.add(next, (k, parent), i+1, ptr)
#nk = self.goto(k, None)
nk = self.edges.get((k, None), None)
if nk is not None:
self.add(next, (nk, i+1))
if parent == i:
continue
for rule in self.states[state].complete:
lhs, rhs = rule
for pitem in sets[parent]:
pstate, pparent = pitem
#k = self.goto(pstate, lhs)
k = self.edges.get((pstate, lhs), None)
if k is not None:
why = (item, i, rule)
pptr = (pitem, parent)
#self.add(cur, (k, pparent),
# i, pptr, why)
#INLINED --v
new = (k, pparent)
key = (new, i)
if new not in cur:
self.links[key] = []
cur.append(new)
self.links[key].append((pptr, why))
#INLINED --^
#nk = self.goto(k, None)
nk = self.edges.get((k, None), None)
if nk is not None:
#self.add(cur, (nk, i))
#INLINED --v
new = (nk, i)
if new not in cur:
cur.append(new)
#INLINED --^
def predecessor(self, key, causal):
for p, c in self.links[key]:
if c == causal:
return p
assert 0
def causal(self, key):
links = self.links[key]
if len(links) == 1:
return links[0][1]
choices = []
rule2cause = {}
for p, c in links:
rule = c[2]
choices.append(rule)
rule2cause[rule] = c
return rule2cause[self.ambiguity(choices)]
def deriveEpsilon(self, nt):
if len(self.newrules[nt]) > 1:
rule = self.ambiguity(self.newrules[nt])
else:
rule = self.newrules[nt][0]
#print rule
rhs = rule[1]
attr = [None] * len(rhs)
for i in range(len(rhs)-1, -1, -1):
attr[i] = self.deriveEpsilon(rhs[i])
return self.rule2func[self.new2old[rule]](attr)
def buildTree(self, nt, item, tokens, k):
state, parent = item
choices = []
for rule in self.states[state].complete:
if rule[0] == nt:
choices.append(rule)
rule = choices[0]
if len(choices) > 1:
rule = self.ambiguity(choices)
#print rule
rhs = rule[1]
attr = [None] * len(rhs)
for i in range(len(rhs)-1, -1, -1):
sym = rhs[i]
if not self.newrules.has_key(sym):
if sym != self._BOF:
attr[i] = tokens[k-1]
key = (item, k)
item, k = self.predecessor(key, None)
#elif self.isnullable(sym):
elif self._NULLABLE == sym[0:len(self._NULLABLE)]:
attr[i] = self.deriveEpsilon(sym)
else:
key = (item, k)
why = self.causal(key)
attr[i] = self.buildTree(sym, why[0],
tokens, why[1])
item, k = self.predecessor(key, why)
return self.rule2func[self.new2old[rule]](attr)
def ambiguity(self, rules):
#
# XXX - problem here and in collectRules() if the same rule
# appears in >1 method. Also undefined results if rules
# causing the ambiguity appear in the same method.
#
sortlist = []
name2index = {}
for i in range(len(rules)):
lhs, rhs = rule = rules[i]
name = self.rule2name[self.new2old[rule]]
sortlist.append((len(rhs), name))
name2index[name] = i
sortlist.sort()
list = map(lambda (a,b): b, sortlist)
return rules[name2index[self.resolve(list)]]
def resolve(self, list):
#
# Resolve ambiguity in favor of the shortest RHS.
# Since we walk the tree from the top down, this
# should effectively resolve in favor of a "shift".
#
return list[0]
#
# GenericASTBuilder automagically constructs a concrete/abstract syntax tree
# for a given input. The extra argument is a class (not an instance!)
# which supports the "__setslice__" and "__len__" methods.
#
# XXX - silently overrides any user code in methods.
#
class GenericASTBuilder(GenericParser):
def __init__(self, AST, start):
GenericParser.__init__(self, start)
self.AST = AST
def preprocess(self, rule, func):
rebind = lambda lhs, self=self: \
lambda args, lhs=lhs, self=self: \
self.buildASTNode(args, lhs)
lhs, rhs = rule
return rule, rebind(lhs)
def buildASTNode(self, args, lhs):
children = []
for arg in args:
if isinstance(arg, self.AST):
children.append(arg)
else:
children.append(self.terminal(arg))
return self.nonterminal(lhs, children)
def terminal(self, token): return token
def nonterminal(self, type, args):
rv = self.AST(type)
rv[:len(args)] = args
return rv
#
# GenericASTTraversal is a Visitor pattern according to Design Patterns. For
# each node it attempts to invoke the method n_<node type>, falling
# back onto the default() method if the n_* can't be found. The preorder
# traversal also looks for an exit hook named n_<node type>_exit (no default
# routine is called if it's not found). To prematurely halt traversal
# of a subtree, call the prune() method -- this only makes sense for a
# preorder traversal. Node type is determined via the typestring() method.
#
class GenericASTTraversalPruningException:
pass
class GenericASTTraversal:
def __init__(self, ast):
self.ast = ast
def typestring(self, node):
return node.type
def prune(self):
raise GenericASTTraversalPruningException
def preorder(self, node=None):
if node is None:
node = self.ast
try:
name = 'n_' + self.typestring(node)
if hasattr(self, name):
func = getattr(self, name)
func(node)
else:
self.default(node)
except GenericASTTraversalPruningException:
return
for kid in node:
self.preorder(kid)
name = name + '_exit'
if hasattr(self, name):
func = getattr(self, name)
func(node)
def postorder(self, node=None):
if node is None:
node = self.ast
for kid in node:
self.postorder(kid)
name = 'n_' + self.typestring(node)
if hasattr(self, name):
func = getattr(self, name)
func(node)
else:
self.default(node)
def default(self, node):
pass
#
# GenericASTMatcher. AST nodes must have "__getitem__" and "__cmp__"
# implemented.
#
# XXX - makes assumptions about how GenericParser walks the parse tree.
#
class GenericASTMatcher(GenericParser):
def __init__(self, start, ast):
GenericParser.__init__(self, start)
self.ast = ast
def preprocess(self, rule, func):
rebind = lambda func, self=self: \
lambda args, func=func, self=self: \
self.foundMatch(args, func)
lhs, rhs = rule
rhslist = list(rhs)
rhslist.reverse()
return (lhs, tuple(rhslist)), rebind(func)
def foundMatch(self, args, func):
func(args[-1])
return args[-1]
def match_r(self, node):
self.input.insert(0, node)
children = 0
for child in node:
if children == 0:
self.input.insert(0, '(')
children = children + 1
self.match_r(child)
if children > 0:
self.input.insert(0, ')')
def match(self, ast=None):
if ast is None:
ast = self.ast
self.input = []
self.match_r(ast)
self.parse(self.input)
def resolve(self, list):
#
# Resolve ambiguity in favor of the longest RHS.
#
return list[-1]
def _dump(tokens, sets, states):
for i in range(len(sets)):
print 'set', i
for item in sets[i]:
print '\t', item
for (lhs, rhs), pos in states[item[0]].items:
print '\t\t', lhs, '::=',
print string.join(rhs[:pos]),
print '.',
print string.join(rhs[pos:])
if i < len(tokens):
print
print 'token', str(tokens[i])
print
|
[
"dav.alejandro@gmail.com"
] |
dav.alejandro@gmail.com
|
fe52fa4e112e388bd4ed6512fec0ae90874dd234
|
7b1c4e8039dc289d3606ba293d98dfaf9dac2a65
|
/simplecode.py
|
2714fe63229557106b4c6d6768d908400817293b
|
[] |
no_license
|
abiroslyn/Module-10
|
c1d702e98de45c5f9c241fd40a932cae4e7c5e08
|
cb898e4d4a7d83b7b67d5b17cef1c8839f2d9aec
|
refs/heads/main
| 2023-04-27T23:14:08.522645
| 2021-05-06T01:09:35
| 2021-05-06T01:09:35
| 364,748,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
import sys
shift = int(sys.argv[1])
message = input()
print(shift)
print(message.upper())
message = message.upper()
ciphertext = ""
for letter in message:
    if not letter.isalpha():
        continue  # skip characters that are not letters
    offset = 65  # ord('A') == 65
    pi = ord(letter) - offset
    ci = (pi + shift) % 26  # Caesar shift of the plaintext letter
    ciphertext += chr(ci + offset)
print(ciphertext)
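# Usage sketch (assumed invocation): python simplecode.py 3
# then typing HELLO prints KHOOR (each letter shifted by 3).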
|
[
"abigail.thomas@bison.howard.edu"
] |
abigail.thomas@bison.howard.edu
|
123b7bc3f22b4abfbc14b4111b7add46ae238606
|
ef1bba79251acf71504b8fbb30f9292de67278dc
|
/core_firefly/firefly/firefly_library.py
|
129d014ddd8011ecc1a0dfe4edd2f216b4885dd3
|
[] |
no_license
|
OliWenman/firefly_website
|
3c857f54ddceafc8dcbade84686705743221b75b
|
701f2b57f7cbdb4eaf8a75106cdfe5732f229804
|
refs/heads/master
| 2022-06-07T14:12:35.664931
| 2020-04-29T17:26:47
| 2020-04-29T17:26:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,997
|
py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
if sys.version > '3':
long = int
import numpy as np
import copy
import pickle as cPickle
from scipy.stats import chi2
#-----------------------------------------------------------------------
def airtovac(wave_air):
"""
__author__ = 'Kyle B. Westfall'
Wavelengths are corrected for the index of refraction of air under
standard conditions. Wavelength values below 2000 A will not be
altered. Uses formula from Ciddor 1996, Applied Optics 62, 958.
Args:
wave_air (int or float): Wavelength in Angstroms, scalar or
vector. If this is the only parameter supplied, it will be
updated on output to contain double precision vacuum
wavelength(s).
Returns:
numpy.float64 : The wavelength of the line in vacuum.
Example:
If the air wavelength is W = 6056.125 (a Krypton line), then
:func:`airtovac` returns vacuum wavelength of W = 6057.8019.
*Revision history*:
| Written W. Landsman November 1991
| Use Ciddor (1996) formula for better accuracy in the infrared
| Added optional output vector, W Landsman Mar 2011
| Iterate for better precision W.L./D. Schlegel Mar 2011
| Transcribed to python, K.B. Westfall Apr 2015
.. note::
Take care within 1 A of 2000 A. Wavelengths below 2000 A *in
air* are not altered.
"""
# Copy the data
wave_vac = wave_air.astype(np.float64) if hasattr(wave_air, "__len__") else float(wave_air)
g = wave_vac > 2000.0 # Only modify above 2000 A
Ng = np.sum(g)
if Ng > 0:
# Handle both arrays and scalars
if hasattr(wave_air, "__len__"):
_wave_air = wave_air[g].astype(np.float64)
_wave_vac = wave_vac[g]
else:
_wave_air = float(wave_air)
_wave_vac = float(wave_vac)
for i in range(0,2):
sigma2 = np.square(1.0e4/_wave_vac) #Convert to wavenumber squared
fact = 1.0 + 5.792105e-2/(238.0185 - sigma2) + 1.67917e-3/(57.362 - sigma2)
_wave_vac = _wave_air*fact
if hasattr(wave_air, "__len__"): # Save the result
wave_vac[g] = _wave_vac
else:
wave_vac = _wave_vac
return wave_vac
#-----------------------------------------------------------------------
def bisect_array(array):
"""
It takes an array as input and returns the bisected array :
    bisected array[i] = (array[i] + array[i+1]) / 2. Its length is one less than that of the input array.
:param array: input array
"""
bisected_array = np.zeros(len(array) - 1)
for ai in range(len(bisected_array)):
bisected_array[ai] = (array[ai] + array[ai + 1])/2.0
return bisected_array
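# Example sketch: consecutive midpoints, so
# bisect_array(np.array([1.0, 2.0, 4.0])) gives array([1.5, 3.0]).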
#-----------------------------------------------------------------------
def max_pdf(probs,property,sampling):
"""
determines the maximum of a pdf of a property for a given sampling
:param probs: probabilities
:param property: property
:param sampling: sampling of the property
"""
lower_limit = np.min(property)
upper_limit = np.max(property)
error_interval = np.round(upper_limit, 2) - np.round(lower_limit, 2)
if np.round(upper_limit, 2) == np.round(lower_limit, 2) or error_interval <= abs((upper_limit/100.)*3):
return np.asarray(property),np.ones(len(probs))/np.size(probs)
property_pdf_int= np.arange(lower_limit, upper_limit * 1.001, (upper_limit-lower_limit) /sampling ) + ( upper_limit - lower_limit) * 0.000001
prob_pdf = np.zeros(len(property_pdf_int))
for p in range(len(property_pdf_int)-1):
match_prop = np.where( (property <= property_pdf_int[p+1]) & (property > property_pdf_int[p]) )
if np.size(match_prop) == 0:
continue
else:
prob_pdf[p] = np.max( probs[match_prop] )
property_pdf = bisect_array(property_pdf_int)
return property_pdf,prob_pdf[:-1]/np.sum(prob_pdf)
#-----------------------------------------------------------------------
def convert_chis_to_probs(chis,dof):
"""
Converts chi squares to probabilities.
:param chis: array containing the chi squares.
:param dof: array of degrees of freedom.
"""
chis = chis / np.min(chis) * dof
prob = 1.0 - chi2.cdf(chis,dof)
prob = prob / np.sum(prob)
return prob
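# Sketch of the behaviour: with dof = 100, the best-fitting solution is rescaled
# to chi^2 = 100, while a solution at twice the minimum chi^2 becomes 200 and so
# receives a far smaller (but non-zero) weight after the final normalisation.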
#-----------------------------------------------------------------------
def light_weights_to_mass(light_weights,mass_factors):
"""
Uses the data/model mass-to-light ratio to convert
SSP contribution (weights) by light into
SSP contributions by mass.
:param light_weights: light (luminosity) weights obtained when model fitting
:param mass_factors: mass factors obtained when normalizing the spectrum
"""
mass_weights = np.zeros(np.shape(light_weights))
unnorm_mass = np.zeros(np.shape(light_weights))
for w in range(len(light_weights)):
unnorm_mass[w] = light_weights[w] * mass_factors
mass_weights[w] = unnorm_mass[w] / np.sum(unnorm_mass[w])
return unnorm_mass,mass_weights
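# Worked sketch: light_weights = [[0.5, 0.5]] and mass_factors = [2.0, 1.0]
# give unnorm_mass = [[1.0, 0.5]] and mass_weights = [[2/3, 1/3]], i.e. the
# SSP with the larger mass-to-light factor dominates the mass budget.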
#-----------------------------------------------------------------------
def find_closest(A, target):
"""
returns the id of the target in the array A.
:param A: Array, must be sorted
:param target: target value to be located in the array.
"""
idx = A.searchsorted(target)
idx = np.clip(idx, 1, len(A)-1)
left = A[idx-1]
right = A[idx]
idx -= target - left < right - target
return idx
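# Example sketch: find_closest(np.array([1.0, 3.0, 5.0, 7.0]), 3.5) returns 1,
# because A[1] = 3.0 is the nearest entry; the target may also be an array.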
#-----------------------------------------------------------------------
def averages_and_errors(probs,prop,sampling):
"""
determines the average and error of a property for a given sampling
returns : an array with the best fit value, +/- 1, 2, 3 sigma values.
:param probs: probabilities
:param property: property
:param sampling: sampling of the property
"""
# This prevents galaxies with 1 unique solution from going any further. This is because the code crashes when constructing the likelihood
# distributions. HACKY, but we need to think about this...
if ((len(probs) <= 1) or (len(prop[~np.isnan(prop)]) <= 1)):
best_fit, upper_onesig,lower_onesig, upper_twosig,lower_twosig, upper_thrsig,lower_thrsig = 999.0, 999.0, 999.0, 999.0, 999.0, 999.0, 999.0
else:
xdf,y = max_pdf(probs,prop,sampling)
cdf = np.zeros(np.shape(y))
cdf_probspace = np.zeros(np.shape(y))
for m in range(len(y)):
cdf[m] = np.sum(y[:m])
cdf = cdf / np.max(cdf)
area_probspace = y*(xdf[1]-xdf[0])
area_probspace = area_probspace/np.sum(area_probspace)
indx_probspace = np.argsort(area_probspace)[::-1]
desc_probspace = np.sort(area_probspace)[::-1]
cdf_probspace = np.zeros(np.shape(desc_probspace))
for m in range(len(desc_probspace)):
cdf_probspace[m] = np.sum(desc_probspace[:m])
av_sigs = [0.6827,0.9545,0.9973] # Median, + / - 1 sig, + / - 2 sig, + / - 3 sig
# Sorts results by likelihood and calculates confidence intervals on sorted space
index_close = find_closest(cdf_probspace, av_sigs)
best_fit = xdf[indx_probspace[0]]
upper_onesig,lower_onesig = np.max(xdf[indx_probspace[:index_close[0]]]),np.min(xdf[indx_probspace[:index_close[0]]])
upper_twosig,lower_twosig = np.max(xdf[indx_probspace[:index_close[1]]]),np.min(xdf[indx_probspace[:index_close[1]]])
upper_thrsig,lower_thrsig = np.max(xdf[indx_probspace[:index_close[2]]]),np.min(xdf[indx_probspace[:index_close[2]]])
if np.size(xdf) == 0:
raise Exception('No solutions found??? FIREFLY error (see statistics.py)')
return [best_fit,upper_onesig,lower_onesig,upper_twosig,lower_twosig,upper_thrsig,lower_thrsig]
#-----------------------------------------------------------------------
def calculate_averages_pdf(probs,light_weights,mass_weights,unnorm_mass,age,metal,sampling,dist_lum, flux_units):
"""
Calculates light- and mass-averaged age and metallicities.
Also outputs stellar mass and mass-to-light ratios.
And errors on all of these properties.
It works by taking the complete set of probs-properties and
maximising over the parameter range (such that solutions with
equivalent values but poorer probabilities are excluded). Then,
we calculate the median and 1/2 sigma confidence intervals from
the derived 'max-pdf'.
NB: Solutions with identical SSP component contributions
are re-scaled such that the sum of probabilities with that
component = the maximum of the probabilities with that component.
i.e. prob_age_ssp1 = max(all prob_age_ssp1) / sum(all prob_age_ssp1)
This is so multiple similar solutions do not count multiple times.
Outputs a dictionary of:
- light_[property], light_[property]_[1/2/3]_sigerror
- mass_[property], mass_[property]_[1/2/3]_sigerror
- stellar_mass, stellar_mass_[1/2/3]_sigerror
- mass_to_light, mass_to_light_[1/2/3]_sigerror
- maxpdf_[property]
- maxpdf_stellar_mass
where [property] = [age] or [metal]
:param probs: probabilities
:param light_weights: light (luminosity) weights obtained when model fitting
:param mass_weights: mass weights obtained when normalizing models to data
:param unnorm_mass: mass weights obtained from the mass to light ratio
:param age: age
:param metal: metallicity
:param sampling: sampling of the property
:param dist_lum: luminosity distance in cm
"""
# Sampling number of max_pdf (100:recommended) from options
# Keep the age in linear units of Age(Gyr)
log_age = age
    av = {}  # dictionary where the derived values are stored
av['light_age'],av['light_age_1_sig_plus'],av['light_age_1_sig_minus'], av['light_age_2_sig_plus'], av['light_age_2_sig_minus'], av['light_age_3_sig_plus'], av['light_age_3_sig_minus'] = averages_and_errors(probs,np.dot(light_weights,log_age),sampling)
av['light_metal'], av['light_metal_1_sig_plus'], av['light_metal_1_sig_minus'], av['light_metal_2_sig_plus'], av['light_metal_2_sig_minus'], av['light_metal_3_sig_plus'], av['light_metal_3_sig_minus'] = averages_and_errors(probs, np.dot(light_weights, metal), sampling)
av['mass_age'], av['mass_age_1_sig_plus'], av['mass_age_1_sig_minus'], av['mass_age_2_sig_plus'], av['mass_age_2_sig_minus'], av['mass_age_3_sig_plus'], av['mass_age_3_sig_minus'] = averages_and_errors(probs, np.dot(mass_weights, log_age), sampling)
av['mass_metal'], av['mass_metal_1_sig_plus'], av['mass_metal_1_sig_minus'], av['mass_metal_2_sig_plus'], av['mass_metal_2_sig_minus'], av['mass_metal_3_sig_plus'], av['mass_metal_3_sig_minus'] = averages_and_errors(probs, np.dot(mass_weights, metal), sampling)
conversion_factor = flux_units * 4 * np.pi * dist_lum**2.0 # unit 1e-17 cm2
# Keep the mass in linear units until later M/M_{odot}.
tot_mass = np.sum(unnorm_mass, 1) * conversion_factor
av['stellar_mass'], av['stellar_mass_1_sig_plus'], av['stellar_mass_1_sig_minus'], av['stellar_mass_2_sig_plus'], av['stellar_mass_2_sig_minus'], av['stellar_mass_3_sig_plus'], av['stellar_mass_3_sig_minus'] = averages_and_errors(probs,tot_mass,sampling)
return av
#-----------------------------------------------------------------------
def normalise_spec(data_flux,model_flux):
"""
Normalises all models to the median value of the spectrum.
Saves the factors for later use.
Outputs : normed models and translation factors.
:param data_flux: observed flux in the data
:param model_flux: flux from the models
"""
data_norm = np.median(data_flux)
num_mods = len(model_flux)
model_norm,mass_factor = np.zeros(num_mods),np.zeros(num_mods)
normed_model_flux = np.zeros((num_mods,len(model_flux[0])))
for m in range(len(model_flux)):
model_norm[m] = np.median(model_flux[m])
mass_factor[m] = data_norm/model_norm[m]
normed_model_flux[m] = model_flux[m] / model_norm[m] * data_norm
return normed_model_flux,mass_factor
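# Sketch: if np.median(data_flux) = 10 and a model has median 2, that model is
# multiplied by 5 (its mass_factor), so data and models share the same median.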
#-----------------------------------------------------------------------
def match_data_models( data_wave_int, data_flux_int, data_flags, error_flux_int, model_wave_int, model_flux_int, min_wave_in, max_wave_in, saveDowngradedModel = True, downgradedModelFile = "DGmodel.txt"):
"""
* 0.Take data and models as inputs
* 1. interpolate data and model to the lowest sampled array.
* 1.1. Defines the wavelength range on the model and on the data
* 1.2. Downgrades the array, model or data, that has most sampling
* 1.3. integrate between them to output a matched resolution array for data and model
* 2. Returns the matched wavelength array, the corresponding data, error and model arrays : matched_wave,matched_data,matched_error,matched_model
:param data_wave_int: data wavelength array in the restframe
:param data_flux_int: data flux array
:param data_flags: data quality flag array : 1 for good data
:param error_flux_int: data flux error array
:param model_wave_int: model wavelength array (in the rest frame)
:param model_flux_int: model flux array
:param min_wave_in: minimum wavelength to be considered
:param max_wave_in: maximum wavelength to be considered
:param saveDowngradedModel: if True it will save the downgraded models
    :param downgradedModelFile: location where downgraded models will be saved
"""
# 1. interpolate onto the bisection of lowest sampled array.
num_models = len(model_flux_int)
# 1.1. Defines the wavelength range on the model and on the data
min_wave = np.max([np.min(data_wave_int[np.where(data_flags==1)]), np.min(model_wave_int),min_wave_in])
max_wave = np.min([np.max(data_wave_int[np.where(data_flags==1)]), np.max(model_wave_int),max_wave_in])
print("min wave =",min_wave)
print("max wave =",max_wave)
#print np.min(data_wave_int[np.where(data_flags==1)]), np.min(model_wave_int), min_wave_in
#print np.max(data_wave_int[np.where(data_flags==1)]), np.max(model_wave_int), max_wave_in
loc_model = np.array(( model_wave_int <= max_wave) & (model_wave_int >= min_wave))
if np.sum(loc_model)==0:
raise ValueError("The wavelength range input is below or above model wavelength coverage!")
model_wave = model_wave_int[loc_model]
num_mod = np.sum(loc_model)
model_flux = np.zeros((num_models,num_mod))
for m in range(num_models):
model_flux[m] = model_flux_int[m][loc_model]
loc_data = np.array(( data_wave_int <= max_wave) & (data_wave_int >= min_wave))
if np.sum(loc_data)==0:
raise ValueError("The wavelength range input is below or above data wavelength coverage!")
num_dat = np.sum(loc_data)
data_wave = data_wave_int[loc_data]
data_flux = data_flux_int[loc_data]
error_flux = error_flux_int[loc_data]
# 1.2. Downgrades the array, model or data, that has most sampling
if num_mod >= num_dat:
#print "More model points than data points! Downgrading models to data sampling ..."
bisect_data = bisect_array(data_wave) + np.min(data_wave)*0.0000000001
matched_model = np.zeros((num_models,len(bisect_data) - 1))
for m in range(num_models):
model_flux_bounds = np.interp(bisect_data, model_wave, model_flux[m])
combined_wave_int = np.concatenate((model_wave,bisect_data))
combined_flux_int = np.concatenate((model_flux[m],model_flux_bounds))
sort_indices = np.argsort(combined_wave_int)
combined_wave = np.sort(combined_wave_int)
boundary_indices = np.searchsorted(combined_wave,bisect_data)
combined_flux = combined_flux_int[sort_indices]
len_combo = len(combined_flux)
# 1.3. produces a matched resolution array
for l in range(len(boundary_indices) - 1):
if boundary_indices[l + 1] >= len_combo:
matched_model[m][l] = matched_model[m][l - 1]
else:
matched_model[m][l] = np.trapz(combined_flux[boundary_indices[l] : boundary_indices[l + 1] + 1], x=combined_wave[boundary_indices[l] :boundary_indices[l + 1] + 1]) / (combined_wave[boundary_indices[l + 1]] - combined_wave[boundary_indices[l] ])
matched_wave = data_wave[1:-1]
matched_data = data_flux[1:-1]
matched_error = error_flux[1:-1]
# OPTION : saves the downgraded models.
if saveDowngradedModel:
#print "saving downgraded models to ",downgradedModelFile
            f = open(downgradedModelFile, 'wb')  # binary mode for pickle
            cPickle.dump([matched_wave, matched_data, matched_error], f)
            f.close()
else:
#print "More data points than model points! Downgrading data to model sampling ..."
bisect_model = bisect_array(model_wave) + np.min(model_wave)*0.0000000001
boundaries = np.searchsorted(data_wave,bisect_model)
data_flux_bounds = np.interp(bisect_model, data_wave, data_flux)
error_flux_bounds = np.interp(bisect_model, data_wave, error_flux)
combined_wave_int = np.concatenate((data_wave,bisect_model))
combined_flux_int = np.concatenate((data_flux,data_flux_bounds))
        combined_error_int = np.concatenate((error_flux, error_flux_bounds))
sort_indices = np.argsort(combined_wave_int)
combined_wave = np.sort(combined_wave_int)
boundary_indices = np.searchsorted(combined_wave,bisect_model)
combined_flux = combined_flux_int[sort_indices]
combined_error = combined_error_int[sort_indices]
# 1.3. produces a matched resolution array
matched_data,matched_error= np.zeros(len(boundary_indices) - 1),np.zeros(len(boundary_indices) - 1)
len_combo = len(combined_flux)
for l in range(len(boundary_indices) - 1):
if boundary_indices[l + 1] >= len_combo:
matched_data[l] = matched_data[l - 1]
matched_error[l] = matched_error[l - 1]
else:
matched_data[l] = np.trapz(combined_flux[boundary_indices[l]:boundary_indices[l + 1] + 1], x=combined_wave[boundary_indices[l]: boundary_indices[l + 1] + 1])/ (combined_wave[boundary_indices[l + 1]] - combined_wave[boundary_indices[l]])
matched_error[l] = np.trapz(combined_error[boundary_indices[l]:boundary_indices[l + 1] + 1], x=combined_wave[boundary_indices[l]:boundary_indices[l + 1] + 1])/ (combined_wave[boundary_indices[l + 1]] - combined_wave[boundary_indices[l]])
matched_wave = model_wave[1:-1]
matched_model = np.zeros((num_models,len(matched_wave)))
for m in range(num_models):
matched_model[m][:] = model_flux[m][1:-1]
return matched_wave,matched_data,matched_error,matched_model
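# Usage sketch (hypothetical inputs): restrict the fit to 4000-5000 Angstrom and
# skip writing the pickled downgraded arrays to disk.
# matched_wave, matched_data, matched_error, matched_model = match_data_models(
#     data_wave, data_flux, data_flags, error_flux, model_wave, model_fluxes,
#     4000.0, 5000.0, saveDowngradedModel=False)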
|
[
"42441085+OliWenman@users.noreply.github.com"
] |
42441085+OliWenman@users.noreply.github.com
|
5f4d648fe87f277326ed1c245f130c1540612c9f
|
acda0bc700943654156d491eaa0b766bea0ae7bd
|
/apps/item/views.py
|
5a977f899786fbf9bf8ce0698dd0d24153b6aefd
|
[] |
no_license
|
bluehawkarthur/casa_campo
|
a11baaec966d51a1e733ad2dd48bb77a0ecd6cb5
|
22a57b58a722769e8e25330457ed868d230f5c05
|
refs/heads/master
| 2021-01-18T15:05:47.674205
| 2016-10-26T20:03:55
| 2016-10-26T20:03:55
| 68,387,895
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,605
|
py
|
from django.shortcuts import render_to_response, render
from django.views.generic import FormView, ListView, DetailView, UpdateView
from pure_pagination.mixins import PaginationMixin
from django.core.urlresolvers import reverse_lazy
from .forms import ItemForm
from django.http import HttpResponseRedirect
from django.template import RequestContext
from .models import Item
# Create your views here. , UpdateView, DeleteView
def CrearItem(request):
if request.method == 'POST':
form = ItemForm(request.POST)
if form.is_valid():
item = Item(
codigo=form.cleaned_data['codigo'],
unidad=form.cleaned_data['unidad'],
descripcion=form.cleaned_data['descripcion'],
cantidad=form.cleaned_data['cantidad'],
pr_costo=form.cleaned_data['pr_costo'])
item.save()
return HttpResponseRedirect(reverse_lazy('listar_item'))
else:
            print(form.errors)  # surface the validation errors
form = ItemForm()
variables = RequestContext(request, {'form': form})
return render_to_response('item/crearitem.html', variables)
class ListarItem(PaginationMixin, ListView):
template_name = 'item/listar_item.html'
paginate_by = 5
model = Item
context_object_name = 'item'
class DetalleItem(DetailView):
template_name = 'item/detalle_item.html'
model = Item
context_object_name = 'item'
class EditItem(UpdateView):
template_name = 'item/edit_item.html'
model = Item
fields = ['codigo', 'unidad', 'descripcion', 'cantidad', 'pr_costo']
success_url = reverse_lazy('listar_item')
def DeleteItem(request, item):
e = Item.objects.get(id=item)
e.delete()
return HttpResponseRedirect(reverse_lazy('listar_item'))
|
[
"josedanielf9@gmail.com"
] |
josedanielf9@gmail.com
|
bfd136f3bac4f2347c1f6f74a47385b3c44153cf
|
30e4859a4c471a25c1e980c4629c589b6b499e7e
|
/Lab/Lab7/book_analyzer_profiled.py
|
66dcadda10ae0b46eee3cb826ecea36410ab5d08
|
[] |
no_license
|
GoodestUsername/Python-examples-from-school
|
a8ad63b617f95c7850031fa77396928f9feb9c62
|
cb20b571ab50dd46c6f838e55ff5ad39b51e6f11
|
refs/heads/master
| 2023-03-29T21:01:15.838077
| 2021-04-03T04:33:34
| 2021-04-03T04:33:34
| 328,768,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,169
|
py
|
"""
This module is responsible for holding a badly written (but not so bad
that you won't find this in the workplace) BookAnalyzer class that needs
to be profiled and optimized.
"""
import time
class BookAnalyzer:
"""
This class provides the ability to load the words in a text file in
memory and provide the ability to filter out the words that appear
only once.
"""
# a constant to help filter out common punctuation.
COMMON_PUNCTUATION = [",", "*", ";", ".", ":", "(", "[", "]", ")"]
def __init__(self):
self.text = None
def read_data(self, src="House of Usher.txt"):
"""
Reads through a text file and loads in all the words. This
function also processes the words such that all whitespace and
common punctuation is removed.
:param src: the name of the file, a string
"""
# read lines
with open(src, mode='r', encoding='utf-8') as book_file:
self.text = book_file.readlines()
# strip out empty lines
stripped_text = []
for line in self.text:
if line != "\n":
stripped_text.append(line)
self.text = stripped_text
# convert list of lines to list of words
words = []
for line in self.text:
words += line.split()
self.text = words
# remove common punctuation from words
temp_text = []
for word in self.text:
temp_word = word
for punctuation in self.COMMON_PUNCTUATION:
temp_word = temp_word.replace(punctuation, '')
temp_text.append(temp_word)
self.text = temp_text
@staticmethod
def is_unique(word, word_list):
"""
Checks to see if the given word appears in the provided sequence.
        This check is case-insensitive.
:param word: a string
:param word_list: a sequence of words
:return: True if not found, false otherwise
"""
for a_word in word_list:
if word.lower() == a_word.lower():
return False
return True
def find_unique_words(self):
"""
Filters out all the words in the text.
:return: a list of all the unique words.
"""
temp_text = self.text
unique_words = []
unique_words_2 = []
while temp_text:
word = temp_text.pop()
if word.lower() not in unique_words_2:
if self.is_unique(word, temp_text):
unique_words.append(word)
else:
unique_words.append(word)
unique_words_2.append(word.lower())
return unique_words
def main():
start_time = time.time()
book_analyzer = BookAnalyzer()
book_analyzer.read_data()
unique_words = book_analyzer.find_unique_words()
print("-" * 50)
print(f"List of unique words (Count: {len(unique_words)})")
print("-" * 50)
for word in unique_words:
print(word)
print("-" * 50)
print("--- %s seconds ---" % (time.time() - start_time))
if __name__ == '__main__':
main()
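# Profiling sketch: since the module docstring asks for profiling, one option is
#     python -m cProfile -s cumtime book_analyzer_profiled.py
# which sorts the report by cumulative time so the slowest calls surface first.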
|
[
"ericdongcannn@hotmail.com"
] |
ericdongcannn@hotmail.com
|
a06e523614c65dc76a0ee5de471b3d4970df6c87
|
f82757475ea13965581c2147ff57123b361c5d62
|
/gi-stubs/repository/GstVideo/VideoResampler.py
|
a56a7a96d40d30d7b2b241bb4ea4fabe51b4f99e
|
[] |
no_license
|
ttys3/pygobject-stubs
|
9b15d1b473db06f47e5ffba5ad0a31d6d1becb57
|
d0e6e93399212aada4386d2ce80344eb9a31db48
|
refs/heads/master
| 2022-09-23T12:58:44.526554
| 2020-06-06T04:15:00
| 2020-06-06T04:15:00
| 269,693,287
| 8
| 2
| null | 2020-06-05T15:57:54
| 2020-06-05T15:57:54
| null |
UTF-8
|
Python
| false
| false
| 6,320
|
py
|
# encoding: utf-8
# module gi.repository.GstVideo
# from /usr/lib64/girepository-1.0/GstVideo-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Gst as __gi_repository_Gst
import gi.repository.GstBase as __gi_repository_GstBase
import gobject as __gobject
class VideoResampler(__gi.Struct):
"""
:Constructors:
::
VideoResampler()
"""
def clear(self): # real signature unknown; restored from __doc__
""" clear(self) """
pass
def init(self, method, flags, n_phases, n_taps, shift, in_size, out_size, options): # real signature unknown; restored from __doc__
""" init(self, method:GstVideo.VideoResamplerMethod, flags:GstVideo.VideoResamplerFlags, n_phases:int, n_taps:int, shift:float, in_size:int, out_size:int, options:Gst.Structure) -> bool """
return False
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
in_size = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
max_taps = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
n_phases = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
n_taps = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
offset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
out_size = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
phase = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
taps = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_gst_reserved = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(VideoResampler), '__module__': 'gi.repository.GstVideo', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'VideoResampler' objects>, '__weakref__': <attribute '__weakref__' of 'VideoResampler' objects>, '__doc__': None, 'in_size': <property object at 0x7f930d2a1770>, 'out_size': <property object at 0x7f930d2a1860>, 'max_taps': <property object at 0x7f930d2a1950>, 'n_phases': <property object at 0x7f930d2a1a40>, 'offset': <property object at 0x7f930d2a1b30>, 'phase': <property object at 0x7f930d2a1c20>, 'n_taps': <property object at 0x7f930d2a1d10>, 'taps': <property object at 0x7f930d2a1e00>, '_gst_reserved': <property object at 0x7f930d2a1ef0>, 'clear': gi.FunctionInfo(clear), 'init': gi.FunctionInfo(init)})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(VideoResampler)
|
[
"ttys3@outlook.com"
] |
ttys3@outlook.com
|
2e5ea15b346c2e4143dd8edea22dcce8081c4c42
|
7cf619b3348e6a9ccc0402c27ae2480a60eef1c2
|
/Python_Learning/thread_test.py
|
2e418dece76f55a07d9d1c39363c5fe191bad850
|
[
"MIT"
] |
permissive
|
lifedespicable/Python_Learning
|
cbe4284ce4688c931a3c3860197d077d0fb19a48
|
b3ec851c80a8137d64d02d15a3566af49b9a7ba7
|
refs/heads/master
| 2022-11-20T12:33:03.987342
| 2020-07-19T03:25:05
| 2020-07-19T03:25:05
| 194,821,318
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
import threading
import time
from threading import current_thread
def myThread(arg1, arg2):
print(current_thread().getName(),'start')
print('%s , %s' %(arg1, arg2))
time.sleep(1)
print(current_thread().getName(),'stop')
for i in range(1, 6, 1): # loop from 1 to 5 in steps of 1, i.e. five iterations
# t1 = myThread(i, i+1)
# print(t1)
t1 = threading.Thread(target= myThread, args= (i,i+1))
# t1 = threading.Thread(target=myThread(i, i+1))
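    # note: the commented-out form above would call myThread immediately and
    # pass its None return value as target, instead of running it in a thread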
t1.start()
print(current_thread().getName(),'end')
|
[
"945432954@qq.com"
] |
945432954@qq.com
|
00e7a6ada85829b18aee5223436b27d7d2b44fb1
|
d3b0c2f4a2a2a5087b517501126ec101018af84c
|
/dcgan.py
|
ac31ec12e6f47d5624ff65232118449a06bb9a8f
|
[] |
no_license
|
pl561/dcgan
|
9b14ad90b44c1b4007ceabec98d3867cb0074371
|
62e39e72b926a4249690f5d131e8f20245681663
|
refs/heads/master
| 2022-12-02T20:02:40.775251
| 2020-08-24T06:56:40
| 2020-08-24T06:56:40
| 289,850,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,776
|
py
|
"""
DCGAN implementation (based on the Keras DCGAN tutorial)
Tensorflow 2.1/Keras
(should work from TF2.0)
experiments to train with an autoencoder on 512x512 images
"""
import os
import sys
import argparse
import numpy as np
from numpy import expand_dims
from numpy import mean
from numpy import ones
from numpy.random import randn
from numpy.random import randint
import cv2
import imutils
import datetime
import glob
import imageio
import matplotlib.pyplot as plt
import PIL
import tensorflow as tf
from tensorflow.keras import layers
import time
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import backend
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.constraints import Constraint
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorboardX import SummaryWriter
import tqdm
import yaml
from easydict import EasyDict
import wgan
# fixed config args
with open('cfg/main.yaml', 'r') as f:
cargs = EasyDict(yaml.load(f, Loader=yaml.SafeLoader))
# def gen_layers(num_filter, )
def get_time_now():
now = datetime.datetime.now()
now = now.strftime("%d-%m-%Y_%H-%M-%S")
return now
def add_layers(model, filter_size, assert_output=None):
c2dt = layers.Conv2DTranspose(filter_size, (5, 5), strides=(2, 2),
padding='same', use_bias=False)
bn = layers.BatchNormalization()
lrelu = layers.LeakyReLU()
model.add(c2dt)
if assert_output is not None:
assert model.output_shape == assert_output
model.add(bn)
model.add(lrelu)
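# Usage sketch (hypothetical shapes): after a layer producing (None, 64, 64, 128),
#   add_layers(model, 64, assert_output=(None, 128, 128, 64))
# appends a stride-2 Conv2DTranspose + BatchNorm + LeakyReLU upsampling stage.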
def make_encoder_model(noise_dim):
model = tf.keras.Sequential()
# model.add(layers.Input(shape=(512,512,3)))
model.add(layers.Conv2D(
16, (5, 5), strides=(2, 2),
input_shape=(512, 512, 3),
padding='same', use_bias=False))
assert model.output_shape == (None, 256, 256, 16), model.output_shape
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2D(
32, (5, 5), strides=(2, 2), padding='same', use_bias=False))
assert model.output_shape == (None, 128, 128, 32), model.output_shape
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2D(
64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
assert model.output_shape == (None, 64, 64, 64), model.output_shape
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
# model.add(layers.Conv2D(
# 128, (5, 5), strides=(2, 2), padding='same', use_bias=False))
# assert model.output_shape == (None, 32, 32, 128), model.output_shape
# model.add(layers.BatchNormalization())
# model.add(layers.LeakyReLU())
# model.add(layers.Conv2D(
# 256, (5, 5), strides=(2, 2), padding='same', use_bias=False))
# assert model.output_shape == (None, 16, 16, 256), model.output_shape
# model.add(layers.BatchNormalization())
# model.add(layers.LeakyReLU())
# model.add(layers.Conv2D(
# 512, (5, 5), strides=(2, 2), padding='same', use_bias=False))
# assert model.output_shape == (None, 8, 8, 512), model.output_shape
# model.add(layers.BatchNormalization())
# model.add(layers.LeakyReLU())
model.add(layers.Flatten())
# assert model.output_shape == (None, 32*32*128)
# model.add(layers.Dense(noise_dim))
return model
def make_generator_model(noise_dim):
model = tf.keras.Sequential()
# model.add(layers.Dense(8*8*512, use_bias=False, input_shape=(noise_dim,)))
# model.add(layers.BatchNormalization())
# model.add(layers.LeakyReLU())
model.add(layers.Input(shape=(noise_dim,)))
model.add(layers.Reshape((64, 64, 64)))
# Note: None is the batch size
# assert model.output_shape == (None, 32, 32, 128)
assert model.output_shape == (None, 64, 64, 64)
# model.add(layers.Conv2DTranspose(
# 512, (5, 5), strides=(1, 1), padding='same', use_bias=False))
# assert model.output_shape == (None, 8, 8, 512)
# model.add(layers.BatchNormalization())
# model.add(layers.LeakyReLU())
# model.add(layers.Conv2DTranspose(
# 256, (5, 5), strides=(2, 2), padding='same', use_bias=False))
# assert model.output_shape == (None, 16, 16, 256)
# model.add(layers.BatchNormalization())
# model.add(layers.LeakyReLU())
# model.add(layers.Conv2DTranspose(
# 128, (5, 5), strides=(2, 2), padding='same', use_bias=False))
# assert model.output_shape == (None, 32, 32, 128)
# model.add(layers.BatchNormalization())
# model.add(layers.LeakyReLU())
# model.add(layers.Conv2DTranspose(
# 64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
# assert model.output_shape == (None, 64, 64, 64)
# model.add(layers.BatchNormalization())
# model.add(layers.LeakyReLU())
model.add(layers.Conv2DTranspose(
32, (5, 5), strides=(2, 2), padding='same', use_bias=False))
assert model.output_shape == (None, 128, 128, 32)
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2DTranspose(
16, (5, 5), strides=(2, 2), padding='same', use_bias=False))
assert model.output_shape == (None, 256, 256, 16)
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2DTranspose(3, (5, 5), strides=(2, 2),
padding='same', use_bias=False, activation='tanh'))
assert model.output_shape == (None, 512, 512, 3)
return model
def make_autoencoder_model(noise_dim):
pass
def make_discriminator_model():
model = tf.keras.Sequential()
model.add(layers.Conv2D(32, (5, 5), strides=(2, 2), padding='same',
input_shape=[512, 512, 3]))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same'))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
# model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
# model.add(layers.LeakyReLU())
# model.add(layers.Dropout(0.3))
# model.add(layers.Conv2D(256, (5, 5), strides=(2, 2), padding='same'))
# model.add(layers.LeakyReLU())
# model.add(layers.Dropout(0.3))
model.add(layers.Flatten())
model.add(layers.Dense(100))
model.add(layers.Dense(1))
return model
def discriminator_loss(real_output, fake_output, cross_entropy):
real_loss = cross_entropy(tf.ones_like(real_output), real_output)
fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
total_loss = real_loss + fake_loss
return total_loss
def generator_loss(fake_output, cross_entropy):
return cross_entropy(tf.ones_like(fake_output), fake_output)
def autoencoder_loss(real_image, generated_image):
mse = tf.keras.losses.MeanSquaredError()
total_mse = mse(real_image[:, :, :, 0], generated_image[:, :, :, 0]) + \
mse(real_image[:, :, :, 1], generated_image[:, :, :, 1]) + \
mse(real_image[:, :, :, 2], generated_image[:, :, :, 2])
return total_mse
# def show_test_image():
# generator = make_generator_model()
# noise = tf.random.normal([1, 100])
# generated_image = generator(noise, training=False)
# plt.imshow(generated_image[0, :, :, 0], cmap='gray')
# # plt.show()
# discriminator = make_discriminator_model()
# decision = discriminator(generated_image)
# print(decision)
def bgr2rgb(img):
return img[:, :, ::-1]
def autoencode_ds(sample_it, autoencoder, img_shape, n_sample=16):
for i in range(n_sample):
img, _ = sample_it.next()
generated_image = autoencoder(img, training=False)
image = generated_image[0].numpy() * 127.5 + 127.5
img_ae = bgr2rgb(image)
cv2.imwrite("generated/image_{}.png".format(i), img_ae)
def do(args):
# (train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()
path = cargs.ds_path
nb_image = 15
# train_images = np.zeros((nb_image, 28, 28))
# for i in range(nb_image):
# fname = path + "{}.jpg".format(i+1)
# img = cv2.imread(fname, 0)
# img = cv2.resize(img, (28,28))
# cv2.imwrite("orig/image_{}.png".format(i), img)
# train_images[i] = img
BUFFER_SIZE = cargs.buffer_size
BATCH_SIZE = cargs.batch_size
EPOCHS = cargs.epochs
start_epoch = args.startepoch
# noise_dim = 100
# noise_dim = 8*8*512
noise_dim = 64 * 64 * 64
num_examples_to_generate = cargs.num_ex_to_gen
img_shape = cargs.img_shape_w, cargs.img_shape_h
# 940
train_datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1,
horizontal_flip=True, vertical_flip=True)
orig_datagen = ImageDataGenerator()
train_it = train_datagen.flow_from_directory(path, batch_size=BATCH_SIZE, target_size=img_shape,
color_mode="rgb", shuffle=True)
orig_it = orig_datagen.flow_from_directory(path, batch_size=BATCH_SIZE, target_size=img_shape,
color_mode="rgb")
sample_it = orig_datagen.flow_from_directory(path, batch_size=1, target_size=img_shape,
color_mode="rgb")
for i in range(nb_image):
fname = path + "{}.jpg".format(i + 1)
img, _ = orig_it.next()
img = bgr2rgb(img[0])
# breakpoint()
cv2.imwrite("orig/image_{}.png".format(i), img)
# breakpoint()
# train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
# train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
# train_images = (train_images - 127.5) / 127.5 # Normalize the images to [-1, 1]
# train_datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True)
# Batch and shuffle the data
# train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
# This method returns a helper function to compute cross entropy loss
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
mse = tf.keras.losses.MeanSquaredError()
# mse = tf.keras.losses.MeanSquaredError()
generator = make_generator_model(noise_dim)
encoder = make_encoder_model(noise_dim)
inp = layers.Input(shape=(512, 512, 3))
# breakpoint()
# inp1 = encoder.layers[0](inp)
# encoded = generator.layers[0](encoder(inp))
outp = generator(encoder(inp))
autoencoder = tf.keras.Model(inputs=[inp], outputs=[outp])
discriminator = make_discriminator_model()
# autoencoder = tf.keras.Model(encoder.layers[0], generator(encoder.layers[0]))
generator_optimizer = tf.keras.optimizers.Adam(cargs.lr_gen)
discriminator_optimizer = tf.keras.optimizers.Adam(cargs.lr_disc)
ae_opt = tf.keras.optimizers.Adam(cargs.lr_ae)
    wgan_critic_opt = tf.keras.optimizers.RMSprop(lr=cargs.lr_critic)
    wgan_gan_opt = tf.keras.optimizers.RMSprop(lr=cargs.lr_critic)
writer = SummaryWriter(logdir=cargs.log_path, max_queue=1)
checkpoint_dir = cargs.checkpoints_path
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator)
# breakpoint()
if args.restore:
status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
    # We will reuse this seed over time (so it's easier
    # to visualize progress in the animated GIF)
seed = tf.random.normal([num_examples_to_generate, noise_dim])
n_sample = len(train_it)
now = get_time_now()
@tf.function
def train_step_gan(images):
noise = tf.random.normal([BATCH_SIZE, noise_dim])
# tf.reshape(noise, (BATCH_SIZE, ))
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
generated_images = generator(noise, training=True)
real_output = discriminator(images, training=True)
fake_output = discriminator(generated_images, training=True)
gen_loss = generator_loss(fake_output, cross_entropy)
disc_loss = discriminator_loss(
real_output, fake_output, cross_entropy)
gradients_of_generator = gen_tape.gradient(
gen_loss, generator.trainable_variables)
gradients_of_discriminator = disc_tape.gradient(
disc_loss, discriminator.trainable_variables)
generator_optimizer.apply_gradients(
zip(gradients_of_generator, generator.trainable_variables))
discriminator_optimizer.apply_gradients(
zip(gradients_of_discriminator, discriminator.trainable_variables))
@tf.function
def train_step_ae(images):
noise = tf.random.normal([BATCH_SIZE, noise_dim])
with tf.GradientTape() as ae_tape, tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
# latent_images = encoder(images, training=True)
generated_images = autoencoder(images, training=True)
real_output = discriminator(images, training=True)
fake_output = discriminator(generated_images, training=True)
ae_loss = autoencoder_loss(images, generated_images)
gen_loss = generator_loss(fake_output, cross_entropy)
disc_loss = discriminator_loss(real_output, fake_output,
cross_entropy)
# trainable vars
ae_vars = autoencoder.trainable_variables
gen_vars = generator.trainable_variables
disc_vars = discriminator.trainable_variables
# GRADIENTS
grad_ae = ae_tape.gradient(ae_loss, ae_vars)
grad_gen = gen_tape.gradient(gen_loss, gen_vars)
grad_disc = disc_tape.gradient(disc_loss, disc_vars)
# breakpoint()
# AE UPDATE
# ae_opt.apply_gradients(zip(grad_ae, ae_vars))
# GEN UPDATE
generator_optimizer.apply_gradients(zip(grad_gen, gen_vars))
# DISC UPDATE
discriminator_optimizer.apply_gradients(zip(grad_disc, disc_vars))
return ae_loss, gen_loss, disc_loss
def train(train_it, epochs, start_epoch=0):
for epoch in range(start_epoch, epochs):
start = time.time()
for ind in range(len(train_it)):
image_batch, _ = train_it.next()
image_batch = (image_batch - 127.5) / 127.5
ae_loss, gen_loss, disc_loss = train_step_ae(image_batch)
# train_step_gan(image_batch)
print("batch {} \r".format(ind), end="")
end = time.time()
ae_loss, gen_loss, disc_loss = ae_loss.numpy(), gen_loss.numpy(), disc_loss.numpy()
loss_print = "{:.8} {:.8} {:.8} \r".format(
ae_loss, gen_loss, disc_loss)
print(' Time for epoch {} is {:.2} sec '.format(
epoch + 1, end - start) + loss_print, end="")
writer.add_scalars("losses/{}_{}".format("dcgan_ae", now),
{
                'ae_loss': ae_loss,
                'gen_loss': gen_loss,
                'disc_loss': disc_loss,
}, epoch)
            # Save the model every 500 epochs
if (epoch + 1) % 500 == 0:
checkpoint.save(file_prefix=checkpoint_prefix)
print(
"[Epoch {}] Checkpoint saved. ".format(epoch + 1))
if (epoch + 1) % 20 == 0:
predictions = generator(seed, training=False)
for i in range(num_examples_to_generate):
# breakpoint()
img = predictions[i].numpy() * 127.5 + 127.5
img = bgr2rgb(img)
cv2.imwrite("from_noise/image_{}.png".format(i), img)
# saves on disk example images generated by the AE
autoencode_ds(sample_it, autoencoder, img_shape, n_sample=16)
print("n_sample =", n_sample)
train(train_it, EPOCHS, start_epoch=start_epoch)
print("n_sample =", n_sample)
writer.close()
# predictions = generator(seed, training=False)
# Generate after the final epoch
# display.clear_output(wait=True)
# generate_and_save_images(generator,
# epochs,
# seed)
# def generate_and_save_images(model, epoch, test_input):
# Notice `training` is set to False.
# This is so all layers run in inference mode (batchnorm).
# fig = plt.figure(figsize=(4,4))
# for i in range(predictions.shape[0]):
# plt.subplot(4, 4, i+1)
# plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
# plt.axis('off')
# plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
# plt.show()
# show_test_image()
def main():
parser = argparse.ArgumentParser("Parser")
parser.add_argument("--startepoch", default=0,
help="help")
parser.add_argument("--restore", action="store_true",
help="help")
args = parser.parse_args()
print(args)
# if args.arg:
# do()
do(args)
if __name__ == "__main__":
sys.exit(main())
|
[
"noreply@github.com"
] |
pl561.noreply@github.com
|
abdaa8154bd3938393381ebeaf41a1febab3ef94
|
9ab856a6f80fa305efcad6974da6ab220f268177
|
/Suma, resta, multiplicación y división de dos números.py
|
b519a0f5c998a16a0df286fecb9090f54d263c59
|
[] |
no_license
|
EduZarate/My-first-one
|
f97859b862efea9876427b5f997a5ae77552be46
|
d395b5981677e3cdefd5e3647a0c34be766a786b
|
refs/heads/main
| 2023-05-21T05:04:06.372330
| 2021-06-14T00:32:34
| 2021-06-14T00:32:34
| 376,557,788
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 443
|
py
|
""" Implementar un algoritmo que permita ingresar dos númerosenteros
por teclado y luego muestre la suma, resta, multiplicación y división de
ambos números."""
a = int(input("Ingrese un número: "))
b = int(input("Ingrese el otro: "))
print ("El resultado de la suma es: ",a+b)
print ("El resultado de la resta es: ", a-b)
print ("El resultado de la multiplicación es: ", a*b)
print ("El resultado de la división es: ", a//b)
|
[
"noreply@github.com"
] |
EduZarate.noreply@github.com
|
f62b6f02dd845156b9c7c0fe2884823a58fed264
|
3746667a7b2c6d0c941a22a19929b0ffaf432a33
|
/starkapp/apps.py
|
1df7173440a063bf5d0441e5edfcaf9dc294327a
|
[] |
no_license
|
maxhope8/stark
|
fe4cf0a99614256a8d50f275ede3de3158779cea
|
4670686909b1f0e92b72f1791a0afe056da3d8f9
|
refs/heads/master
| 2020-11-28T03:31:16.493901
| 2019-12-23T07:01:21
| 2019-12-23T07:01:21
| 229,693,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
from django.apps import AppConfig
from django.utils.module_loading import autodiscover_modules
class StarkappConfig(AppConfig):
name = 'starkapp'
    # On startup, scan each installed app for the specified module (stark1.py) and import/execute it
def ready(self):
autodiscover_modules('stark1')
|
[
"18309221170@163.com"
] |
18309221170@163.com
|
b9622cc80db54887aced95f3b024315916271d8a
|
8eb9276ec6b9af26b4161e4f81fd8c21a3fcac22
|
/home/views.py
|
0484af25188d3f79482afe1f4a89ad3177287729
|
[] |
no_license
|
olufekosamuel/bincomtest
|
4cd4fdf38926ad30bed29bfac432debab1101bd3
|
0c138009014552e41c3df2e331d2ea1abd170c74
|
refs/heads/master
| 2020-08-15T02:09:29.954537
| 2019-10-15T16:53:41
| 2019-10-15T16:53:41
| 215,264,962
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,873
|
py
|
from datetime import datetime
import datetime
from smtplib import SMTPException
from urllib.parse import urlparse
from django.conf import settings
from django.contrib.auth.hashers import check_password
from django.core.mail import send_mail, BadHeaderError
from django.shortcuts import render,redirect, reverse, get_object_or_404
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required, permission_required
from django.template.response import TemplateResponse
from django.utils.datastructures import MultiValueDictKeyError
from django.views.decorators.csrf import csrf_exempt
from django.contrib import messages
#from django.core.mail import send_mail
from django.http import HttpResponse, HttpResponseRedirect
from datetime import timedelta
from django.views.decorators.csrf import csrf_protect
from django.http import JsonResponse
from .models import *
def index(request):
pol = PollingUnit.objects.get(uniqueid=8)
return render(request,'index.html')
def polling(request):
pol = PollingUnit.objects.all()
return render(request, 'poll.html', {'poll':pol})
def result(request, id):
try:
res = AnnouncedPuResults.objects.filter(polling_unit_uniqueid=id)
print(res)
except AnnouncedPuResults.DoesNotExist:
pass
return render(request, 'result.html', {'result':res})
def lgalist(request):
lga = Lga.objects.all()
party = Party.objects.all()
return render(request, 'lga.html', {'locals':lga,'party':party})
def lgaresult(request, id, party):
result = []
total = 0
polls = PollingUnit.objects.filter(lga_id=id)
if party== "LABOUR":
party="LABO"
for poll in polls:
results = AnnouncedPuResults.objects.filter(polling_unit_uniqueid=poll.polling_unit_id,party_abbreviation=party)
for result in results:
total += result.party_score
return JsonResponse({'totstat': total})
def storeresult(request):
party = Party.objects.all()
users = Agentname.objects.all()
wards = Ward.objects.all()
Laga = Lga.objects.all()
if request.method == "POST":
name = request.POST['pollname']
no = request.POST['pollno']
desc = request.POST['description']
ward = request.POST['ward']
lga = request.POST['lga']
user = request.POST['user']
polls = PollingUnit.objects.all().reverse()[0]
try:
PollingUnit.objects.get(polling_unit_number=no)
error= "Polling Unit Already Exist"
return render(request, 'storeresult.html', {'party':party,'users':users,'wards':wards,'lga':lga,'error':error})
except PollingUnit.DoesNotExist:
poll = PollingUnit.objects.create(polling_unit_number=no,polling_unit_name=name,polling_unit_description=desc,lat=0,long=0,entered_by_user=user,user_ip_address=0,lga_id=lga,ward_id=ward,polling_unit_id=polls.polling_unit_id+1,)
poll.save()
for part in party:
if part.partyname== "LABOUR":
res = AnnouncedPuResults.objects.create(party_abbreviation="LABO",party_score=request.POST[part.partyname],entered_by_user=user,user_ip_address=0,polling_unit_uniqueid=poll.polling_unit_id,date_entered=datetime.datetime.now())
else:
res = AnnouncedPuResults.objects.create(party_abbreviation=part.partyname,party_score=request.POST[part.partyname],entered_by_user=user,user_ip_address=0,polling_unit_uniqueid=poll.polling_unit_id,date_entered=datetime.datetime.now())
res.save()
message = "Results submitted successfully"
return render(request, 'storeresult.html', {'party':party,'users':users,'wards':wards,'lga':Laga,'message':message})
return render(request, 'storeresult.html', {'party':party,'users':users,'wards':wards,'lga':Laga})
|
[
"mololuwasamuel12@gmail.com"
] |
mololuwasamuel12@gmail.com
|
bc73fb03c4c94707f8f879d786c89b45091a67da
|
d131a239dc3a40966301721fb359eb11d3854280
|
/IA_Notebook_2.py
|
b7427c01dec4b1112940880f73c96f0d87fc3663
|
[] |
no_license
|
FORGIS98/ia-notebook-dotcsv
|
b8aad39cd9c659acaeccbbd2209a4b6b005d33af
|
c30b8e09e003cd3fd7d30a40692cf18a8e6f2412
|
refs/heads/main
| 2023-06-27T23:49:00.100011
| 2021-08-04T09:18:33
| 2021-08-04T09:18:33
| 388,564,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,507
|
py
|
#!/usr/bin/env python
# coding: utf-8
# The goal is to fool the **Inception V3** neural network with an adversarial attack. But first, let's check that the network works on an image.
import tensorflow as tf # platform for machine learning
# Disable eager execution because it breaks when we use keras.gradients() later on
tf.compat.v1.disable_eager_execution()
import keras # deep learning framework/api
import matplotlib.pyplot as plt
import numpy as np
from keras.applications.inception_v3 import InceptionV3, decode_predictions # import the pre-trained network
from keras import backend as k
iv3 = InceptionV3() # load the network; the first time, the weights are downloaded from the internet
# print(iv3.summary()) # show a summary of the network
from keras.preprocessing import image
# To feed an image into InceptionV3, the image has to meet some requirements
# Load the image at 299x299 (the size iv3 expects) and convert it to a data matrix
coche_2 = image.img_to_array(image.load_img("./img/Notebook_2/coche_2.jpg", target_size=(299,299)))
# Rescale the range from [0, 255] to [-1, 1], because
# that is how iv3 expects the image data to be passed
coche_2 /= 255
coche_2 -= 0.5
coche_2 *= 2
# The network expects the input to be a tensor
# with one more dimension than the image we pass in;
# the first dimension is the batch size,
# which is useful if we want to feed several images at once
coche_2 = coche_2.reshape([1, coche_2.shape[0], coche_2.shape[1], coche_2.shape[2]])
# Done: the variable is now ready to be passed to the model
resultado = iv3.predict(coche_2)
decode_predictions(resultado)
# Now let's break the model with an `Adversarial Attack`. The goal is to generate an image that fools the network while keeping the changes imperceptible to the human eye.
# We want to build a graph representing an optimisation process in which we maximise the probability
# that a particular class appears. We want the network to predict a specific class: roughly, we give it a cat
# and we want the network to decide the cat is a lemon.
# For that we only need the input and output points of the network:
input_layer = iv3.layers[0].input # first layer of the network, specifically its input tensor
out_layer = iv3.layers[-1].output # last layer of the network, i.e. its output
target_class = 951 # index of the "lemon" class (as given by DotCSV)
# The cost function is the probability of class 951: from the whole probability vector
# the network returns, we take row 0, column 951, which is the value we want to maximise (the lemon)
loss = out_layer[0, target_class]
# Gradient with respect to the input variable,
gradiente = k.gradients(loss, input_layer)[0] # compute the gradient of the cost with respect to the input tensor
# We give this function the inputs and outputs: the input is
# input_layer plus the learning-phase flag, and the output
# is the gradient and the cost.
optimize_gradient = k.function([input_layer, k.learning_phase()], [gradiente, loss])
copy_coche_2 = np.copy(coche_2) # copy so we do not modify the original image
cost = 0.0
# Keep the changes to the image small so they are not perceptible
# to the human eye (otherwise heavily saturated pixels appear
# and it becomes obvious that the image has been modified).
max_perturb = coche_2 + 0.01
min_perturb = coche_2 - 0.01
while cost < 0.95:
new_grad, cost = optimize_gradient([copy_coche_2, 0])
copy_coche_2 += new_grad
    # Values outside the range are clamped to the specified min or max
    # e.g. [3, 2, 5, 2, 1, -2] with range [0, 2] -> [2, 2, 2, 2, 1, 0]
copy_coche_2 = np.clip(copy_coche_2, min_perturb, max_perturb)
copy_coche_2 = np.clip(copy_coche_2, -1, 1)
# print("Verbose:", cost)
# Earlier we reshaped and rescaled the image matrix so the network
# could understand it; to display it on screen we now have to
# convert it back to the usual format
copy_coche_2 /= 2
copy_coche_2 += 0.5
copy_coche_2 *= 255
plt.imshow(copy_coche_2[0].astype(np.uint8))
plt.show()
from PIL import Image
# It is important to save as png: saving as jpg loses pixel-level
# precision and the network then manages to correctly identify
# what is really in the photo
Image.fromarray(copy_coche_2[0].astype(np.uint8)).save("lemon.png")
|
[
"jorgesolgonzalez1998@gmail.com"
] |
jorgesolgonzalez1998@gmail.com
|
dd405bcc91982cd7b3a2368fbc2ccd80f9bdd887
|
d42e8c23b5a6cd221b2971469f7266fcb767c8c1
|
/Unet_CTseg.py
|
6fcacf7f5f5f36472db519dec237acf02679b324
|
[] |
no_license
|
yBourne/CTImgSegmentation
|
cec18a53e93a579d55853e44e52793df8f49cb79
|
b3d68e4e3b4f671d9f67ca8cbfab1e7c9a43a4e8
|
refs/heads/master
| 2022-12-10T06:47:45.146371
| 2020-09-07T11:24:52
| 2020-09-07T11:24:52
| 293,469,158
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,531
|
py
|
#!/usr/bin/env python
# coding: utf-8
import os
import sys
import random
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
from itertools import chain
from skimage.io import imread, imshow, imread_collection, concatenate_images, imsave
from skimage.transform import resize
from skimage.morphology import label
from skimage.color import rgb2gray
from keras.models import Model, load_model
from keras.layers import Input
from keras.layers.core import Dropout, Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import backend as K
import tensorflow as tf
#Set some parameters
IMG_WIDTH = 128
IMG_HEIGHT = 128
IMG_CHANNELS = 3
TRAIN_PATH = './dataset/train_data-2/'
TEST_PATH = './dataset/test_data-2/'
# VALIDATION_PATH = './dataset/validation_data/'
warnings.filterwarnings('ignore', category=UserWarning, module='skimage')
seed = 42
random.seed = seed
np.random.seed = seed
# In[3]:
#Get train and test IDs
train_ids = next(os.walk(TRAIN_PATH))[2]
test_ids = next(os.walk(TEST_PATH))[2]
# val_ids = next(os.walk(VALIDATION_PATH))[2]
# In[8]:
test_ids
# In[4]:
X_train = np.zeros((int(len(train_ids)/2), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
Y_train = np.zeros((int(len(train_ids)/2), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
print('Getting and resizing train images and masks ...')
sys.stdout.flush()
for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):
if id_=='desktop.ini':
continue
path = TRAIN_PATH + id_
if n%2 == 0:
img = imread(path)[:,:,:IMG_CHANNELS]
img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)#mark
X_train[int(n/2)] = img
# mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 3), dtype=np.unint8)
# for mask_file in next(os.walk(path +'/masks/'))[2]:#mark
# mask_ = imread(path + '/masks/' + mask_file)
# mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
# preserve_range=True), axis=-1)#mark
# mask = np.maximum(mask, mask_)
else:
Y_train[int(n/2), :, :, 0] = rgb2gray(imread(path))
# Get and resize test images
X_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
# sizes_test = []
print('Getting and resizing test images ...')
sys.stdout.flush()
for n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)):
path = TEST_PATH + id_
img = imread(path)[:,:,:IMG_CHANNELS]
# sizes_test.append([img.shape[0], img.shape[1]])#mark
img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
X_test[n] = img
# Get validation data
# X_val = np.zeros((int(len(val_ids)/2), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
# Y_val = np.zeros((int(len(val_ids)/2), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
# print('Getting and resizing validation images ...')
# sys.stdout.flush()
# for n, id_ in tqdm(enumerate(val_ids), total=len(val_ids)):
# if os.path.splitext(id_)[-1]!='.png':
# continue
# path = VALIDATION_PATH + id_
# if n%2 == 0:
# img = imread(path)[:,:,:IMG_CHANNELS]
# img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)#mark
# X_val[int(n/2)] = img
# else:
# Y_val[int(n/2), :, :, 0] = rgb2gray(imread(path))
print('Done!')
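# Added sanity-check sketch (not part of the original script): confirm the paired
# image/mask loading produced consistent array shapes before building the model.
print('X_train:', X_train.shape, 'Y_train:', Y_train.shape, 'X_test:', X_test.shape)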
# In[5]:
# Define IoU metric
def mean_iou(y_true, y_pred):
prec = []
for t in np.arange(0.5, 1.0, 0.05):
y_pred_ = tf.to_int32(y_pred > t)
score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
K.get_session().run(tf.local_variables_initializer())
with tf.control_dependencies([up_opt]):
score = tf.identity(score)
prec.append(score)
return K.mean(K.stack(prec), axis=0)
## intersection over union
def IoU(y_true, y_pred, eps=1e-6):
if np.max(y_true) == 0.0:
return IoU(1-y_true, 1-y_pred) ## empty image; calc IoU of zeros
intersection = K.sum(y_true * y_pred, axis=[1,2,3])
union = K.sum(y_true, axis=[1,2,3]) + K.sum(y_pred, axis=[1,2,3]) - intersection
return -K.mean( (intersection + eps) / (union + eps), axis=0)
# Original source: https://blog.csdn.net/m0_37477175/article/details/83004746
def dice_coef(y_true, y_pred, smooth=1):
intersection = K.sum(y_true * y_pred, axis=[1,2,3])
union = K.sum(y_true, axis=[1,2,3]) + K.sum(y_pred, axis=[1,2,3])
return K.mean( (2. * intersection + smooth) / (union + smooth), axis=0)
def dice_coef_loss(y_true, y_pred):
return 1 - dice_coef(y_true, y_pred, smooth=1)
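# Added illustration (not part of the original script): a NumPy-only check of the Dice
# formula above on two tiny masks that overlap in a single pixel.
# y_true = [[1, 0], [0, 0]], y_pred = [[1, 1], [0, 0]]
# intersection = 1, sum of both masks = 3, so Dice = 2 * 1 / 3 ~= 0.667 (ignoring smoothing).
toy_true = np.array([[1., 0.], [0., 0.]])
toy_pred = np.array([[1., 1.], [0., 0.]])
print('toy Dice:', 2. * (toy_true * toy_pred).sum() / (toy_true.sum() + toy_pred.sum()))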
# import cv2
# def mean_count(y_true, y_pred):
# # print(K.is_tensor(y_true), type(y_pred))
# try:
# count1, markers_true = cv2.connectedComponents(K.eval(y_true),connectivity=8)
# count2, markers_pred = cv2.connectedComponents(K.eval(y_pred),connectivity=8)
# except:
# return K.zeros(shape=(1))
# return K.cast_to_floatx(count2/count1*0.5)
# In[6]:
# Build U-Net model
from keras import metrics
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
s = Lambda(lambda x: x / 255) (inputs)
c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (s)
c1 = Dropout(0.1) (c1)
c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c1)
p1 = MaxPooling2D((2, 2)) (c1)
c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p1)
c2 = Dropout(0.1) (c2)
c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c2)
p2 = MaxPooling2D((2, 2)) (c2)
c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p2)
c3 = Dropout(0.2) (c3)
c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c3)
p3 = MaxPooling2D((2, 2)) (c3)
c4 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p3)
c4 = Dropout(0.2) (c4)
c4 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c4)
p4 = MaxPooling2D(pool_size=(2, 2)) (c4)
c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p4)
c5 = Dropout(0.3) (c5)
c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c5)
u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same') (c5)
u6 = concatenate([u6, c4])
c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u6)
c6 = Dropout(0.2) (c6)
c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c6)
u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same') (c6)
u7 = concatenate([u7, c3])
c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u7)
c7 = Dropout(0.2) (c7)
c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c7)
u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same') (c7)
u8 = concatenate([u8, c2])
c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u8)
c8 = Dropout(0.1) (c8)
c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c8)
u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same') (c8)
u9 = concatenate([u9, c1], axis=3)
c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u9)
c9 = Dropout(0.1) (c9)
c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c9)
outputs = Conv2D(1, (1, 1), activation='sigmoid') (c9)
model = Model(inputs=[inputs], outputs=[outputs])
model.compile(optimizer='adam', loss=IoU, metrics=[mean_iou, 'mae', 'acc'])#dice_coef_loss, 'binary_crossentropy',mean_absolute_error
model.summary()
# In[7]:
# Fit model
earlystopper = EarlyStopping(patience=30, verbose=1)
checkpointer = ModelCheckpoint('model-CTseg-9wts.h5', verbose=1, save_best_only=True)
results = model.fit(X_train, Y_train, validation_split=0.1, batch_size=16, epochs=80,
callbacks=[earlystopper, checkpointer])# validation_data=(X_val, Y_val)
# In[8]:
# Plot training & validation accuracy
# results.history
plt.plot(results.history['acc'])
plt.plot(results.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('ACC')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Plot training & validation loss
plt.plot(results.history['loss'][1:])
plt.plot(results.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# In[ ]:
results.history.keys()
# In[10]:
# Predict on train, val and test
# model = load_model('model-CTseg-8IoUloss-2.h5', custom_objects={'mean_iou': mean_iou,'dice_coef_loss':dice_coef_loss, 'IoU':IoU})
preds_train = model.predict(X_train[:int(X_train.shape[0]*0.9)], verbose=1)
preds_val = model.predict(X_train[int(X_train.shape[0]*0.9):], verbose=1)
preds_test = model.predict(X_test, verbose=1)
# Threshold predictions
preds_train_t = (preds_train == 1).astype(np.uint8)
preds_val_t = (preds_val > 0.5).astype(np.uint8)
preds_test_t = (preds_test > 0).astype(np.uint8)
# Create list of upsampled test masks
preds_test_upsampled = []
for i in range(len(preds_test)):
preds_test_upsampled.append(resize(np.squeeze(preds_test[i]),
(160, 160),
mode='constant', preserve_range=True))
print('finish')
# In[12]:
# Perform a sanity check on some random training samples
ix = random.randint(0, len(preds_train_t))
imshow(X_train[ix])
plt.show()
imshow(np.squeeze(Y_train[ix]))
plt.show()
imshow(np.squeeze(preds_train_t[ix]), cmap='gray')
plt.show()
# In[22]:
# preds_test = model.predict(X_test, verbose=1)
# preds_test_t = (preds_test == 1).astype(np.uint8)
# Perform a sanity check on some random test samples
ix = random.randint(0, len(preds_test_t))
# ix = 0
imshow(X_test[ix])
plt.show()
imshow(np.squeeze(preds_test_t[ix]), cmap='gray')
plt.show()
# In[27]:
# Save the predictions
for n, img in enumerate(preds_test_upsampled):
imsave(TEST_PATH+str(n+1)+'pred.png', img)
# In[2]:
import numpy as np
help(np.squeeze)
|
[
"49144395+yBourne@users.noreply.github.com"
] |
49144395+yBourne@users.noreply.github.com
|
cec03f25f354aaa3f99e4de8a868b3777d100efc
|
0010a92176b766f4bdf37c1144fa0f724cfaf564
|
/env/lib/python3.5/site-packages/aliyunsdkecs/request/v20140526/CreateImageRequest.py
|
5fcd8e48fafcdcaf41af6211a1c3634952c20daa
|
[] |
no_license
|
pengjinfu/My-Admin
|
bc2d8b53da8be0fad60e1d8979bdca3f2c4560d9
|
26206d1def673adb7dfe5c8044c654a0e65320d1
|
refs/heads/master
| 2021-08-30T02:17:57.432743
| 2017-12-15T17:05:05
| 2017-12-15T17:05:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,655
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateImageRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CreateImage')
def get_DiskDeviceMappings(self):
return self.get_query_params().get('DiskDeviceMappings')
def set_DiskDeviceMappings(self,DiskDeviceMappings):
for i in range(len(DiskDeviceMappings)):
if DiskDeviceMappings[i].get('Size') is not None:
self.add_query_param('DiskDeviceMapping.' + bytes(i + 1) + '.Size' , DiskDeviceMappings[i].get('Size'))
if DiskDeviceMappings[i].get('SnapshotId') is not None:
self.add_query_param('DiskDeviceMapping.' + bytes(i + 1) + '.SnapshotId' , DiskDeviceMappings[i].get('SnapshotId'))
if DiskDeviceMappings[i].get('Device') is not None:
self.add_query_param('DiskDeviceMapping.' + bytes(i + 1) + '.Device' , DiskDeviceMappings[i].get('Device'))
if DiskDeviceMappings[i].get('DiskType') is not None:
self.add_query_param('DiskDeviceMapping.' + bytes(i + 1) + '.DiskType' , DiskDeviceMappings[i].get('DiskType'))
def get_Tag4Value(self):
return self.get_query_params().get('Tag.4.Value')
def set_Tag4Value(self,Tag4Value):
self.add_query_param('Tag.4.Value',Tag4Value)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_SnapshotId(self):
return self.get_query_params().get('SnapshotId')
def set_SnapshotId(self,SnapshotId):
self.add_query_param('SnapshotId',SnapshotId)
def get_Tag2Key(self):
return self.get_query_params().get('Tag.2.Key')
def set_Tag2Key(self,Tag2Key):
self.add_query_param('Tag.2.Key',Tag2Key)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_Tag3Key(self):
return self.get_query_params().get('Tag.3.Key')
def set_Tag3Key(self,Tag3Key):
self.add_query_param('Tag.3.Key',Tag3Key)
def get_Platform(self):
return self.get_query_params().get('Platform')
def set_Platform(self,Platform):
self.add_query_param('Platform',Platform)
def get_Tag1Value(self):
return self.get_query_params().get('Tag.1.Value')
def set_Tag1Value(self,Tag1Value):
self.add_query_param('Tag.1.Value',Tag1Value)
def get_ImageName(self):
return self.get_query_params().get('ImageName')
def set_ImageName(self,ImageName):
self.add_query_param('ImageName',ImageName)
def get_Tag3Value(self):
return self.get_query_params().get('Tag.3.Value')
def set_Tag3Value(self,Tag3Value):
self.add_query_param('Tag.3.Value',Tag3Value)
def get_Architecture(self):
return self.get_query_params().get('Architecture')
def set_Architecture(self,Architecture):
self.add_query_param('Architecture',Architecture)
def get_Tag5Key(self):
return self.get_query_params().get('Tag.5.Key')
def set_Tag5Key(self,Tag5Key):
self.add_query_param('Tag.5.Key',Tag5Key)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_Tag5Value(self):
return self.get_query_params().get('Tag.5.Value')
def set_Tag5Value(self,Tag5Value):
self.add_query_param('Tag.5.Value',Tag5Value)
def get_Tag1Key(self):
return self.get_query_params().get('Tag.1.Key')
def set_Tag1Key(self,Tag1Key):
self.add_query_param('Tag.1.Key',Tag1Key)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_Tag2Value(self):
return self.get_query_params().get('Tag.2.Value')
def set_Tag2Value(self,Tag2Value):
self.add_query_param('Tag.2.Value',Tag2Value)
def get_ImageVersion(self):
return self.get_query_params().get('ImageVersion')
def set_ImageVersion(self,ImageVersion):
self.add_query_param('ImageVersion',ImageVersion)
def get_Tag4Key(self):
return self.get_query_params().get('Tag.4.Key')
def set_Tag4Key(self,Tag4Key):
self.add_query_param('Tag.4.Key',Tag4Key)
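# Added usage sketch (not part of the generated SDK file): a typical call through the
# core client. The access key, secret, region, and resource IDs below are placeholders.
if __name__ == '__main__':
    from aliyunsdkcore.client import AcsClient
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = CreateImageRequest()
    request.set_InstanceId('i-example')
    request.set_ImageName('my-image')
    print(client.do_action_with_exception(request))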
|
[
"dylan@zhxfei.com"
] |
dylan@zhxfei.com
|
6c1c35ef28e08ac096358de3535ce5d1f50ca604
|
e57d7785276053332c633b57f6925c90ad660580
|
/sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/aio/operations/_shared_keys_operations.py
|
e209ceb60ee78a0cc0c90df3c27836f9fb07693b
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
adriananeci/azure-sdk-for-python
|
0d560308497616a563b6afecbb494a88535da4c5
|
b2bdfe659210998d6d479e73b133b6c51eb2c009
|
refs/heads/main
| 2023-08-18T11:12:21.271042
| 2021-09-10T18:48:44
| 2021-09-10T18:48:44
| 405,684,423
| 1
| 0
|
MIT
| 2021-09-12T15:51:51
| 2021-09-12T15:51:50
| null |
UTF-8
|
Python
| false
| false
| 7,719
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SharedKeysOperations:
"""SharedKeysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.loganalytics.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get_shared_keys(
self,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> "_models.SharedKeys":
"""Gets the shared keys for a workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SharedKeys, or the result of cls(response)
:rtype: ~azure.mgmt.loganalytics.models.SharedKeys
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedKeys"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get_shared_keys.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SharedKeys', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_shared_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/sharedKeys'} # type: ignore
async def regenerate(
self,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> "_models.SharedKeys":
"""Regenerates the shared keys for a Log Analytics Workspace. These keys are used to connect
Microsoft Operational Insights agents to the workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SharedKeys, or the result of cls(response)
:rtype: ~azure.mgmt.loganalytics.models.SharedKeys
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedKeys"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.regenerate.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SharedKeys', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
regenerate.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/regenerateSharedKey'} # type: ignore
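# Added usage sketch (not part of the generated SDK file): calling this operations group
# through the async management client; the subscription, resource group, and workspace
# names below are placeholders, and the client exposes this class as its `shared_keys`
# attribute.
if __name__ == "__main__":
    import asyncio
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.loganalytics.aio import LogAnalyticsManagementClient

    async def _show_shared_keys():
        async with DefaultAzureCredential() as credential:
            async with LogAnalyticsManagementClient(credential, "<subscription-id>") as client:
                keys = await client.shared_keys.get_shared_keys("<resource-group>", "<workspace>")
                print(keys.primary_shared_key is not None)

    asyncio.run(_show_shared_keys())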
|
[
"noreply@github.com"
] |
adriananeci.noreply@github.com
|
c0ef2d87ef5e7fd809684761560fd8507a7c3e1f
|
a199d75cd956ee288709db95cd9f7e83bd105629
|
/jz21.py
|
6c2a0d4f64d67a0da94901794f1350aa5558c512
|
[] |
no_license
|
yikouniao/JianZhiOffer
|
5defc9a9ec3b2f1a5d730074b5715125abee1f06
|
fdb498e4954833fba0ff207c1569bca332380fdf
|
refs/heads/main
| 2023-07-31T18:07:31.767759
| 2021-09-15T03:19:54
| 2021-09-15T03:19:54
| 399,709,142
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
py
|
# https://leetcode-cn.com/problems/diao-zheng-shu-zu-shun-xu-shi-qi-shu-wei-yu-ou-shu-qian-mian-lcof/
from typing import List
class Solution:
def exchange(self, nums: List[int]) -> List[int]:
odd = [num for num in nums if num % 2]
even = [num for num in nums if not num % 2]
return odd + even
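# Added check (not part of the original submission): odd numbers come first and even
# numbers follow, each group keeping its original relative order.
if __name__ == "__main__":
    print(Solution().exchange([1, 2, 3, 4, 5]))  # [1, 3, 5, 2, 4]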
|
[
"noreply@github.com"
] |
yikouniao.noreply@github.com
|
f8738efaaf8b348ef5e99c3f6300bacf6df4a265
|
8f9df03ace0b6e4677b79e34a766f43dbafbd334
|
/HW1/Hongyi Nie/graph_adjacency_list.py
|
b974a8eef1128be7a884d8be8e12394e8659bf77
|
[] |
no_license
|
sylvianie98/Algorithms-and-Data-Structures
|
04e5c716cfee80486eaae5e2ce51b8c4a84ac723
|
d7f7688d8909ef6dcdb29ba638f36497771d71bd
|
refs/heads/main
| 2023-06-08T22:42:03.348272
| 2021-07-03T23:03:01
| 2021-07-03T23:03:01
| 382,723,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,418
|
py
|
# Hongyi Nie, hn327
# Please see instructions.pdf for the description of this problem.
# An implementation of a weighted, directed graph as an adjacency list. This
# means that it's represented as a map from each node to a list of its
# respective adjacent nodes.
class Graph:
def __init__(self):
# DO NOT EDIT THIS CONSTRUCTOR
self.graph = {}
def add_edge(self, node1, node2, weight):
# Adds a directed edge from `node1` to `node2` to the graph with weight defined by `weight`.
# TODO: YOUR CODE HERE, delete the `raise NotImplementedError` line below once you finish writing your code
if node1 in self.graph:
self.graph[node1].append((node2,weight))
else:
self.graph[node1] = [(node2,weight)]
def has_edge(self, node1, node2):
# Returns whether the graph contains an edge from `node1` to `node2`.
# DO NOT EDIT THIS METHOD
if node1 not in self.graph:
return False
return node2 in [x for (x,i) in self.graph[node1]]
def get_neighbors(self, node):
# Returns the neighbors of `node` as a list of tuples [(x, y), ...] where
# `x` is the neighbor node, and `y` is the weight of the edge from `node`
# to `x`.
# TODO: YOUR CODE HERE, delete the `raise NotImplementedError` line below once you finish writing your code
if node not in self.graph:
return []
return self.graph[node]
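# Added usage sketch (not part of the assignment file): build a tiny graph and query it.
if __name__ == "__main__":
    g = Graph()
    g.add_edge("a", "b", 3)
    g.add_edge("a", "c", 1)
    print(g.has_edge("a", "b"))   # True
    print(g.has_edge("b", "a"))   # False (edges are directed)
    print(g.get_neighbors("a"))   # [('b', 3), ('c', 1)]
    print(g.get_neighbors("z"))   # [] (node not in the graph)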
|
[
"noreply@github.com"
] |
sylvianie98.noreply@github.com
|
8f966c091fe8a880c52ace080de665e6e1a0bb02
|
743e3624fe1c5eb7c1daa1154aafe72d18977ab5
|
/pridesport_work/sportgoods/migrations/0001_initial.py
|
7ba1f47cb0a4afdd2f2ef4fe47bed993d4e38ff5
|
[
"MIT"
] |
permissive
|
Trifon87/pridesport_work
|
54f958eb0861a872f986d84573afe4bc4ef63fcc
|
9ab47d3c58915c2e791bf8a1fcb3ceee1d8de62c
|
refs/heads/main
| 2023-01-29T12:24:02.790164
| 2020-12-13T11:04:08
| 2020-12-13T11:04:08
| 319,147,749
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
# Generated by Django 3.1.3 on 2020-11-12 17:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Pet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('dog', 'Dog'), ('cat', 'Cat'), ('parrot', 'Parrot'), ('unknown', 'Unknown')], default='unknown', max_length=7)),
('name', models.CharField(max_length=6)),
('price', models.FloatField()),
('description', models.TextField(blank=True)),
('image_url', models.URLField()),
],
),
migrations.CreateModel(
name='Like',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sportgoods.pet')),
],
),
]
|
[
"trifon.ivanov@gmail.com"
] |
trifon.ivanov@gmail.com
|
169cc6e3a08adc088826a5b3ab17e5fcb13c6c44
|
b976a3ca1e9cb98a9c90e57243255d0a8ace3572
|
/Probability & Statistics/pharmacy_multi_regression.py
|
911ba06a6717cd97204579ffadd3597f75e39138
|
[
"MIT"
] |
permissive
|
akhilvydyula/Data-Science-and-Machine-Learning-Projects-Dojo
|
fbe9408818cbfdb31d7fa0e52d9566bab998b9e1
|
4e2932dfa6749b360a7a605050c953ef52fc6547
|
refs/heads/master
| 2023-05-06T00:42:57.787384
| 2021-05-28T06:40:25
| 2021-05-28T06:40:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
"""
A pharmacy delivers medications to the surrounding community.
Drivers can make several stops per delivery.
The owner would like to predict the length of time a delivery will take based on one or two related variables.
"""
from sklearn.linear_model import LinearRegression
x1, x2 = [1,3,2,3,1], [8,4,9,6,3]
y = [29, 31, 36, 35, 19]
reg = LinearRegression()
reg.fit(list(zip(x1,x2)), y)
b1, b2 = reg.coef_[0], reg.coef_[1]
b0 = reg.intercept_
print(f'y = {b0:.{3}} + {b1:.{3}}x1 + {b2:.{3}}x2')
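# Added follow-up (not part of the original script): once fitted, the same model can
# predict the delivery time for a new trip; the values 2 and 5 are made-up inputs in
# whatever units x1 and x2 represent in the exercise.
print('predicted y for x1=2, x2=5:', reg.predict([[2, 5]])[0])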
|
[
"ptyadana@users.noreply.github.com"
] |
ptyadana@users.noreply.github.com
|
b137998baadcf6c1c7eddef0dd667c340d56e435
|
6a41f12ddb104c4f214fa8bf2864860a8952d17c
|
/books_crawler/books_crawler/settings.py
|
7916845ebd89642dd40df20c0bfb0f0e827a9905
|
[] |
no_license
|
jakiiii/Web-Scraping-Scratch
|
39bb32ea2044e6c4e52ee58ea88794f2a77d75cd
|
46cd54d3a06d70cef070f47b3c15b530691c3187
|
refs/heads/master
| 2020-04-21T00:34:50.736222
| 2019-02-07T06:38:54
| 2019-02-07T06:38:54
| 169,200,752
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,194
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for books_crawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'books_crawler'
SPIDER_MODULES = ['books_crawler.spiders']
NEWSPIDER_MODULE = 'books_crawler.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'books_crawler (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'books_crawler.middlewares.BooksCrawlerSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'books_crawler.middlewares.BooksCrawlerDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'scrapy.pipelines.images.ImagesPipeline': 1
}
IMAGES_STORE = '/home/jaki/Dev/WebScrapingScratch/images'
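# Added note (not part of the original settings): the ImagesPipeline enabled above also
# requires Pillow to be installed and expects items exposing `image_urls` / `images`
# fields (or custom names configured via IMAGES_URLS_FIELD / IMAGES_RESULT_FIELD).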
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"me.jaki@outlook.com"
] |
me.jaki@outlook.com
|
5fc70f024b6f44a29946ecfd1ddaff48ffd8d577
|
fa44fafdb236d71fb06add9f727c36f017c1c969
|
/tomef/tools/tools.py
|
c131a4fdae29f481d71824582a4d42fb09ffc94c
|
[
"MIT"
] |
permissive
|
unlikelymaths/tomef
|
43738717233d37df662c22051d72bd0554e664b2
|
57b629a3ee932486c55afcf62ef9d8224488ae65
|
refs/heads/master
| 2022-12-21T00:05:57.081200
| 2019-05-14T10:12:47
| 2019-05-14T10:12:47
| 179,456,896
| 1
| 0
|
MIT
| 2022-12-08T03:02:03
| 2019-04-04T08:37:40
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,541
|
py
|
#!/usr/bin/env python
# coding: utf-8
# # Tools
# <div style="position: absolute; right:0;top:0"><a href="../evaluation.py.ipynb" style="text-decoration: none"> <font size="5">↑</font></a></div>
#
# Various Scripts. Command line use:
#
# `python evaluation.py --script scriptname`
#
# ## Overview
# `scriptname` in brackets
# - [Export Results](./export_results.ipynb) (export)
# - [Clear Data](./clear_data.ipynb) (cleardata)
# In[ ]:
import argparse
from os.path import join, dirname
def run_tools_():
parser = argparse.ArgumentParser(description="Topic Modeling Evaluation Framework")
parser.add_argument('-s','--script',
action='store',
choices=['letter', 'wiki', 'tweetsodp', 'export', 'cleardata'],
help='Runs a script from the tools folder')
parser.add_argument('-p','--printconfig',
action='store_true',
help='Prints the configuration')
args = parser.parse_args()
if args.script:
if args.script == "letter":
pass
elif args.script == "wiki":
pass
elif args.script == "tweetsodp":
pass
elif args.script == "export":
from tools.export_results import main as export_results_main
export_results_main(join(dirname(__file__),'../'))
elif args.script == 'cleardata':
from tools.clear_data import main as clear_data_main
clear_data_main()
exit()
def run_tools():
try:
get_ipython
except:
run_tools_()
|
[
"markus.plack@gmail.com"
] |
markus.plack@gmail.com
|
02cb7b37f10449199d227b815a2628225cd6b57e
|
0571711577354ea057d78ab1d2fc031f5dd84d36
|
/pipeline/twopoint/calculate_gg_errors.py
|
7a1c0864e00346137851cf5d58cbce6fdd106e94
|
[] |
no_license
|
ssamuroff/mbii
|
404a069cde2a16417acff2ae21ac62ef1312ce14
|
9c4ee124b4a311e2f7ffc26cbfcfd3312d7ff73e
|
refs/heads/master
| 2021-03-24T09:49:31.751031
| 2019-01-09T17:55:22
| 2019-01-09T17:55:22
| 106,590,755
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,255
|
py
|
import fitsio as fi
import treecorr
import numpy as np
import argparse
import yaml
import mbii.lego_tools as util  # needed below for the 'equal' binning option (util.equalise_binning)
from mbii.pipeline.twopoint.jackknife import multipurpose_errors as errors
period={'massiveblackii':100, 'illustris':75}
def compute(options, binning):
print('Shape data : %s'%options['2pt']['shapes'])
data = fi.FITS(options['2pt']['shapes'])[-1].read()
data_sym = fi.FITS(options['2pt']['shapes_symmetrised'])[-1].read()
if 'npart_cut' in options['2pt'].keys():
nlow = options['2pt']['npart_cut']
print('Imposing additional cut at npart>%d'%nlow)
data = data[(data['npart_dm']>nlow)]
data_sym = data_sym[(data_sym['npart_dm']>nlow)]
else:
nlow=-1
if 'npart_cut_upper' in options['2pt'].keys():
nhigh = options['2pt']['npart_cut_upper']
print('Imposing additional cut at npart<%d'%nhigh)
data = data[(data['npart_dm']<nhigh)]
data_sym = data_sym[(data_sym['npart_dm']<nhigh)]
else:
nhigh = -1
splitflag = options['2pt']['split']
if splitflag is not None:
name = options['2pt']['split']
print('Dividing catalogue by %s'%name)
mask = (data[name]>=options['2pt']['split_val'])
print('%3.3f percent split'%(data[name][mask].size*100./data[name].size))
else:
print('No catalogue split required.')
mask = np.ones(data.size).astype(bool)
if options['2pt']['binning']=='log':
rbins = np.logspace(np.log10(options['2pt']['rmin']), np.log10(options['2pt']['rmax']), binning+1)
elif options['2pt']['binning']=='equal':
rbins = util.equalise_binning(data[mask], data[mask], options['2pt']['rmin'], options['2pt']['rmax'], binning+1)
print('Setting up correlations')
# don't need randoms here if we know the period of the box
print('Computing correlation functions.')
if splitflag:
suffix = '_splitby%s'%options['2pt']['split']
else:
suffix=''
if (nlow>=0):
suffix+='-ndm_part_low%d'%nlow
if (nhigh>0):
suffix+='-ndm_part_high%d'%nhigh
if splitflag:
print('11')
cat1 = data[mask]
cat1_sym = data_sym[mask]
F11,R11 = errors.jackknife('gg', cat1, cat1, cat1_sym, cat1_sym, options, nbins=binning)
export_array('%s/gg_var_11%s.txt'%(options['2pt']['savedir'], suffix), rbins, F11, R11)
cat2 = data[np.invert(mask)]
cat2_sym = data_sym[np.invert(mask)]
print('22')
F22,R22 = errors.jackknife('gg', cat2, cat2, cat2_sym, cat2_sym, options, nbins=binning)
export_array('%s/EE_var_22%s.txt'%(options['2pt']['savedir'], suffix), rbins, F22, R22)
print('12')
F12,R12 = errors.jackknife('gg', cat1, cat2, cat1_sym, cat2_sym, options, nbins=binning)
print('21')
F21,R21 = errors.jackknife('gg', cat2, cat1, cat2_sym, cat1_sym, options, nbins=binning)
export_array('%s/gg_var_12%s.txt'%(options['2pt']['savedir'], suffix), rbins, F12, R12)
export_array('%s/gg_var_21%s.txt'%(options['2pt']['savedir'], suffix), rbins, F21, R21)
print('00')
cat0 = data
cat0_sym = data_sym
F00,R00 = errors.jackknife('gg', cat0, cat0, cat0_sym, cat0_sym, options, nbins=binning)
export_array('%s/gg_var_00%s.txt'%(options['2pt']['savedir'], suffix), rbins, F00, R00)
print('Done')
def export_array(path, edges, result, error):
x = np.sqrt(edges[:-1]*edges[1:])
out = np.vstack((x, result, error))
print('Exporting to %s'%path)
np.savetxt(path, out.T)
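# Added illustration (not part of the pipeline): for log-spaced bins, the radii written
# out by export_array are the geometric means of adjacent bin edges.
if __name__ == '__main__':
    demo_edges = np.logspace(np.log10(0.1), np.log10(10.0), 3)   # [0.1, 1.0, 10.0]
    print(np.sqrt(demo_edges[:-1] * demo_edges[1:]))             # [~0.316, ~3.162]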
|
[
"ssamurof@andrew.cmu.edu"
] |
ssamurof@andrew.cmu.edu
|
6bc148e4670713ed88b82c4a0a14237077fc0130
|
0e27ba14e3ea4184619893686e18557932ca24f0
|
/pegawai/admin.py
|
500eca2b6bab68dee2a6e470c89f80e106814eee
|
[] |
no_license
|
affandyletto/aktualNew
|
2d8b87628729f534507aeabef077bd89359775d4
|
2db3d18e40f276a5a0a33685c180bb5a8bc9e54e
|
refs/heads/master
| 2023-01-19T18:24:16.942342
| 2020-11-21T06:24:48
| 2020-11-21T06:24:48
| 314,744,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
from django.contrib import admin
# Register your models here.
from .models import pegawai
from import_export.admin import ImportExportModelAdmin
@admin.register(pegawai)
class user(ImportExportModelAdmin):
pass
|
[
"letto.affandy@yahoo.com"
] |
letto.affandy@yahoo.com
|
9ecf9ee4f2dde940f3f1270467edae21b47aed08
|
660433016b82875198c198f06f1d58a91e3a752a
|
/bloodbank_proj/asgi.py
|
3d9909e015989f91d884363e11ccddcdef1db45b
|
[] |
no_license
|
TusharSarkar181/Blood_Bank_Management_System
|
7aae9c630e555acdc577f3042b50cf65783aba33
|
4bbc823b99d83e9a5827c95d254e259a80d4e606
|
refs/heads/master
| 2023-07-17T07:47:43.485745
| 2021-09-05T13:33:01
| 2021-09-05T13:33:01
| 383,558,618
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
"""
ASGI config for bloodbank_proj project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bloodbank_proj.settings')
application = get_asgi_application()
|
[
"tusharsarkar97@gmail.com"
] |
tusharsarkar97@gmail.com
|
80d8ce6609110537d0ace4a9b6e1e4ac37926939
|
b6ed92f8e38b2464dfbf045bf20c2ffc1cc37438
|
/setup.py
|
6274ecafe705f8205edfcbba9cdd003210420a41
|
[
"Apache-2.0"
] |
permissive
|
arbedout/peekaboo
|
33794b28bfc52e3271fe0510b31dcac17f3ab7d5
|
9199a6803a91bc7c185449df3ca0276d3d8854ea
|
refs/heads/master
| 2021-01-12T22:09:10.595941
| 2015-03-04T10:49:08
| 2015-03-04T10:49:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,393
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Setup script for peekaboo
'''
from setuptools import setup, find_packages
import sys, os, glob
with open('requirements.txt') as f:
requires = f.read().splitlines()
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
]
setup(
name = 'peekaboo',
version = '0.7-1',
description = 'Expose hardware info through HTTP',
long_description = open("README.rst").read(),
author = 'Michael Persson',
author_email = 'michael.ake.persson@gmail.com',
url = 'https://github.com/mickep76/peekaboo.git',
license = 'Apache License, Version 2.0',
packages = find_packages(),
classifiers = CLASSIFIERS,
scripts = ['scripts/peekaboo'],
data_files = [('/etc', ['etc/peekaboo.conf']),
('/var/lib/peekaboo/plugins/info', glob.glob('plugins/info/*.py')),
('/var/lib/peekaboo/plugins/status', glob.glob('plugins/status/*.py')),
('/usr/share/peekaboo', ['LICENSE', 'README.rst']),
('/usr/share/peekaboo/contrib', glob.glob('contrib/*'))],
install_requires = requires,
)
|
[
"michael.ake.persson@gmail.com"
] |
michael.ake.persson@gmail.com
|
ff74747105a771c51a28a917d9ce7ebfb5ce40de
|
c062847c2d2d7e1f0a15128b716b496425db84d2
|
/ex7/nation_mood.py
|
b168f24ce125991ea52a721b99afa3cf46ead0bb
|
[] |
no_license
|
aviadlevy/Intro2cs-huji-2014
|
3dae8c15d5292532e1d4cf58d3b655d98ccbe26e
|
d17b71ec5b98961fb536095926112ad1c926ecc9
|
refs/heads/master
| 2021-01-21T00:17:00.168468
| 2015-06-20T21:55:43
| 2015-06-20T21:55:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,894
|
py
|
from data import load_tweets
def most_talkative_state(tweets,find_state):
"""Return the state that has the largest number of tweets containing term.
"""
state = []
# build a list with the state/location of every tweet
for index in range(len(tweets)):
state.append(find_state(tweets[index]))
if len(state) == 0:
return
# return the state that appears most often in the list we built, using
# list.count as the key for max (the max value is the one with the highest count)
return max(state, key = state.count)
def average_sentiments(tweets_by_state,word_sentiments):
"""Calculate the average sentiment of the states by averaging over all
the tweets from each state. Return the result as a dictionary from state
names to average sentiment values (numbers).
If a state has no tweets with sentiment values, leave it out of the
dictionary entirely. Do NOT include states with no tweets, or with tweets
that have no sentiment, as 0. 0 represents neutral sentiment, not unknown
sentiment.
tweets_by_state -- A dictionary from state names to lists of tweets
"""
sentimentDict = {}
for state,tweets in tweets_by_state.items():
summarizeSentiment = 0
numOfCountedTweets = 0
for index in range(len(tweets)):
# if get_sentiment returned a value, add it to the running sum and
# increment the counter of tweets included in the average
if tweets[index].get_sentiment(word_sentiments) != None:
summarizeSentiment += \
tweets[index].get_sentiment(word_sentiments)
numOfCountedTweets += 1
# if at least one tweet was summed, store the state's average
# sentiment in the dictionary (state -> average)
if numOfCountedTweets != 0:
sentimentDict.update({state : summarizeSentiment
/ numOfCountedTweets})
return sentimentDict
def group_tweets_by_hour(tweets):
"""Return a list of lists of tweets that are gouped by the hour
they were posted.
The indexes of the returned list represent the hour when they were posted
- the integers 0 through 23.
tweets_by_hour[i] is the list of all
tweets that were posted between hour i and hour i + 1. Hour 0 refers to
midnight, while hour 23 refers to 11:00PM.
To get started, read the Python Library documentation for datetime
objects:
http://docs.python.org/py3k/library/datetime.html#datetime.datetime
tweets -- A list of tweets to be grouped
"""
llistOfLists = [[] for hour in range(24)] # create a list of 24 empty lists, one per hour
for index in range(len(tweets)):
time = tweets[index].get_time()
# append the tweet to the list for the hour it was posted (datetime.hour
# runs from 0 to 23, which matches the list indices)
llistOfLists[time.hour].append(tweets[index])
return llistOfLists
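# Added illustration (not part of the assignment): most_talkative_state relies on max()
# with key=list.count, which returns the most frequent element of a list.
if __name__ == '__main__':
    states = ['CA', 'TX', 'CA', 'NY', 'CA']
    print(max(states, key=states.count))  # 'CA'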
|
[
"aviadlevy1@gmail.com"
] |
aviadlevy1@gmail.com
|
be2207f28e2b5bb0f8ffca81258406938785ee7f
|
e71658fa2936e20d2d1b11e198562fc7c935c975
|
/Dados Brutos/spider.py
|
dfea85e2ec863754c20ccd701db6b2ba65570222
|
[] |
no_license
|
visualizacao-ufpe/producao_academica
|
3cbe855fe3baed217f60410c18bfb51839f978ef
|
c465da65b0017afee1d4a89270a20118be8a86a5
|
refs/heads/master
| 2021-01-12T02:51:54.678738
| 2017-01-05T15:27:39
| 2017-01-05T15:27:39
| 78,122,066
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,478
|
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
import time
import json
class Author(object):
"""docstring for Author"""
def __init__(self, order, name, category):
self.order = order
self.name = name
self.category = category
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
class Document(object):
def __init__(self, ies, program, year, name, type_, subtype_, authors, area, researchLine, researchProjet):
self.ies = ies
self.program = program
self.year = year
self.name = name
self.type_ = type_
self.subtype_ = subtype_
self.authors = authors
self.area = area
self.researchLine = researchLine
self.researchProjet = researchProjet
self.extension = None
def setExtension(self, DocExtension):
self.extension = DocExtension
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
class DocExtension(object):
def __init__(self):
self.info = "Doc Extension"
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
class Periodico(DocExtension):
def __init__(self, issn, nature, volume, fasciculo, serie, n_finalPage, n_startPage, idiom, advertising, url, observation, editor, city, doi):
self.issn = issn
self.nature = nature
self.volume = volume
self.fasciculo = fasciculo
self.serie = serie
self.n_finalPage = n_finalPage
self.n_startPage = n_startPage
self.idiom = idiom
self.advertising = advertising
self.url = url
self.observation = observation
self.editor = editor
self.city = city
self.doi = doi
class Livro(DocExtension):
def __init__(self, nature, editor, city_country, page_count, isbn, url, observation, title, year_fst_pusblish, tiragem, reedition, reimpresion, advertising_mode, idiom, editor_type, funding, name_funding, editorial_conselor, distribution, parecer, indice, contribution_type, award, award_date, reference_work, institution, indication_date, content_nature, author_info, page_count_contribution, editorial_city):
self.nature = nature
self.editor = editor
self.city_country = city_country
self.page_count = page_count
self.isbn = isbn
self.url = url
self.observation = observation
self.title = title
self.year_fst_pusblish = year_fst_pusblish
self.tiragem = tiragem
self.reedition = reedition
self.reimpresion = reimpresion
self.advertising_mode = advertising_mode
self.idiom = idiom
self.editor_type = editor_type
self.funding = funding
self.name_funding = name_funding
self.editorial_conselor = editorial_conselor
self.distribution = distribution
self.parecer = parecer
self.indice = indice
self.contribution_type = contribution_type
self.award = award
self.award_date = award_date
self.reference_work = reference_work
self.institution = institution
self.indication_date = indication_date
self.content_nature = content_nature
self.author_info = author_info
self.page_count_contribution = page_count_contribution
self.editorial_city = editorial_city
class Anais(DocExtension):
def __init__(self, nature, title, volume, fasciculo, serie, n_finalPage, n_startPage, event_name, event_city, event_country, idiom, isbn_issn, advertising, url, observation, edition):
self.nature = nature
self.title = title
self.volume = volume
self.fasciculo = fasciculo
self.serie = serie
self.n_finalPage = n_finalPage
self.n_startPage = n_startPage
self.event_name = event_name
self.event_city = event_city
self.event_country = event_country
self.idiom = idiom
self.isbn_issn = isbn_issn
self.advertising = advertising
self.url = url
self.observation = observation
self.edition = edition
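# Added sketch (not part of the original scraper): toJSON serializes nested objects by
# walking __dict__, so an Author prints as a sorted, indented JSON object; the name and
# category below are made-up example values.
_example_author = Author(1, "JOHN DOE", "DOCENTE")
print(_example_author.toJSON())  # keys: category, name, order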
driver = webdriver.Chrome()
driver.get("https://sucupira.capes.gov.br/sucupira/public/consultas/coleta/producaoIntelectual/listaProducaoIntelectual.jsf")
assert "Plataforma Sucupira" in driver.title
universidade_in = driver.find_element_by_id("form:j_idt45:inst:input")
universidade_in.clear();
universidade_in.send_keys("UFPE")
time.sleep(3)
select = Select(driver.find_element_by_id("form:j_idt45:inst:listbox"))
select.select_by_visible_text("25001019 UNIVERSIDADE FEDERAL DE PERNAMBUCO (UFPE)")
time.sleep(2)
course = Select(driver.find_element_by_name("form:j_idt45:j_idt122"))
course.select_by_visible_text(u"CIÊNCIAS DA COMPUTAÇÃO (25001019004P6)")
doc_type = Select(driver.find_element_by_name("form:tipo"))
doc_type.select_by_visible_text(u"BIBLIOGRÁFICA")
documents_links = []
for x in xrange(2013,2017):
submit = driver.find_element_by_id("form:consultar")
year = driver.find_element_by_id("form:j_idt45:ano")
year.clear()
year.send_keys(x)
submit.click()
time.sleep(3)
# check whether there are results available
warning = ""
try:
warning = driver.find_element_by_class_name("aviso").text
except Exception as e:
pass
if(warning is not None and warning == u'N\xe3o existem dados cadastrados para a pesquisa realizada.'):
print "Sem resultados para o ano " + str(x)
else:
reg_year = int(driver.find_elements(By.XPATH, "//div[@id='form:j_idt88:div_paginacao']/ul/li")[0].text.split("de")[1].split(" ")[1])
for x in xrange(1,reg_year, 50):
links_page = len(driver.find_elements(By.XPATH, "//table/tbody/tr/td/a"))
default_handler = driver.window_handles[0]
links = driver.find_elements(By.XPATH, "//table/tbody/tr/td/a[@href]")
for link in links:
documents_links.append(link.get_attribute("href"))
if reg_year > 50:
driver.find_element_by_class_name("pag-proxima").click()
time.sleep(3)
time.sleep(3)
print "["
for link in documents_links:
driver.get(link)
time.sleep(1)
authors = driver.find_elements(By.XPATH, "//table[@class='listagem']")[0].find_elements_by_tag_name("tr")
authors_arr = []
for z in xrange(1,len(authors)):
data = authors[z].find_elements_by_tag_name("td")
authors_arr.append(Author(int(data[0].text), data[1].text, data[2].text))
ies = driver.find_element_by_id("formView:ies").text
program = driver.find_element_by_id("formView:programa").text
year = driver.find_element_by_id("formView:periodo").text
name = driver.find_element_by_id("formView:nome").text
type_ = driver.find_element_by_id("formView:tipo").text
subtype_ = driver.find_element_by_id("formView:subtipo").text
context = driver.find_elements(By.XPATH, "//div[@class='form-container']/div[@class='form-grid-3']")[-3:]
area = context[0].text
researchLine = context[1].text
researchProjet = context[2].text
doc = Document(ies, program, year, name, type_, subtype_, authors_arr, area, researchLine, researchProjet)
subdata = driver.find_elements(By.XPATH, "//table[@class='listagem']/tbody/tr/td/div/div[@class='form-grid-3']")
if subtype_ == u"ARTIGO EM PERIÓDICO":
doc.setExtension(Periodico(
subdata[0].text,
subdata[1].text,
subdata[2].text,
subdata[3].text,
subdata[4].text,
subdata[5].text,
subdata[6].text,
subdata[7].text,
subdata[8].text,
subdata[9].text,
subdata[10].text,
subdata[11].text,
subdata[12].text,
subdata[13].text
))
elif subtype_ == u"LIVRO":
doc.setExtension(Livro(
subdata[0].text,
subdata[1].text,
subdata[2].text,
subdata[3].text,
subdata[4].text,
subdata[5].text,
subdata[6].text,
subdata[7].text,
subdata[8].text,
subdata[9].text,
subdata[10].text,
subdata[11].text,
subdata[12].text,
subdata[13].text,
subdata[14].text,
subdata[15].text,
subdata[16].text,
subdata[17].text,
subdata[18].text,
subdata[19].text,
subdata[20].text,
subdata[21].text,
subdata[22].text,
subdata[23].text,
subdata[24].text,
subdata[25].text,
subdata[26].text,
subdata[27].text,
subdata[28].text,
subdata[29].text,
subdata[30].text
))
elif subtype_ == u"TRABALHO EM ANAIS":
periodico = Anais(
subdata[0].text,
subdata[1].text,
subdata[2].text,
subdata[3].text,
subdata[4].text,
subdata[5].text,
subdata[6].text,
subdata[7].text,
subdata[8].text,
subdata[9].text,
subdata[10].text,
subdata[11].text,
subdata[12].text,
subdata[13].text,
subdata[14].text,
subdata[15].text)
doc.setExtension(periodico)
else:
print "Novo subtipo" + subtype_
print doc.toJSON() + ","
print "]"
driver.close()
|
[
"nivan.ferreira@gmail.com"
] |
nivan.ferreira@gmail.com
|
3f64bff5e5477cdf4957e7b0c3668de6b97d9d85
|
46dccb38e665324156c1eb868e234fcb32a9d222
|
/Bianca/Exercitii_curs/Python-exercitii/Curs4/Parcurgere_liste.py
|
e39e44fba05753566e031a71e3fd93a5bae1c994
|
[] |
no_license
|
BiancavD/PEP19G04
|
dc654904e00958fdb2a16521d50447bc437cac03
|
2c14b9447c00ec18d7bd8c7c6754ae3fdca5fd8e
|
refs/heads/master
| 2020-08-28T15:58:26.013068
| 2019-12-13T13:44:45
| 2019-12-13T13:44:45
| 217,746,982
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 906
|
py
|
#a = [23, 34, 425, -32, 73, 6, 256, 9, 33, 45.7]
v=[23,11,42,53,48,25,16]
#print("lungimea vectorului este:", len(v[2:4]))
"""for x in v:
print(x)"""
# display the even elements of vector v
"""for x in v:
if x % 2 == 0:
print(x)"""
# display the elements at odd positions
"""for i in range(len(v)):
if i % 2 != 0:
print(v[i])"""
### Print every second element (step 2)
""" a = [23, 34, 425, -32, 73, 6, 256, 9, 33, 45.7]
for i in range(0,len(a),2):
print(a[i])"""
#### Print the elements, skipping zeros
v = [23, 34, 425, -32, 73, 6, 0, 9, 33, 45.7]
for i in range(len(v)):
if v[i] == 0:
continue
print(v[i]) ####
### stop when a 0 is reached
v = [23, 34, 425, -32, 73, 6, 256, 9, 33, 45.7]
for i in range(len(v)):
if v[i]== 0:
break
print(v[i]) ###
# a more efficient variant
"""print()
for i in range(1, len(v), 2):
print(v[i])"""
|
[
"mihaita-cristian.manda@atos.net"
] |
mihaita-cristian.manda@atos.net
|
b9f0bdc682f5444b42f1881c2f9fd3147ee47dc5
|
bb04717f74c5cd6593083ce68815923256f8843a
|
/player/migrations/0001_initial.py
|
fe09bf5f4c1c90401b21dafa20261667abecc3aa
|
[] |
no_license
|
xujingao13/new_teamwork
|
77eb6941a3067dfc131ab34d580a7d8fb2b69517
|
447f0497c6a9e41a7b839ad3e424a9d5465d2c95
|
refs/heads/master
| 2021-01-13T02:11:16.987281
| 2015-12-30T15:24:00
| 2015-12-30T15:24:00
| 40,200,354
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,187
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import datetime
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ChessPlayer',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='', verbose_name='头像')),
('nick_name', models.CharField(max_length=256, verbose_name='游戏状态')),
('game_num', models.IntegerField(verbose_name='游戏次数', default=0)),
('game_grade', models.IntegerField(verbose_name='游戏积分', default=0)),
('game_state', models.CharField(max_length=128, verbose_name='游戏状态')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
('publisher_id', models.IntegerField()),
('receiver_id', models.IntegerField()),
('type', models.CharField(max_length=128, verbose_name='信息类型')),
('content', models.CharField(max_length=400, verbose_name='信息内容')),
],
),
migrations.CreateModel(
name='Relationship',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
('user1_id', models.IntegerField()),
('user2_id', models.IntegerField()),
],
),
migrations.CreateModel(
name='Room',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
('owner_id', models.IntegerField()),
('guest_id', models.IntegerField()),
('whose_turn', models.IntegerField()),
('game_state', models.CharField(max_length=128, verbose_name='游戏状态')),
('chess_board', models.CharField(max_length=450, verbose_name='棋盘状态')),
('last_chess_board', models.CharField(max_length=450, verbose_name='上一次棋盘状态')),
('last_steptime', models.DateTimeField(blank=True, default=datetime.datetime(2015, 8, 5, 20, 42, 37, 294029))),
('pausestart', models.DateTimeField(blank=True, default=datetime.datetime(2015, 8, 5, 20, 42, 37, 294029))),
],
),
migrations.CreateModel(
name='WatchingGame',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
('user_id', models.IntegerField()),
('room_id', models.IntegerField()),
],
),
]
|
[
"xujingao13@gmail.com"
] |
xujingao13@gmail.com
|
db2142bda638e20badb1ee6c14cf9429f34b6309
|
4590f24f3ac94e5c4669674ec2f23f15a523af57
|
/finalv2.py
|
a693d0552881b292652cad6b92c86fdc0d622f04
|
[] |
no_license
|
shaavi/Spectre_FYP
|
1b5fa707080981bc558bbe16859d0a9b3dd15184
|
63b7770f46e4c8510648d5d169098178614506f0
|
refs/heads/master
| 2020-06-11T17:06:22.789407
| 2020-03-02T12:23:43
| 2020-03-02T12:23:43
| 194,032,411
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,889
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[101]:
import numpy as np
import cv2
import matplotlib.pyplot as plt
from skimage import io
from skimage import measure
#read image
image = cv2.imread('H:/FYP/interim/images/222.jpg',0)
#preprocess using median blur
img = cv2.medianBlur(image,5)
plt.imshow(img)
# In[102]:
#erosion using morphological operations
ret,th1 = cv2.threshold(img,50,255,cv2.THRESH_BINARY)
kernel = np.ones((3,3),np.uint8)
erosion = cv2.erode(th1, kernel, iterations = 4)
plt.imshow(erosion)
# In[103]:
# perform a connected component analysis on the eroded image, then initialize a mask to store only the "large" components
labels = measure.label(erosion, neighbors=8, background=0)
mask1 = np.zeros(erosion.shape, dtype="uint8")
# loop over the unique components
for label in np.unique(labels):
# if this is the background label, ignore it
if label == 0:
continue
# otherwise, construct the label mask and count the
# number of pixels
labelMask = np.zeros(erosion.shape, dtype="uint8")
labelMask[labels == label] = 255
numPixels = cv2.countNonZero(labelMask)
# if the number of pixels in the component is sufficiently
# large, then add it to our mask of "large blobs"
if numPixels > 30000:
mask1 = cv2.add(mask1, labelMask)
plt.imshow(mask1)
# In[104]:
mask = cv2.dilate(mask1,kernel,iterations = 6)
plt.imshow(mask)
# In[105]:
#extract the brain using the mask
img = io.imread("H:/FYP/interim/images/222.jpg")
mask2 = np.where((mask<200),0,1).astype('uint8')
brain_img = img*mask2[:,:,np.newaxis]
plt.imshow(brain_img)
# In[106]:
# Load image, grayscale, Otsu's threshold, and extract ROI
image = brain_img
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
#thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]
x,y,w,h = cv2.boundingRect(thresh)
ROI = image[y:y+h, x:x+w]
plt.imshow(ROI)
# In[108]:
# Color segmentation on ROI
hsv = cv2.cvtColor(ROI, cv2.COLOR_BGR2HSV)
lower = np.array([0, 0, 125])
upper = np.array([179, 255, 255])
mask = cv2.inRange(hsv, lower, upper)
plt.imshow(mask)
# In[109]:
plt.imshow(hsv)
# In[110]:
# Crop left and right half of mask
x, y, w, h = 0, 0, ROI.shape[1]//2, ROI.shape[0]
left = mask[y:y+h, x:x+w]
right = mask[y:y+h, x+w:x+w+w]
# Count pixels
left_pixels = cv2.countNonZero(left)
right_pixels = cv2.countNonZero(right)
print('Left pixels:', left_pixels)
print('Right pixels:', right_pixels)
# In[111]:
# Crop top and bottom half of mask
x, y, w, h = 0, 0, mask.shape[1], mask.shape[0]//2  # full width, half the height (rows x cols)
bottom = mask[y+h:y+h+h, x:x+w]
top = mask[y:y+h, x:x+w]
# Count pixels
top_pixels = cv2.countNonZero(top)
bottom_pixels = cv2.countNonZero(bottom)
print('Top pixels:', top_pixels)
print('Bottom pixels:', bottom_pixels)
# In[112]:
if left_pixels > right_pixels:
print("Left")
else:
print("Right")
if top_pixels > bottom_pixels:
print("Top")
else:
print("Bottom")
# In[113]:
plt.imshow(left)
# In[114]:
plt.imshow(right)
# In[115]:
plt.imshow(top)
# In[116]:
plt.imshow(bottom)
# In[117]:
plt.imshow(mask)
# In[124]:
#img = cv2.imread('H:/FYP/interim/images/3322.jpg')
#img = mask
#gray_image = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
# convert the grayscale image to binary image
ret,thresh = cv2.threshold(mask,127,255,0)
# calculate moments of binary image
M = cv2.moments(thresh)
I = cv2.moments(mask)
# calculate x,y coordinate of center of tumor
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
print('tumor center:', cX, cY)
# calculate x,y coordinate of center of image
iX = ROI.shape[1]//2
iY = ROI.shape[0]//2
print('image center:', iX, iY)
# put text and highlight the center
#center = cv2.circle(mask, (iX, iY), 5, (22, 100, 8), -1)
#cv2.putText(mask, "centroid", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (22, 100, 8), 2)
# put text and highlight the center
center = cv2.circle(mask, (cX, cY), 5, (22, 100, 80), -1)
center = cv2.putText(center, "centroid", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (22, 100, 8), 2)
center = cv2.circle(center, (iX, iY), 5, (22, 100, 8), -1)
center = cv2.putText(center, "centroid", (iX - 25, iY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (22, 100, 8), 2)
plt.imshow(center)
# In[120]:
#calculate distance of tumor from center
import math
p1 = [iX, iY]
p2 = [cX, cY]
distance = math.sqrt( ((p1[0]-p2[0])**2)+((p1[1]-p2[1])**2) )
print('distance = ', distance)
# In[87]:
print('Left pixels:', left_pixels)
print('Right pixels:', right_pixels)
print('Top pixels:', top_pixels)
print('Bottom pixels:', bottom_pixels)
print(" ")
print("Location of tumor:")
if left_pixels > right_pixels:
print(" Left")
else:
print(" Right")
if top_pixels > bottom_pixels:
print(" Top")
else:
print(" Bottom")
# In[ ]:
|
[
"shavindip@gmail.com"
] |
shavindip@gmail.com
|
522bd050c87ec2e3215a3c729553e1d611c0549a
|
824b582c2e0236e987a29b233308917fbdfc57a7
|
/sdk/python/pulumi_google_native/orgpolicy/v2/get_folder_policy.py
|
14f2f3278537c86a7f93b5c154315a900a2b904d
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
24601/pulumi-google-native
|
ce8faf8455609a9572a8cbe0638c66427bf0ae7f
|
b219a14201c6c58eaa10caaeacbdaab528931adf
|
refs/heads/master
| 2023-08-23T05:48:31.819709
| 2021-10-08T18:50:44
| 2021-10-08T18:50:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,527
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetFolderPolicyResult',
'AwaitableGetFolderPolicyResult',
'get_folder_policy',
'get_folder_policy_output',
]
@pulumi.output_type
class GetFolderPolicyResult:
def __init__(__self__, name=None, spec=None):
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if spec and not isinstance(spec, dict):
raise TypeError("Expected argument 'spec' to be a dict")
pulumi.set(__self__, "spec", spec)
@property
@pulumi.getter
def name(self) -> str:
"""
Immutable. The resource name of the Policy. Must be one of the following forms, where constraint_name is the name of the constraint which this Policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, "projects/123/policies/compute.disableSerialPortAccess". Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def spec(self) -> 'outputs.GoogleCloudOrgpolicyV2PolicySpecResponse':
"""
Basic information about the Organization Policy.
"""
return pulumi.get(self, "spec")
class AwaitableGetFolderPolicyResult(GetFolderPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetFolderPolicyResult(
name=self.name,
spec=self.spec)
def get_folder_policy(folder_id: Optional[str] = None,
policy_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFolderPolicyResult:
"""
Gets a `Policy` on a resource. If no `Policy` is set on the resource, NOT_FOUND is returned. The `etag` value can be used with `UpdatePolicy()` to update a `Policy` during read-modify-write.
"""
__args__ = dict()
__args__['folderId'] = folder_id
__args__['policyId'] = policy_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:orgpolicy/v2:getFolderPolicy', __args__, opts=opts, typ=GetFolderPolicyResult).value
return AwaitableGetFolderPolicyResult(
name=__ret__.name,
spec=__ret__.spec)
@_utilities.lift_output_func(get_folder_policy)
def get_folder_policy_output(folder_id: Optional[pulumi.Input[str]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetFolderPolicyResult]:
"""
Gets a `Policy` on a resource. If no `Policy` is set on the resource, NOT_FOUND is returned. The `etag` value can be used with `UpdatePolicy()` to update a `Policy` during read-modify-write.
"""
...
|
[
"noreply@github.com"
] |
24601.noreply@github.com
|
86bf68a3fdb54d1cb09fca3faa9ef12d0f6fa966
|
ee53b0262007b2f0db0fe15b2ad85f65fafa4e25
|
/Leetcode/849. Maximize Distance to Closest Person.py
|
f36d9de210337b439e5c96e96c00caecda775ca7
|
[] |
no_license
|
xiaohuanlin/Algorithms
|
bd48caacb08295fc5756acdac609be78e143a760
|
157cbaeeff74130e5105e58a6b4cdf66403a8a6f
|
refs/heads/master
| 2023-08-09T05:18:06.221485
| 2023-08-08T11:53:15
| 2023-08-08T11:53:15
| 131,491,056
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,279
|
py
|
'''
In a row of seats, 1 represents a person sitting in that seat, and 0 represents that the seat is empty.
There is at least one empty seat, and at least one person sitting.
Alex wants to sit in the seat such that the distance between him and the closest person to him is maximized.
Return that maximum distance to closest person.
Example 1:
Input: [1,0,0,0,1,0,1]
Output: 2
Explanation:
If Alex sits in the second open seat (seats[2]), then the closest person has distance 2.
If Alex sits in any other open seat, the closest person has distance 1.
Thus, the maximum distance to the closest person is 2.
Example 2:
Input: [1,0,0,0]
Output: 3
Explanation:
If Alex sits in the last seat, the closest person is 3 seats away.
This is the maximum distance possible, so the answer is 3.
Note:
1 <= seats.length <= 20000
seats contains only 0s or 1s, at least one 0, and at least one 1.
'''
import unittest
class Solution:
def maxDistToClosest(self, seats):
"""
:type seats: List[int]
:rtype: int
"""
i = 0
start = -1
max_dis = 0
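        # Single left-to-right scan, tracking `start`, the index of the last
        # occupied seat seen so far (-1 before the first one). Candidate distances:
        #   - leading zeros: the index of the first occupied seat,
        #   - a gap between two occupied seats: half the gap, rounded down,
        #   - trailing zeros: from the last occupied seat to the end of the row.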
while i < len(seats):
if seats[i] == 0:
if i == len(seats) - 1:
# print(i, start)
dis = i - start
else:
i += 1
continue
else:
if start == -1:
# print(i, start)
dis = i
start = i
else:
dis = (i - start) // 2
# print(mid, dis)
start = i
# print(dis, max_dis)
if dis > max_dis:
max_dis = dis
i += 1
return max_dis
class TestSolution(unittest.TestCase):
def test_case(self):
examples = (
([1,0,0,0,1,0,1], 2),
([1,0,0,0], 3),
([0,0,0,1], 3),
([0,1,0,0,0,0], 4),
)
for first, second in examples:
self.assert_function(first, second)
def assert_function(self, first, second):
self.assertEqual(Solution().maxDistToClosest(first), second,
msg="first: {}; second: {}".format(first, second))
if __name__ == '__main__':
    unittest.main()
|
[
"derek.xiao@loftk.us"
] |
derek.xiao@loftk.us
|
63f6862c5fa020fc79e11cdb16aee06ddb1ff1a0
|
d5d35d20ec811cbaa792e681d559361cd7f38159
|
/challenge/DidacticVampireText.py
|
70cdffe7dc20b01345fe0e2f5d252051a8275136
|
[] |
no_license
|
markieboy/hacker.org
|
afe43f0b4213ec135f8b095bcc7b1a7a755581d8
|
da1689bdcc2fe91a81a30385680fd367f2d6e9cf
|
refs/heads/master
| 2021-06-21T12:07:21.503999
| 2017-08-11T08:38:05
| 2017-08-11T08:38:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
#!/usr/bin/env python3
# Q: http://www.hacker.org/challenge/chal.php?id=139
# A: http://www.hacker.org/challenge/chal.php?answer=sunshine&id=139&go=Submit
import re
import urllib.request
import hacker_org_util
PROBLEM_ID = '139'
def main():
source = urllib.request.urlopen(hacker_org_util.build_challenge_url(PROBLEM_ID)).read().decode()
m = re.search('<p>(.*)<p>', source, flags=re.DOTALL)
text = m.group(1)
print(''.join(re.findall(r'[A-Z]', text)))
if __name__ == '__main__':
main()
|
[
"charles.wangkai@gmail.com"
] |
charles.wangkai@gmail.com
|
1f282037ba707bdcb0c2fbd47ed08bb8e0e60104
|
5aa14c620a383d8429c144e5af46b0322c674439
|
/tests/python/Lut1DTransformTest.py
|
0b6073a7cd51dad23173cb33a42118d333820dbb
|
[
"BSD-3-Clause",
"CC-BY-4.0"
] |
permissive
|
asdlei99/OpenColorIO
|
ae421f6c14870ffe735c73107b76f6746bd563ee
|
9b23e9623792d8cc6e6c1dfd5394335ee148bcf3
|
refs/heads/master
| 2023-03-13T16:14:19.693576
| 2021-03-03T03:11:10
| 2021-03-03T03:11:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,698
|
py
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
import logging
import unittest
logger = logging.getLogger(__name__)
try:
import numpy as np
except ImportError:
logger.warning(
"NumPy could not be imported. "
"Test case will lack significant coverage!"
)
np = None
import PyOpenColorIO as OCIO
class Lut1DTransformTest(unittest.TestCase):
def test_default_constructor(self):
"""
Test the default constructor.
"""
lut = OCIO.Lut1DTransform()
self.assertEqual(lut.getLength(), 2)
self.assertEqual(lut.getDirection(), OCIO.TRANSFORM_DIR_FORWARD)
self.assertEqual(lut.getHueAdjust(), OCIO.HUE_NONE)
self.assertFalse(lut.getInputHalfDomain())
self.assertFalse(lut.getOutputRawHalfs())
self.assertEqual(lut.getInterpolation(), OCIO.INTERP_DEFAULT)
self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UNKNOWN)
r, g, b = lut.getValue(0)
self.assertEqual([r, g, b], [0, 0, 0])
r, g, b = lut.getValue(1)
self.assertEqual([r, g, b], [1, 1, 1])
def test_direction(self):
"""
Test the setDirection() and getDirection() methods.
"""
lut = OCIO.Lut1DTransform()
for direction in OCIO.TransformDirection.__members__.values():
lut.setDirection(direction)
self.assertEqual(lut.getDirection(), direction)
# Wrong type tests.
for invalid in (None, 1, 'test'):
with self.assertRaises(TypeError):
lut.setDirection(invalid)
def test_format_metadata(self):
"""
Test the getFormatMetadata() method.
"""
lut = OCIO.Lut1DTransform()
format_metadata = lut.getFormatMetadata()
self.assertIsInstance(format_metadata, OCIO.FormatMetadata)
self.assertEqual(format_metadata.getElementName(), 'ROOT')
self.assertEqual(format_metadata.getName(), '')
self.assertEqual(format_metadata.getID(), '')
format_metadata.setName('name')
format_metadata.setID('id')
self.assertEqual(format_metadata.getName(), 'name')
self.assertEqual(format_metadata.getID(), 'id')
def test_file_output_bit_depth(self):
"""
Test get/setFileOutputBitDepth.
"""
lut = OCIO.Lut1DTransform()
self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UNKNOWN)
lut.setFileOutputBitDepth(OCIO.BIT_DEPTH_UINT10)
self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UINT10)
def test_hue_adjust(self):
"""
Test get/setHueAdjust.
"""
lut = OCIO.Lut1DTransform()
self.assertEqual(lut.getHueAdjust(), OCIO.HUE_NONE)
lut.setHueAdjust(OCIO.HUE_DW3)
self.assertEqual(lut.getHueAdjust(), OCIO.HUE_DW3)
with self.assertRaises(OCIO.Exception):
lut.setHueAdjust(OCIO.HUE_WYPN)
def test_input_half_domain(self):
"""
        Test get/setInputHalfDomain.
"""
lut = OCIO.Lut1DTransform()
self.assertFalse(lut.getInputHalfDomain())
lut.setInputHalfDomain(True)
self.assertTrue(lut.getInputHalfDomain())
def test_output_raw_halfs(self):
"""
Test get/setOutputRawHalfs.
"""
lut = OCIO.Lut1DTransform()
self.assertFalse(lut.getOutputRawHalfs())
lut.setOutputRawHalfs(True)
self.assertTrue(lut.getOutputRawHalfs())
def test_length(self):
"""
Test get/setLength.
"""
lut = OCIO.Lut1DTransform()
self.assertEqual(lut.getLength(), 2)
lut.setValue(0, 0.1, 0.2, 0.3)
lut.setLength(3)
self.assertEqual(lut.getLength(), 3)
# Changing the length reset LUT values to identity.
r, g, b = lut.getValue(0)
self.assertEqual([r, g, b], [0, 0, 0])
def test_constructor_with_keywords(self):
"""
Test Lut1DTransform constructor with keywords and validate its values.
"""
lut = OCIO.Lut1DTransform(
length=65536,
inputHalfDomain=True,
outputRawHalfs=True,
fileOutputBitDepth=OCIO.BIT_DEPTH_UINT10,
hueAdjust=OCIO.HUE_DW3,
interpolation=OCIO.INTERP_BEST,
direction=OCIO.TRANSFORM_DIR_INVERSE)
self.assertEqual(lut.getLength(), 65536)
self.assertEqual(lut.getDirection(), OCIO.TRANSFORM_DIR_INVERSE)
self.assertEqual(lut.getHueAdjust(), OCIO.HUE_DW3)
self.assertTrue(lut.getInputHalfDomain())
self.assertTrue(lut.getOutputRawHalfs())
self.assertEqual(lut.getInterpolation(), OCIO.INTERP_BEST)
self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UINT10)
lut = OCIO.Lut1DTransform(
length=4,
direction=OCIO.TRANSFORM_DIR_INVERSE)
self.assertEqual(lut.getLength(), 4)
self.assertEqual(lut.getDirection(), OCIO.TRANSFORM_DIR_INVERSE)
self.assertEqual(lut.getHueAdjust(), OCIO.HUE_NONE)
self.assertFalse(lut.getInputHalfDomain())
self.assertFalse(lut.getOutputRawHalfs())
self.assertEqual(lut.getInterpolation(), OCIO.INTERP_DEFAULT)
self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UNKNOWN)
def test_constructor_with_positional(self):
"""
Test Lut1DTransform constructor without keywords and validate its values.
"""
lut = OCIO.Lut1DTransform(65536, True, True, OCIO.BIT_DEPTH_UINT10,
OCIO.HUE_DW3, OCIO.INTERP_BEST,
OCIO.TRANSFORM_DIR_INVERSE)
self.assertEqual(lut.getLength(), 65536)
self.assertEqual(lut.getDirection(), OCIO.TRANSFORM_DIR_INVERSE)
self.assertEqual(lut.getHueAdjust(), OCIO.HUE_DW3)
self.assertTrue(lut.getInputHalfDomain())
self.assertTrue(lut.getOutputRawHalfs())
self.assertEqual(lut.getInterpolation(), OCIO.INTERP_BEST)
self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UINT10)
def test_array(self):
"""
Get & set Lut array values.
"""
lut = OCIO.Lut1DTransform(length=3)
r, g, b = lut.getValue(0)
self.assertEqual([r, g, b], [0, 0, 0])
r, g, b = lut.getValue(1)
self.assertEqual([r, g, b], [0.5, 0.5, 0.5])
r, g, b = lut.getValue(2)
self.assertEqual([r, g, b], [1, 1, 1])
lut.setValue(0, 0.1, 0.2, 0.3)
r, g, b = lut.getValue(0)
# Values are stored as float.
self.assertAlmostEqual(r, 0.1, delta=1e-6)
self.assertAlmostEqual(g, 0.2, delta=1e-6)
self.assertAlmostEqual(b, 0.3, delta=1e-6)
if not np:
logger.warning("NumPy not found. Skipping part of test!")
return
data = lut.getData()
expected = np.array([0.1, 0.2, 0.3,
0.5, 0.5, 0.5,
1., 1., 1.]).astype(np.float32)
        # Compare element-wise; calling .all() on both sides would only compare two booleans.
        np.testing.assert_allclose(data, expected, atol=1e-6)
data[6] = 0.9
data[7] = 1.1
data[8] = 1.2
lut.setData(data)
r, g, b = lut.getValue(2)
self.assertAlmostEqual(r, 0.9, delta=1e-6)
self.assertAlmostEqual(g, 1.1, delta=1e-6)
self.assertAlmostEqual(b, 1.2, delta=1e-6)
def test_equals(self):
"""
Test equals.
"""
lut = OCIO.Lut1DTransform()
lut2 = OCIO.Lut1DTransform()
self.assertTrue(lut.equals(lut2))
lut.setValue(0, 0.1, 0.2, 0.3)
self.assertFalse(lut.equals(lut2))
|
[
"noreply@github.com"
] |
asdlei99.noreply@github.com
|
bf56c90a7fa0b42362e0679ec9a27b21f6edd258
|
7359acb9f96a7e4196ef1472cae543b17a45717a
|
/crawl2.py
|
b5bba826d394773673dac58a635b25978107026e
|
[] |
no_license
|
miyagilabs/revisionCrawler
|
e4b8240e6eda09f29b4a7e7bd5bebf85d4eab7f6
|
355648dcbd099d9f117343bdcf75abf2510d1771
|
refs/heads/master
| 2021-01-23T14:15:09.463020
| 2017-06-12T16:47:25
| 2017-06-12T16:47:25
| 93,249,320
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 785
|
py
|
import urllib2
# What this script does: for a given CHANGE, fetch the COMMENTS posted on
# REVISION 1 via the Gerrit REST API.
DEBUG = False
project_url = "https://gerrit.wikimedia.org/r"
change_id = "356858"
comments_url = project_url + "/changes/" + change_id + "/revisions/1/comments"
comments_content = urllib2.urlopen(comments_url).read()
# The response strangely has 4 characters ")]}'" that breaks JSON parsing.
if comments_content[:4] == ")]}'":
comments_content = comments_content[4:]
if DEBUG:
print comments_content
import json
comments_content_in_json = json.loads(comments_content)
print len(comments_content_in_json["SpamBlacklistHooks.php"])
for comment_map in comments_content_in_json["SpamBlacklistHooks.php"]:
    print comment_map["author"]["name"] + " says: \"\n " + comment_map["message"]
print "\""
|
[
"cagdas.gerede@gmail.com"
] |
cagdas.gerede@gmail.com
|
3e5e9331b99206c659a89d95766e9583bd254a66
|
184dcfbb4a6ab18688ad14b337acbf733a8a9ee6
|
/Recommenders.py
|
f4c12ef35003be81962263ed2d2f5d66508f20f7
|
[] |
no_license
|
Viole-Grace/RedHat_2019
|
58aa46a739612db164353143c5c439db5597305b
|
9aed7adc4a31f0d129a5d53efd028f4fb4995982
|
refs/heads/master
| 2020-05-09T11:55:39.870829
| 2019-04-13T02:07:30
| 2019-04-13T02:07:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,713
|
py
|
import numpy as np
import pandas
class popularity_recommender_py():
def __init__(self):
self.train_data = None
self.user_id = None
self.item_id = None
self.popularity_recommendations = None
def create(self, train_data, user_id, item_id):
self.train_data = train_data
self.user_id = user_id
self.item_id = item_id
train_data_grouped = train_data.groupby([self.item_id]).agg({self.user_id: 'count'}).reset_index()
train_data_grouped.rename(columns = {'user_id': 'score'},inplace=True)
train_data_sort = train_data_grouped.sort_values(['score', self.item_id], ascending = [0,1])
train_data_sort['Rank'] = train_data_sort['score'].rank(ascending=0, method='first')
self.popularity_recommendations = train_data_sort.head(10)
def recommend(self, user_id):
user_recommendations = self.popularity_recommendations
user_recommendations['user_id'] = user_id
cols = user_recommendations.columns.tolist()
cols = cols[-1:] + cols[:-1]
user_recommendations = user_recommendations[cols]
return user_recommendations
class item_similarity_recommender_py():
def __init__(self):
self.train_data = None
self.user_id = None
self.item_id = None
self.cooccurence_matrix = None
self.songs_dict = None
self.rev_songs_dict = None
self.item_similarity_recommendations = None
def get_user_items(self, user):
user_data = self.train_data[self.train_data[self.user_id] == user]
user_items = list(user_data[self.item_id].unique())
return user_items
def get_item_users(self, item):
item_data = self.train_data[self.train_data[self.item_id] == item]
item_users = set(item_data[self.user_id].unique())
return item_users
def get_all_items_train_data(self):
all_items = list(self.train_data[self.item_id].unique())
return all_items
def construct_cooccurence_matrix(self, user_songs, all_songs):
user_songs_users = []
for i in range(0, len(user_songs)):
user_songs_users.append(self.get_item_users(user_songs[i]))
cooccurence_matrix = np.matrix(np.zeros(shape=(len(user_songs), len(all_songs))), float)
for i in range(0,len(all_songs)):
songs_i_data = self.train_data[self.train_data[self.item_id] == all_songs[i]]
users_i = set(songs_i_data[self.user_id].unique())
for j in range(0,len(user_songs)):
users_j = user_songs_users[j]
users_intersection = users_i.intersection(users_j)
#Calculate cooccurence_matrix[i,j] as Jaccard Index
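                    # (size of the intersection of the two items' user sets
                    # divided by the size of their union)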
if len(users_intersection) != 0:
users_union = users_i.union(users_j)
cooccurence_matrix[j,i] = float(len(users_intersection))/float(len(users_union))
else:
cooccurence_matrix[j,i] = 0
return cooccurence_matrix
def generate_top_recommendations(self, user, cooccurence_matrix, all_songs, user_songs):
print("Non zero values in cooccurence_matrix :%d" % np.count_nonzero(cooccurence_matrix))
user_sim_scores = cooccurence_matrix.sum(axis=0)/float(cooccurence_matrix.shape[0])
user_sim_scores = np.array(user_sim_scores)[0].tolist()
sort_index = sorted(((e,i) for i,e in enumerate(list(user_sim_scores))), reverse=True)
columns = ['user_id', 'song', 'score', 'rank']
df = pandas.DataFrame(columns=columns)
rank = 1
for i in range(0,len(sort_index)):
if ~np.isnan(sort_index[i][0]) and all_songs[sort_index[i][1]] not in user_songs and rank <= 10:
df.loc[len(df)]=[user,all_songs[sort_index[i][1]],sort_index[i][0],rank]
rank = rank+1
if df.shape[0] == 0:
print("The current user has no songs for training the item similarity based recommendation model.")
return -1
else:
return df
def create(self, train_data, user_id, item_id):
self.train_data = train_data
self.user_id = user_id
self.item_id = item_id
#make recommendations
def recommend(self, user):
user_songs = self.get_user_items(user)
print("No. of unique songs for the user: %d" % len(user_songs))
all_songs = self.get_all_items_train_data()
print("no. of unique songs in the training set: %d" % len(all_songs))
cooccurence_matrix = self.construct_cooccurence_matrix(user_songs, all_songs)
df_recommendations = self.generate_top_recommendations(user, cooccurence_matrix, all_songs, user_songs)
return df_recommendations
def get_similar_items(self, item_list):
user_songs = item_list
all_songs = self.get_all_items_train_data()
print("no. of unique songs in the training set: %d" % len(all_songs))
cooccurence_matrix = self.construct_cooccurence_matrix(user_songs, all_songs)
user = ""
df_recommendations = self.generate_top_recommendations(user, cooccurence_matrix, all_songs, user_songs)
return df_recommendations
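# Minimal usage sketch (illustrative only; assumes a pandas DataFrame `train`
# with 'user_id' and 'song' columns -- these names are assumptions, not part of
# this module):
#
#   pm = popularity_recommender_py()
#   pm.create(train, 'user_id', 'song')
#   print(pm.recommend('some_user'))      # top-10 items by overall popularity
#
#   im = item_similarity_recommender_py()
#   im.create(train, 'user_id', 'song')
#   print(im.recommend('some_user'))      # co-occurrence (Jaccard) based recommendations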
|
[
"noreply@github.com"
] |
Viole-Grace.noreply@github.com
|
efcdcd2e3b9ad9c39bfda4e7240953fca1add4af
|
01ae99d512962035059117782170c85f87060dd9
|
/packages/package.py
|
6b12ccbe6f345ab5fbd9a5be62714e593a5bc550
|
[] |
no_license
|
JkbLskw/chocolateyUpgradeUtilityCore
|
711bc9697b5a639c77de5902323b3a11e89ab0ae
|
83496b9c9f2e3c560694a38acdec7a6f9f8621c8
|
refs/heads/master
| 2022-10-18T01:19:57.829411
| 2022-10-16T22:18:38
| 2022-10-16T22:18:38
| 180,997,376
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
class Package(object):
def __init__(self):
self.name = None
self.type = None
self.extern = None
self.chocolatey = None
|
[
"5z4hsex7Yc3f4XOkXRrP"
] |
5z4hsex7Yc3f4XOkXRrP
|
8defd0b690371cf2651951fb928ae9eb9d6912f7
|
c8488d1538537c1b9a0aca65e9d5fc22ad0db37f
|
/engine/error/ecs.py
|
ed34a529e9f5ee833d3f64f7b2e9895870374308
|
[
"MIT"
] |
permissive
|
TWoolhouse/Libraries
|
67e5a235153acf80a71cb938b7c83de242c07860
|
5cf6b3a80d80a5cc69bea5431a29731dd6b39d76
|
refs/heads/master
| 2022-04-30T11:13:21.142631
| 2022-03-29T23:20:13
| 2022-03-29T23:20:13
| 157,422,268
| 1
| 0
|
MIT
| 2021-01-05T16:34:13
| 2018-11-13T17:50:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,822
|
py
|
from .base import ECSError
__all__ = ["ComponentTypeError", "EntityLimitError", "GetComponentError", "InitializeComponent", "TerminateComponent"]
class ComponentTypeError(ECSError):
def __init__(self, component, expected=None):
self.component = component
self.expected = expected
def __str__(self) -> str:
if self.expected is None:
return f"{self.component} is not the correct type"
return f"Expected: '{self.expected}' Got {self.component}"
class EntityLimitError(ECSError):
def __init__(self, world, limit):
self.world = world
self.limit = limit
def __str__(self) -> str:
return "Entity Limit: {} reached in World: {}".format(self.limit, self.world)
class GetComponentError(ECSError):
def __init__(self, entity, component):
self.entity = entity
self.component = component
def __str__(self) -> str:
return "{}<{}> Does not Exist".format(self.entity, self.component.__name__)
class InitializeComponent(ECSError):
def __init__(self, entity, component, value):
self.entity = entity
self.component = component
self.value = value
def __str__(self) -> str:
return "{}<{}> Failed to Initialize with '{}'".format(self.entity, self.component.__class__.__name__, self.value)
class TerminateComponent(ECSError):
def __init__(self, entity, component, value):
self.entity = entity
self.component = component
self.value = value
def __str__(self) -> str:
return "{}<{}> Failed to Terminate with '{}'".format(self.entity, self.component.__class__.__name__, self.value)
class ParentError(ECSError):
def __init__(self, entity):
self.entity = entity
def __str__(self) -> str:
return "{} Has no parent"
|
[
"thomasrwoolhouse@gmail.com"
] |
thomasrwoolhouse@gmail.com
|
746f538f4f59613057ed9e33923e1a08e11e714b
|
1524720d6480ad0a51b6fd8ff709587455bf4c5d
|
/tums/trunk/lite/nevow/scripts/consolejstest.py
|
0a952bcc2cf4f3a02ccf1aa2154442b69701dc9e
|
[] |
no_license
|
calston/tums
|
2bd6d3cac5232d2ccb7e9becfc649e302a310eab
|
b93e3e957ff1da5b020075574942913c8822d12a
|
refs/heads/master
| 2020-07-12T03:46:43.639800
| 2018-05-12T10:54:54
| 2018-05-12T10:54:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,135
|
py
|
# Copyright (c) 2006 Divmod.
# See LICENSE for details.
"""
Out-of-browser conversion of javascript test modules that use Athena's "//
import" syntax into monolithic scripts suitable for feeding into a plain
javascript interpreter
"""
from sys import argv
from twisted.python.util import sibpath
import subprocess
import nevow
import nevow.athena
_DUMMY_MODULE_NAME = 'ConsoleJSTest'
def getDependencies(fname, ignore=('Divmod.Runtime', 'MochiKit.DOM'),
bootstrap=nevow.athena.LivePage.BOOTSTRAP_MODULES,
packages=None):
"""
Get the javascript modules that the code in the file with name C{fname}
depends on, recursively
@param fname: javascript source file name
@type fname: C{str}
@param ignore: names of javascript modules to exclude from dependency list
@type ignore: sequence
    @param bootstrap: names of javascript modules to always include, regardless
of explicit dependencies (defaults to L{nevow.athena.LivePage}'s list of
bootstrap modules)
    @type bootstrap: sequence
@param packages: all javascript packages we know about. defaults to the
result of L{nevow.athena.allJavascriptPackages}
@type packages: C{dict}
@return: modules included by javascript in file named C{fname}
@rtype: dependency-ordered list of L{nevow.athena.JSModule} instances
"""
if packages is None:
packages = nevow.athena.allJavascriptPackages()
packages[_DUMMY_MODULE_NAME] = fname
# TODO if a module is ignored, we should ignore its dependencies
return ([nevow.athena.JSModule.getOrCreate(m, packages)
for m in bootstrap if m not in ignore] +
[dep for dep in nevow.athena.JSModule(
_DUMMY_MODULE_NAME, packages).allDependencies()
if dep.name not in bootstrap
and dep.name != _DUMMY_MODULE_NAME
and dep.name not in ignore])
def generateTestScript(fname, after={'Divmod.Base': ('Divmod.Base.addLoadEvent = function() {};',)},
dependencies=None):
"""
Turn the contents of the Athena-style javascript test module in the file
named C{fname} into a plain javascript script. Recursively includes any
modules that are depended on, as well as the utility module
nevow/test/testsupport.js.
@param fname: javascript source file name
@type fname: C{str}
@param after: mapping of javascript module names to sequences of lines of
javascript source that should be injected into the output immediately
after the source of the named module is included
@type after: C{dict}
@param dependencies: the modules the script depends on. Defaults to the
result of L{getDependencies}
@type dependencies: dependency-ordered list of L{nevow.athena.JSModule}
instances
@return: converted javascript source text
@rtype: C{str}
"""
if dependencies is None:
        dependencies = getDependencies(fname)
load = lambda fname: 'load(%r);' % (fname,)
initialized = set()
js = [load(sibpath(nevow.__file__, 'test/testsupport.js'))]
for m in dependencies:
segments = m.name.split('.')
if segments[-1] == '__init__':
segments = segments[:-1]
initname = '.'.join(segments)
if initname not in initialized:
initialized.add(initname)
if '.' in initname:
prefix = ''
else:
prefix = 'var '
js.append('%s%s = {};' % (prefix, initname))
js.append(load(m.mapping[m.name]))
if m.name in after:
js.extend(after[m.name])
js.append(file(fname).read())
return '\n'.join(js)
def run():
"""
Read a single filename from the command line arguments, replace any module
imports with the body of the module in question and pipe the result to the
spidermonkey javascript interpreter
"""
# TODO: support more than one filename at a time
js = generateTestScript(argv[1])
subprocess.Popen('/usr/bin/smjs', stdin=subprocess.PIPE).communicate(js)
|
[
"junwin@gmail.com"
] |
junwin@gmail.com
|
614adee46ed6432a0f1bc2c1bc7d3dff42797991
|
c10633b49f927dd5c0dad517a1cd2bf60e59e501
|
/multi.py
|
cf4a74b7102c2ec2f08848e0d01bd930b0470266
|
[
"MIT"
] |
permissive
|
mohabedalgani/GFSA
|
d011e45be3f4f3adc99f568c36cf64d25a5f0e49
|
30a9ebad3d6b7f4f275ab2ea4b8509f64ab7ff40
|
refs/heads/master
| 2022-12-30T02:49:44.440909
| 2020-10-20T14:19:02
| 2020-10-20T14:19:02
| 292,627,811
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,848
|
py
|
'''
This file is part of GFLIB toolbox
First Version Sept. 2018
Cite this project as:
Mezher M., Abbod M. (2011) Genetic Folding: A New Class of Evolutionary Algorithms.
In: Bramer M., Petridis M., Hopgood A. (eds) Research and Development in Intelligent Systems XXVII.
SGAI 2010. Springer, London
Copyright (C) 20011-2018 Mohd A. Mezher (mohabedalgani@gmail.com)
'''
from warnings import filterwarnings
import matplotlib.pyplot as plt
import numpy as np
import os
import glob
# create folder for graphs generated during the run
if not os.path.exists('images/'):
os.makedirs('images/')
else:
files = glob.glob('images/*')
for f in files:
os.remove(f)
from inipop import inipop
from genpop import genpop
from tipicalsvm import typicalsvm
filterwarnings('ignore')
print('Running multi classification ...\n\n')
print('Type the maximum length of the chromosome: ')
max_chromosome_length = int(input()) # the maximum total length of the chromosome
DATA_PATH = 'data/multi/' # Dataset path for binary classification
params = dict()
params['type'] = 'multi' # problem type
params['data'] = 'wine_scale.txt' # path to data file
params['kernel'] = 'rbf' # rbf,linear,polynomial,gf
params['mutProb'] = 0.1 # mutation probability
params['crossProb'] = 0.5 # crossover probability
params['maxGen'] = 5 # max generation
params['popSize'] = 10 # population size
params['crossVal'] = 5 # number of cross validation slits
params['opList'] = ['Plus_s', 'Minus_s', 'Plus_v', 'Minus_v',
'Sine', 'Cosine', 'Tanh', 'Log', 'x', 'y'] # Operators and operands
print(f'''Data Set : {DATA_PATH + params['data']}\n\n''')
kernels = ['poly', 'rbf', 'linear', 'gf']
totalMSE = dict()
for ker in kernels:
totalMSE[ker] = list()
for i in range(5):
temp = []
for index, kernel in enumerate(kernels):
params['kernel'] = kernel
print(f'''SVM Kernel : {params['kernel']} \n''')
if kernel == 'gf':
print(f'''Max Generation : {params['maxGen']}\n''')
print(f'''Population Size : {params['popSize']}\n''')
print(f'''CrossOver Probability : {params['crossProb']}\n''')
print(f'''Mutation Probability : {params['mutProb']}\n\n''')
pop = inipop(params, max_chromosome_length)
mse = genpop(pop, params, i)
else:
mse = typicalsvm(params)
totalMSE[kernel].append(mse)
print('\n')
# Boxplot of errors for each kernel
plt.boxplot([totalMSE['poly'], totalMSE['rbf'], totalMSE['linear'], totalMSE['gf']])
plt.xticks(np.arange(1,5), kernels)
plt.title('MSE for each svm kernel')
plt.xlabel('SVM kernel')
plt.ylabel('Test Error Rate')
plt.ioff()
plt.savefig('images/mse.png')
plt.show()
|
[
"noreply@github.com"
] |
mohabedalgani.noreply@github.com
|
819b1e3786aa43a656e60a23ca7029b7e0c4009f
|
5c1aa426d983ff26ff0135d506ea7e40d2ef0d0d
|
/Portfolio/main/urls.py
|
20219270ab13dae1b1bbae51c747a3b2f5a1b608
|
[
"MIT"
] |
permissive
|
kprakhar27/Django-course
|
2a070c31ee821efb08113a89c3f048d7b1d1e207
|
3643a98ab2fe78b533e549ad05f1f7eededaaeff
|
refs/heads/main
| 2023-05-26T05:17:42.504828
| 2021-05-31T19:37:47
| 2021-05-31T19:37:47
| 354,726,285
| 2
| 0
| null | 2021-05-30T20:37:02
| 2021-04-05T05:12:45
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 228
|
py
|
from django.urls import path
from main import views
urlpatterns = [
path('projects/', views.projects, name='projects'),
path('languages/', views.languages, name='languages'),
path('', views.index, name='index'),
]
|
[
"kumarprkhr@gmail.com"
] |
kumarprkhr@gmail.com
|
735d952b9b73db8a38c1a772c6a5c61bceced913
|
e1dd6d9dccb822d472b7f4f9e8446dd9202eb5a1
|
/sdk/test/test_io_k8s_api_rbac_v1alpha1_cluster_role_list.py
|
f7e76f339f1516a69e8e00215b9d2dd97d478213
|
[] |
no_license
|
swiftdiaries/argo_client
|
8af73e8df6a28f9ea5f938b5894ab8b7825e4cc2
|
b93758a22d890cb33cbd81934042cfc3c12169c7
|
refs/heads/master
| 2020-05-17T12:11:57.556216
| 2019-07-24T23:23:33
| 2019-07-24T23:23:33
| 183,701,327
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,030
|
py
|
# coding: utf-8
"""
Argo API Client
Generated python client for the Argo Workflows # noqa: E501
OpenAPI spec version: v1.14.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import argo.sdk
from models.io_k8s_api_rbac_v1alpha1_cluster_role_list import IoK8sApiRbacV1alpha1ClusterRoleList # noqa: E501
from argo.sdk.rest import ApiException
class TestIoK8sApiRbacV1alpha1ClusterRoleList(unittest.TestCase):
"""IoK8sApiRbacV1alpha1ClusterRoleList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testIoK8sApiRbacV1alpha1ClusterRoleList(self):
"""Test IoK8sApiRbacV1alpha1ClusterRoleList"""
# FIXME: construct object with mandatory attributes with example values
# model = argo.sdk.models.io_k8s_api_rbac_v1alpha1_cluster_role_list.IoK8sApiRbacV1alpha1ClusterRoleList() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"adhita94@gmail.com"
] |
adhita94@gmail.com
|
0ef7ffea33b2244997b5c41254c53c90db1083aa
|
ee8cb974f12977894f7f0fda5b8129570224618b
|
/gim/core/migrations/0013_auto__add_field_issueevent_related_content_type__add_field_issueevent_.py
|
43503150f2cbcde634778bdb46e6ccbccd7ff624
|
[] |
no_license
|
derekey/github-issues-manager
|
996b3c7b9acd0362b7d99948d45a15ea05d58cc2
|
63a405b993e77f10b9c2b6d9790aae7576d9d84f
|
refs/heads/develop
| 2021-01-21T01:03:01.739800
| 2014-11-09T21:26:49
| 2014-11-09T21:26:49
| 42,234,954
| 1
| 0
| null | 2015-09-10T09:22:40
| 2015-09-10T09:22:39
| null |
UTF-8
|
Python
| false
| false
| 27,500
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'IssueEvent.related_content_type'
db.add_column(u'core_issueevent', 'related_content_type',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], null=True, blank=True),
keep_default=False)
# Adding field 'IssueEvent.related_object_id'
db.add_column(u'core_issueevent', 'related_object_id',
self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'IssueEvent.related_content_type'
db.delete_column(u'core_issueevent', 'related_content_type_id')
# Deleting field 'IssueEvent.related_object_id'
db.delete_column(u'core_issueevent', 'related_object_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.commit': {
'Meta': {'ordering': "('committed_at',)", 'object_name': 'Commit'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'commits_authored'", 'null': 'True', 'to': u"orm['core.GithubUser']"}),
'author_email': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'author_name': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'authored_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'comments_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'committed_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'committer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'commits__commited'", 'null': 'True', 'to': u"orm['core.GithubUser']"}),
'committer_email': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'committer_name': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'parents': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'parents_rel_+'", 'to': u"orm['core.Commit']"}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'commits'", 'to': u"orm['core.Repository']"}),
'sha': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'tree': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'})
},
u'core.githubuser': {
'Meta': {'ordering': "('username',)", 'object_name': 'GithubUser'},
'_available_repositories': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'available_repositories_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'avatar_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_organization': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'organizations_rel_+'", 'to': u"orm['core.GithubUser']"}),
'organizations_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organizations_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'core.issue': {
'Meta': {'unique_together': "(('repository', 'number'),)", 'object_name': 'Issue'},
'assignee': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assigned_issues'", 'null': 'True', 'to': u"orm['core.GithubUser']"}),
'base_label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'base_sha': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'body_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_issues'", 'null': 'True', 'to': u"orm['core.GithubUser']"}),
'closed_by_fetched': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'comments_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'comments_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'comments_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'commits': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'issues'", 'symmetrical': 'False', 'to': u"orm['core.Commit']"}),
'commits_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'commits_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'events_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'events_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_pr_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
'head_label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'head_sha': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_pull_request': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'labels': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'issues'", 'symmetrical': 'False', 'to': u"orm['core.Label']"}),
'mergeable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'merged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'merged_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'merged_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'merged_prs'", 'null': 'True', 'to': u"orm['core.GithubUser']"}),
'milestone': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'issues'", 'null': 'True', 'to': u"orm['core.Milestone']"}),
'nb_additions': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'nb_changed_files': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'nb_commits': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'nb_deletions': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'pr_comments_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'pr_comments_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'pr_comments_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'pr_fetched_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'issues'", 'to': u"orm['core.Repository']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_issues'", 'to': u"orm['core.GithubUser']"})
},
u'core.issuecomment': {
'Meta': {'ordering': "('created_at',)", 'object_name': 'IssueComment'},
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'body_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': u"orm['core.Issue']"}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': u"orm['core.Repository']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'issue_comments'", 'to': u"orm['core.GithubUser']"})
},
u'core.issueevent': {
'Meta': {'ordering': "('created_at', 'github_id')", 'object_name': 'IssueEvent'},
'commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events'", 'to': u"orm['core.Issue']"}),
'related_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'related_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'issues_events'", 'to': u"orm['core.Repository']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'issues_events'", 'null': 'True', 'to': u"orm['core.GithubUser']"})
},
u'core.label': {
'Meta': {'ordering': "('label_type', 'order', 'typed_name')", 'unique_together': "(('repository', 'name'),)", 'object_name': 'Label', 'index_together': "(('repository', 'label_type', 'order'),)"},
'api_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'labels'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['core.LabelType']"}),
'name': ('django.db.models.fields.TextField', [], {}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'labels'", 'to': u"orm['core.Repository']"}),
'typed_name': ('django.db.models.fields.TextField', [], {'db_index': 'True'})
},
u'core.labeltype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('repository', 'name'),)", 'object_name': 'LabelType'},
'edit_details': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'edit_mode': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'db_index': 'True'}),
'regex': ('django.db.models.fields.TextField', [], {}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'label_types'", 'to': u"orm['core.Repository']"})
},
u'core.milestone': {
'Meta': {'ordering': "('number',)", 'unique_together': "(('repository', 'number'),)", 'object_name': 'Milestone'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'milestones'", 'to': u"orm['core.GithubUser']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'due_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'milestones'", 'to': u"orm['core.Repository']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'db_index': 'True'})
},
u'core.pullrequestcomment': {
'Meta': {'ordering': "('created_at',)", 'object_name': 'PullRequestComment'},
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'body_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'entry_point': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': u"orm['core.PullRequestCommentEntryPoint']"}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pr_comments'", 'to': u"orm['core.Issue']"}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pr_comments'", 'to': u"orm['core.Repository']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pr_comments'", 'to': u"orm['core.GithubUser']"})
},
u'core.pullrequestcommententrypoint': {
'Meta': {'ordering': "('created_at',)", 'object_name': 'PullRequestCommentEntryPoint'},
'commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'diff_hunk': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pr_comments_entry_points'", 'to': u"orm['core.Issue']"}),
'original_commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'original_position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pr_comments_entry_points'", 'to': u"orm['core.Repository']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'pr_comments_entry_points'", 'null': 'True', 'to': u"orm['core.GithubUser']"})
},
u'core.repository': {
'Meta': {'ordering': "('owner', 'name')", 'unique_together': "(('owner', 'name'),)", 'object_name': 'Repository'},
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'repositories'", 'symmetrical': 'False', 'to': u"orm['core.GithubUser']"}),
'collaborators_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'collaborators_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'comments_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'comments_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
'has_issues': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_fork': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'issues_events_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'issues_events_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'issues_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'issues_state_closed_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'issues_state_open_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'labels_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'labels_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'milestones_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'milestones_state_closed_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'milestones_state_open_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owned_repositories'", 'to': u"orm['core.GithubUser']"}),
'pr_comments_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'pr_comments_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'prs_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'prs_state_closed_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'prs_state_open_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['core']
|
[
"s.angel@twidi.com"
] |
s.angel@twidi.com
|
6d23fa4b5b3ab1a873dca811f647611e1b4bb2c3
|
e40945130b3819a0a3c6ad3e66f0f8de6cba8a62
|
/xym/order/order_test.py
|
3c3d31eab542857ad86d2da543821a7778a3831f
|
[] |
no_license
|
jinge324/test002
|
1d778e9f15d48eba19947576acc6e42994c35d93
|
5d4fac1e770d5aae41aa7ba1be8650195d8cc09c
|
refs/heads/fenzhi
| 2022-09-10T18:17:38.005875
| 2020-05-26T02:52:56
| 2020-05-26T02:52:56
| 262,921,532
| 0
| 0
| null | 2020-05-26T02:52:58
| 2020-05-11T02:42:09
|
Python
|
UTF-8
|
Python
| false
| false
| 805
|
py
|
import json
import xym.request.myrequests
import xym.test_cases.test_fee_data.fee_one_data
class order_qw():
def iop(self):
run = xym.request.myrequests.RunMain
headers = xym.test_cases.test_fee_data.fee_one_data.headers
        payurl = 'https://api.imways.com/mall/electricity/test_cases/prepay?feePaymentMode=CASH&isCardCharged=1&storeId=8606&degree=&amount=1'
paydata = {
'entityId': 25,
'rechargeInfo': {
'assetRechargeMode': "CASH",
'posInfo': {'enablePoundage': 'true'},
'remark': 'iowefjio'
},
'remark': 'jieofjwaoi'
}
payfee = run.run_main(payurl, 'POST', headers, json.dumps(paydata))
print(payfee)
if __name__ == "__main__":
    order_qw().iop()
|
[
"iop"
] |
iop
|
893136904401af906e7bdbcf75c63539d98f9364
|
5cb7b9fe09b1dd20c0664d0c86c375ffe353903c
|
/static/js/pypyjs/pypy-nojit.js-0.3.1/lib/modules/test/test_shlex.py
|
ba0f3d1fcae7670d0a08cc51cf4cc0b57557c939
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
shiblon/pytour
|
6d0ee4a679cf7e6ffd8ac6326b8bb0d9071a7c73
|
71a181ec16fd38b0af62f55e28a50e91790733b9
|
refs/heads/master
| 2021-01-17T10:09:18.822575
| 2020-09-23T20:05:58
| 2020-09-23T20:05:58
| 23,226,350
| 2
| 3
|
Apache-2.0
| 2020-02-17T22:36:02
| 2014-08-22T13:33:27
|
Python
|
UTF-8
|
Python
| false
| false
| 5,315
|
py
|
# -*- coding: utf-8 -*-
import unittest
import shlex
from test import test_support
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# The original test data set was from shellwords, by Hartmut Goebel.
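# Each line below is pipe-separated: the first field is the raw input string, the
# remaining fields are the tokens shlex is expected to produce, and the trailing '|'
# yields the empty element that setUp() drops via split("|")[:-1].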
data = r"""x|x|
foo bar|foo|bar|
foo bar|foo|bar|
foo bar |foo|bar|
foo bar bla fasel|foo|bar|bla|fasel|
x y z xxxx|x|y|z|xxxx|
\x bar|\|x|bar|
\ x bar|\|x|bar|
\ bar|\|bar|
foo \x bar|foo|\|x|bar|
foo \ x bar|foo|\|x|bar|
foo \ bar|foo|\|bar|
foo "bar" bla|foo|"bar"|bla|
"foo" "bar" "bla"|"foo"|"bar"|"bla"|
"foo" bar "bla"|"foo"|bar|"bla"|
"foo" bar bla|"foo"|bar|bla|
foo 'bar' bla|foo|'bar'|bla|
'foo' 'bar' 'bla'|'foo'|'bar'|'bla'|
'foo' bar 'bla'|'foo'|bar|'bla'|
'foo' bar bla|'foo'|bar|bla|
blurb foo"bar"bar"fasel" baz|blurb|foo"bar"bar"fasel"|baz|
blurb foo'bar'bar'fasel' baz|blurb|foo'bar'bar'fasel'|baz|
""|""|
''|''|
foo "" bar|foo|""|bar|
foo '' bar|foo|''|bar|
foo "" "" "" bar|foo|""|""|""|bar|
foo '' '' '' bar|foo|''|''|''|bar|
\""|\|""|
"\"|"\"|
"foo\ bar"|"foo\ bar"|
"foo\\ bar"|"foo\\ bar"|
"foo\\ bar\"|"foo\\ bar\"|
"foo\\" bar\""|"foo\\"|bar|\|""|
"foo\\ bar\" dfadf"|"foo\\ bar\"|dfadf"|
"foo\\\ bar\" dfadf"|"foo\\\ bar\"|dfadf"|
"foo\\\x bar\" dfadf"|"foo\\\x bar\"|dfadf"|
"foo\x bar\" dfadf"|"foo\x bar\"|dfadf"|
\''|\|''|
'foo\ bar'|'foo\ bar'|
'foo\\ bar'|'foo\\ bar'|
"foo\\\x bar\" df'a\ 'df'|"foo\\\x bar\"|df'a|\|'df'|
\"foo"|\|"foo"|
\"foo"\x|\|"foo"|\|x|
"foo\x"|"foo\x"|
"foo\ "|"foo\ "|
foo\ xx|foo|\|xx|
foo\ x\x|foo|\|x|\|x|
foo\ x\x\""|foo|\|x|\|x|\|""|
"foo\ x\x"|"foo\ x\x"|
"foo\ x\x\\"|"foo\ x\x\\"|
"foo\ x\x\\""foobar"|"foo\ x\x\\"|"foobar"|
"foo\ x\x\\"\''"foobar"|"foo\ x\x\\"|\|''|"foobar"|
"foo\ x\x\\"\'"fo'obar"|"foo\ x\x\\"|\|'"fo'|obar"|
"foo\ x\x\\"\'"fo'obar" 'don'\''t'|"foo\ x\x\\"|\|'"fo'|obar"|'don'|\|''|t'|
'foo\ bar'|'foo\ bar'|
'foo\\ bar'|'foo\\ bar'|
foo\ bar|foo|\|bar|
foo#bar\nbaz|foobaz|
:-) ;-)|:|-|)|;|-|)|
áéíóú|á|é|í|ó|ú|
"""
posix_data = r"""x|x|
foo bar|foo|bar|
foo bar|foo|bar|
foo bar |foo|bar|
foo bar bla fasel|foo|bar|bla|fasel|
x y z xxxx|x|y|z|xxxx|
\x bar|x|bar|
\ x bar| x|bar|
\ bar| bar|
foo \x bar|foo|x|bar|
foo \ x bar|foo| x|bar|
foo \ bar|foo| bar|
foo "bar" bla|foo|bar|bla|
"foo" "bar" "bla"|foo|bar|bla|
"foo" bar "bla"|foo|bar|bla|
"foo" bar bla|foo|bar|bla|
foo 'bar' bla|foo|bar|bla|
'foo' 'bar' 'bla'|foo|bar|bla|
'foo' bar 'bla'|foo|bar|bla|
'foo' bar bla|foo|bar|bla|
blurb foo"bar"bar"fasel" baz|blurb|foobarbarfasel|baz|
blurb foo'bar'bar'fasel' baz|blurb|foobarbarfasel|baz|
""||
''||
foo "" bar|foo||bar|
foo '' bar|foo||bar|
foo "" "" "" bar|foo||||bar|
foo '' '' '' bar|foo||||bar|
\"|"|
"\""|"|
"foo\ bar"|foo\ bar|
"foo\\ bar"|foo\ bar|
"foo\\ bar\""|foo\ bar"|
"foo\\" bar\"|foo\|bar"|
"foo\\ bar\" dfadf"|foo\ bar" dfadf|
"foo\\\ bar\" dfadf"|foo\\ bar" dfadf|
"foo\\\x bar\" dfadf"|foo\\x bar" dfadf|
"foo\x bar\" dfadf"|foo\x bar" dfadf|
\'|'|
'foo\ bar'|foo\ bar|
'foo\\ bar'|foo\\ bar|
"foo\\\x bar\" df'a\ 'df"|foo\\x bar" df'a\ 'df|
\"foo|"foo|
\"foo\x|"foox|
"foo\x"|foo\x|
"foo\ "|foo\ |
foo\ xx|foo xx|
foo\ x\x|foo xx|
foo\ x\x\"|foo xx"|
"foo\ x\x"|foo\ x\x|
"foo\ x\x\\"|foo\ x\x\|
"foo\ x\x\\""foobar"|foo\ x\x\foobar|
"foo\ x\x\\"\'"foobar"|foo\ x\x\'foobar|
"foo\ x\x\\"\'"fo'obar"|foo\ x\x\'fo'obar|
"foo\ x\x\\"\'"fo'obar" 'don'\''t'|foo\ x\x\'fo'obar|don't|
"foo\ x\x\\"\'"fo'obar" 'don'\''t' \\|foo\ x\x\'fo'obar|don't|\|
'foo\ bar'|foo\ bar|
'foo\\ bar'|foo\\ bar|
foo\ bar|foo bar|
foo#bar\nbaz|foo|baz|
:-) ;-)|:-)|;-)|
áéíóú|áéíóú|
"""
class ShlexTest(unittest.TestCase):
def setUp(self):
self.data = [x.split("|")[:-1]
for x in data.splitlines()]
self.posix_data = [x.split("|")[:-1]
for x in posix_data.splitlines()]
for item in self.data:
item[0] = item[0].replace(r"\n", "\n")
for item in self.posix_data:
item[0] = item[0].replace(r"\n", "\n")
def splitTest(self, data, comments):
for i in range(len(data)):
l = shlex.split(data[i][0], comments=comments)
self.assertEqual(l, data[i][1:],
"%s: %s != %s" %
(data[i][0], l, data[i][1:]))
def oldSplit(self, s):
ret = []
lex = shlex.shlex(StringIO(s))
tok = lex.get_token()
while tok:
ret.append(tok)
tok = lex.get_token()
return ret
def testSplitPosix(self):
"""Test data splitting with posix parser"""
self.splitTest(self.posix_data, comments=True)
def testCompat(self):
"""Test compatibility interface"""
for i in range(len(self.data)):
l = self.oldSplit(self.data[i][0])
self.assertEqual(l, self.data[i][1:],
"%s: %s != %s" %
(self.data[i][0], l, self.data[i][1:]))
# Allow this test to be used with old shlex.py
if not getattr(shlex, "split", None):
for methname in dir(ShlexTest):
if methname.startswith("test") and methname != "testCompat":
delattr(ShlexTest, methname)
def test_main():
test_support.run_unittest(ShlexTest)
if __name__ == "__main__":
test_main()
|
[
"shiblon@gmail.com"
] |
shiblon@gmail.com
|
24ab3b6bc9948f727288670a20148e9be24cf5a9
|
22dfd0857377a1b8747cd1481d2c3c9e5dbfd0e4
|
/edgedetect.py
|
b97065484be499d7e7c1ec4c82e3ba6cfdbeb2c2
|
[] |
no_license
|
msbomrel/cheque
|
d60a7ff26df1d523d4e7a63eaa95d145857a57f6
|
94d8dc383f7385063513e246d992685660e17b2b
|
refs/heads/master
| 2021-01-02T22:53:52.128668
| 2017-09-15T01:39:49
| 2017-09-15T01:39:49
| 99,415,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,685
|
py
|
# import the necessary packages
from Services.transform import four_point_transform
import cv2
import glob
from os.path import join, dirname, realpath
UPLOAD_FOLDER = join(dirname(realpath(__file__)),'static/images/')
def detect():
print "I am called"
files = glob.glob(UPLOAD_FOLDER +'*')
for f in files:
print f
image = cv2.imread(f)
orig = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(gray, 100, 100)
print "I am here in loop"
# show the original image and the edge detected image
_, cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
# loop over the contours
        screenCnt = None
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            print peri
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)
            print len(approx)
            # if our approximated contour has four points, then we
            # can assume that we have found our screen
            if len(approx) == 4:
                screenCnt = approx
                break
        if screenCnt is None:
            # no four-point contour was found, so skip this image
            continue
        # show the contour (outline) of the piece of paper
        cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 1)
warped = four_point_transform(orig, screenCnt.reshape(4, 2))
cropped = cv2.resize(warped, (1500,750), interpolation=cv2.INTER_LINEAR)
# cv2.imshow('', warped)
# cv2.waitKey(0)
s = UPLOAD_FOLDER +'firstImage'+'.png'
cv2.imwrite(s, cropped)
return 'OKay'
|
[
"mohan.bomrel@deerwalk.edu.np"
] |
mohan.bomrel@deerwalk.edu.np
|
28559c924e5a546d830cba07e2a3cf237ab9c05e
|
dc40f134d3713c0a4951588182bbe64b580761ef
|
/web/telebble/fetch.py
|
56f38b729dd8286332285600c1d7d7ee69216440
|
[
"MIT"
] |
permissive
|
hkpeprah/television-2.0
|
79eaa553dc1246fc9fb468fd14830ef7fdccc3b7
|
8d6ce9cc7c8e22447769845159464422153cb8f6
|
refs/heads/master
| 2021-01-15T15:34:02.739470
| 2015-10-18T00:02:44
| 2015-10-18T00:05:15
| 38,463,076
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,586
|
py
|
import datetime
import constants
import models
import sources
import utils
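# Each fetcher below follows the same upsert pattern: make sure the source's Network
# document exists, create any Series not yet stored (keyed by source_id + source_type),
# then store each episode as a Media document and attach it to its Series.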
def fetch_from_funimation():
data = sources.funimation.list_latest_episodes()
series = data.get('series', [])
episodes = data.get('episodes', [])
source_type = sources.FUNIMATION_SOURCE
network = models.Network.objects(source_type=source_type).first()
if network is None:
network = models.Network()
network._type = 'Subscription'
network.name = 'Funimation'
network.description = 'You Should be Watching'
network.country = 'US'
network.source_type = source_type
network.save()
for series_data in series:
extra_data = series_data.get('extra_data', {})
_id = extra_data.get('series_id')
query = models.Series.objects(source_id=_id, source_type=source_type)
if query.first() is not None:
continue
series = models.Series()
series.name = series_data['name']
series.description = series_data['description'] or ''
series.genres = series_data['genres']
series.image = series_data['image'] or ''
series.runtime = series_data['runtime']
series.network = network
series.source_id = _id
series.source_type = source_type
series.extra_data = extra_data
series.save()
network.update(add_to_set__series=[series])
network.save()
for episode_data in episodes:
extra_data = episode_data.get('extra_data', {})
series_id = extra_data.get('series_id')
_id = extra_data.get('media_id')
query = models.Media.objects(source_id=_id)
query = filter(lambda m: m.source_type == source_type, query)
media = query[0] if len(query) > 0 else models.Media()
media.name = episode_data['name']
media.summary = episode_data['summary']
media.image = episode_data['image']
media.season = int(episode_data['season'])
media.number = int(episode_data['number'])
media.timestamp = episode_data['timestamp']
media._runtime = episode_data['runtime']
series = models.Series.objects(source_type=source_type, source_id=series_id).first()
media.series = series
media.source_id = _id
media.extra_data = extra_data
media.save()
series.update(add_to_set__media=[media])
series.save()
def fetch_from_crunchyroll():
data = sources.crunchyroll.list_latest_episodes(100)
episodes = data.get('episodes', [])
series = data.get('series', [])
source_type = sources.CRUNCHYROLL_SOURCE
network = models.Network.objects(source_type=source_type).first()
if network is None:
network = models.Network()
network._type = 'Subscription'
network.name = 'Crunchyroll'
network.description = 'Official, legal streaming anime videos including Naruto Shippuden, ' + \
'Attack on Titan, Sword Art Online, Skip Beat, and Shugo Chara. Start watching now.'
network.country = 'US'
network.source_type = source_type
network.save()
for series_data in series:
extra_data = series_data.get('extra_data', {})
_id = extra_data.get('series_id')
query = models.Series.objects(source_id=_id, source_type=source_type)
if query.first() is not None:
continue
series = models.Series()
series.name = series_data['name']
series.description = series_data['description'] or ''
series.genres = series_data['genres']
series.image = series_data['image'] or ''
if series_data['runtime']:
series.runtime = series_data['runtime']
series.network = network
series.source_id = _id
series.source_type = source_type
series.extra_data = extra_data
series.save()
network.update(add_to_set__series=[series])
network.save()
for episode_data in episodes:
extra_data = episode_data.get('extra_data', {})
series_id = extra_data.get('series_id')
_id = extra_data.get('media_id')
media_objects = models.Media.objects(source_id=_id)
filtered_media_objects = filter(lambda m: m.source_type == source_type, media_objects)
if len(filtered_media_objects) > 0:
continue
media = models.Media()
media.name = episode_data['name']
media.summary = episode_data['summary']
media.image = episode_data['image'] or ''
media.season = episode_data['season'] or 1
media.number = episode_data['number']
media.timestamp = utils.iso_to_timestamp(episode_data['timestamp'])
if episode_data['runtime'] is not None:
media._runtime = episode_data['runtime']
series = models.Series.objects(source_type=source_type, source_id=series_id).first()
media.series = series
media.source_id = _id
media.extra_data = extra_data
media.save()
series.update(add_to_set__media=[media])
series.save()
def parse_television_data(data):
source_type = sources.TELEVISION_SOURCE
shows = data.get('shows', [])
networks = data.get('networks', [])
episodes = data.get('episodes', [])
for network_data in networks:
_id = network_data.get('extra_data', {}).get('id')
query = models.Network.objects(source_id=_id, source_type=source_type)
if query.first() is not None:
continue
network = models.Network()
network._type = 'None'
network.name = network_data['name']
network.description = network_data.get('description', '')
network.timezone = network_data['timezone']
network.country = network_data['country_code']
network.source_id = _id
network.source_type = source_type
network.save()
for show_data in shows:
extra_data = show_data.get('extra_data', {})
_id = extra_data.get('id')
network_id = extra_data.get('network_id')
query = models.Series.objects(source_id=_id, source_type=source_type)
if query.first() is not None:
continue
series = models.Series()
series.name = show_data['name'] or ''
series.description = show_data['description']
series.genres = show_data['genres']
series.image = show_data.get('image', '')
series.runtime = show_data['runtime']
network_query = models.Network.objects(source_id=network_id, source_type=source_type)
network = network_query.first()
series.network = network
series.source_id = _id
series.source_type = source_type
series.extra_data = extra_data
series.save()
network.update(add_to_set__series=[series])
network.save()
for episode_data in episodes:
extra_data = episode_data.get('extra_data', {})
series_id = extra_data['series_id']
network_id = extra_data['network_id']
episode_number = episode_data['number']
if episode_number is None:
continue
network = models.Network.objects(source_id=network_id, source_type=source_type).first()
series = models.Series.objects(source_id=series_id, source_type=source_type).first()
exists = False
media = filter(lambda m: m.number == episode_number, series.media)
if len(media) > 0:
continue
media = models.Media()
media.name = episode_data['name'] or ''
media.summary = episode_data['summary']
media.image = episode_data['image'] or ''
media.season = episode_data['season'] or 1
media.number = episode_number
media.timestamp = utils.iso_to_timestamp(episode_data['timestamp'])
if episode_data['runtime'] is not None:
media._runtime = int(episode_data['runtime'])
media.series = series
media.extra_data = extra_data
media.save()
series.update(add_to_set__media=[media])
series.save()
def fetch_from_television(year, month, day):
date = datetime.datetime(year, month, day)
countries = constants.COUNTRY_CODES
for country in countries:
data = sources.television.list_latest_episodes_for_country(country, date)
parse_television_data(data)
def search_from_television(query, limit=None):
countries = constants.COUNTRY_CODES
data = sources.television.search_for_series(query, countries, limit)
parse_television_data(data)
|
[
"ford.peprah@uwaterloo.ca"
] |
ford.peprah@uwaterloo.ca
|
dfb96190100b8be320491ef9b7eae480f6f820c9
|
49361b00b942f2fb9e05d8bbab368c684c90c95b
|
/projectweb/urls.py
|
37c2c450f406420c7f22e4b8b6f9a37cea295bd3
|
[] |
no_license
|
vinodh1988/python-service
|
224e8a7b98acf17279c1f30e59d8cd1d41a7094a
|
b04b5fcc01f5fc88a6a59589f861ccaac845cb6a
|
refs/heads/master
| 2023-08-05T03:35:27.692968
| 2021-09-09T12:46:13
| 2021-09-09T12:46:13
| 403,600,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 918
|
py
|
"""projectweb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('market-api/',include('firstrest.urls')),
path('common/',include('firstrest.generalurls')),
path('account/', include('user_app.api.urls')),
]
|
[
"vinodhkc@vinodhconnects.onmicrosoft.com"
] |
vinodhkc@vinodhconnects.onmicrosoft.com
|
0eb24ff5044be2fee91c60335d5eff66d6f6606b
|
f261b4ac05694cac34ae726d874bd723087a239a
|
/weather_application/asgi.py
|
7b86d336eee56295bfdf939caf9526556bddae41
|
[] |
no_license
|
AGolobokov/epamWeatherApp
|
50f44bdff517d4f6bb20a7172594743c896d74c0
|
9ab74d2417df4e747056d82d3d529b06a9eb2989
|
refs/heads/master
| 2023-07-25T12:35:21.624902
| 2021-09-02T22:25:19
| 2021-09-02T22:25:19
| 402,088,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
"""
ASGI config for weather_application project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'weather_application.settings')
application = get_asgi_application()
|
[
"bullet93639@mail.ru"
] |
bullet93639@mail.ru
|
49bf2c1b0d24319a2822b8d511a0eb5a55bf56af
|
0b3e7dd58b9e521d3af45332a7c343c8eb6792f6
|
/deadloop.py
|
a692a56975326203b609a1c01c694faa5cc20a9f
|
[] |
no_license
|
hanshantong/gitlearnpy
|
13c1dd4b3b1df35225f568eb9104161ad53f36b6
|
1fde167811641838d9bbe8d1fd3c8d13a8374694
|
refs/heads/master
| 2021-07-25T20:24:04.162552
| 2018-11-07T09:52:18
| 2018-11-07T09:52:18
| 145,381,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
__author__ = "tongzi"
import threading,multiprocessing
def loop():
x = 0
while True:
x = x ^ 1
print("The number of cpu core is: ",multiprocessing.cpu_count())
for i in range(multiprocessing.cpu_count()):
t = threading.Thread(target=loop)
t.start()
|
[
"1356466973@qq.com"
] |
1356466973@qq.com
|
67ae5942e03cb4f245ecd286713c9620a53ef868
|
4b4bfaf66d8dbe28987656ac603e707e36b2d493
|
/package/HTMLTestRunner11.py
|
cc28b1862f6fce856fa7a77fbad9baab22266dfc
|
[] |
no_license
|
xieguoyong/SeleniumDoctor
|
846e63160a79615e3865e249f274298e8f334bdc
|
d0238e33421903907966b6add81d2287b17810bb
|
refs/heads/master
| 2020-03-27T04:33:09.731273
| 2018-09-03T08:55:21
| 2018-09-03T08:55:21
| 145,949,808
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,265
|
py
|
"""
A TestRunner for use with the Python unit testing framework. It
generates an HTML report to show the result at a glance.
The simplest way to use this is to invoke its main method. E.g.
import unittest
import HTMLTestRunner
... define your tests ...
if __name__ == '__main__':
HTMLTestRunner.main()
For more customization options, instantiate an HTMLTestRunner object.
HTMLTestRunner is a counterpart to unittest's TextTestRunner. E.g.
# output to a file
fp = file('my_report.html', 'wb')
runner = HTMLTestRunner.HTMLTestRunner(
stream=fp,
title='My unit test',
description='This demonstrates the report output by HTMLTestRunner.'
)
# Use an external stylesheet.
# See the Template_mixin class for more customizable options
runner.STYLESHEET_TMPL = '<link rel="stylesheet" href="my_stylesheet.css" type="text/css">'
# run the test
runner.run(my_test_suite)
------------------------------------------------------------------------
Copyright (c) 2004-2007, Wai Yip Tung
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name Wai Yip Tung nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# URL: http://tungwaiyip.info/software/HTMLTestRunner.html
__author__ = "Wai Yip Tung"
__version__ = "0.8.2"
"""
Change History
Version 0.8.2
* Show output inline instead of popup window (Viorel Lupu).
Version in 0.8.1
* Validated XHTML (Wolfgang Borgert).
* Added description of test classes and test cases.
Version in 0.8.0
* Define Template_mixin class for customization.
* Workaround a IE 6 bug that it does not treat <script> block as CDATA.
Version in 0.7.1
* Back port to Python 2.3 (Frank Horowitz).
* Fix missing scroll bars in detail log (Podi).
"""
# TODO: color stderr
# TODO: simplify javascript using more than 1 class in the class attribute?
import datetime
import io
import sys
import time
import unittest
from xml.sax import saxutils
# ------------------------------------------------------------------------
# The redirectors below are used to capture output during testing. Output
# sent to sys.stdout and sys.stderr is automatically captured. However
# in some cases sys.stdout is already cached before HTMLTestRunner is
# invoked (e.g. calling logging.basicConfig). In order to capture that
# output, use the redirectors for the cached stream.
#
# e.g.
# >>> logging.basicConfig(stream=HTMLTestRunner.stdout_redirector)
# >>>
class OutputRedirector(object):
""" Wrapper to redirect stdout or stderr """
def __init__(self, fp):
self.fp = fp
def write(self, s):
self.fp.write(s)
def writelines(self, lines):
self.fp.writelines(lines)
def flush(self):
self.fp.flush()
stdout_redirector = OutputRedirector(sys.stdout)
stderr_redirector = OutputRedirector(sys.stderr)
# ----------------------------------------------------------------------
# Template
class Template_mixin(object):
"""
    Define an HTML template for report customization and generation.
Overall structure of an HTML report
HTML
+------------------------+
|<html> |
| <head> |
| |
| STYLESHEET |
| +----------------+ |
| | | |
| +----------------+ |
| |
| </head> |
| |
| <body> |
| |
| HEADING |
| +----------------+ |
| | | |
| +----------------+ |
| |
| REPORT |
| +----------------+ |
| | | |
| +----------------+ |
| |
| ENDING |
| +----------------+ |
| | | |
| +----------------+ |
| |
| </body> |
|</html> |
+------------------------+
"""
STATUS = {
0: 'pass',
1: 'fail',
2: 'error',
}
DEFAULT_TITLE = 'Unit Test Report'
DEFAULT_DESCRIPTION = ''
# ------------------------------------------------------------------------
# HTML Template
HTML_TMPL = r"""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(title)s</title>
<meta name="generator" content="%(generator)s"/>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
%(stylesheet)s
</head>
<body>
<script language="javascript" type="text/javascript"><!--
output_list = Array();
/* level - 0:Summary; 1:Failed; 2:All */
function showCase(level) {
trs = document.getElementsByTagName("tr");
for (var i = 0; i < trs.length; i++) {
tr = trs[i];
id = tr.id;
if (id.substr(0,2) == 'ft') {
if (level < 1) {
tr.className = 'hiddenRow';
}
else {
tr.className = '';
}
}
if (id.substr(0,2) == 'pt') {
if (level > 1) {
tr.className = '';
}
else {
tr.className = 'hiddenRow';
}
}
}
}
function showClassDetail(cid, count) {
var id_list = Array(count);
var toHide = 1;
for (var i = 0; i < count; i++) {
tid0 = 't' + cid.substr(1) + '.' + (i+1);
tid = 'f' + tid0;
tr = document.getElementById(tid);
if (!tr) {
tid = 'p' + tid0;
tr = document.getElementById(tid);
}
id_list[i] = tid;
if (tr.className) {
toHide = 0;
}
}
for (var i = 0; i < count; i++) {
tid = id_list[i];
if (toHide) {
document.getElementById('div_'+tid).style.display = 'none'
document.getElementById(tid).className = 'hiddenRow';
}
else {
document.getElementById(tid).className = '';
}
}
}
function showTestDetail(div_id){
var details_div = document.getElementById(div_id)
var displayState = details_div.style.display
// alert(displayState)
if (displayState != 'block' ) {
displayState = 'block'
details_div.style.display = 'block'
}
else {
details_div.style.display = 'none'
}
}
function html_escape(s) {
s = s.replace(/&/g,'&');
s = s.replace(/</g,'<');
s = s.replace(/>/g,'>');
return s;
}
/* obsoleted by detail in <div>
function showOutput(id, name) {
var w = window.open("", //url
name,
"resizable,scrollbars,status,width=800,height=450");
d = w.document;
d.write("<pre>");
d.write(html_escape(output_list[id]));
d.write("\n");
d.write("<a href='javascript:window.close()'>close</a>\n");
d.write("</pre>\n");
d.close();
}
*/
--></script>
%(heading)s
%(report)s
%(ending)s
</body>
</html>
"""
# variables: (title, generator, stylesheet, heading, report, ending)
# ------------------------------------------------------------------------
# Stylesheet
#
# alternatively use a <link> for external style sheet, e.g.
# <link rel="stylesheet" href="$url" type="text/css">
STYLESHEET_TMPL = """
<style type="text/css" media="screen">
body { font-family: verdana, arial, helvetica, sans-serif; font-size: 80%; }
table { font-size: 100%; }
pre { }
/* -- heading ---------------------------------------------------------------------- */
h1 {
font-size: 16pt;
color: gray;
}
.heading {
margin-top: 0ex;
margin-bottom: 1ex;
}
.heading .attribute {
margin-top: 1ex;
margin-bottom: 0;
}
.heading .description {
margin-top: 4ex;
margin-bottom: 6ex;
}
/* -- css div popup ------------------------------------------------------------------------ */
a.popup_link {
}
a.popup_link:hover {
color: red;
}
.popup_window {
display: none;
position: relative;
left: 0px;
top: 0px;
/*border: solid #627173 1px; */
padding: 10px;
background-color: #E6E6D6;
font-family: "Lucida Console", "Courier New", Courier, monospace;
text-align: left;
font-size: 8pt;
width: 500px;
}
}
/* -- report ------------------------------------------------------------------------ */
#show_detail_line {
margin-top: 3ex;
margin-bottom: 1ex;
}
#result_table {
width: 80%;
border-collapse: collapse;
border: 1px solid #777;
}
#header_row {
font-weight: bold;
color: white;
background-color: #777;
}
#result_table td {
border: 1px solid #777;
padding: 2px;
}
#total_row { font-weight: bold; }
.passClass { background-color: #6c6; }
.failClass { background-color: #c60; }
.errorClass { background-color: #c00; }
.passCase { color: #6c6; }
.failCase { color: #c60; font-weight: bold; }
.errorCase { color: #c00; font-weight: bold; }
.hiddenRow { display: none; }
.testcase { margin-left: 2em; }
/* -- ending ---------------------------------------------------------------------- */
#ending {
}
</style>
"""
# ------------------------------------------------------------------------
# Heading
#
HEADING_TMPL = """<div class='heading'>
<h1>%(title)s</h1>
%(parameters)s
<p class='description'>%(description)s</p>
</div>
""" # variables: (title, parameters, description)
HEADING_ATTRIBUTE_TMPL = """<p class='attribute'><strong>%(name)s:</strong> %(value)s</p>
""" # variables: (name, value)
# ------------------------------------------------------------------------
# Report
#
REPORT_TMPL = """
<p id='show_detail_line'>Show
<a href='javascript:showCase(0)'>Summary</a>
<a href='javascript:showCase(1)'>Failed</a>
<a href='javascript:showCase(2)'>All</a>
</p>
<table id='result_table'>
<colgroup>
<col align='left' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
</colgroup>
<tr id='header_row'>
<td>Test Group/Test case</td>
<td>Count</td>
<td>Pass</td>
<td>Fail</td>
<td>Error</td>
<td>View</td>
</tr>
%(test_list)s
<tr id='total_row'>
<td>Total</td>
<td>%(count)s</td>
<td>%(Pass)s</td>
<td>%(fail)s</td>
<td>%(error)s</td>
<td> </td>
</tr>
</table>
""" # variables: (test_list, count, Pass, fail, error)
REPORT_CLASS_TMPL = r"""
<tr class='%(style)s'>
<td>%(desc)s</td>
<td>%(count)s</td>
<td>%(Pass)s</td>
<td>%(fail)s</td>
<td>%(error)s</td>
<td><a href="javascript:showClassDetail('%(cid)s',%(count)s)">Detail</a></td>
</tr>
""" # variables: (style, desc, count, Pass, fail, error, cid)
REPORT_TEST_WITH_OUTPUT_TMPL = r"""
<tr id='%(tid)s' class='%(Class)s'>
<td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
<td colspan='5' align='center'>
<!--css div popup start-->
<a class="popup_link" onfocus='this.blur();' href="javascript:showTestDetail('div_%(tid)s')" >
%(status)s</a>
<div id='div_%(tid)s' class="popup_window">
<div style='text-align: right; color:red;cursor:pointer'>
<a onfocus='this.blur();' onclick="document.getElementById('div_%(tid)s').style.display = 'none' " >
[x]</a>
</div>
<pre>
%(script)s
</pre>
</div>
<!--css div popup end-->
</td>
</tr>
""" # variables: (tid, Class, style, desc, status)
REPORT_TEST_NO_OUTPUT_TMPL = r"""
<tr id='%(tid)s' class='%(Class)s'>
<td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
<td colspan='5' align='center'>%(status)s</td>
</tr>
""" # variables: (tid, Class, style, desc, status)
REPORT_TEST_OUTPUT_TMPL = r"""
%(id)s: %(output)s
""" # variables: (id, output)
# ------------------------------------------------------------------------
# ENDING
#
ENDING_TMPL = """<div id='ending'> </div>"""
# -------------------- The end of the Template class -------------------
TestResult = unittest.TestResult
class _TestResult(TestResult):
# note: _TestResult is a pure representation of results.
    # It lacks the output and reporting ability of unittest._TextTestResult.
def __init__(self, verbosity=1):
TestResult.__init__(self)
self.stdout0 = None
self.stderr0 = None
self.success_count = 0
self.failure_count = 0
self.error_count = 0
self.verbosity = verbosity
# result is a list of result in 4 tuple
# (
# result code (0: success; 1: fail; 2: error),
# TestCase object,
# Test output (byte string),
# stack trace,
# )
self.result = []
def startTest(self, test):
TestResult.startTest(self, test)
# just one buffer for both stdout and stderr
        self.outputBuffer = io.StringIO()  # text-mode buffer: redirected stdout/stderr receive str under Python 3
stdout_redirector.fp = self.outputBuffer
stderr_redirector.fp = self.outputBuffer
self.stdout0 = sys.stdout
self.stderr0 = sys.stderr
sys.stdout = stdout_redirector
sys.stderr = stderr_redirector
def complete_output(self):
"""
Disconnect output redirection and return buffer.
Safe to call multiple times.
"""
if self.stdout0:
sys.stdout = self.stdout0
sys.stderr = self.stderr0
self.stdout0 = None
self.stderr0 = None
return self.outputBuffer.getvalue()
def stopTest(self, test):
# Usually one of addSuccess, addError or addFailure would have been called.
        # But there are some paths in unittest that would bypass this.
# We must disconnect stdout in stopTest(), which is guaranteed to be called.
self.complete_output()
def addSuccess(self, test):
self.success_count += 1
TestResult.addSuccess(self, test)
output = self.complete_output()
self.result.append((0, test, output, ''))
if self.verbosity > 1:
sys.stderr.write('ok ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('.')
def addError(self, test, err):
self.error_count += 1
TestResult.addError(self, test, err)
_, _exc_str = self.errors[-1]
output = self.complete_output()
self.result.append((2, test, output, _exc_str))
if self.verbosity > 1:
sys.stderr.write('E ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('E')
def addFailure(self, test, err):
self.failure_count += 1
TestResult.addFailure(self, test, err)
_, _exc_str = self.failures[-1]
output = self.complete_output()
self.result.append((1, test, output, _exc_str))
if self.verbosity > 1:
sys.stderr.write('F ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('F')
class HTMLTestRunner(Template_mixin):
"""
"""
def __init__(self, stream=sys.stdout, verbosity=1, title=None, description=None):
self.stream = stream
self.verbosity = verbosity
if title is None:
self.title = self.DEFAULT_TITLE
else:
self.title = title
if description is None:
self.description = self.DEFAULT_DESCRIPTION
else:
self.description = description
self.startTime = datetime.datetime.now()
def run(self, test):
"Run the given test case or test suite."
result = _TestResult(self.verbosity)
test(result)
self.stopTime = datetime.datetime.now()
self.generateReport(test, result)
        print('\nTime Elapsed: %s' % (self.stopTime - self.startTime), file=sys.stderr)
return result
def sortResult(self, result_list):
        # unittest does not seem to run tests in any particular order.
# Here at least we want to group them together by class.
rmap = {}
classes = []
for n,t,o,e in result_list:
cls = t.__class__
if not cls in rmap:
rmap[cls] = []
classes.append(cls)
rmap[cls].append((n,t,o,e))
r = [(cls, rmap[cls]) for cls in classes]
return r
def getReportAttributes(self, result):
"""
Return report attributes as a list of (name, value).
Override this to add custom attributes.
"""
startTime = str(self.startTime)[:19]
duration = str(self.stopTime - self.startTime)
status = []
if result.success_count: status.append('Pass %s' % result.success_count)
if result.failure_count: status.append('Failure %s' % result.failure_count)
if result.error_count: status.append('Error %s' % result.error_count )
if status:
status = ' '.join(status)
else:
status = 'none'
return [
('Start Time', startTime),
('Duration', duration),
('Status', status),
]
def generateReport(self, test, result):
report_attrs = self.getReportAttributes(result)
generator = 'HTMLTestRunner %s' % __version__
stylesheet = self._generate_stylesheet()
heading = self._generate_heading(report_attrs)
report = self._generate_report(result)
ending = self._generate_ending()
output = self.HTML_TMPL % dict(
title = saxutils.escape(self.title),
generator = generator,
stylesheet = stylesheet,
heading = heading,
report = report,
ending = ending,
)
self.stream.write(output.encode('utf8'))
def _generate_stylesheet(self):
return self.STYLESHEET_TMPL
def _generate_heading(self, report_attrs):
a_lines = []
for name, value in report_attrs:
line = self.HEADING_ATTRIBUTE_TMPL % dict(
name = saxutils.escape(name),
value = saxutils.escape(value),
)
a_lines.append(line)
heading = self.HEADING_TMPL % dict(
title = saxutils.escape(self.title),
parameters = ''.join(a_lines),
description = saxutils.escape(self.description),
)
return heading
def _generate_report(self, result):
rows = []
sortedResult = self.sortResult(result.result)
for cid, (cls, cls_results) in enumerate(sortedResult):
# subtotal for a class
np = nf = ne = 0
for n,t,o,e in cls_results:
if n == 0: np += 1
elif n == 1: nf += 1
else: ne += 1
# format class description
if cls.__module__ == "__main__":
name = cls.__name__
else:
name = "%s.%s" % (cls.__module__, cls.__name__)
doc = cls.__doc__ and cls.__doc__.split("\n")[0] or ""
desc = doc and '%s: %s' % (name, doc) or name
row = self.REPORT_CLASS_TMPL % dict(
style = ne > 0 and 'errorClass' or nf > 0 and 'failClass' or 'passClass',
desc = desc,
count = np+nf+ne,
Pass = np,
fail = nf,
error = ne,
cid = 'c%s' % (cid+1),
)
rows.append(row)
for tid, (n,t,o,e) in enumerate(cls_results):
self._generate_report_test(rows, cid, tid, n, t, o, e)
report = self.REPORT_TMPL % dict(
test_list = ''.join(rows),
count = str(result.success_count+result.failure_count+result.error_count),
Pass = str(result.success_count),
fail = str(result.failure_count),
error = str(result.error_count),
)
return report
def _generate_report_test(self, rows, cid, tid, n, t, o, e):
# e.g. 'pt1.1', 'ft1.1', etc
has_output = bool(o or e)
tid = (n == 0 and 'p' or 'f') + 't%s.%s' % (cid+1,tid+1)
name = t.id().split('.')[-1]
doc = t.shortDescription() or ""
desc = doc and ('%s: %s' % (name, doc)) or name
tmpl = has_output and self.REPORT_TEST_WITH_OUTPUT_TMPL or self.REPORT_TEST_NO_OUTPUT_TMPL
        # o and e may be byte strings when collected from the redirected stdout and stderr;
        # decode them so they can be concatenated with the (str) traceback below.
        if isinstance(o, bytes):
            uo = o.decode('latin-1')
        else:
            uo = o
        if isinstance(e, bytes):
            ue = e.decode('latin-1')
        else:
            ue = e
script = self.REPORT_TEST_OUTPUT_TMPL % dict(
id = tid,
output = saxutils.escape(str(uo)+ue),
)
row = tmpl % dict(
tid = tid,
Class = (n == 0 and 'hiddenRow' or 'none'),
style = n == 2 and 'errorCase' or (n == 1 and 'failCase' or 'none'),
desc = desc,
script = script,
status = self.STATUS[n],
)
rows.append(row)
if not has_output:
return
def _generate_ending(self):
return self.ENDING_TMPL
##############################################################################
# Facilities for running tests from the command line
##############################################################################
# Note: Reuse unittest.TestProgram to launch test. In the future we may
# build our own launcher to support more specific command line
# parameters like test title, CSS, etc.
class TestProgram(unittest.TestProgram):
"""
A variation of the unittest.TestProgram. Please refer to the base
class for command line parameters.
"""
def runTests(self):
# Pick HTMLTestRunner as the default test runner.
# base class's testRunner parameter is not useful because it means
# we have to instantiate HTMLTestRunner before we know self.verbosity.
if self.testRunner is None:
self.testRunner = HTMLTestRunner(verbosity=self.verbosity)
unittest.TestProgram.runTests(self)
main = TestProgram
##############################################################################
# Executing this module from the command line
##############################################################################
if __name__ == "__main__":
main(module=None)
|
[
"xie.guoyong@dr-elephant.com"
] |
xie.guoyong@dr-elephant.com
|
bea0d53b9ad8af2ff72f2f2b917b932eaff47d93
|
59f5ce54604b69624cf559c804bcf7ee14e4685c
|
/scripts/mmaction2_tsm/dataset_generation_script/gen_label_ava.py
|
aa49c3b5d0f3d319ec7ae099643408a39d43b934
|
[] |
no_license
|
ZJU-lishuang/shengshi_github
|
334737f9072d0ee40b19f480f1e856a98b1d0d29
|
f01905c5b9aa587232e3532be96bca4f84839cd8
|
refs/heads/master
| 2023-07-17T02:08:52.072460
| 2021-09-01T02:54:48
| 2021-09-01T02:54:48
| 188,337,859
| 0
| 0
| null | 2023-07-06T21:27:29
| 2019-05-24T02:20:59
|
Python
|
UTF-8
|
Python
| false
| false
| 7,034
|
py
|
import os
import xml.etree.ElementTree as ET
import numpy as np
# import mmcv
import shutil
# import re
def subfiles(path):
"""Yield directory names not starting with '.' under given path."""
for entry in os.scandir(path):
if not entry.name.startswith('.') and not entry.is_dir():
yield entry.name
def loadAllTagFile( DirectoryPath, tag ):  # collect all file paths with the given extension
result = []
for file in subfiles(DirectoryPath):
# for file in os.listdir(DirectoryPath):
file_path = os.path.join(DirectoryPath, file)
if os.path.splitext(file_path)[1] == tag:
result.append(file_path)
return result
def checkdir(dirpath):
if not os.path.exists(dirpath):
os.makedirs(dirpath)
def GetFileFromThisRootDir(dir,ext = None):
allfiles = []
needExtFilter = (ext != None)
for root,dirs,files in os.walk(dir):
for filespath in files:
filepath = os.path.join(root, filespath)
extension = os.path.splitext(filepath)[1]
if needExtFilter and extension in ext:
allfiles.append(filepath)
elif not needExtFilter:
allfiles.append(filepath)
return allfiles
def gen_dataset():
# dataset_xml = '/home/lishuang/Disk/gitlab/traincode/video_action_det/data/ava_xianchang/天府机场标签'
# dataset_xml = '/home/lishuang/Disk/dukto/异常行为标注/action_train'
dataset_xml='../action_dataset/annotations_addv1'
dst_dir_path="./result_csv_track_shortvideo_addv1/"
xml_names = GetFileFromThisRootDir(dataset_xml, '.xml')
xml_names.sort()
total_ann = {}
total_action_labels = []
for xml_path in xml_names:
img_basename = os.path.basename(xml_path)
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('imagesize')
w = int(size.find('ncols').text)
h = int(size.find('nrows').text)
video_id, frame_id = os.path.splitext(img_basename)[0].rsplit('_', 1)
if video_id == 'frame':
video_id=os.path.basename(os.path.dirname(xml_path))
if video_id =="default":
video_id = os.path.basename(os.path.dirname(os.path.dirname(xml_path)))
if video_id not in total_ann:
total_ann[video_id]={}
timestamp = int(frame_id)
for obj in root.findall('object'):
name = obj.find('name').text
if name != '异常行为人' and name != '正常行为人':
continue
labels = obj.find('attributes').text
for label in labels.split(','):
if '人物ID' in label:
entity_id = label
if '异常行为' in label:
action_label = label
if name == "正常行为人":
action_label = "异常行为=正常"
for label in labels.split(','):
if 'track_id' in label:
entity_id = video_id + '_' + label
polygon = obj.find('polygon')
pts = polygon.findall('pt')
bbox = [
float(pts[0].find('x').text) ,
float(pts[0].find('y').text) ,
float(pts[2].find('x').text) ,
float(pts[2].find('y').text)
]
action_label = action_label.strip()
entity_id = entity_id.strip()
if entity_id not in total_ann[video_id]:
total_ann[video_id][entity_id] = {}
if action_label not in total_ann[video_id][entity_id]:
total_ann[video_id][entity_id][action_label] = {}
if timestamp not in total_ann[video_id][entity_id][action_label]:
total_ann[video_id][entity_id][action_label][timestamp] = {}
total_ann[video_id][entity_id][action_label][timestamp]['bbox'] = bbox
if action_label not in total_action_labels:
total_action_labels.append(action_label)
def custom_action_labels():
return [
'异常行为=头撞墙', '异常行为=砸门', '异常行为=正常', '异常行为=扇巴掌', '异常行为=掐脖子', '异常行为=举高', '异常行为=撞桌', '异常行为=打斗',
'异常行为=打滚', '异常行为=快速移动', '异常行为=举标语', '异常行为=发传单'
]
checkdir(dst_dir_path)
short_video=True
    # 25 frames per second, so one minute is 1500 frames
for video_name,total_ids in total_ann.items():
        # a single video
video_id = 0
for entity_id,actions in total_ids.items():
            # a single tracked target
for action_name,total_frame in actions.items():
                # a single action segment
class_name = action_name.replace("异常行为=", "")
dst_class_path = os.path.join(dst_dir_path, class_name)
checkdir(dst_class_path)
#init
track_data="action_name,video_name,entity_id,frame_id,xmin,ymin,xmax,ymax\n"
if short_video:
frame_start = -1
frame_end = -1
for frame_id, person_ann in total_frame.items():
xmin, ymin, xmax, ymax = person_ann['bbox']
track_data += f"{action_name},{video_name},{entity_id},{frame_id},{xmin},{ymin},{xmax},{ymax}\n"
if short_video:
if frame_start < 0:
frame_start=frame_id
frame_end = frame_id
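                    # emit a clip once a tracked segment spans 150-300 frames (6-12 s at 25 fps)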
if frame_start >= 0 and frame_end - frame_start >= 150 and frame_end - frame_start <= 300:
frame_start = frame_end
txt_path = f"result_{video_name}_{video_id}.txt"
txt_path = os.path.join(dst_class_path, txt_path)
with open(txt_path, 'w') as f:
f.write(track_data)
video_id += 1
track_data = "action_name,video_name,entity_id,frame_id,xmin,ymin,xmax,ymax\n"
elif frame_start >= 0 and frame_end-frame_start >=300:
frame_start=frame_end
track_data = "action_name,video_name,entity_id,frame_id,xmin,ymin,xmax,ymax\n"
if short_video and frame_end - frame_start >= 75:
txt_path = f"result_{video_name}_{video_id}.txt"
txt_path = os.path.join(dst_class_path, txt_path)
with open(txt_path, 'w') as f:
f.write(track_data)
video_id += 1
if not short_video:
txt_path=f"result_{video_name}_{video_id}.txt"
txt_path=os.path.join(dst_class_path,txt_path)
with open(txt_path, 'w') as f:
f.write(track_data)
video_id+=1
if __name__ == '__main__':
gen_dataset()
|
[
"qqlishuang@gmail.com"
] |
qqlishuang@gmail.com
|
1f0bb0dbee3cb5c8fdeb8522e150fe9eb161a007
|
7b869138fb194dcd4901d9b4f299fcde67148e85
|
/550C/DivisibleByEight.py
|
5f0f62b2924791aa211a50aaad6b93d39f171284
|
[] |
no_license
|
CrabBucket/Super-Competitive-Competivie-Programming
|
badc4b17687d66d5c2eb11c026392e2c08eabf59
|
706dbcb8405fbb10ff8e88b2f72c56acfacdeb0c
|
refs/heads/master
| 2022-12-24T02:53:14.226783
| 2020-09-20T18:38:16
| 2020-09-20T18:38:16
| 271,158,791
| 0
| 0
| null | 2020-07-02T00:01:01
| 2020-06-10T02:35:03
|
C++
|
UTF-8
|
Python
| false
| false
| 1,287
|
py
|
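# Find a subsequence of the input's digits that forms a multiple of 8 by recursively
# deleting one digit at a time (results memoised in dptable). A '0' digit is an
# immediate YES, trailing odd digits are dropped first, and an all-odd number can
# never work because a multiple of 8 must end in an even digit.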
totaliters = 0
def removeEle(string,index):
return string[:index] + string[index+1:]
dptable = {}
def byEight(num):
global totaliters
if num in dptable:
return dptable[num]
#print(num)
if num%8 == 0:
#print(num)
dptable[num] = (num,True)
return (num,True)
else:
totaliters+=1
if num<10:
dptable[num] = (-1,False)
return (-1,False)
index = 0
number = str(num)
#print(number)
if totaliters > 100000:
print("NO")
exit()
for _char in number:
temp = byEight(int(removeEle(number,index)))
if temp[1]:
dptable[num] = temp
return temp
index+=1
dptable[num] = (-1,False)
return (-1,False)
case = input()
if '0' in case:
print("YES")
print('0')
exit()
for x in range(len(case))[::-1]:
if int(case[x]) % 2 != 0:
continue
case = case[:x+1]
break
superodd = True
for char in case:
if int(char) % 2 == 0:
superodd = False
break
if superodd:
print("NO")
exit()
answer = byEight(int(case))
if answer[1]:
print("YES")
print(answer[0])
else:
print("NO")
|
[
"tom@mcratracing.com"
] |
tom@mcratracing.com
|
dafca90b588ffde69707a03ec8a7cc7080e5d2c3
|
a02e5905fa556ec962f65a80fcaa57b1ff38d177
|
/code/custom.py
|
c96a8fc5087f255c1186a0897a9bf22715e27f1f
|
[] |
no_license
|
Xueelei/Computer-Vision-for-Melanoma-Detection
|
991e803bffd8c2d05e672adf9764b82838effa06
|
871c8d5143f5622198c68c89ffce7e10d713cb2f
|
refs/heads/master
| 2020-08-07T00:52:27.790090
| 2019-11-01T21:01:28
| 2019-11-01T21:01:28
| 213,228,063
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,346
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 31 16:43:15 2019
@author: dell
"""
import numpy as np
import util
from matplotlib import pyplot as plt
import skimage.io
from skimage import transform
import numpy as np
import scipy.ndimage
import skimage.color
import sklearn.cluster
import scipy.spatial.distance
import visual_words
import skimage.io
def extract_filter_responses(image):
'''
Extracts the filter responses for the given image.
[input]
* image: numpy.ndarray of shape (H,W) or (H,W,3)
[output]
* filter_responses: numpy.ndarray of shape (H,W,3F)
'''
''' Check image to make it a floating point with range[0,1] '''
    if image.max() > 1:
image = image.astype('float')/255
''' Convert to 3 channels if not '''
if len(image.shape) == 2:
image = np.tile(image[:, np.newaxis], (1, 1, 3))
if image.shape[2] == 4:
image = image[:,:,0:3]
''' Convert image into lab color space '''
image = skimage.color.rgb2lab(image)
''' Apply filters '''
scales = [1, 2, 4, 8, 8*np.sqrt(2)]
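    # filter bank: 4 filter types (Gaussian, Laplacian of Gaussian, and the first
    # derivative of a Gaussian along each axis) x 5 scales x 3 Lab channels = 60 responses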
for scale in range(len(scales)):
for c in range(3):
#img = skimage.transform.resize(image, (int(ss[0]/scales[i]),int(ss[1]/scales[i])),anti_aliasing=True)
img = scipy.ndimage.gaussian_filter(image[:,:,c],sigma=scales[scale])
if scale == 0 and c == 0:
filter_responses = img[:,:,np.newaxis]
else:
filter_responses = np.concatenate((filter_responses,img[:,:,np.newaxis]),axis=2)
for c in range(3):
img = scipy.ndimage.gaussian_laplace(image[:,:,c],sigma=scales[scale])
filter_responses = np.concatenate((filter_responses,img[:,:,np.newaxis]),axis=2)
for c in range(3):
img = scipy.ndimage.gaussian_filter(image[:,:,c],sigma=scales[scale],order=[0,1])
filter_responses = np.concatenate((filter_responses,img[:,:,np.newaxis]),axis=2)
for c in range(3):
img = scipy.ndimage.gaussian_filter(image[:,:,c],sigma=scales[scale],order=[1,0])
filter_responses = np.concatenate((filter_responses,img[:,:,np.newaxis]),axis=2)
return filter_responses
def get_visual_words(image,dictionary):
'''
Compute visual words mapping for the given image using the dictionary of visual words.
[input]
* image: numpy.ndarray of shape (H,W) or (H,W,3)
[output]
* wordmap: numpy.ndarray of shape (H,W)
'''
# ----- TODO -----
filter_responses = extract_filter_responses(image)
H, W = image.shape[0], image.shape[1]
filter_responses = filter_responses.reshape(H*W, -1)
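    # assign every pixel to its nearest dictionary word (Euclidean distance in filter-response space)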
dists = scipy.spatial.distance.cdist(filter_responses, dictionary)
wordmap = np.argmin(dists, axis = 1).reshape(H,W)
    return wordmap
def compute_dictionary(num_workers):
'''
Creates the dictionary of visual words by clustering using k-means.
[input]
* num_workers: number of workers to process in parallel
[saved]
* dictionary: numpy.ndarray of shape (K,3F)
'''
train_data = np.load("../data/train_data.npz")
# ----- TODO -----
''' iterate through the paths to read images '''
i = 0 # index of training image
alpha = 50 # alpha between 50 and 500
K = 100 # K between 100 and 200
for image_path in train_data.f.files:
#print(i)
args = i, alpha, image_path
compute_dictionary_one_image(args)
current_compute = np.load(str(i)+".npy")
if i==0:
filter_responses = current_compute
else:
filter_responses = np.append(filter_responses, current_compute, axis=0)
i+=1
np.save("filter_responses.npy",filter_responses)
#filter_responses = np.load("filter_responses.npy")
kmeans = sklearn.cluster.KMeans(n_clusters = K,random_state=0).fit(filter_responses)
dictionary = kmeans.cluster_centers_
np.save("dictionary.npy",dictionary)
def compute_dictionary_one_image(args):
'''
Extracts random samples of the dictionary entries from an image.
This is a function run by a subprocess.
[input]
* i: index of training image
* alpha: number of random samples
* image_path: path of image file
* time_start: time stamp of start time
[saved]
* sampled_response: numpy.ndarray of shape (alpha,3F)
'''
i,alpha,image_path = args
# ----- TODO -----
image = skimage.io.imread('../data/'+image_path)
image = image.astype('float')/255
image = transform.downscale_local_mean(image, (3,3,1))
filter_response = extract_filter_responses(image)
''' get random pixels '''
#alpha_response = (np.random.permutation(filter_response))
alpha_response = np.zeros(shape = (alpha,filter_response.shape[2]))
for j in range(alpha):
row = np.random.randint(0,filter_response.shape[0])
col = np.random.randint(0,filter_response.shape[1])
alpha_response[j]=filter_response[row][col]
np.save(str(i)+".npy",alpha_response)
def build_recognition_system(num_workers):
'''
Creates a trained recognition system by generating training features from all training images.
[input]
* num_workers: number of workers to process in parallel
[saved]
* features: numpy.ndarray of shape (N,K*(4^layer_num-1)/3)
* labels: numpy.ndarray of shape (N)
* dictionary: numpy.ndarray of shape (K,3F)
* SPM_layer_num: number of spatial pyramid layers
'''
train_data = np.load("../data/train_data.npz")
dictionary = np.load("dictionary.npy")
# ----- TODO -----
SPM_layer_num = 4
labels = train_data.f.labels
N = len(labels)
dict_size = dictionary.shape[0]
features = np.zeros((N, dict_size*((4**SPM_layer_num-1)//3)))
files = train_data.f.files
i=0
for image_path in files:
feature = get_image_feature(image_path,dictionary,SPM_layer_num,dict_size)
features[i] = feature
i+=1
#print(i)
trained_system = features, labels, dictionary, SPM_layer_num
np.save("trained_system.npz",trained_system)
def evaluate_recognition_system(num_workers=2):
'''
Evaluates the recognition system for all test images and returns the confusion matrix.
[input]
* num_workers: number of workers to process in parallel
[output]
* conf: numpy.ndarray of shape (8,8)
* accuracy: accuracy of the evaluated system
'''
test_data = np.load("../data/test_data.npz")
trained_system = np.load("trained_system.npz.npy", allow_pickle=True)  # the tuple saved above is stored as a pickled object array, which newer numpy refuses to load by default
# ----- TODO -----
features, train_labels, dictionary, SPM_layer_num = trained_system
test_labels = test_data.f.labels
files = test_data.f.files
accurate = 0
count=0
conf = [[0 for i in range(8)] for i in range(8)]
for i in range(len(files)):
word_hist = get_image_feature(files[i],dictionary,SPM_layer_num,dictionary.shape[0])
distance = distance_to_set(word_hist, features)
label = train_labels[np.argmax(distance)]
conf[test_labels[i]][label]+=1
count+=1
if test_labels[i]==label:
accurate+=1
accuracy = accurate/count
#print("accurate:",test_labels[i],"predict:", label, "accuracy:",accuracy)
conf = np.array(conf)
return conf, accuracy
def distance_to_set(word_hist,histograms):
'''
Compute similarity between a histogram of visual words with all training image histograms.
[input]
* word_hist: numpy.ndarray of shape (K*(4^layer_num-1)/3)
* histograms: numpy.ndarray of shape (N,K*(4^layer_num-1)/3)
[output]
* sim: numpy.ndarray of shape (N)
'''
# ----- TODO -----
sim = np.minimum(word_hist, histograms)
return np.sum(sim, axis=1)
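# Worked example of the histogram-intersection similarity (illustrative values):
# word_hist = [0.2, 0.5, 0.3] against a training histogram [0.3, 0.4, 0.3]
# gives an element-wise minimum of [0.2, 0.4, 0.3], i.e. similarity 0.9;
# identical L1-normalised histograms score 1.0 and disjoint ones score 0.0.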
def get_image_feature(file_path,dictionary,layer_num,K):
'''
Extracts the spatial pyramid matching feature.
[input]
* file_path: path of image file to read
* dictionary: numpy.ndarray of shape (K,3F)
* layer_num: number of spatial pyramid layers
* K: number of clusters for the word maps
[output]
* feature: numpy.ndarray of shape (K*(4^layer_num-1)/3)
'''
# ----- TODO -----
image = skimage.io.imread('../data/'+file_path)
image = image.astype('float')/255
image = transform.downscale_local_mean(image, (3,3,1))
wordmap = visual_words.get_visual_words(image, dictionary)
feature = get_feature_from_wordmap_SPM(wordmap, layer_num, K)
return feature
def get_feature_from_wordmap(wordmap,dict_size):
'''
Compute histogram of visual words.
[input]
* wordmap: numpy.ndarray of shape (H,W)
* dict_size: dictionary size K
[output]
* hist: numpy.ndarray of shape (K)
'''
# ----- TODO -----
hist,bins = np.histogram(wordmap, bins=list(range(dict_size + 1)), density=True)
return hist
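# Note: with unit-width integer bins and density=True the histogram sums to 1.
# For example, a 2x2 wordmap [[0, 0], [1, 2]] with dict_size = 3 gives
# hist = [0.5, 0.25, 0.25].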
def get_feature_from_wordmap_SPM(wordmap,layer_num,dict_size):
'''
Compute histogram of visual words using spatial pyramid matching.
[input]
* wordmap: numpy.ndarray of shape (H,W)
* layer_num: number of spatial pyramid layers
* dict_size: dictionary size K
[output]
* hist_all: numpy.ndarray of shape (K*(4^layer_num-1)/3)
'''
# ----- TODO -----
H = wordmap.shape[0]
W = wordmap.shape[1]
# compute the finest, L=2
step_h = H // 4
step_w = W // 4
hist_all = np.zeros(dict_size*((4**layer_num-1)//3))
i,h,w = 0,0,0
for h in range(0, step_h*4, step_h):
for w in range(0, step_w*4, step_w):
sub_wordmap = wordmap[h:h+step_h, w:w+step_w]
sub_hist,sub_bins = np.histogram(sub_wordmap, bins=list(range(dict_size + 1)))
hist_all[i: i+dict_size] = np.divide(sub_hist, 2)
i+=dict_size
# compute the finer, L=1
sub_hist = (hist_all[0: dict_size]
+hist_all[dict_size: dict_size*2]
+hist_all[dict_size*4: dict_size*5]
+hist_all[dict_size*5: dict_size*6])
hist_all[i:i+dict_size] = np.divide(sub_hist, 4)
i+=dict_size
sub_hist = (hist_all[dict_size*2: dict_size*3]
+hist_all[dict_size*3: dict_size*4]
+hist_all[dict_size*6: dict_size*7]
+hist_all[dict_size*7: dict_size*8])
hist_all[i:i+dict_size] = np.divide(sub_hist, 4)
i+=dict_size
sub_hist = (hist_all[dict_size*8: dict_size*9]
+hist_all[dict_size*9: dict_size*10]
+hist_all[dict_size*12: dict_size*13]
+hist_all[dict_size*13: dict_size*14])
hist_all[i:i+dict_size] = np.divide(sub_hist, 4)
i+=dict_size
sub_hist = (hist_all[dict_size*10: dict_size*11]
+hist_all[dict_size*11: dict_size*12]
+hist_all[dict_size*14: dict_size*15]
+hist_all[dict_size*15: dict_size*16])
hist_all[i:i+dict_size] = np.divide(sub_hist, 4)
i+=dict_size
# compute the coarsest, L=0
sub_hist = (hist_all[dict_size*16: dict_size*17]
+hist_all[dict_size*17: dict_size*18]
+hist_all[dict_size*18: dict_size*19]
+hist_all[dict_size*19: dict_size*20])
hist_all[i:i+dict_size] = np.divide(sub_hist, 4)
# L1 norm
hist_all = np.divide(hist_all, np.sum(hist_all))
return hist_all
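# Size check, assuming K = 100 words and the 3 pyramid levels filled above
# (16 + 4 + 1 = 21 cells): the populated part of the feature is 21 * K = 2100
# entries long, i.e. K * (4**3 - 1) / 3; with layer_num = 4 the allocated
# vector is K * (4**4 - 1) / 3 = 8500 long and the remaining entries stay zero.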
if __name__ == '__main__':
num_cores = util.get_num_CPU()
path_img = "../data/park/labelme_evuftvyfpanmmab.jpg"
image = skimage.io.imread(path_img)
image = image.astype('float')/255
image = transform.downscale_local_mean(image, (3,3,1))
filter_responses = visual_words.extract_filter_responses(image)
util.display_filter_responses(filter_responses)
compute_dictionary(num_workers=num_cores)
dictionary = np.load('dictionary.npy')
wordmap = get_visual_words(image,dictionary)
filename="wordmap.jpg"
util.save_wordmap(wordmap, filename)
build_recognition_system(num_workers=num_cores)
conf, accuracy = evaluate_recognition_system(num_workers=num_cores)
print(conf)
print(np.diag(conf).sum()/conf.sum())
|
[
"xuelei@andrew.cmu.edu"
] |
xuelei@andrew.cmu.edu
|
f9aac5699d1e1c4bcbcdff38e4792101d207fcfc
|
8bf0babe2fede24e8727b3f04352dd1032631e1e
|
/news/netease.py
|
c48249c132560c9e56cce91e935103c2fe832088
|
[] |
no_license
|
maochuanwangzi/anna-news-server
|
80dfe4f95cd68291b5f071ea8f69b9c2f62e36f4
|
73b262f132013bb138f125195e0780449dc560fe
|
refs/heads/master
| 2020-03-19T08:30:02.602188
| 2018-05-15T11:40:27
| 2018-05-15T11:40:27
| 136,209,181
| 1
| 0
| null | 2018-06-05T17:03:14
| 2018-06-05T17:03:14
| null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
#!/usr/bin/env python
# coding=utf-8
def get_news_from_netease(site_url):
res = {'result': site_url}
return res
|
[
"noreply@github.com"
] |
maochuanwangzi.noreply@github.com
|
01e00b4ca543649fd4254ff5114f45c0090e5d27
|
61ced4644ccf4fe82a1d41c146388f0cc2b646fb
|
/asteroid_detection/features.py
|
8652a5dfc5b2a32506196578de1714bbed9344a8
|
[] |
no_license
|
cmccachern/asteroid_detection
|
89ff50f325db49911f27a70cb6f1aecfa5eb0a91
|
fba94a7008b611c8e4cda2a69cc7f601a7a49b7f
|
refs/heads/master
| 2022-11-28T10:21:23.098023
| 2019-11-12T00:55:07
| 2019-11-12T00:55:07
| 208,173,427
| 0
| 0
| null | 2022-11-22T04:16:26
| 2019-09-13T01:10:55
|
Python
|
UTF-8
|
Python
| false
| false
| 6,695
|
py
|
"""
Functions for manipulating fits images to find asteroids.
"""
#pylint: disable=no-member
#pylint: disable=unsubscriptable-object
import numpy as np
from scipy.signal import correlate2d
import cv2
from astropy.io import fits
import matplotlib.pyplot as plt
from sdss import fits_from_rcf, jpg_from_rcf
def plot_histogram(img):
"""
Plot the RGB histogram of an image.
"""
rgb_hist = rgb_histogram(img)
plt.figure()
for color, hist in rgb_hist.items():
plt.plot(hist, color=color)
plt.xlim([0, 256])
def rgb_histogram(img, channels=["r", "g", "b"]):
"""
Find the rgb histogram
"""
hist = {}
for ii, color in enumerate(channels):
hist[color] = cv2.calcHist([img], [ii], None, [256], [0, 256])
return hist
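# cv2.calcHist arguments, for reference: list of source images, channel index,
# optional mask (None here), number of bins (256), and value range [0, 256).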
def data_from_fits(fits_file):
"""
Return the image data from a fits file.
"""
hdul = fits.open(fits_file)
data = hdul[0].data
return data
def asteroid(img):
"""
Hardcoded location for known asteroid in test image.
"""
return img[170:205, 200:230]
def other_object(img):
"""
Hardcoded location for non-asteroid object in test image.
"""
return img[500:570, 1000:1070]
def galaxy(img):
"""
Hardcoded location for galaxy in test image.
"""
return img[420:490, 710:770]
def plot_object(fits_file, object_getter=asteroid):
"""
Show the image of an object in a fits file.
This function uses an object getter function to get the cropped data from a larger 2D array.
"""
data = data_from_fits(fits_file)
data = object_getter(data)
plt.figure()
plt.title(fits_file)
plt.imshow(np.log10(data))
def plot_rgb(r_fits, g_fits, b_fits, object_getter=asteroid):
"""
Plot an rgb image with area cropped by an object getter.
"""
data = [None, None, None]
for ii, fits_file in enumerate([r_fits, g_fits, b_fits]):
data[ii] = data_from_fits(fits_file)
data[ii] = object_getter(data[ii])
plt.figure()
plt.imshow(data[ii])
data = np.dstack(data)
plt.figure()
plt.imshow(data)
def show_corls():
"""
Test function that shows correlations between multiple fit files.
"""
datar = data_from_fits("752_1_373_r.fits")
datar = other_object(datar)
datag = data_from_fits("752_1_373_g.fits")
datag = other_object(datag)
corl = correlate2d(datar, datag, mode="same")
plt.figure()
plt.imshow(corl)
datar = data_from_fits("752_1_373_r.fits")
datar = galaxy(datar)
datag = data_from_fits("752_1_373_g.fits")
datag = galaxy(datag)
corl = correlate2d(datar, datag, mode="same")
plt.figure()
plt.imshow(corl)
datar = data_from_fits("752_1_373_r.fits")
datar = asteroid(datar)
datag = data_from_fits("752_1_373_g.fits")
datag = asteroid(datag)
corl = correlate2d(datar, datag, mode="same")
plt.figure()
plt.imshow(corl)
def mask_img(img, mask):
"""
Return an image with all the masked data zeroed out.
"""
masked_img = np.zeros(img.shape)
masked_img[mask] = img[mask]
return masked_img
def find_objects(img, threshold=.3):
"""
Return an array of object centers from an image.
"""
thresholded_img = np.uint8(img > threshold)
_, markers = cv2.connectedComponents(thresholded_img)
object_centers = []
for ii in range(1, np.max(markers) + 1):  # label 0 is the background, so include the last label
masked_img = mask_img(img, markers == ii)
object_index = np.argmax(masked_img)
object_center = np.unravel_index(object_index, img.shape)
object_centers.append(object_center)
return np.array(object_centers)
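# Usage sketch (with `img` a hypothetical 2-D image array):
# centers = find_objects(img, threshold=.3)
# returns an (N, 2) array of (row, col) positions, one per connected component
# above the threshold, taking the brightest pixel of each component.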
class OutOfBounds(Exception):
"""
Error that is raised when trying to access image data that does not exist.
"""
pass #pylint: disable=unnecessary-pass
def crop(img, center, width, height):
"""
Crop an image.
"""
left = int(center[0] - np.floor(width/2.0))
right = left + width
bottom = int(center[1] - np.floor(height/2.0))
top = bottom + height
if left < 0:
raise OutOfBounds("Left side of crop window is outside of image")
if bottom < 0:
raise OutOfBounds("Bottom of crop window is outside of image")
if right > img.shape[0]:
raise OutOfBounds("Right side of crop window is outside of image")
if top > img.shape[1]:
raise OutOfBounds("Top of crop window is outside of image")
return img[left:right, bottom:top]
def crop_all(images, center, width, height):
""""
Crop multiple bands of fits file image data
"""
cropped_images = {}
for band, img in images.items():
cropped_images[band] = crop(img, center, width, height)
return cropped_images
def max_corl_offset(img1, img2):
"""
Return the maximum value in the cross correlation between two images.
"""
assert img1.shape == img2.shape, "Images must be the same shape"
corl = correlate2d(img1, img2, mode="same")
corl_max = np.argmax(corl)
return np.unravel_index(corl_max, img1.shape)
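# Quick check, under the assumption that both inputs are the same (w, h) image:
# the cross-correlation then peaks at (or near) the centre, so the returned
# index is roughly (w // 2, h // 2); a source that moves between the two
# exposures shifts this peak away from the centre by roughly its displacement.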
def find_asteroids(images, crop_width=50, crop_height=50):
"""
Use a correlation technique to find asteroids in images.
"""
objects = find_objects(images["r"])
asteroid_candidates = []
for obj in objects:
try:
cropped_images = crop_all(images, obj, crop_width, crop_height)
except OutOfBounds:
continue
corl_max_rg = max_corl_offset(cropped_images["r"], cropped_images["g"]) - np.array([10, 21])
corl_max_ri = max_corl_offset(cropped_images["r"], cropped_images["i"]) - np.array([20, 26])
if np.sum(np.abs(corl_max_rg)) > 1 and \
np.sum(np.abs(corl_max_ri)) > 1 and \
np.sum(np.abs(corl_max_rg - corl_max_ri)) > 1:
asteroid_candidates.append(obj)
return np.array(asteroid_candidates)
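# Heuristic behind the thresholds (an interpretation, not spelled out above):
# the SDSS bands are exposed at slightly different times, so a static source
# keeps its correlation peak at a fixed inter-band offset while a moving
# asteroid shifts it; the hard-coded [10, 21] and [20, 26] offsets appear to
# be the expected static-source peak positions for the r-g and r-i pairs.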
def main():
"""
Main function.
"""
run = 756#752
camcol = 1
field = 314#373
fits_file = fits_from_rcf(run, camcol, field)
images = {"r": fits_file["r"][0].data,
"g": fits_file["g"][0].data,
"i": fits_file["i"][0].data,
}
#images = {"r": data_from_fits("752_1_373_r.fits"),
# "g": data_from_fits("752_1_373_g.fits"),
# "i": data_from_fits("752_1_373_i.fits"),
# }
asteroids = find_asteroids(images)
print(asteroids)
img = jpg_from_rcf(run, camcol, field)#plt.imread("image.jpg")
plt.figure()
plt.imshow(img)
plt.figure()
plt.imshow(np.log10(fits_file["r"][0].data))
plt.scatter(asteroids.T[1], asteroids.T[0], color="r")
plt.show()
if __name__ == "__main__":
main()
|
[
"carey@mccachern.com"
] |
carey@mccachern.com
|
8651769e811843c8771b34777e0cd3f9f73886cd
|
20674c17d815214bf66b75be686bb8a45c0f5914
|
/version1/382_Linked_List_Random_Note.py
|
b3eb2bb00277f1ab4588a3185e4daf65f981fec9
|
[] |
no_license
|
moontree/leetcode
|
e7b670969fe20785b15aae82996875fd66de1b08
|
f2bf9b13508cd01c8f383789569e55a438f77202
|
refs/heads/master
| 2021-05-20T20:36:45.615420
| 2020-04-02T09:15:26
| 2020-04-02T09:15:26
| 252,408,563
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,799
|
py
|
"""
Given a singly linked list, return a random node's value from the linked list.
Each node must have the same probability of being chosen.
Follow up:
What if the linked list is extremely large and its length is unknown to you?
Could you solve this efficiently without using extra space?
Example:
// Init a singly linked list [1,2,3].
ListNode head = new ListNode(1);
head.next = new ListNode(2);
head.next.next = new ListNode(3);
Solution solution = new Solution(head);
// getRandom() should return either 1, 2, or 3 randomly. Each element should have equal probability of returning.
solution.getRandom();
"""
from list_helper import *
import random
import collections
class Solution(object):
def __init__(self, head):
"""
@param head The linked list's head.
Note that the head is guaranteed to be not null, so it contains at least one node.
:type head: ListNode
"""
self._head = head  # kept so the reservoir-sampling variant below can re-walk the list
cur = head
self.nums = []
while cur:
self.nums.append(cur.val)
cur = cur.next
def getRandom(self):
"""
Returns a random node's value.
:rtype: int
"""
step = random.randint(0, len(self.nums) - 1)
return self.nums[step]
def _get_random_of_stream(self):
"""
Returns a random node's value.
:rtype: int
"""
h = self._head
if h is None:
return None
count = 0
res = h.val
while h:
rv = random.randint(0, count)
if rv == 0:
res = h.val
h = h.next
count += 1
return res
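# Why this gives a uniform pick (reservoir sampling with k = 1): the i-th node
# (1-indexed) replaces the current result with probability 1/i and survives
# each later step j with probability (j - 1)/j, so its overall probability is
# 1/i * i/(i+1) * ... * (n-1)/n = 1/n for a list of n nodes, without knowing n
# in advance or storing the values.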
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
solution = Solution(head)
for i in xrange(5):
print solution.getRandom()
|
[
"zhangchao@zhangchaodeMacBook-Pro.local"
] |
zhangchao@zhangchaodeMacBook-Pro.local
|