blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c0d8a8fb2dcb378f63e1168821dbac2059c37e96 | 930309163b930559929323647b8d82238724f392 | /dp_b.py | 574f94d3f786fc45d2d25c12a7b162d0c6140fbe | [] | no_license | GINK03/atcoder-solvers | 874251dffc9f23b187faa77c439b445e53f8dfe1 | b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7 | refs/heads/master | 2021-11-07T14:16:52.138894 | 2021-09-12T13:32:29 | 2021-09-12T13:32:29 | 11,724,396 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 244 | py |
# Read N (number of positions) and K (maximum step size) followed by the
# per-position values C, then compute the cheapest walk from index 0 to
# index N-1 where moving from i to i+k (0 <= k <= K; k == 0 is a no-op
# relaxation) costs |C[i+k] - C[i]|.
N, K = map(int, input().split())
*C, = map(int, input().split())
INF = float("inf")
cost = [INF] * (N + 10)  # cost[i]: minimum total cost to reach index i
cost[0] = 0
for i in range(N):
    # Relax every position reachable from i in a single move.
    for step in range(K + 1):
        j = i + step
        if j < N:
            candidate = cost[i] + abs(C[j] - C[i])
            if candidate < cost[j]:
                cost[j] = candidate
print(cost[N - 1])
| [
"gim.kobayashi@gmail.com"
] | gim.kobayashi@gmail.com |
dc5a4a95918a4b0fc3f63c865fdb46927e0bc44e | a08225934c425be313a12975c9563a72ded58be6 | /EDU105/45.py | b1095d784edeebeec5958904b3976b46f8ad450a | [] | no_license | marcus-aurelianus/codeforce | 27c966554dee9986f23fb2925bd53e6cceb8b9e9 | 4764df151ade7806e32b6c88283a2de946f99e16 | refs/heads/master | 2023-03-18T09:30:55.042594 | 2021-03-12T18:14:08 | 2021-03-12T18:14:08 | 231,387,022 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | def jump(nums):
    # Greedy/BFS layering: [start, end] is the set of indices reachable with
    # exactly `step` jumps; `maxend` tracks the farthest index reachable with
    # one additional jump.
    n, start, end, step = len(nums), 0, 0, 0
    while end < n - 1:
        step += 1
        # NOTE(review): maxend starts at end + 1, implicitly assuming the next
        # index is always reachable; for an input like [0, 0] this returns 1
        # even though the last index cannot actually be reached.
        maxend = end + 1
        for i in range(start, end + 1):
            # Early exit: the last index is reachable from i with this jump.
            if i + nums[i] >= n - 1:
                return step
            maxend = max(maxend, i + nums[i])
        # Advance the frontier to the newly reachable layer of indices.
        start, end = end + 1, maxend
    return step
# Smoke-test driver (see NOTE above about unreachable inputs).
print(jump([0,0]))
| [
"37787424+marcus-aurelianus@users.noreply.github.com"
] | 37787424+marcus-aurelianus@users.noreply.github.com |
9623de913778ac810ee55514abd5777510296aef | 009628e385aca8552dad5c1c5cba018ca6e5954d | /scripts/drawcurvature | 245a84fb58ff9543d068294490983a068708b04d | [] | no_license | csrocha/python-mtk | 565ebcfeb668a6409d48135bf081321d8121b263 | c3ba520f55c2e204feb6b98251abcb046e51c6cd | refs/heads/main | 2023-01-12T02:46:44.457520 | 2020-11-17T20:20:59 | 2020-11-17T20:20:59 | 313,939,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,957 | #!/usr/bin/python
"""
An example showing the norm and phase of an atomic orbital: isosurfaces of
the norm, with colors displaying the phase.
This example shows how you can apply a filter on one data set, and dislay
a second data set on the output of the filter. Here we use the contour
filter to extract isosurfaces of the norm of a complex field, and we
display the phase of the field with the colormap.
The field we choose to plot is a simplified version of the 3P_y atomic
orbital for hydrogen-like atoms.
The first step is to create a data source with two scalar datasets. The
second step is to apply filters and modules, using the
'set_active_attribute' filter to select on which data these apply.
Creating a data source with two scalar datasets is actually slighlty
tricky, as it requires some understanding of the layout of the datasets
in TVTK. The reader is referred to :ref:`data-structures-used-by-mayavi`
for more details.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Create the data ############################################################
import numpy as np
import sys
from optparse import OptionParser
from mtk.geometry.vol import load
# Round each coordinate and convert to an integer numpy array
# (Python 2: map() returns a list here).
_i = lambda x: np.array(map(round, x), dtype=int)
# Parsing command line
parser = OptionParser()
parser.add_option("-o", "--output", dest="outfile", default=None,
                  help="write figure to FILE", metavar="FILE")
parser.add_option("-a", "--azimuth", dest="azimuth", default=None,
                  help="set the azimuth of the view", metavar="ANGLE")
parser.add_option("-e", "--elevation", dest="elevation", default=None,
                  help="set the elevation of the view", metavar="ANGLE")
parser.add_option("-r", "--rotate", dest="rotate", default=False,
                  help="if set rotate 0:r:360, 0:r:180 over body", metavar="ANGLE")
parser.add_option("-d", "--distance", dest="distance", default=None,
                  help="set the distance of the view", metavar="UNITS")
parser.add_option("-W", "--width", dest="width", default=900,
                  help="set the width of the figure", metavar="PIXELS")
parser.add_option("-H", "--height", dest="height", default=600,
                  help="set the height of the figure", metavar="PIXELS")
(options, args) = parser.parse_args()
outfile = options.outfile
azimuth = options.azimuth
elevation = options.elevation
distance = options.distance
rotate = options.rotate
size = (int(options.width), int(options.height))
# Option values arrive as strings; convert the ones that were supplied.
if azimuth != None: azimuth = float(azimuth)
if elevation != None: elevation = float(elevation)
if distance != None: distance = float(distance)
# NOTE(review): when -r is not given, rotate defaults to False, so
# int(False) == 0 here and the `rotate != None` branch below is STILL taken,
# making np.arange(0, 360, 0) fail. The test was probably meant to be
# `if rotate:` — confirm before relying on the non-rotating path.
if rotate != None: rotate = int(rotate)
# Positional arguments: the body volume and the curvature volume files.
filename_body = args[0]
filename_curvature = args[1]
# Running
volA, mdA = load(filename_body)
volB, mdB = load(filename_curvature)
nmin = volA.min
nmax = volA.max
# Build per-axis slices of B covering A's bounding box in B's index space
# (volB.t presumably maps world coordinates to B indices — TODO confirm).
Bslice = map(lambda (a,b): slice(a,b+1),
    zip(_i(volB.t(nmin)),_i(volB.t(nmax))))
A = volA._data
B = volB._data[Bslice]
print A.shape
print B.shape
# Plot it ####################################################################
from enthought.mayavi import mlab
# NOTE(review): the figure size is hard-coded to (900, 600) here even though
# --width/--height were parsed into `size` above — likely an oversight.
mlab.figure(1, fgcolor=(0, 0, 0), bgcolor=(1, 1, 1), size=(900,600))
# We create a scalar field with the module of Phi as the scalar
src = mlab.pipeline.scalar_field(A)
# And we add the phase of Phi as an additional array
# This is a tricky part: the layout of the new array needs to be the same
# as the existing dataset, and no checks are performed. The shape needs
# to be the same, and so should the data. Failure to do so can result in
# segfaults.
src.image_data.point_data.add_array(np.real(B).T.ravel())
#src.image_data.point_data.add_array(np.real(B._data).T.ravel())
# We need to give a name to our new dataset.
src.image_data.point_data.get_array(1).name = 'solid'
# Make sure that the dataset is up to date with the different arrays:
src.image_data.point_data.update()
# We select the 'scalar' attribute, ie the norm of Phi
src2 = mlab.pipeline.set_active_attribute(src,
    point_scalars='scalar')
# Cut isosurfaces of the norm
contour = mlab.pipeline.contour(src2)
# Now we select the 'angle' attribute, ie the phase of Phi
contour2 = mlab.pipeline.set_active_attribute(contour,
    point_scalars='solid')
# And we display the surface. The colormap is the current attribute: the phase.
mlab.pipeline.surface(contour2, colormap='spectral', vmax=1., vmin=0.)
mlab.colorbar(title='Curvature', orientation='vertical', nb_labels=3)
if rotate != None:
    # Render one image per (azimuth, elevation) pair; `outfile` must contain
    # two %-format slots, since it is formatted with (a, e) below.
    for a in np.arange(0,360,rotate):
        for e in np.arange(0,180,rotate):
            print "Rendering:", outfile % (a, e)
            mlab.view(a,e,distance)
            mlab.savefig(outfile % (a, e), size)
else:
    mlab.view(azimuth,elevation,distance)
    if outfile != None:
        mlab.savefig(outfile, size)
    else:
        mlab.show()
| [
"cristian.rocha@moldeo.coop"
] | cristian.rocha@moldeo.coop | |
29eb97a2658069096f4798568142abee03c21992 | f58b8dd35674b396abe606d1890770f60bfeb655 | /utils/generate_coarse_patches.py | d97c878528522548f4e357329f2b49ceb7b09cdf | [] | no_license | Mancarl/BBR-Net | fbd6d1653b65a28de1267a2319c7aeb90d72c112 | a52adf186601f4c773ae9ad660f3069313dc0f29 | refs/heads/master | 2023-03-15T16:04:44.343846 | 2020-09-21T21:35:14 | 2020-09-21T21:35:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,141 | py | import os
import csv
import random
import cv2 as cv
from tqdm import tqdm
annotation_file = '/path/to/full/image/annotation/csv/file'
img_save_path = '/folder/to/save/simulated/coarse/annotations/patches'
csv_save_path = '/path/to/save/patches/annotation'
if not os.path.exists(img_save_path):
os.mkdir(img_save_path)
random.seed(10)
def read_csv():
    """Parse the full-image annotation CSV into ``{image_path: [box dicts]}``.

    Each row's first five columns are interpreted as
    ``image_file, x1, y1, x2, y2``; the coordinates are kept as strings,
    exactly as they appear in the file.
    """
    boxes_by_image = {}
    with open(annotation_file, 'r') as csv_fh:
        for row in csv.reader(csv_fh):
            img_file, x1, y1, x2, y2 = row[:5]
            boxes_by_image.setdefault(img_file, []).append(
                {'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2})
    return boxes_by_image
def crop_img(result, repeat=5):
    """For every annotated box larger than 6400 px^2, write `repeat` randomly
    expanded crops of the source image to `img_save_path` and append one CSV
    line per crop (crop path + box coordinates relative to the crop) to
    `csv_save_path`.

    NOTE(review): the relative coordinates below rely on Python 3 true
    division; under Python 2 they would truncate to integers.
    """
    with open(csv_save_path, 'a') as annotation_csv:
        for img_name, annotations in tqdm(result.items()):
            # Base name of the image without directory or extension.
            img_pre = os.path.splitext(os.path.split(img_name)[-1])[0]
            # NOTE(review): cv.imread returns None for unreadable paths, which
            # would make the .shape access below raise — TODO confirm inputs.
            img = cv.imread(img_name)
            img_shape = img.shape
            for a in annotations:
                x1 = int(a['x1'])
                x2 = int(a['x2'])
                y1 = int(a['y1'])
                y2 = int(a['y2'])
                for _ in range(repeat):
                    bbox = [x1, y1, x2, y2]
                    w = bbox[2] - bbox[0]
                    h = bbox[3] - bbox[1]
                    # Boxes of area <= 6400 are silently skipped.
                    if w * h > 6400:
                        # Total expansion is 10%-50% of each dimension...
                        w_expand_ratio = random.random() * 0.4 + 0.1
                        h_expand_ratio = random.random() * 0.4 + 0.1
                        w_expand = w_expand_ratio * w
                        h_expand = h_expand_ratio * h
                        # ...split randomly between the two sides.
                        w_shift_ratio = random.random()
                        h_shift_ratio = random.random()
                        left_x_shift = w_shift_ratio * w_expand
                        right_x_shift = (1 - w_shift_ratio) * w_expand
                        top_y_shift = h_shift_ratio * h_expand
                        bottom_y_shift = (1 - h_shift_ratio) * h_expand
                        # Expand and clamp the box to the image bounds.
                        bbox[0] = int(max((0, bbox[0] - left_x_shift)))
                        bbox[1] = int(max((0, bbox[1] - top_y_shift)))
                        bbox[2] = int(min((bbox[2] + right_x_shift, img_shape[1])))
                        bbox[3] = int(min((bbox[3] + bottom_y_shift, img_shape[0])))
                        new_x1, new_y1, new_x2, new_y2 = bbox
                        new_w = new_x2 - new_x1
                        new_h = new_y2 - new_y1
                        # Original box expressed relative to the crop (0..1).
                        rl_x1 = (x1 - new_x1) / new_w
                        rl_x2 = (x2 - new_x1) / new_w
                        rl_y1 = (y1 - new_y1) / new_h
                        rl_y2 = (y2 - new_y1) / new_h
                        crop_name = '{}_{}_{}_{}_{}.jpg'.format(img_pre, new_x1, new_y1, new_x2, new_y2)
                        crop_path = os.path.join(img_save_path, crop_name)
                        annotation_csv.write('{},{},{},{},{}\n'.format(crop_path, rl_x1, rl_y1, rl_x2, rl_y2))
                        cv.imwrite(crop_path, img[new_y1:new_y2, new_x1:new_x2])
if __name__ == "__main__":
    # Read the full-image annotations, then emit the simulated coarse crops.
    result = read_csv()
    crop_img(result)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
5ec27c40bff669672db5985ca6a7ea318d2a6968 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/intersectingDiscs_20200810184632.py | f3f37471c13443b0e75a79f07eb074f1d8d7f42e | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | def discs(A):
start = [i-j for i,j in enumerate(A)]
start.sort()
pairs = 0
for i in range(lena))
discs([1,5,2,1,4,0]) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
28c666be09e2102aa172bcd2c21c4db7396ed048 | 7b51b0e8ba88b8f3c57f5e210ff9847d0364b805 | /ircd/main.py | 7ecb7b0168e64d663ee65c4e0cf4df306f04dd02 | [] | no_license | marcuswanner/nameless-ircd | cd9324528ac890cb416b1b2b1207c4a2315bf12f | 9517b94fe622056f8ea0557403647f9f4ba1d717 | refs/heads/master | 2020-06-07T11:47:44.632012 | 2013-02-14T00:58:44 | 2013-02-14T00:58:44 | 8,111,587 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | #!/usr/bin/env python
import server
import user
import signal, traceback, asyncore
def hup(sig,frame):
    # SIGHUP handler (Python 2): hot-reload the server modules in dependency
    # order without restarting the process.
    print 'reload'
    try:
        reload(server.user)
        reload(server.services)
        reload(server)
    except:
        # NOTE(review): bare except intentionally keeps the daemon alive on
        # any reload failure; the traceback is printed for diagnosis.
        print 'Error reloading'
        print traceback.format_exc()
    else:
        print 'okay'
def main():
    # Parse command-line options, install the SIGHUP reload handler, start
    # the IRC server and enter the asyncore event loop (blocks forever).
    import argparse
    ap = argparse.ArgumentParser()
    ap.add_argument('--port',type=int,help='port to run on',default=6666)
    ap.add_argument('--host',type=str,help='bind host',default='127.0.0.1')
    ap.add_argument('--opt',type=str,help='options',default=None)
    args = ap.parse_args()
    signal.signal(signal.SIGHUP,hup)
    # Logging is enabled only by the literal option value "log".
    log = False
    if args.opt is not None:
        log = args.opt.strip() == 'log'
    server.Server((args.host,args.port),do_log=log)
    asyncore.loop()
if __name__ == '__main__':
    main()
| [
"ampernand@gmail.com"
] | ampernand@gmail.com |
d32d132b39d4dc9285e3107a855567bbacd7d460 | 17ec70a0387905f84f7fc1e3ee7f3428dd4e7874 | /Atcoder/abc121/b.py | 257111dde25af5efa0a73b912864825fe92a899e | [] | no_license | onikazu/ProgramingCompetitionPractice | da348e984b6bcb79f96f461d9df15a33730169b2 | 5a682943976bcac8646176feef9b70a6784abd8a | refs/heads/master | 2021-02-09T06:27:54.994621 | 2020-03-14T02:28:50 | 2020-03-14T02:28:50 | 244,252,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | n, m, c = list(map(int, input().split()))
# n, m and c were parsed from the first stdin line above. Read the m
# coefficients, then the n feature rows, and count the rows whose
# weighted sum plus c is strictly positive.
b = list(map(int, input().split()))
a = [list(map(int, input().split())) for _ in range(n)]
ans = 0
for row in a:
    score = sum(b[j] * row[j] for j in range(m)) + c
    if score > 0:
        ans += 1
print(ans)
| [
"programingmanagement@gmail.com"
] | programingmanagement@gmail.com |
454c1da25058863d752a9dcc89d5a2578166fa1d | 91fb65972d69ca25ddd892b9d5373919ee518ee7 | /pibm-training/sample-programs/multi_line_strings.py | 374fc51a28e3552ca4a41b12bda9efed67c2dbd2 | [] | no_license | zeppertrek/my-python-sandpit | c36b78e7b3118133c215468e0a387a987d2e62a9 | c04177b276e6f784f94d4db0481fcd2ee0048265 | refs/heads/master | 2022-12-12T00:27:37.338001 | 2020-11-08T08:56:33 | 2020-11-08T08:56:33 | 141,911,099 | 0 | 0 | null | 2022-12-08T04:09:28 | 2018-07-22T16:12:55 | Python | UTF-8 | Python | false | false | 101 | py | #multi_line_strings.py
mlstring = ''' x
y
z
w '''
print (mlstring) | [
"zeppertrek@gmail.com"
] | zeppertrek@gmail.com |
a30ee6100bf54d36acfa6dcbfb8ac2e84c954f05 | f4b8c90c1349c8740c1805f7b6b0e15eb5db7f41 | /starrez_client/models/meal_plan_dining_hall_item.py | 985270758cc56bd1c0f628e53c6adb80e28f2a26 | [] | no_license | CalPolyResDev/StarRezAPI | 012fb8351159f96a81352d6c7bfa36cd2d7df13c | b184e1863c37ff4fcf7a05509ad8ea8ba825b367 | refs/heads/master | 2021-01-25T10:29:37.966602 | 2018-03-15T01:01:35 | 2018-03-15T01:01:35 | 123,355,501 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,832 | py | # coding: utf-8
"""
StarRez API
This is a way to connect with the StarRez API. We are not the developers of the StarRez API, we are just an organization that uses it and wanted a better way to connect to it. # noqa: E501
OpenAPI spec version: 1.0.0
Contact: resdev@calpoly.edu
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class MealPlanDiningHallItem(object):
    """Swagger model for a StarRez ``MealPlanDiningHall`` record.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
        swagger_types (dict): maps attribute name to attribute type.
        attribute_map (dict): maps attribute name to the JSON key used by
            the StarRez API payload.
    """

    swagger_types = {
        'meal_plan_dining_hall_id': 'int',
        'description': 'str',
        'capacity_min': 'int',
        'capacity_max': 'int',
        'view_on_web': 'bool',
        'charge_item_id': 'int',
        'date_modified': 'str'
    }

    attribute_map = {
        'meal_plan_dining_hall_id': 'MealPlanDiningHallID',
        'description': 'Description',
        'capacity_min': 'CapacityMin',
        'capacity_max': 'CapacityMax',
        'view_on_web': 'ViewOnWeb',
        'charge_item_id': 'ChargeItemID',
        'date_modified': 'DateModified'
    }

    def __init__(self, meal_plan_dining_hall_id=None, description=None, capacity_min=None, capacity_max=None, view_on_web=None, charge_item_id=None, date_modified=None):  # noqa: E501
        """MealPlanDiningHallItem - a model defined in Swagger"""  # noqa: E501
        # Initialize every backing field to None, then funnel the supplied
        # (non-None) values through the property setters so that validation
        # such as the description length check is applied.
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None
        supplied = (
            ('meal_plan_dining_hall_id', meal_plan_dining_hall_id),
            ('description', description),
            ('capacity_min', capacity_min),
            ('capacity_max', capacity_max),
            ('view_on_web', view_on_web),
            ('charge_item_id', charge_item_id),
            ('date_modified', date_modified),
        )
        for attr, value in supplied:
            if value is not None:
                setattr(self, attr, value)

    @property
    def meal_plan_dining_hall_id(self):
        """Meal Plan Dining Hall identifier (``MealPlanDiningHallID``)."""
        return self._meal_plan_dining_hall_id

    @meal_plan_dining_hall_id.setter
    def meal_plan_dining_hall_id(self, value):
        self._meal_plan_dining_hall_id = value

    @property
    def description(self):
        """Description (at most 100 characters)."""
        return self._description

    @description.setter
    def description(self, value):
        # Mirror the API constraint: Description is capped at 100 characters.
        if value is not None and len(value) > 100:
            raise ValueError("Invalid value for `description`, length must be less than or equal to `100`")  # noqa: E501
        self._description = value

    @property
    def capacity_min(self):
        """Capacity Min."""
        return self._capacity_min

    @capacity_min.setter
    def capacity_min(self, value):
        self._capacity_min = value

    @property
    def capacity_max(self):
        """Capacity Max."""
        return self._capacity_max

    @capacity_max.setter
    def capacity_max(self, value):
        self._capacity_max = value

    @property
    def view_on_web(self):
        """View On Web flag."""
        return self._view_on_web

    @view_on_web.setter
    def view_on_web(self, value):
        self._view_on_web = value

    @property
    def charge_item_id(self):
        """Charge Item identifier."""
        return self._charge_item_id

    @charge_item_id.setter
    def charge_item_id(self, value):
        self._charge_item_id = value

    @property
    def date_modified(self):
        """Date Modified."""
        return self._date_modified

    @date_modified.setter
    def date_modified(self, value):
        self._date_modified = value

    def to_dict(self):
        """Returns the model properties as a dict"""
        serialized = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # One level deep only: serialize items that expose to_dict.
                serialized[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[attr] = value.to_dict()
            elif isinstance(value, dict):
                serialized[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                serialized[attr] = value
        return serialized

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, MealPlanDiningHallItem) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"fedorareis@gmail.com"
] | fedorareis@gmail.com |
42d50dc13fc694a4eae2e3ec7fd1000c7ea32dfb | 20e7350dc776897330884271c54998cfdfe680e8 | /MetaData/python/samples_utils.py | 3656399c9566dca92ccd58f0f01e391be3c52781 | [] | no_license | ResonantHbbHgg/flashgg | d6f8128e42202c24dee00529a8b33f1c55542bff | cbbf47066a8648c9b54dce788ddf5d7c7cd33e96 | refs/heads/master | 2021-01-21T07:34:15.824797 | 2015-04-07T18:42:28 | 2015-04-07T18:42:28 | 33,238,679 | 0 | 0 | null | 2015-04-01T09:28:44 | 2015-04-01T09:28:43 | null | UTF-8 | Python | false | false | 17,234 | py | from optpars_utils import *
from das_cli import get_data as das_query
from pprint import pprint
import os,json,fcntl,sys
from parallel import Parallel
from threading import Semaphore
# -------------------------------------------------------------------------------
def shell_expand(string):
    """Return *string* with '~' and environment variables expanded.

    Falsy input (None or the empty string) yields None.
    """
    if not string:
        return None
    return os.path.expanduser(os.path.expandvars(string))
# -------------------------------------------------------------------------------
def ask_user(question,responses=["y","n"]):
    # Prompt on stdout (Python 2 print with trailing comma keeps the cursor on
    # the same line) until the user types one of the accepted responses.
    # NOTE(review): mutable default argument; harmless here because the list
    # is never mutated.
    reply = None
    while not reply in responses:
        print "%s [%s]" % ( question, "".join(responses) ),
        reply = raw_input()
    return reply
# -------------------------------------------------------------------------------
class SamplesManager(object):
    """Bookkeeping for FLASHgg sample catalogs.

    The catalog is a JSON file mapping dataset names to their file lists;
    this class wraps DAS/EOS queries, parallel file validation and catalog
    read/write. Python 2 code (print statements, dict.iteritems).
    """
    def __init__(self,
                 catalog,
                 cross_sections=["$CMSSW_BASE/src/flashgg/MetaData/data/cross_sections.json"],
                 dbs_instance="prod/phys03",
                 queue=None
                 ):
        """
        Constructor:
        @catalog: json file used to read/write dataset information
        @cross_sections: json file where samples cross sections are stored
        @dbs_instance: DBS instance to use
        """
        self.cross_sections_ = {}
        self.dbs_instance_ = dbs_instance
        # Merge all cross-section files; later files override earlier keys.
        for xsecFile in cross_sections:
            fname = shell_expand(xsecFile)
            self.cross_sections_.update( json.loads( open(fname).read() ) )
        self.catalog_ = shell_expand(catalog)
        self.parallel_ = None
        self.sem_ = Semaphore()
        print "Will use the following datasets catalog:"
        print self.catalog_
        self.queue_ = queue

    def importFromDAS(self,datasets):
        """
        Import datasets from DAS to the catalog.
        @datasets: wildcard to be used in dataset query
        """
        catalog = self.readCatalog()
        print "Importing from das %s" % datasets
        # A wildcard is first resolved into the concrete list of datasets.
        if "*" in datasets:
            response = das_query("https://cmsweb.cern.ch","dataset dataset=%s | grep dataset.name" % datasets, 0, 0, False, self.dbs_instance_)
            datasets=[]
            for d in response["data"]:
                datasets.append( d["dataset"][0]["name"] )
        print "Datasets to import"
        print "\n".join(datasets)
        for dsetName in datasets:
            print "Importing %s" % dsetName
            files = self.getFilesFomDAS(dsetName)
            if dsetName in catalog:
                catalog[ dsetName ]["files"] = files
            else:
                catalog[ dsetName ] = { "files" : files }
        print "Writing catalog"
        self.writeCatalog(catalog)
        print "Done"

    def getFilesFomDAS(self,dsetName):
        """
        Read dataset files from DAS.
        @dsetName: dataset name
        """
        response = das_query("https://cmsweb.cern.ch","file dataset=%s | grep file.name,file.nevents" % dsetName, 0, 0, False, self.dbs_instance_)
        files=[]
        for d in response["data"]:
            # Keep the first entry per record that carries an event count.
            for jf in d["file"]:
                if "nevents" in jf:
                    files.append({ "name" : jf["name"], "nevents" : jf["nevents"] })
                    break
            ## files.append( { "name" : d["file"][0]["name"], "nevents" : d["file"][0]["nevents"] } )
        return files

    def importFromEOS(self,folders):
        """
        Import datasets from EOS to the catalog.
        @folders: EOS folders to be imported; a dataset name is asked
        interactively for each folder.
        """
        catalog = self.readCatalog()
        for folder in folders:
            dsetName = ""
            # A valid dataset name has exactly four "/"-separated fields.
            while not len(dsetName.split("/")) == 4:
                print "enter dataset name for folder %s" % folder,
                dsetName = raw_input()
            print "Importing %s as %s" % (folder,dsetName)
            files = self.getFilesFomEOS(folder)
            if dsetName in catalog:
                catalog[ dsetName ]["files"] = files
            else:
                catalog[ dsetName ] = { "files" : files }
        print "Writing catalog"
        self.writeCatalog(catalog)
        print "Done"

    def getFilesFomEOS(self,dsetName):
        """
        Read dataset files crawling EOS.
        @dsetName: dataset name
        """
        if not self.parallel_:
            self.parallel_ = Parallel(200,self.queue_)
        # `eos find` lists every file below the folder; event counts are not
        # available from EOS, so they are recorded as 0.
        ret,out = self.parallel_.run("/afs/cern.ch/project/eos/installation/0.3.15/bin/eos.select",["find",dsetName],interactive=True)[2]
        print out
        files = []
        for line in out.split("\n"):
            if line.endswith(".root"):
                files.append( {"name":line.replace("/eos/cms",""), "nevents":0} )
        return files

    def findDuplicates(self,dsetName):
        """
        Find duplicate job outputs in dataset.
        @dsetName: dataset name
        Note: not implemented
        """
        pass

    def invalidateBadFiles(self,dsetName):
        """
        Invalidate duplicate job output and corrupted files in DAS.
        @dsetName: dataset name
        Note: not implemented
        """
        pass

    def checkAllDatasets(self):
        """
        Look for corrupted files in the whole catalog.
        """
        catalog = self.readCatalog()
        self.parallel_ = Parallel(50,self.queue_)
        ## self.parallel_ = Parallel(1,self.queue_)
        print "Checking all datasets"
        for dataset in catalog.keys():
            self.checkDatasetFiles(dataset,catalog)
        # Collect the per-file outcomes and fold them back into the catalog.
        outcomes = self.parallel_.wait()
        for dsetName,ifile,fName,ret,out in outcomes:
            info = catalog[dsetName]["files"][ifile]
            if info["name"] != fName:
                print "Inconsistent outcome ", info["name"], dsetName,ifile,fName,ret,out
            else:
                if ret != 0:
                    info["bad"] = True
                else:
                    extraInfo = json.loads(str(out))
                    for key,val in extraInfo.iteritems():
                        info[key] = val
        print "Writing catalog"
        self.writeCatalog(catalog)
        print "Done"

    def checkDatasetFiles(self,dsetName,catalog=None):
        """
        Look for corrupted files in dataset.
        @dsetName: dataset name
        @catalog: catalog to update in place; when omitted, the catalog is
        read from disk, the checks are awaited, and the result is written back.
        """
        writeCatalog = False
        if not catalog:
            catalog = self.readCatalog()
            writeCatalog = True
        wait = False
        if not self.parallel_:
            self.parallel_ = Parallel(16,self.queue_)
            wait = True
        print "Checking dataset",dsetName
        info = catalog[dsetName]
        files = info["files"]
        print len(files)
        for ifile,finfo in enumerate(files):
            name = finfo["name"]
            self.parallel_.run(SamplesManager.checkFile,[self,name,dsetName,ifile])
        if wait:
            self.parallel_.wait()
            self.parallel_ = None
        if writeCatalog:
            self.writeCatalog(catalog)

    def reviewCatalog(self):
        """Interactively prune the catalog, then resolve primary-name clashes."""
        datasets,catalog = self.getAllDatasets()
        primaries = {}
        keepAll = False
        for d in datasets:
            if not keepAll:
                reply = ask_user("keep this dataset (yes/no/all)?\n %s\n" % d, ["y","n","a"])
                if reply == "n":
                    catalog.pop(d)
                    continue
                if reply == "a":
                    keepAll = True
            # Group kept datasets by their primary name (second "/" field).
            primary = d.split("/")[1]
            if not primary in primaries:
                primaries[ primary ] = []
            primaries[ primary ].append(d)
        # Ask again about primaries that map to more than one dataset.
        for name,val in primaries.iteritems():
            if len(val) == 1: continue
            reply = ask_user("More than one sample for %s:\n %s\nKeep all?" % (name,"\n ".join(val)))
            if reply == "n":
                for d in val:
                    reply = ask_user("keep this dataset?\n %s\n" % d)
                    if reply == "n":
                        catalog.pop(d)
        self.writeCatalog(catalog)

    def checkFile(self,fileName,dsetName,ifile):
        """
        Check if file is valid.
        @fileName: file name
        Runs fggCheckFile.py, which writes its findings to a temporary JSON
        file; returns (dsetName, ifile, fileName, returncode, json_text).
        """
        ## fName = "root://eoscms//eos/cms%s" % fileName
        fName = fileName
        tmp = ".tmp%s_%d.json"%(dsetName.replace("/","_"),ifile)
        ## print "fggCheckFile.py",[fName,tmp,"2>/dev/null"]
        ret,out = self.parallel_.run("fggCheckFile.py",[fName,tmp,"2>/dev/null"],interactive=True)[2]
        try:
            fout = open(tmp)
            out = fout.read()
            fout.close()
        except IOError, e:
            print ret, out
            print e
            out = "{}"
        # NOTE(review): if the tmp file was never created, this remove will
        # itself raise OSError — TODO confirm fggCheckFile.py always writes it.
        os.remove(tmp)
        return dsetName,ifile,fileName,ret,out

    def lockCatalog(self):
        """
        Lock catalog file for writing.
        Note: not implemented.
        """
        pass

    def unlockCatalog(self):
        """
        Unlock catalog file for writing.
        Note: not implemented.
        """
        pass

    def readCatalog(self,throw=False):
        """
        Read catalog from JSON file.
        @throw: throw exception if file does not exist.
        """
        if os.path.exists(self.catalog_):
            return json.loads( open(self.catalog_).read() )
        if throw:
            raise Exception("Could not find dataset catalog %s" % ( self.catalog_ ))
        return {}

    def writeCatalog(self,content):
        """
        Write catalog to JSON file.
        @content: catalog content.
        """
        if not os.path.exists( os.path.dirname(self.catalog_) ):
            os.mkdir( os.path.dirname(self.catalog_) )
        with open(self.catalog_,"w+") as fout:
            fout.write( json.dumps(content,indent=4,sort_keys=True) )
            fout.close()

    def getDatasetMetaData(self,maxEvents,primary,secondary=None,jobId=-1,nJobs=0):
        """
        Extract dataset meta data.
        @maxEvents: maximum number of events to read.
        @primary: primary dataset name.
        @secondary: secondary dataset name.
        @jobId/@nJobs: when jobId != -1, only every nJobs-th file (starting at
        jobId) is returned, splitting the dataset round-robin across jobs.
        returns: tuple containing datasetName,cross-section,numberOfEvents,listOfFiles,maxEvents
        """
        catalog = self.readCatalog(True)
        primary = primary.lstrip("/")
        found = False
        xsec = 0.
        allFiles = []
        totEvents = 0.
        totWeights = 0.
        for dataset,info in catalog.iteritems():
            empty,prim,sec,tier=dataset.split("/")
            if prim == primary:
                if secondary and sec != secondary:
                    continue
                if found:
                    raise Exception("More then one dataset matched the request: /%s/%s" % ( primary, str(secondary) ))
                found = dataset
                if prim in self.cross_sections_:
                    xsec = self.cross_sections_[prim]
                # Accumulate events/weights, skipping files flagged as bad.
                for fil in info["files"]:
                    if fil.get("bad",False):
                        continue
                    nev, name = fil["nevents"], fil["name"]
                    totEvents += nev
                    totWeights += fil.get("weights",0.)
                    allFiles.append(name)
                    if maxEvents > -1 and totEvents > maxEvents:
                        break
        if not found:
            raise Exception("No dataset matched the request: /%s/%s" % ( primary, str(secondary) ))
        # Scale the summed weights down when the event cap truncates the list.
        if maxEvents > -1 and totEvents > maxEvents:
            totWeights = maxEvents / totEvents * totWeights
            totEvents = maxEvents
        maxEvents = int(totEvents)
        # When per-file weights are available they replace the raw event count.
        if totWeights != 0.:
            totEvents = totWeights
        if jobId != -1:
            files = [ allFiles[i] for i in range(jobId,len(allFiles),nJobs) ]
        else:
            files = allFiles
        return found,xsec,totEvents,files,maxEvents

    def getAllDatasets(self):
        """Return (sorted dataset names, full catalog dict)."""
        catalog = self.readCatalog()
        datasets = sorted(catalog.keys())
        return datasets,catalog

    def clearCatalog(self):
        """Replace the on-disk catalog with an empty one."""
        self.writeCatalog({})
# -------------------------------------------------------------------------------
class SamplesManagerCli(SamplesManager):
def __init__(self,*args,**kwargs):
commands = [ "",
"import imports datasets from DBS to catalog",
"eosimport imports datasets from EOS",
"list lists datasets in catalog",
"review review catalog to remove datasets",
"check check files in datasets for errors and mark bad files"
]
parser = OptionParser(
usage="""%%prog [options] <command> [[command2] [command3] ..]
Command line utility to handle FLASHgg samples catalog.
Commands:
%s
""" % "\n ".join(commands),
option_list=[
make_option("-V","--flashggVersion",
action="store", dest="flashggVersion", type="string",
default="*",
help="FLASHgg version to use (only relevant when importing). default: %default",
),
make_option("-C","--campaign",
dest="campaign",action="store",type="string",
default="",
help="production campaign. default: %default",
),
make_option("-d","--dbs-instance",
dest="dbs_instance",action="store",type="string",
default="prod/phys03",
help="DBS instance to use. default: %default",
),
make_option("-m","--metaDataSrc",
dest="metaDataSrc",action="store",type="string",
default="flashgg",
help="MetaData package to use. default: %default",
),
make_option("--load", # special option to load whole configuaration from JSON
action="callback",callback=Load(),dest="__opt__",
type="string",
help="load JSON file with configuration",metavar="CONFIG.json"
),
make_option("-q","--queue",
dest="queue",action="store",type="string",
default=None,
help="Run jobs in batch using specified queue. default: %default",
),
make_option("-v","--verbose",
action="store_true", dest="verbose",
default=False,
help="default: %default",)
]
)
# parse the command line
(self.options, self.args) = parser.parse_args()
def __call__(self):
(options,args) = (self.options,self.args)
self.mn = SamplesManager("$CMSSW_BASE/src/%s/MetaData/data/%s/datasets.json" % (options.metaDataSrc,options.campaign),
dbs_instance=options.dbs_instance)
## pprint( mn.cross_sections_ )
if len(args) == 0:
args = ["list"]
method = getattr(self,"run_%s" % args[0],None)
if not method:
sys.exit("Unkown command %s" % a)
if len(args)>1:
method(*args[1:])
else:
method()
def run_import(self,query=None):
if query:
self.mn.importFromDAS([query])
else:
self.mn.importFromDAS("/*/*%s-%s*/USER" % (self.options.campaign,self.options.flashggVersion) )
def run_eosimport(self,*args):
self.mn.importFromEOS(args)
def run_check(self):
self.mn.checkAllDatasets()
def run_list(self):
print
print "Datasets in catalog:"
datasets,catalog = self.mn.getAllDatasets()
## datasets = [ d.rsplit("/",1)[0] for d in datasets ]
largest = max( [len(d) for d in datasets] )
for d in datasets:
nevents = 0.
weights = 0.
nfiles = len(catalog[d]["files"])
for fil in catalog[d]["files"]:
nevents += fil.get("nevents",0.)
weights += fil.get("weights",0.)
print d.ljust(largest), ("%d" % int(nevents)).rjust(8), ("%d" % nfiles).rjust(3),
if weights != 0.: print ("%1.2g" % ( weights/nevents ) )
else: print
def run_clear(self):
self.mn.clearCatalog()
def run_review(self):
self.mn.reviewCatalog()
| [
"pasquale.musella@cern.ch"
] | pasquale.musella@cern.ch |
9bbc4c87238f15869cb43208feda58e15b29152a | 15bfc2b3ba52420d95ed769a332aaa52f402bbd2 | /api/v2010/machine_to_machine/read-default/read-default.6.x.py | 1533bdd640a1fb7f14859f937a8dff30ef979f23 | [] | no_license | synackme/sample-code | 013b8f0a6a33bfd327133b09835ee88940d3b1f2 | 5b7981442f63df7cf2d17733b455270cd3fabf78 | refs/heads/master | 2020-03-17T04:53:07.337506 | 2018-05-07T16:47:48 | 2018-05-07T16:47:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | # Download the helper library from https://www.twilio.com/docs/python/install
from twilio.rest import Client

# Account SID and auth token, taken from twilio.com/console.
account_sid = '"ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"'
auth_token = 'your_auth_token'
twilio_client = Client(account_sid, auth_token)

# List the available US machine-to-machine numbers and print each friendly name.
for number in twilio_client.available_phone_numbers("US").machine_to_machine.list():
    print(number.friendly_name)
| [
"jose.oliveros.1983@gmail.com"
] | jose.oliveros.1983@gmail.com |
93d56532e0faa529e1b82d52cf1c017f43ef8373 | f8c3c677ba536fbf5a37ac4343c1f3f3acd4d9b6 | /ICA_SDK/test/test_workflow_argument.py | 5c2a2e694a0af944ba4e5004cce281d75ef38efc | [] | no_license | jsialar/integrated_IAP_SDK | 5e6999b0a9beabe4dfc4f2b6c8b0f45b1b2f33eb | c9ff7685ef0a27dc4af512adcff914f55ead0edd | refs/heads/main | 2023-08-25T04:16:27.219027 | 2021-10-26T16:06:09 | 2021-10-26T16:06:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py | # coding: utf-8
"""
IAP Services
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import ICA_SDK
from ICA_SDK.models.workflow_argument import WorkflowArgument # noqa: E501
from ICA_SDK.rest import ApiException
class TestWorkflowArgument(unittest.TestCase):
    """WorkflowArgument unit test stubs"""

    def setUp(self):
        # No per-test fixtures are needed.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def make_instance(self, include_optional):
        """Test WorkflowArgument
            include_optional is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # model = ICA_SDK.models.workflow_argument.WorkflowArgument()  # noqa: E501
        if include_optional :
            return WorkflowArgument(
                name = '0',
                value = '0',
                json = None,
                options = '0'
            )
        else :
            # Only `name` is required by the model.
            return WorkflowArgument(
                name = '0',
        )

    def testWorkflowArgument(self):
        """Test WorkflowArgument"""
        # Exercise both construction paths; any validation failure raises here.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| [
"siajunren@gmail.com"
] | siajunren@gmail.com |
9f2149acf5b8359da35b2d3288a10d45048d9ecd | 06b5d50d92af07dc7c2d9cc24922a2a906f7b88c | /church/main/migrations/0013_auto_20190722_2132.py | 70d6e4e83fbf8eb6f3b68d3a67eb09a0f8d5f5d4 | [] | no_license | ShehanHD/Django | 4fe6d841e38450b028765cc84bbe7b99e65b9387 | bc855c16acad5d8f1f5a24dc68438749704935fd | refs/heads/master | 2021-05-20T12:57:36.980701 | 2020-04-25T13:03:43 | 2020-04-25T13:03:43 | 252,305,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | # Generated by Django 2.2.3 on 2019-07-22 19:32
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.3 on 2019-07-22; alters fields on the `main` app.

    dependencies = [
        ('main', '0012_auto_20190722_2131'),
    ]

    operations = [
        migrations.AlterField(
            model_name='services',
            name='date',
            # NOTE(review): the default is a fixed naive datetime frozen at
            # makemigrations time, not evaluated per row -- typical of
            # auto-generated defaults; confirm this is intended before reuse.
            field=models.DateTimeField(default=datetime.datetime(2019, 7, 22, 21, 32, 55, 180001), verbose_name='date'),
        ),
        migrations.AlterField(
            model_name='services',
            name='img',
            field=models.ImageField(blank=True, upload_to='pics/services'),
        ),
        migrations.AlterField(
            model_name='team',
            name='img',
            field=models.ImageField(blank=True, upload_to='pics/team'),
        ),
    ]
| [
"51677228+ShehanHD@users.noreply.github.com"
] | 51677228+ShehanHD@users.noreply.github.com |
bdc06f47018170b03fee10a372fb6e96f09cad56 | 9f2b07eb0e9467e17448de413162a14f8207e5d0 | /pylith/bc/DirichletTimeDependent.py | 767b2424dec16a77a004dc1158d2c1f6de9772b2 | [
"MIT"
] | permissive | fjiaqi/pylith | 2aa3f7fdbd18f1205a5023f8c6c4182ff533c195 | 67bfe2e75e0a20bb55c93eb98bef7a9b3694523a | refs/heads/main | 2023-09-04T19:24:51.783273 | 2021-10-19T17:01:41 | 2021-10-19T17:01:41 | 373,739,198 | 0 | 0 | MIT | 2021-06-04T06:12:08 | 2021-06-04T06:12:07 | null | UTF-8 | Python | false | false | 5,081 | py | # ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
#
# @file pylith/bc/DirichletTimeDependent.py
#
# @brief Python object for managing a time-dependent Dirichlet (prescribed
# values) boundary condition.
#
# Factory: boundary_condition
from .BoundaryCondition import BoundaryCondition
from .bc import DirichletTimeDependent as ModuleDirichletTimeDependent
from pylith.utils.NullComponent import NullComponent
class DirichletTimeDependent(BoundaryCondition, ModuleDirichletTimeDependent):
    """Python object for managing a time-dependent Dirichlet (prescribed values)
    boundary condition.

    Factory: boundary_condition
    """

    import pythia.pyre.inventory

    # Pyre inventory: user-facing configuration properties for this component.
    constrainedDOF = pythia.pyre.inventory.array(
        "constrained_dof", converter=int, default=[])
    constrainedDOF.meta[
        'tip'] = "Array of constrained degrees of freedom (0=1st DOF, 1=2nd DOF, etc)."

    useInitial = pythia.pyre.inventory.bool("use_initial", default=True)
    useInitial.meta['tip'] = "Use initial term in time-dependent expression."

    useRate = pythia.pyre.inventory.bool("use_rate", default=False)
    useRate.meta['tip'] = "Use rate term in time-dependent expression."

    useTimeHistory = pythia.pyre.inventory.bool(
        "use_time_history", default=False)
    useTimeHistory.meta['tip'] = "Use time history term in time-dependent expression."

    dbTimeHistory = pythia.pyre.inventory.facility(
        "time_history", factory=NullComponent, family="temporal_database")
    dbTimeHistory.meta['tip'] = "Time history with normalized amplitude as a function of time."

    def __init__(self, name="dirichlettimedependent"):
        """Constructor.
        """
        BoundaryCondition.__init__(self, name)
        return

    def _defaults(self):
        # Replace the generic auxiliary subfields with the time-dependent ones.
        from .AuxSubfieldsTimeDependent import AuxSubfieldsTimeDependent
        self.auxiliarySubfields = AuxSubfieldsTimeDependent(
            "auxiliary_subfields")

    def preinitialize(self, problem):
        """Do pre-initialization setup.

        Pushes the Python-side configuration (constrained DOF, which
        time-dependence terms are active, optional time-history DB) down to
        the C++ module object.
        """
        import numpy
        from pylith.mpi.Communicator import mpi_comm_world
        comm = mpi_comm_world()
        # Only rank 0 logs, to avoid duplicated messages in parallel runs.
        if 0 == comm.rank:
            self._info.log(
                "Performing minimal initialization of time-dependent Dirichlet boundary condition '%s'." % self.aliases[-1])

        BoundaryCondition.preinitialize(self, problem)

        ModuleDirichletTimeDependent.setConstrainedDOF(
            self, numpy.array(self.constrainedDOF, dtype=numpy.int32))
        ModuleDirichletTimeDependent.useInitial(self, self.useInitial)
        ModuleDirichletTimeDependent.useRate(self, self.useRate)
        ModuleDirichletTimeDependent.useTimeHistory(self, self.useTimeHistory)
        if not isinstance(self.dbTimeHistory, NullComponent):
            ModuleDirichletTimeDependent.setTimeHistoryDB(
                self, self.dbTimeHistory)
        return

    def verifyConfiguration(self):
        """Verify compatibility of configuration.

        Checks that every constrained DOF index is valid for the mesh's
        spatial dimension.
        """
        BoundaryCondition.verifyConfiguration(self, self.mesh())
        spaceDim = self.mesh().coordsys().getSpaceDim()
        # NOTE(review): self.bcDOF is not defined in this class (the inventory
        # defines constrainedDOF); presumably it is set by the BoundaryCondition
        # base class -- verify, otherwise this raises AttributeError.
        for d in self.bcDOF:
            if d < 0 or d >= spaceDim:
                raise ValueError("Attempting to constrain DOF (%d) that doesn't exist for time-dependent Dirichlet boundary condition '%s'. Space dimension is %d." %
                                 (d, self.aliases[-1], spaceDim))
        return

    def _configure(self):
        """Setup members using inventory.

        Validates the inventory combinations before the component is used.
        """
        if 0 == len(self.constrainedDOF):
            raise ValueError("'constrained_dof' must be a zero based integer array of indices corresponding to the "
                             "constrained degrees of freedom.")
        # A time-history term requires a database; a database without the term is ignored.
        if self.inventory.useTimeHistory and isinstance(self.inventory.dbTimeHistory, NullComponent):
            raise ValueError(
                "Missing time history database for time-dependent Dirichlet boundary condition '%s'." % self.aliases[-1])
        if not self.inventory.useTimeHistory and not isinstance(self.inventory.dbTimeHistory, NullComponent):
            self._warning.log(
                "Ignoring time history database setting for time-dependent Dirichlet boundary condition '%s'." % self.aliases[-1])
        BoundaryCondition._configure(self)
        return

    def _createModuleObj(self):
        """Create handle to corresponding C++ object.
        """
        ModuleDirichletTimeDependent.__init__(self)
        return
# Factories
def boundary_condition():
    """Pyre factory: build and return a new DirichletTimeDependent component."""
    component = DirichletTimeDependent()
    return component
# End of file
| [
"baagaard@usgs.gov"
] | baagaard@usgs.gov |
92c43f49b8885961160c4f1e4bb9c1ddb79dc162 | c46754b9600a12df4f9d7a6320dfc19aa96b1e1d | /examples/research_projects/luke/luke_utils.py | aec4133f21b36eee313a5c6371ff48537ccf613c | [
"Apache-2.0"
] | permissive | huggingface/transformers | ccd52a0d7c59e5f13205f32fd96f55743ebc8814 | 4fa0aff21ee083d0197a898cdf17ff476fae2ac3 | refs/heads/main | 2023-09-05T19:47:38.981127 | 2023-09-05T19:21:33 | 2023-09-05T19:21:33 | 155,220,641 | 102,193 | 22,284 | Apache-2.0 | 2023-09-14T20:44:49 | 2018-10-29T13:56:00 | Python | UTF-8 | Python | false | false | 5,106 | py | import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad/truncate a batch of sequences into shape (batch, sequence_length[, 2]).

    Args:
        sequences: iterable of sequences; each is a list of scalars, or of pairs
            when padding_value is a tuple (e.g. entity spans).
        padding_value: scalar fill value, or a 2-tuple for pair-valued sequences.
        padding_side: "right" pads at the end; anything else pads at the front.
        sequence_length: target length; longer sequences are truncated.

    Returns:
        Nested Python lists of shape (batch, sequence_length) or
        (batch, sequence_length, 2).
    """
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        length = len(tensor[:sequence_length])
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, :length, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, :length] = tensor[:sequence_length]
        else:
            # Left padding: the values occupy the LAST `length` slots.
            # (The original sliced `[length - 1:]`, which is only the right
            # size when 2*length == sequence_length + 1 and raises a shape
            # mismatch ValueError otherwise.)
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - length:, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, sequence_length - length:] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation(char):
    """Return True if `char` is punctuation: an ASCII punctuation range or Unicode category P*."""
    cp = ord(char)
    in_ascii_punct = (33 <= cp <= 47) or (58 <= cp <= 64) or (91 <= cp <= 96) or (123 <= cp <= 126)
    return in_ascii_punct or unicodedata.category(char).startswith("P")
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """
    Data collator that will dynamically pad the inputs received, as well as the labels.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
              different lengths).
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (`int`, *optional*):
            If set will pad the sequence to a multiple of the provided value.

            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
        label_pad_token_id (`int`, *optional*, defaults to -100):
            The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions).
        return_tensors (`str`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        # torch imported lazily so the module can be imported without it installed.
        import torch

        # Features may carry labels under either "label" or "labels".
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        # All label-like fields are padded to the (padded) entity sequence length.
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        # Pad NER tags with -1 and entity spans with (-1, -1) sentinels.
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        # Everything in the batch is integer-valued; convert wholesale to int64 tensors.
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
| [
"noreply@github.com"
] | huggingface.noreply@github.com |
214892b437bd6d59b56ffbcabe40e76edd0ab6b3 | 2b0f4f3590f5407da83d179db8103803f7c75e8f | /app/views/HomeView.py | b7fcbc6df3e89b79e9e2829c11df2860ae9ee937 | [] | no_license | caiomarinhodev/ciacimento | 6c783f169ac912ed599bcfaa6a208d5be5c7942e | cf7a6951196bc36655fe0b303e3131932ec254cf | refs/heads/master | 2023-07-07T00:25:16.101307 | 2023-02-28T00:46:12 | 2023-02-28T00:46:12 | 117,120,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.views.generic import TemplateView
from app.mixins.CustomContextMixin import CustomContextMixin
"""HomeView.py: Especifica a pagina inicial da aplicacao."""
__author__ = "Caio Marinho"
__copyright__ = "Copyright 2017"
class HomeView(TemplateView, CustomContextMixin):
template_name = 'site/index.html'
| [
"caiomarinho8@gmail.com"
] | caiomarinho8@gmail.com |
4cdd78b8851f5c509944a233f59931e35429a318 | 9c1fa66bc9fffc800890622d66c8cf50a3384c52 | /coresys/models/payment.py | 6b4819e7f1b141e857831886aff4d6ff9fa82547 | [] | no_license | fhydralisk/walibackend | 97a5f7ba0a02a36673ec57e1c42fd372afe42736 | dac474f3d418ac3711b1c51d00bd7d246d2bc1bd | refs/heads/master | 2020-03-15T15:26:45.625860 | 2018-09-04T12:23:52 | 2018-09-04T12:23:52 | 132,212,261 | 1 | 4 | null | 2018-11-18T15:00:53 | 2018-05-05T03:45:57 | Python | UTF-8 | Python | false | false | 568 | py | from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
class CorePaymentMethod(models.Model):
ord = models.IntegerField(_("Order Number"))
deposit_scale = models.FloatField(validators=[MinValueValidator(0.0), MaxValueValidator(1.0)])
opmdesc = models.TextField(verbose_name=_("Description"), max_length=125)
in_use = models.BooleanField(default=True)
def __unicode__(self):
return self.opmdesc
| [
"fhy14@mails.tsinghua.edu.cn"
] | fhy14@mails.tsinghua.edu.cn |
36ad831f3772f2152d78a9892f032d218d3f976e | 50e3fcca6e2a9a73ed52d231a739f70c28ed108f | /Math/twoSum.py | 29d1fcf1fab8fb3d069b8fb3146aae35b69ed916 | [] | no_license | thomasyu929/Leetcode | efa99deaa2f6473325de516d280da6911c2cc4ab | 780271875c5b50177653fd7fe175d96dd10e84e2 | refs/heads/master | 2022-03-29T00:11:01.554523 | 2020-01-03T00:28:22 | 2020-01-03T00:28:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | class Solution:
# brute force
'''
def twoSum(self, nums, target):
for i in range(len(nums)):
for j in range(len(nums))[i+1:]: # not use the same element twice
if nums[i] + nums[j] == target:
return i,j
'''
# hash map
def twoSum(self, nums, target):
m = {}
for i, n in enumerate(nums):
x = target - n
if x in m:
return i, m[x]
else:
m[n] = i
if __name__ == "__main__":
    # Quick manual check: expected pair of indices for 2 + 7 == 9.
    solver = Solution()
    sample_nums = [2,7,11,15]
    sample_target = 9
    print(solver.twoSum(sample_nums, sample_target))
"yby4301955@gmail.com"
] | yby4301955@gmail.com |
c636180bd9979b8ec50d6a3e1f125465333ae932 | 8364e4d23191ee535c163debffafa8418d705843 | /aiokubernetes/models/v1_preconditions.py | a3878eb6ae0455ed5a39a95bb79bd48a6180ed09 | [
"Apache-2.0"
] | permissive | olitheolix/aiokubernetes | 2bb6499030e2e6e9b7ca0db63c4441293d70a09b | 266718b210dff2a9b2212183261ea89adf89115e | refs/heads/master | 2020-03-21T23:02:30.484410 | 2018-10-20T19:33:01 | 2018-10-22T05:52:42 | 139,162,905 | 28 | 3 | Apache-2.0 | 2018-10-22T05:52:51 | 2018-06-29T15:02:59 | Python | UTF-8 | Python | false | false | 2,978 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
class V1Preconditions(object):
    """Kubernetes API model: preconditions (target UID) for a modifying request.

    NOTE: This class was originally auto generated by the swagger code
    generator program; it follows the standard generated-model protocol
    (swagger_types / attribute_map / to_dict / to_str / equality).
    """

    # Attribute name -> swagger type.
    swagger_types = {
        'uid': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'uid': 'uid'
    }

    def __init__(self, uid=None):  # noqa: E501
        """V1Preconditions - a model defined in Swagger"""  # noqa: E501
        self._uid = None
        self.discriminator = None
        if uid is not None:
            self.uid = uid

    @property
    def uid(self):
        """str: the target UID this precondition applies to.  # noqa: E501"""
        return self._uid

    @uid.setter
    def uid(self, uid):
        """Set the target UID.  # noqa: E501"""
        self._uid = uid

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                                for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Two models are equal when they share type and attribute values."""
        if not isinstance(other, V1Preconditions):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| [
"olitheolix@gmail.com"
] | olitheolix@gmail.com |
345f793f94a52707da927018192182025510c0d0 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_02_01/operations/_ddos_custom_policies_operations.py | 29137d1ba254f43a54a7c02525a48e5f2cedcbb8 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 23,799 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DdosCustomPoliciesOperations(object):
"""DdosCustomPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    # Store the shared pipeline client, configuration and (de)serializers
    # used by every operation in this group.
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
def _delete_initial(
    self,
    resource_group_name,  # type: str
    ddos_custom_policy_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Issue the initial DELETE request; polling to completion is handled by begin_delete."""
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-02-01"
    accept = "application/json"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 202/204 are accepted because DELETE is a long-running operation.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

# URL template attached as metadata, looked up by the method body above.
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'}  # type: ignore
def begin_delete(
    self,
    resource_group_name,  # type: str
    ddos_custom_policy_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes the specified DDoS custom policy.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param ddos_custom_policy_name: The name of the DDoS custom policy.
    :type ddos_custom_policy_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial DELETE and keep the raw pipeline response.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            ddos_custom_policy_name=ddos_custom_policy_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # DELETE returns no body; only invoke the custom response hook if given.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Choose the polling strategy: default ARM polling, none, or caller-supplied.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its continuation token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

# URL template attached as metadata, shared with _delete_initial's endpoint.
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'}  # type: ignore
def get(
    self,
    resource_group_name,  # type: str
    ddos_custom_policy_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.DdosCustomPolicy"
    """Gets information about the specified DDoS custom policy.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param ddos_custom_policy_name: The name of the DDoS custom policy.
    :type ddos_custom_policy_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DdosCustomPolicy, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2019_02_01.models.DdosCustomPolicy
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DdosCustomPolicy"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-02-01"
    accept = "application/json"

    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Deserialize the JSON payload into the typed model.
    deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

# URL template attached as metadata, looked up by the method body above.
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'}  # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
parameters, # type: "_models.DdosCustomPolicy"
**kwargs # type: Any
):
# type: (...) -> "_models.DdosCustomPolicy"
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DdosCustomPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
parameters, # type: "_models.DdosCustomPolicy"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.DdosCustomPolicy"]
"""Creates or updates a DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:param parameters: Parameters supplied to the create or update operation.
:type parameters: ~azure.mgmt.network.v2019_02_01.models.DdosCustomPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DdosCustomPolicy or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_02_01.models.DdosCustomPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
ddos_custom_policy_name=ddos_custom_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
def _update_tags_initial(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.DdosCustomPolicy"
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.DdosCustomPolicy"]
"""Update a DDoS custom policy tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:param parameters: Parameters supplied to the update DDoS custom policy resource tags.
:type parameters: ~azure.mgmt.network.v2019_02_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DdosCustomPolicy or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_02_01.models.DdosCustomPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
ddos_custom_policy_name=ddos_custom_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
b1fbece0ddc4874190e66ca6b25eededa45727e2 | 6d7678e3d79c97ddea2e2d65f2c2ef03b17f88f6 | /venv/lib/python3.6/site-packages/pysnmp/carrier/asyncore/dgram/udp6.py | 900ef3c7d6986b1c921fbb7e443ee60ff27e6c6c | [
"MIT"
] | permissive | PitCoder/NetworkMonitor | b47d481323f26f89be120c27f614f2a17dc9c483 | 36420ae48d2b04d2cc3f13d60d82f179ae7454f3 | refs/heads/master | 2020-04-25T11:48:08.718862 | 2019-03-19T06:19:40 | 2019-03-19T06:19:40 | 172,757,390 | 2 | 0 | MIT | 2019-03-15T06:07:27 | 2019-02-26T17:26:06 | Python | UTF-8 | Python | false | false | 1,386 | py | #
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysnmp/license.html
#
from pysnmp.carrier import sockfix
from pysnmp.carrier.base import AbstractTransportAddress
from pysnmp.carrier.asyncore.dgram.base import DgramSocketTransport
import socket
domainName = snmpUDP6Domain = (1, 3, 6, 1, 2, 1, 100, 1, 2)
class Udp6TransportAddress(tuple, AbstractTransportAddress):
pass
class Udp6SocketTransport(DgramSocketTransport):
sockFamily = socket.has_ipv6 and socket.AF_INET6 or None
addressType = Udp6TransportAddress
def normalizeAddress(self, transportAddress):
if '%' in transportAddress[0]: # strip zone ID
ta = self.addressType((transportAddress[0].split('%')[0],
transportAddress[1],
0, # flowinfo
0)) # scopeid
else:
ta = self.addressType((transportAddress[0],
transportAddress[1], 0, 0))
if (isinstance(transportAddress, self.addressType) and
transportAddress.getLocalAddress()):
return ta.setLocalAddress(transportAddress.getLocalAddress())
else:
return ta.setLocalAddress(self.getLocalAddress())
Udp6Transport = Udp6SocketTransport
| [
"overlord.lae@gmail.com"
] | overlord.lae@gmail.com |
7ce5cee9ad8c27af756fd514f892f81a4ef5cb27 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/EBackend/FileCacheClass.py | 16d4a89c629531f79435bf2213bf53b2f5267bf0 | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 4,627 | py | # encoding: utf-8
# module gi.repository.EBackend
# from /usr/lib64/girepository-1.0/EBackend-1.2.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.EDataServer as __gi_repository_EDataServer
import gi.repository.Gio as __gi_repository_Gio
import gobject as __gobject
class FileCacheClass(__gi.Struct):
"""
:Constructors:
::
FileCacheClass()
"""
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
parent_class = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(FileCacheClass), '__module__': 'gi.repository.EBackend', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'FileCacheClass' objects>, '__weakref__': <attribute '__weakref__' of 'FileCacheClass' objects>, '__doc__': None, 'parent_class': <property object at 0x7f9dbf88e0e0>})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(FileCacheClass)
| [
"ttys3@outlook.com"
] | ttys3@outlook.com |
33b0303c5f17e64009b0c754863eb58633a2c980 | 9a0092226c40bc6c9c4eaadb670601234fadd739 | /grad-cam/scale/data/FF-PP/create_dataset.py | 7c40e2b741eb169fa115118c9dd62b5f6c1d4dfc | [] | no_license | ajioka-fumito/keras | 45005f214ae0b66fc6e88ca8f421ab9f44d52ec4 | 824691f4e243dd447ab91146a0e5336b416d0f83 | refs/heads/master | 2020-07-22T16:45:10.042977 | 2019-09-22T08:04:51 | 2019-09-22T08:04:51 | 204,160,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | import glob
from PIL import Image
import random
import shutil
import os
paths = glob.glob("./F_crop/*")
random.shuffle(paths)
for i,path in enumerate(paths):
name = os.path.basename(path)
if 0<=i<=49:
shutil.copyfile(path,"./train/{}".format(name))
else:
shutil.copyfile(path,"./test/{}".format(name))
| [
"ajioka.ds@gmail.com"
] | ajioka.ds@gmail.com |
72e633a77883d9597e05ad3c85b9ee1045459b61 | ce32e0e1b9568c710a3168abc3c638d6f9f6c31b | /vnpy/app/algo_trading/ui/__init__.py | 9ac801bbd955dcc9e362b813186a5416c4b42944 | [
"MIT"
] | permissive | msincenselee/vnpy | 55ae76ca32cae47369a66bd2d6589c13d7a0bdd4 | 7f4fd3cd202712b083ed7dc2f346ba4bb1bda6d7 | refs/heads/vnpy2 | 2022-05-19T10:06:55.504408 | 2022-03-19T15:26:01 | 2022-03-19T15:26:01 | 38,525,806 | 359 | 158 | MIT | 2020-09-09T00:09:12 | 2015-07-04T07:27:46 | C++ | UTF-8 | Python | false | false | 32 | py | from .widget import AlgoManager
| [
"xiaoyou.chen@foxmail.com"
] | xiaoyou.chen@foxmail.com |
fb1cfdd12519ff10dc927a3ec165345521142654 | 8b86f7809b18de55fddd55800f932a20725132ea | /data_structures/binary_search_tree/bst.py | 0434af27e11cb927df539cbd99f26927e65710bd | [
"MIT"
] | permissive | vinozy/data-structures-and-algorithms | 75f0358167a2c6566a3a196aa9cafd33d2a95b16 | 0485b95f5aabc0ee255cd7e50b48a6ccec851e00 | refs/heads/master | 2022-02-17T14:21:06.412047 | 2019-08-17T04:23:09 | 2019-08-17T04:23:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,455 | py | class Node:
"""create a Node"""
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def __repr__(self): # pragma: no cover
return 'Node Val: {}'.format(self.val)
def __str__(self): # pragma: no cover
return self.val
class BST:
"""create a binary search tree data structure"""
def __init__(self, iterable=[]):
self.root = None
if type(iterable) is not list:
raise TypeError
for item in iterable:
self.insert(item)
def __repr__(self): # pragma: no cover
return '<BST Root {}>'.format(self.root.val)
def __str__(self): # pragma: no cover
return self.root.val
def in_order(self, operation):
"""insert in order"""
def _walk(node=None):
if node is None:
return
if node.left is not None:
_walk(node.left)
operation(node)
if node.right is not None:
_walk(node.right)
_walk(self.root)
def pre_order(self, operation):
"""insert in pre-order"""
def _walk(node=None):
if node is None:
return
operation(node)
if node.left is not None:
_walk(node.left)
if node.right is not None:
_walk(node.right)
_walk(self.root)
def post_order(self, operation):
"""insert in post-order"""
def _walk(node=None):
if node is None:
return
if node.left is not None:
_walk(node.left)
if node.right is not None:
_walk(node.right)
operation(node)
_walk(self.root)
def insert(self, val):
"""insert node into BST"""
node = Node(val)
current = self.root
if self.root is None:
self.root = node
return node
while current:
if val >= current.val:
if current.right is not None:
current = current.right
else:
current.right = node
break
elif val < current.val:
if current.left is not None:
current = current.left
else:
current.left = node
break
return node
| [
"beverly.pham@gmail.com"
] | beverly.pham@gmail.com |
7e81544fbf791344dc1031d79843c5d25105605b | 75616acbd19956182868b9c84ecc5805394b6ead | /src/mcdp_user_db/userdb.py | 79d421dbc2edbdeeb5837c10da951a442c0e04fb | [] | no_license | fgolemo/mcdp | 16c245665d91af7f91bba8a24d4272001ce90746 | 46eb25ca85660f4d6c2f1f6d026f7e97c7977ac3 | refs/heads/master | 2021-01-21T22:14:52.599881 | 2017-09-01T18:00:55 | 2017-09-01T18:00:55 | 102,139,393 | 5 | 2 | null | 2017-09-01T17:58:36 | 2017-09-01T17:58:36 | null | UTF-8 | Python | false | false | 6,117 | py | from datetime import datetime
from contracts import contract
from mcdp.logs import logger
from .user import UserInfo
from mcdp_utils_misc.my_yaml import yaml_load
__all__ = ['UserDB']
class UserDB(object):
# def __init__(self, userdir):
# self.users = {}
# us = load_users(userdir)
# self.userdir = userdir
# self.users.update(us)
#
# if not MCDPConstants.USER_ANONYMOUS in self.users:
# msg = 'Need account for the anonymous user "%s".' % MCDPConstants.USER_ANONYMOUS
# raise_desc(ValueError, msg, found=self.users)
#
def __contains__(self, key):
return key in self.users
def match_by_id(self, provider, provider_id):
for u in self.users.values():
for w in u.info.authentication_ids:
if w.provider == provider and w.id == provider_id:
return u
return None
def best_match(self, username, name, email):
if username is not None:
if username in self.users:
return self.users[username]
for u in self.users.values():
user_info = u.info
if name is not None and user_info.get_name() == name:
return u
if email is not None and user_info.get_email() == email:
return u
return None
@contract(returns=UserInfo)
def __getitem__(self, key):
if key is None:
key = 'anonymous'
u = self.users[key].info
return u
def exists(self, login):
return login in self
@contract(returns=bool, login=str)
def authenticate(self, login, password):
user_info = self.users[login].info
for p in user_info.authentication_ids:
if p.provider == 'password':
pwd = p.password
match = password == pwd
if not match:
msg = 'Password %s does not match with stored %s.' % (password, pwd)
logger.warn(msg)
user_info.account_last_active = datetime.now()
return match
return False
@contract(returns=bytes, candidate_usernames='list(str)')
def find_available_user_name(self, candidate_usernames):
for x in candidate_usernames:
if x not in self.users:
return x
for i in range(2,10):
for x in candidate_usernames:
y = '%s%d' % (x, i)
if y not in self.users:
return y
raise ValueError(candidate_usernames)
def create_new_user(self, username, u):
if username in self.users:
msg = 'User "%s" already present.'
raise ValueError(msg)
self.users[username] = u
@contract(returns='isinstance(User)')
def get_unknown_user_struct(self, username):
unknown = """
info:
username: %s
website:
name: Unknown
subscriptions: []
account_last_active:
affiliation:
authentication_ids: []
groups: []
email:
account_created:
images: {}
""" % username
user_data = yaml_load(unknown)
from mcdp_hdb_mcdp.main_db_schema import DB
user = DB.view_manager.create_view_instance(DB.user, user_data)
user.set_root()
return user
# self.save_user(username, new_user=True)
#
# def save_user(self, username, new_user=False):
# userdir = os.path.join(self.userdir, username + '.' + MCDPConstants.user_extension)
# if not os.path.exists(userdir):
# if new_user:
# os.makedirs(userdir)
# else:
# msg = 'Could not find user dir %r.' % userdir
# raise ValueError(msg)
#
# filename = os.path.join(userdir, MCDPConstants.user_desc_file)
# if not os.path.exists(filename) and not new_user:
# msg = 'Could not find user filename %r.' % filename
# raise ValueError(msg)
# user = self.users[username]
# y = yaml_from_userinfo(user)
# s = yaml.dump(y)
# logger.info('Saving %r:\n%s' % (username, s))
# with open(filename, 'w') as f:
# f.write(s)
#
# # if user.picture is not None:
# # fn = os.path.join(userdir, MCDPConstants.user_image_file)
# # with open(fn, 'wb') as f:
# # f.write(user.picture)
# logger.debug('Saved user information here: %s' % userdir)
#
#
# def load_users(userdir):
# ''' Returns a dictionary of username -> User profile '''
# users = {}
#
# exists = os.path.exists(userdir)
# if not exists:
# msg = 'Directory %s does not exist' % userdir
# raise Exception(msg)
#
# assert exists
#
# l = locate_files(userdir,
# pattern='*.%s' % MCDPConstants.user_extension,
# followlinks=True,
# include_directories=True,
# include_files=False)
#
# for userd in l:
# username = os.path.splitext(os.path.basename(userd))[0]
# info = os.path.join(userd, MCDPConstants.user_desc_file)
# if not os.path.exists(info):
# msg = 'Info file %s does not exist.' % info
# raise Exception(msg)
# data = open(info).read()
# s = yaml.load(data)
#
# users[username] = userinfo_from_yaml(s, username)
#
# f = os.path.join(userd, MCDPConstants.user_image_file)
# if os.path.exists(f):
# users[username].picture = open(f, 'rb').read()
#
# if not users:
# msg = 'Could not load any user from %r' % userdir
# raise Exception(msg)
# else:
# logger.info('loaded users: %s.' % format_list(sorted(users)))
#
# return users
| [
"acensi@ethz.ch"
] | acensi@ethz.ch |
f742007d91e3d77ee008bb7e42b02145f4bbf4a6 | 79debba8bb967d38dd06ba9a241864d7559303f3 | /bose/python/elements_of_interview/fp_transform.py | 999233e3bcfa6f73fd7cea27d506ede6d1a555ee | [] | no_license | hghimanshu/CodeForces-problems | 0c03813cdcdabbff56e2f56e93757e3116b9a038 | 205e1f7e269df1f710e8cd1fd5e5b34066fd4796 | refs/heads/master | 2023-07-23T13:32:19.563965 | 2021-04-06T19:29:59 | 2021-04-06T19:29:59 | 228,555,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,375 | py | def transform(init,mapping,combination,seq):
if not seq:
return init
else:
return combination(mapping(seq[0]),transform(init,mapping,combination,seq[1:]))
def product_with_transform(seq):
return transform(1,lambda x:x,lambda a,b:a*b,seq)
def sum_with_transform(seq):
return transform(0,lambda x:x,lambda a,b: a+b,seq)
#reduction function (foldr)
def foldr(func,init,seq):
if not seq:
return init
else:
return func(seq[0],foldr(func,init,seq[1:]))
def product_with_foldr(seq):
return foldr(lambda seqval,acc:seqval*acc,1,seq)
def sum_with_foldr(seq):
return foldr(lambda seqval,acc:seqval+acc,0,seq)
def reverse_with_foldr(seq):
return foldr(lambda seqval,acc:acc+[seqval],[],seq)
def foldl(func,init,seq):
if not seq:
return init
else:
return foldl(func,func(init,seq[0]),seq[1:])
def product_with_foldl(seq):
return foldl(lambda seqval,acc:seqval*acc,1,seq)
def digits2num_with_foldl(seq):
return foldl(lambda acc,seqval: acc*10 + seqval,0 ,seq)
if __name__ == "__main__":
print(product_with_transform([1,2,3,4]))
print(sum_with_transform([1,2,3,4]))
print(product_with_foldr([1,2,3,4]))
print(sum_with_foldr([1,2,3,4]))
print(reverse_with_foldr([1,2,3,4]))
print(product_with_foldl([1,2,3,4]))
print(digits2num_with_foldl([1,2,3,4])) | [
"abose550@gmail.com"
] | abose550@gmail.com |
d875413f77e7f819f2d679a8877403ac27383aab | ad5dd929e2a02e7cc545cf2bec37a319d009bab8 | /sentence_iter.py | fdbd7fee1216df2c5d184bc3937d4cab27bab635 | [] | no_license | liuwei881/fluency_python | 1dcadf3113ecd6cda6c2c9676fc4a5f0529fe098 | 2ae0d8959d57ed1094cf5df3d2d8ca0df1f8d201 | refs/heads/master | 2021-09-03T21:01:51.465228 | 2018-01-12T00:33:24 | 2018-01-12T00:33:24 | 109,910,909 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | #coding=utf-8
import re
import reprlib
RE_WORD = re.compile('\w+')
class Sentence:
def __init__(self, text):
self.text = text
self.words = RE_WORD.findall(text)
def __repr__(self):
return 'Sentence(%s)' % reprlib.repr(self.text)
def __iter__(self): # 1
return SentenceIterator(self.words) # 2
class SentenceIterator:
def __init__(self, words):
self.words = words # 3
self.index = 0 # 4
def __next__(self):
try:
word = self.words[self.index] # 5
except IndexError:
raise StopIteration() # 6
self.index += 1 # 7
return word # 8
def __iter__(self): # 9
return self | [
"liuwei@polex.com.cn"
] | liuwei@polex.com.cn |
21282651510d53f48e89791d8d48142d7040384b | 3c3484769274a741308eb13d91caf960eae0c5b4 | /src/logging/util.py | 870f62c780197a5ac54545869cb12b21e3693a54 | [
"CC-BY-4.0",
"LicenseRef-scancode-public-domain"
] | permissive | shwina/still-magic | 7f7ad11a05346fc08f4331234740d69ad57ceefa | 1d651840497d66d44ff43528f6e1f38e698ce168 | refs/heads/master | 2020-06-09T06:10:41.367616 | 2019-04-16T20:00:41 | 2019-04-16T20:00:41 | 193,387,841 | 0 | 0 | NOASSERTION | 2019-06-23T19:32:46 | 2019-06-23T19:32:46 | null | UTF-8 | Python | false | false | 536 | py | import logging
MESSAGE_FORMAT = '%(asctime)s,%(name)s,%(levelname)s,%(message)s'
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
def create_logger(name, level, filename):
# Create logger.
logger = logging.getLogger(name)
logger.setLevel(level)
# Send messages to standard output.
handler = logging.FileHandler(filename)
# Define format.
formatter = logging.Formatter(MESSAGE_FORMAT, DATE_FORMAT)
# Stitch everything together.
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
| [
"gvwilson@third-bit.com"
] | gvwilson@third-bit.com |
5ff9e070f2ea8bbc5d73c47120f0659f9bffb7fe | 11f54a9d392cdfc3b4cca689c0b5abdbf10625ff | /kangaroo.py | 0852875515d575bd15b1881f1a8ac881af163dc7 | [] | no_license | stheartsachu/Miscellinuous | aa0de96115bea73d49bed50f80e263f31cf9d9ad | 3063233669f7513166b2987e911d662a0fbad361 | refs/heads/master | 2021-01-05T03:51:15.168301 | 2020-02-16T10:13:42 | 2020-02-16T10:13:42 | 240,869,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | x1 = 0
v1 = 3
x2 = 4
v2 = 2
# def fun(f_kan,s_kan):
# if f_kan == s_kan:
# return True
# print(fun(f_kan,s_kan))
# while fun(f_kan,s_kan) == True:
#
# f_kan = f_kan+v1
# print(f_kan)
f_kan = x1
s_kan = x2
while(f_kan <= 10000 and s_kan <= 10000 ):
f_kan += v1
s_kan += v2
# try:
if f_kan == s_kan:
print("YES")
break
# finally:
# print("NO")
# break
else:
print("NO")
| [
"seartsachu@gmail.com"
] | seartsachu@gmail.com |
43ff798c9d35108f04af4aa813c9e27cd1f69c88 | af7df9d77a2545b54d8cd03e7f4633dce6125f4a | /ch07/gui7c.py | 6f34a331505440579910e5fe3bafa70c62f2964b | [] | no_license | socrates77-sh/PP4E | 71e6522ea2e7cfd0c68c1e06ceb4d0716cc0f0bd | c92e69aea50262bfd63e95467ae4baf7cdc2f22f | refs/heads/master | 2020-05-29T08:46:47.380002 | 2018-11-16T10:38:44 | 2018-11-16T10:38:44 | 69,466,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | import gui7
from tkinter import *
class HelloPackage(gui7.HelloPackage):
    """gui7.HelloPackage subclass that forwards unknown attribute lookups
    to the wrapped widget stored in ``self.top``."""
    def __getattr__(self, name):
        # __getattr__ only fires for names NOT found through normal lookup,
        # so real attributes of this class are unaffected.
        # NOTE(review): assumes gui7.HelloPackage stores its root widget in
        # self.top -- confirm in gui7.py.
        # pass off to a real widget
        return getattr(self.top, name)
if __name__ == '__main__':
    HelloPackage().mainloop() # resolved via __getattr__ -> self.top.mainloop()
| [
"zhwenrong@sina.com"
] | zhwenrong@sina.com |
1a520fe98f7f8f7872637d486cf3c11c8cfdfd6e | a15200778946f6f181e23373525b02b65c44ce6e | /Algoritmi/2019-06-25/all-CMS-submissions/2019-06-25.10:50:45.629190.VR434403.biancaneve.py | d19254ac4d915d04f644a65152214d4681199a73 | [] | no_license | alberto-uni/portafoglioVoti_public | db518f4d4e750d25dcb61e41aa3f9ea69aaaf275 | 40c00ab74f641f83b23e06806bfa29c833badef9 | refs/heads/master | 2023-08-29T03:33:06.477640 | 2021-10-08T17:12:31 | 2021-10-08T17:12:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 727 | py | """
* user: VR434403
* fname: CROSARA
* lname: MARCO
* task: biancaneve
* score: 12.0
* date: 2019-06-25 10:50:45.629190
"""
# Python 2 contest script: reads a permutation of size n and m queries.
n, m = map(int, raw_input().split())
perm = map(int, raw_input().split())  # Py2: map() returns a plain list here
for l in range(m):
    row = map(int, raw_input().split())  # one query per line
    #print row
    if row[0] == 1:
        # Query type 1: swap the entries at 1-based positions row[1], row[2].
        perm[row[1]-1], perm[row[2]-1] = perm[row[2]-1], perm[row[1]-1]
    else:
        # Query type 2: starting at the first position whose value lies in
        # [da_, a_], check whether the following slots also hold values in
        # that range; prints YES/NO accordingly.
        da_ = row[1]
        a_ = row[2]
        for i in range(n):
            if perm[i] >= da_ and perm[i] <= a_:
                result = "YES"
                # NOTE(review): this scans a_-da_-1 slots after i, i.e. a
                # block of a_-da_ positions, while the value range holds
                # a_-da_+1 values; the submission scored only partial
                # credit, so the bound may be off by one -- verify.
                for j in range(i+1, i+a_-da_):
                    if (perm[j] < da_) or (perm[j] > a_):
                        result = "NO"
                print result
                break
| [
"romeo.rizzi@univr.it"
] | romeo.rizzi@univr.it |
6d40723471e9a7e9b314bf27d54b384edd18e847 | d98bfd59d27db330c970ed7dbf156e4e27be4cbc | /week10/Project1/Proj1/login/views.py | bd1b27a2a32fc9c7c33fa88fa0feaff1bbdf1c8f | [] | no_license | Aktoty00/webdev2019 | 3784324f090851ccf2cc5318f7297340a716ad7d | 44e4bb2905232da53053a334346340a905863d1e | refs/heads/master | 2020-04-18T21:57:19.434168 | 2019-04-29T22:07:51 | 2019-04-29T22:07:51 | 167,220,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | from django.shortcuts import render
from django.http import HttpResponse
def signup(request):
    """Return a hard-coded HTML signup form (labels in Russian).

    NOTE(review): the markup is built inline instead of using a Django
    template/Form -- acceptable for a course demo, not for production.
    """
    return HttpResponse('<input type = "text" name = "emailOrNumber" class = "emailOrNumber" placeholder = "Моб. телефон или эл. адрес"><br>'
                        '<input type = "text" name = "name" placeholder= "Имя и фамилия">'
                        '<input type = "text" name = "surname" placeholder = "Имя пользователя"><br>'
                        '<input type = "password" name = "newPassword" class = "newPassword" placeholder = "Пароль"><br>'
                        '<button type="submit">Регистрация</button>')
| [
"aktoty.rysdaulet@gmail.com"
] | aktoty.rysdaulet@gmail.com |
4e124d9d992edb2065ee6bd68581458013863276 | a3530aef1481451641daff69570d5ecd4ef069cf | /models/account/year.py | b12d37c21bc8e1bcb377d9742dadf10466e323c5 | [] | no_license | Trilokan/nagini | 757e576aad2482c4f7cb68043e3bf481aa918a30 | d936a965c2f4ea547de24e25d1fbc42649fb4b43 | refs/heads/master | 2020-04-03T09:02:45.484397 | 2019-01-02T12:45:42 | 2019-01-02T12:45:42 | 155,152,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,501 | py | # -*- coding: utf-8 -GPK*-
from odoo import models, fields, api
from calendar import monthrange
from datetime import datetime, timedelta
PROGRESS_INFO = [("draft", "Draft"), ("confirmed", "Confirmed")]
class Year(models.Model):
    """Odoo model for a calendar/financial year.

    Creating a record also generates the twelve monthly ``period.period``
    rows belonging to it (see ``create``/``generate_period``).
    """
    _name = "year.year"
    _rec_name = "name"
    # Calendar year as a string (e.g. "2018"); must be unique and is
    # parsed with int() in create(), so it has to be numeric.
    name = fields.Char(string="Year", required=True)
    financial_year = fields.Char(string="Financial Year", required=True)
    period_detail = fields.One2many(comodel_name="period.period",
                                    inverse_name="year_id",
                                    string="Period",
                                    readonly=True)
    _sql_constraints = [('unique_year', 'unique (name)', 'Error! Year must be unique'),
                        ('unique_financial_year', 'unique (financial_year)', 'Error! Financial Year must be unique')]
    def generate_period(self, year, year_id):
        """Create one period.period record for each month of *year*.

        :param year: calendar year as an int
        :param year_id: the year.year record the periods belong to
        """
        for month in range(1, 13):
            # monthrange returns (weekday_of_first_day, number_of_days).
            _, num_days = monthrange(year, month)
            from_date = datetime(year, month, 1)
            till_date = datetime(year, month, num_days)
            data = {"from_date": from_date.strftime("%Y-%m-%d"),
                    "till_date": till_date.strftime("%Y-%m-%d"),
                    "year_id": year_id.id}
            self.env["period.period"].create(data)
    @api.model
    def create(self, vals):
        """Create the year record, then generate its monthly periods.

        NOTE(review): int(vals["name"]) raises ValueError for non-numeric
        year names -- confirm input validation happens upstream.
        """
        year_id = super(Year, self).create(vals)
        year = int(vals["name"])
        self.generate_period(year, year_id)
        return year_id
| [
"rameshkumar@ajaxmediatech.com"
] | rameshkumar@ajaxmediatech.com |
e608e87eb6a45b67f54560ba8bfdf8aa39509c4f | b366806c99ac30e77789f80417978902e25628da | /boto3_exceptions/importexport.py | e76b14e4ca77bc73aaba4ac69ba3abc50a10a1c3 | [
"MIT"
] | permissive | siteshen/boto3_exceptions | 9027b38c238030859572afec7f96323171596eb7 | d6174c2577c9d4b17a09a89cd0e4bd1fe555b26b | refs/heads/master | 2020-04-19T03:15:02.525468 | 2019-10-23T07:37:36 | 2019-10-23T07:37:36 | 167,928,540 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,368 | py | import boto3
# A throwaway client is created at import time purely to reach the modeled
# exception classes (botocore generates them per service at runtime).
# NOTE(review): this runs boto3 client setup on import; environments
# without AWS region/credentials configuration may fail here -- confirm.
exceptions = boto3.client('importexport').exceptions
# Re-export every modeled importexport exception at module level so
# callers can import them directly from this module.
BucketPermissionException = exceptions.BucketPermissionException
CanceledJobIdException = exceptions.CanceledJobIdException
CreateJobQuotaExceededException = exceptions.CreateJobQuotaExceededException
ExpiredJobIdException = exceptions.ExpiredJobIdException
InvalidAccessKeyIdException = exceptions.InvalidAccessKeyIdException
InvalidAddressException = exceptions.InvalidAddressException
InvalidCustomsException = exceptions.InvalidCustomsException
InvalidFileSystemException = exceptions.InvalidFileSystemException
InvalidJobIdException = exceptions.InvalidJobIdException
InvalidManifestFieldException = exceptions.InvalidManifestFieldException
InvalidParameterException = exceptions.InvalidParameterException
InvalidVersionException = exceptions.InvalidVersionException
MalformedManifestException = exceptions.MalformedManifestException
MissingCustomsException = exceptions.MissingCustomsException
MissingManifestFieldException = exceptions.MissingManifestFieldException
MissingParameterException = exceptions.MissingParameterException
MultipleRegionsException = exceptions.MultipleRegionsException
NoSuchBucketException = exceptions.NoSuchBucketException
UnableToCancelJobIdException = exceptions.UnableToCancelJobIdException
UnableToUpdateJobIdException = exceptions.UnableToUpdateJobIdException
| [
"xiaojiang@actwill.com.cn"
] | xiaojiang@actwill.com.cn |
936b43bfd4885419120e1afe90faef8ebccc7a26 | be0e0488a46b57bf6aff46c687d2a3080053e52d | /python/programmers/level3/외벽점검.py | e173eb88a500981e940d6aee7f4e16c5a99e1e29 | [] | no_license | syo0e/Algorithm | b3f8a0df0029e4d6c9cbf19dcfcb312ba25ea939 | 1ae754d5bb37d02f28cf1d50463a494896d5026f | refs/heads/master | 2023-06-09T11:31:54.266900 | 2021-06-30T17:04:38 | 2021-06-30T17:04:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | # dist가 최대 8이기 때문에 brute force를 통해 탐색이 가능하다.
from itertools import permutations
def solution(n, weak, dist):
    """Return the minimum number of friends needed to inspect every weak
    point on a circular wall of circumference *n*, or -1 if even all the
    friends together cannot cover them.

    dist has at most 8 entries (per the original problem), so brute-forcing
    every permutation of the friends is feasible.

    Fix over the previous revision: the caller's ``weak`` list is no longer
    mutated (it used to get n-shifted copies appended in place).

    :param n: circumference of the wall
    :param weak: positions of the weak points (ascending, 0 <= p < n)
    :param dist: distance each friend can walk
    :return: minimum friend count, or -1 if impossible
    """
    count = len(weak)
    # Unroll the circle: duplicating every weak point shifted by n lets any
    # starting weak point be treated as the head of a linear segment.
    unrolled = weak + [p + n for p in weak]
    best = len(dist) + 1  # sentinel: one more than the friends available

    for offset in range(count):
        segment = unrolled[offset:offset + count]
        for order in permutations(dist):
            used = 1
            # Farthest point the first dispatched friend can reach.
            reach = segment[0] + order[0]
            for spot in segment:
                if spot > reach:
                    # Current friend cannot reach this spot: dispatch next.
                    used += 1
                    if used > len(order):
                        break  # leaves used == sentinel; ignored by min()
                    reach = spot + order[used - 1]
            best = min(best, used)

    return best if best <= len(dist) else -1
print(solution(12, [1, 5, 6, 10], [1, 2, 3, 4]))
| [
"kyun2dot@gmail.com"
] | kyun2dot@gmail.com |
8bd1277a439d2db671284eb40192c4e5b069f8ff | 77ae39a4e38dc53ed50e3943a0049fc4c72af735 | /Leetcode/Triangle.py | 6e49a3f82c0624bd8e60a6792df0901614de84a8 | [
"MIT"
] | permissive | harrifeng/Python-Study | 41ab870a31213d414f08c5753d22e8463bb3f102 | d8158e33392a322830244594405cae7e9d7f6fb4 | refs/heads/master | 2021-01-18T10:48:23.215569 | 2016-02-04T02:06:22 | 2016-02-04T02:06:22 | 51,045,556 | 1 | 0 | null | 2016-02-04T02:05:39 | 2016-02-04T02:05:38 | null | UTF-8 | Python | false | false | 1,807 | py | """
Given a triangle, find the minimum path sum from top to bottom. Each step you may move to adjacent numbers on the row below.
For example, given the following triangle
[
[2],
[3,4],
[6,5,7],
[4,1,8,3]
]
The minimum path sum from top to bottom is 11 (i.e., 2 + 3 + 5 + 1 = 11).
Note:
Bonus point if you are able to do this using only O(n) extra space, where n is the total number of rows in the triangle.
"""
class Solution:
    """Minimum top-to-bottom path sum in a triangle (LeetCode 120).

    Each step may move to one of the two adjacent numbers in the row
    below. Two implementations are kept: a full O(rows*cols) DP table
    (easier to follow) and the O(n)-extra-space rolling-row version that
    answers the problem's bonus point.
    """

    # @param triangle, a list of lists of integers
    # @return an integer
    def minimumTotal(self, triangle):
        """Bottom-up DP over a full table.

        dp[i][j] = cheapest path sum from (i, j) down to the bottom row.
        Returns 0 for an empty triangle.
        """
        if not triangle:
            return 0
        rows = len(triangle)
        width = len(triangle[-1])
        dp = [[0] * width for _ in range(rows)]
        for i in reversed(range(rows)):
            for j in range(len(triangle[i])):
                if i == rows - 1:
                    dp[i][j] = triangle[i][j]  # base case: bottom row
                else:
                    # Step to whichever adjacent child below is cheaper.
                    dp[i][j] = min(dp[i + 1][j], dp[i + 1][j + 1]) + triangle[i][j]
        return dp[0][0]

    def minimumTotal_2(self, triangle):
        """O(n) extra-space version: roll a single row upward.

        Fix over the previous revision: the bottom row is *copied* instead
        of aliased, so the caller's triangle is never mutated.
        Returns 0 for an empty triangle.
        """
        if not triangle:
            return 0
        dp = list(triangle[-1])  # copy -- do not mutate the input
        for i in range(len(triangle) - 2, -1, -1):
            for j in range(i + 1):
                dp[j] = triangle[i][j] + min(dp[j], dp[j + 1])
        return dp[0]
| [
"cyandterry@hotmail.com"
] | cyandterry@hotmail.com |
ad75b303a973c7c021baa80e550cb88a59ab7fa5 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /mwGt38m3Q3KcsSaPY_0.py | 13a9949717c99e6d0eaa2eb24ebe169ca765f62e | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py |
def increment_items(lst):
    """Return a new list where each element of *lst* is increased by 1."""
    bumped = []
    for value in lst:
        bumped.append(value + 1)
    return bumped
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
dd0916097167cb5d677ecb3fcf9c3a7a4546be4c | effce116340b7d937bd285e43b49e1ef83d56156 | /data_files/offlinearize.py | 642230032148b0163c1dd2ffe22831780bec7e19 | [] | no_license | DL2021Spring/CourseProject | a7c7ef57d69bc1b21e3303e737abb27bee3bd585 | 108cdd906e705e9d4d05640af32d34bfc8b124da | refs/heads/master | 2023-04-11T18:52:30.562103 | 2021-05-18T09:59:59 | 2021-05-18T09:59:59 | 365,733,976 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,614 | py |
import sys
from urlparse import urlparse, urljoin
from os.path import dirname, join as joinpath
from os import makedirs
from urllib import urlopen
from simplejson import loads
# Python 2 script setup: take the brat server URL from the command line;
# on any failure print usage and bail out.
try:
    base_url = sys.argv[1]
    url = urlparse(base_url)
except:
    # NOTE(review): if argv[1] is absent, this print itself re-raises
    # IndexError before the usage line is shown -- verify intent.
    print sys.argv[1]
    print "Syntax: %s <url>" % sys.argv[0]
    sys.exit(1)
this_dir = dirname(sys.argv[0])
# Downloaded collections/documents get mirrored under ../offline_data.
datadir = joinpath(this_dir, '../offline_data')
# The URL fragment encodes "/collection/document"; keep the collection part.
coll_and_doc = url.fragment
coll = dirname(coll_and_doc)[1:]
def convert_coll(coll):
if coll == '':
ajax_coll = '/'
else:
ajax_coll = '/%s/' % coll
coll_query_url = urljoin(base_url, 'ajax.cgi?action=getCollectionInformation&collection=%s' % ajax_coll)
coll_dir = joinpath(datadir, coll)
try:
makedirs(coll_dir)
except:
pass
print ajax_coll
conn = urlopen(coll_query_url)
jsonp = conn.read()
conn.close
with open(joinpath(coll_dir, 'collection.js'), 'w') as f:
f.write("jsonp=")
f.write(jsonp)
coll_data = loads(jsonp)
for item in coll_data['items']:
if item[0] == 'd':
doc = item[2]
print " %s" % doc
doc_query_url = urljoin(base_url, 'ajax.cgi?action=getDocument&collection=%s&document=%s' % (ajax_coll, doc))
conn = urlopen(doc_query_url)
jsonp = conn.read()
conn.close
with open(joinpath(coll_dir, '%s.data.js' % doc), 'w') as f:
f.write("jsonp=")
f.write(jsonp)
elif item[0] == 'c' and item[2] != '..':
convert_coll(item[2])
convert_coll(coll)
| [
"1042448815@qq.com"
] | 1042448815@qq.com |
12841b3819ac2e828f695f322d21313affe8148c | f15d8305d1c97482b7a3391036742eaaaccc8238 | /TestTurtle/frctal_tree_2.0.py | a4b7f7d25a925a7ea9a19601fdb47308c521bb78 | [] | no_license | yuansuixin/learn_python__xiaoxiang | cdeec72a615c28de15334b6d61de87a4df4b25cd | 3b90cab535a052ed101ea6838cf86529cf570ec6 | refs/heads/master | 2021-04-03T06:15:41.841685 | 2018-03-11T10:18:43 | 2018-03-11T10:18:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py |
## 绘制分型树
import turtle
def draw_brach(brach_length):
if brach_length > 5:
# 绘制右侧的树枝
turtle.forward(brach_length)
print('向前',brach_length)
turtle.right(20)
print('右转20')
draw_brach(brach_length-15)
# 绘制左侧的树枝
turtle.left(40)
print('左转40')
draw_brach(brach_length-15)
# 返回之前的树枝上
turtle.right(20)
print('右转20')
turtle.backward(brach_length)
print('返回',brach_length)
def main():
turtle.left(90)
turtle.penup()
turtle.backward(150)
turtle.pendown()
turtle.color('red')
draw_brach(100)
turtle.exitonclick()
if __name__ == '__main__':
main()
| [
"cyss428@163.com"
] | cyss428@163.com |
32bd6af443d509ff9a722650c33ac8c9fda2b766 | 901b554d55e661e1f2af4493e0fd446b8dd31e3f | /20090126-sws7-sb/parseChaco.py | 791a5b6e2c53ed54414f4a192c11d24ae3fac415 | [] | no_license | dsoto/swData | dec5542f0fa2af9554e946b5cd3c248201042c36 | 19dc7d9cd3e23e282c80166c359c995548525e63 | refs/heads/master | 2020-12-24T17:54:57.486775 | 2010-06-16T15:07:02 | 2010-06-16T15:07:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,518 | py | #!/usr/bin/env python
from enthought.chaco.api import (OverlayPlotContainer,
VPlotContainer, Plot, ArrayPlotData)
from enthought.chaco.tools.api import (PanTool, LineInspector)
from enthought.traits.api import (HasTraits, Instance, Array,
Button, Str, Int, Float, Bool, Tuple)
from enthought.traits.ui.api import (View, Item, Handler, HGroup)
from enthought.traits.ui.menu import Action, OKButton
from enthought.enable.component_editor import ComponentEditor
import numpy
import glob
import sys
import os.path
sys.path.append("../roxanne")
import roxanne as rx
class customTool(LineInspector):
    """LineInspector that mirrors the cursor position into a plotBox and
    records up to three clicked (x, y) points on the normal-force trace."""
    def __init__(self,*args,**kwargs):
        super(customTool,self).__init__(*args,**kwargs)
        # Back-reference to the owning plotBox (passed as a keyword arg).
        self.plotBox = kwargs['plotBox']
    def normal_mouse_move(self, event):
        # Track the cursor: update plotBox.cursorPosX/Y as the mouse moves.
        LineInspector.normal_mouse_move(self,event)
        plot = self.component
        plot.request_redraw()
        # Map screen coordinates to data coordinates; x is the sample index.
        cursorPosX = self.component.map_data([event.x,event.y])[0]
        self.plotBox.cursorPosX = int(cursorPosX)
        self.plotBox.cursorPosY = self.plotBox.normal[self.plotBox.cursorPosX]
    def normal_left_down(self, event):
        # Record a clicked point; after three points, wrap back to slot 0.
        cursorPosX = self.component.map_data([event.x,event.y])[0]
        self.plotBox.cursorPosX = int(cursorPosX)
        self.plotBox.cursorPosY = self.plotBox.normal[self.plotBox.cursorPosX]
        if self.plotBox.pointsClicked == 3:
            self.plotBox.pointsClicked = 0
        self.plotBox.pointX[self.plotBox.pointsClicked]=self.plotBox.cursorPosX
        self.plotBox.pointY[self.plotBox.pointsClicked]=self.plotBox.cursorPosY
        # Self-assignment -- presumably to fire the traits change
        # notification after in-place array edits; confirm before removing.
        self.plotBox.pointX = self.plotBox.pointX
        self.plotBox.pointY = self.plotBox.pointY
        self.plotBox.plotdata.set_data('pointX',self.plotBox.pointX)
        self.plotBox.plotdata.set_data('pointY',self.plotBox.pointY)
        self.plotBox.pointsClicked += 1
    def normal_left_up(self, event):
        # Intentionally a no-op: clicks are fully handled on mouse-down.
        pass
class plotBoxHandler(Handler):
    """Traits UI handler wiring the Accept/Reject buttons and window-close
    behaviour of a plotBox view."""
    def close(self, info, is_ok):
        # Only allow the window to close once the analysis was accepted.
        if info.object.isAccepted == True:
            return True
    def closed(self, info, is_ok):
        # On close, append "<file>\t<x0>\t<x1>\t<x2>" to the results file.
        outString = (info.object.fileName + '\t' +
                     str(info.object.pointX[0]) + '\t' +
                     str(info.object.pointX[1]) + '\t' +
                     str(info.object.pointX[2]) + '\n')
        info.object.fOut.write(outString)
    def accept(self, info):
        # Bound to the "Accept" Action declared on plotBox.
        info.object.message = 'plot points accepted'
        info.object.isAccepted = True
    def reject(self, info):
        # Bound to the "Reject" Action: reset the three picked points.
        info.object.message = 'plot points rejected, choose again'
        info.object.pointX = numpy.array([0.0,100.0,200.0])
        info.object.pointY = numpy.array([0.0,0.0,0.0])
        info.object.plotdata.set_data('pointX',info.object.pointX)
        info.object.plotdata.set_data('pointY',info.object.pointY)
        info.object.isAccepted = False
        info.object.pointsClicked = 0
    def object_pointX_changed(self, info):
        # Placeholder for reacting to pointX updates; currently unused.
        pass
class plotBox(HasTraits):
    """Interactive viewer for one force-trace data file.

    Shows the shear and normal traces stacked vertically, lets the user
    click three landmark points on the normal trace via customTool, and
    exposes Accept/Reject actions handled by plotBoxHandler (which writes
    the chosen indices to the shared results file).
    """
    pointsClicked = Int
    index = Array    # sample indices (x axis)
    normal = Array   # normal-force trace values
    shear = Array    # shear-force trace values
    # The three user-picked landmark x/y values shown as scatter markers.
    pointX = Array(dtype = int, value = ([0.0,100.0,200.0]), comparison_mode = 0)
    pointY = Array(dtype = float, value = ([0.0,0.0,0.0]), comparison_mode = 0)
    message = Str
    isAccepted = Bool
    accept = Action(name = "Accept", action = "accept")
    reject = Action(name = "Reject", action = "reject")
    cursorPosX = Int
    cursorPosY = Float
    vPlot = Instance(VPlotContainer)
    def __init__(self, fileName, fOut):
        print 'init plotBox'
        super(plotBox, self).__init__()
        self.isAccepted = True
        self.fOut = fOut  # results file shared with plotBoxHandler.closed
        self.message = 'Analysis Acceptable?'
        # Two containers stacked vertically: shear on top, normal below.
        self.vPlot = VPlotContainer(padding = 10)
        self.vPlot.stack_order = 'top_to_bottom'
        topPlot = OverlayPlotContainer(padding = 10)
        self.vPlot.add(topPlot)
        bottomPlot = OverlayPlotContainer(padding = 10)
        self.vPlot.add(bottomPlot)
        # def parseFileName():
        self.fileName = fileName
        # get complete path of data file
        fullFileName = os.path.abspath(fileName)
        self.fileName = os.path.split(fullFileName)[1]
        self.shortFileName = os.path.splitext(self.fileName)[1]
        self.plotTitle = self.shortFileName
        # def readData():
        # NOTE(review): fileIn is never explicitly closed.
        print fileName
        fileIn = open(fileName,'r')
        hD = rx.readDataFileHeader(fileIn)
        print 'hD'
        dD = rx.readDataFileArray(fileIn)
        # NOTE(review): 'normal' is filled from voltageForceLateral and
        # 'shear' from voltageForceNormal -- the names look swapped;
        # confirm against the roxanne data format.
        self.normal = numpy.array(map(float,dD['voltageForceLateral']))
        self.shear = numpy.array(map(float,dD['voltageForceNormal']))
        self.index = numpy.arange(len(self.normal))
        # index dictionary
        # iD = rx.parseForceTrace(hD,dD)
        # self.pointX[0] = iD['indexContact']
        # self.pointY[0] = self.normal[iD['indexContact']]
        # self.pointX[1] = iD['indexMaxPreload']
        # self.pointY[1] = self.normal[iD['indexMaxPreload']]
        # self.pointX[2] = iD['indexMaxAdhesion']
        # self.pointY[2] = self.normal[iD['indexMaxAdhesion']]
        # def constructPlots():
        self.plotdata = ArrayPlotData(index = self.index,
                                      normal = self.normal,
                                      shear = self.shear,
                                      pointX = self.pointX,
                                      pointY = self.pointY)
        self.normalPlot = Plot(self.plotdata)
        self.normalPlot.plot(('index','normal'), type = 'line',
                             color = 'blue')
        # Scatter overlay showing the three picked landmark points.
        self.normalPlot.plot(('pointX','pointY'), type = 'scatter',
                             marker = 'diamond',
                             marker_size = 5,
                             color = (0.0,0.0,1.0,0.5),
                             outline_color = 'none')
        self.normalPlot.value_range.set_bounds(-1,1)
        self.shearPlot = Plot(self.plotdata)
        self.shearPlot.plot(('index','shear'),type='line',color='green')
        # customTool tracks the cursor and records clicks (see class above).
        self.normalPlot.overlays.append(customTool(plotBox = self,
                                                   component = self.normalPlot,
                                                   axis = 'index_x',
                                                   inspect_mode = 'indexed',
                                                   write_metadata = True,
                                                   color = 'black',
                                                   is_listener = False))
        self.normalPlot.tools.append(rx.SimpleZoom(self.normalPlot))
        self.normalPlot.tools.append(PanTool(self.normalPlot,drag_button = 'right'))
        self.normalPlot.title = 'Normal Force Trace'
        self.shearPlot.title = 'Shear Force Trace'
        topPlot.add(self.shearPlot)
        bottomPlot.add(self.normalPlot)
        # Link x ranges so zoom/pan stays in sync between the two plots.
        self.shearPlot.index_range = self.normalPlot.index_range
    # Window layout: plots on top, status/readout row plus buttons below.
    traits_view = View(Item('vPlot',
                            editor = ComponentEditor(),
                            resizable = True,
                            show_label = False),
                       HGroup(Item('message', width = 200),
                              Item('cursorPosX', width = 200),
                              Item('cursorPosY', width = 200),
                              Item('pointX', style='readonly', width = 200),
                              Item('pointY', style='readonly', width = 200)),
                       buttons = [accept, reject, OKButton],
                       title = 'Roxanne Parse Application',
                       handler = plotBoxHandler(),
                       resizable = True,
                       width = 1400, height = 800,
                       x = 20, y = 40)
def main():
    """Batch-process every matching force-trace data file: for each file,
    open the interactive plotBox UI; accepted point indices are appended
    to parsed.data by plotBoxHandler through the shared file handle.

    Fix over the previous revision: the results file is opened with a
    context manager so the handle is flushed and closed even if the GUI
    loop raises (it used to be leaked).
    """
    fileNameList = glob.glob('./data/*sws*.data')
    outputList = ['dataFileName',
                  'indexContact',
                  'indexMaxPreload',
                  'indexMaxAdhesion\n']
    with open('parsed.data', 'w') as fOut:
        # Tab-separated header row (last column carries the newline).
        fOut.write('\t'.join(outputList))
        for fileName in fileNameList:
            myPlotBox = plotBox(fileName, fOut)
            myPlotBox.configure_traits()
if __name__=='__main__':
main() | [
"danielrsoto@gmail.com"
] | danielrsoto@gmail.com |
97f71acb578518c20028682674b1d9b0be86ddb1 | 29416ed280fff073ea325ed7dc0d573cb7145d47 | /stagesepx/classifier/svm.py | d2f9d17c112c863bfcd77074772925fcfedf4f63 | [
"MIT"
] | permissive | lshvisual/stagesepx | 64507f3cf2db65c3cfe7129cecb357237f3a8db7 | 98aebb4195e87b631c736878fbcef210f19b22cc | refs/heads/master | 2020-07-16T14:57:48.688554 | 2019-09-02T04:18:20 | 2019-09-02T04:18:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,449 | py | from loguru import logger
import cv2
import os
import pickle
import typing
import numpy as np
from sklearn.svm import LinearSVC
from stagesepx.classifier.base import BaseClassifier
from stagesepx import toolbox
class SVMClassifier(BaseClassifier):
    """Stage classifier backed by sklearn's LinearSVC.

    Pictures are converted to feature vectors (HOG / LBP / raw pixels)
    before training and prediction; the fitted model can be pickled to
    disk and reloaded later.
    """
    # Maps feature-type name -> function turning a picture into features.
    FEATURE_DICT = {
        'hog': toolbox.turn_hog_desc,
        'lbp': toolbox.turn_lbp_desc,
        # do not use feature transform
        'raw': lambda x: x,
    }
    def __init__(self,
                 feature_type: str = None,
                 *args, **kwargs):
        """
        init classifier
        :param feature_type:
            before training, classifier will convert pictures into feature, for better classification.
            eg: 'hog', 'lbp' or 'raw' (defaults to 'hog')
        :raises AttributeError: if feature_type is not a known feature name
        """
        super().__init__(*args, **kwargs)
        if not feature_type:
            feature_type = 'hog'
        if feature_type not in self.FEATURE_DICT:
            raise AttributeError(f'no feature func named {feature_type}')
        self.feature_func = self.FEATURE_DICT[feature_type]
        self._model = None  # LinearSVC instance once trained or loaded
        logger.debug(f'feature function: {feature_type}')
    def clean_model(self):
        # Drop the current model so a fresh one is built on the next train().
        self._model = None
    def save_model(self, model_path: str, overwrite: bool = None):
        """
        save trained model to disk with pickle
        :param model_path: destination file path
        :param overwrite: set True to replace an existing file
        :raises FileExistsError: target exists and overwrite is falsy
        """
        logger.debug(f'save model to {model_path}')
        # assert model file
        if os.path.isfile(model_path) and not overwrite:
            raise FileExistsError(f'model file {model_path} already existed, you can set `overwrite` True to cover it')
        # assert model data is not empty
        assert self._model, 'model is empty'
        with open(model_path, 'wb') as f:
            pickle.dump(self._model, f)
    def load_model(self, model_path: str, overwrite: bool = None):
        """
        load trained model from disk
        :param model_path: pickle file produced by save_model
        :param overwrite: set True to replace an already-loaded model
        :raises RuntimeError: a model is already loaded and overwrite is falsy
        """
        logger.debug(f'load model from {model_path}')
        # assert model file
        assert os.path.isfile(model_path), f'model file {model_path} not existed'
        # assert model data is empty
        if self._model and not overwrite:
            raise RuntimeError(f'model is not empty, you can set `overwrite` True to cover it')
        # joblib raise an error ( i have no idea about how to fix it ) here, so use pickle instead
        # SECURITY NOTE: pickle.load executes arbitrary code during
        # deserialization -- only load model files from trusted sources.
        with open(model_path, 'rb') as f:
            self._model = pickle.load(f)
    def read_from_list(self, data: typing.List[int], video_cap: cv2.VideoCapture = None, *_, **__):
        # Loading frames from an in-memory list is deliberately unsupported.
        raise NotImplementedError('svm classifier only support loading data from files')
    def train(self):
        """
        train your classifier with data. must be called before prediction
        :return:
        """
        if not self._model:
            logger.debug('no model can be used. build a new one.')
            self._model = LinearSVC()
        else:
            logger.debug('already have a trained model. train on this model.')
        train_data = list()
        train_label = list()
        # self.read() yields (label, pictures) pairs from the data source.
        for each_label, each_label_pic_list in self.read():
            for each_pic_object in each_label_pic_list:
                logger.debug(f'training label: {each_label}')
                # apply hook
                each_pic_object = self._apply_hook(-1, each_pic_object)
                # Flatten the feature map into one sample vector per picture.
                each_pic_object = self.feature_func(each_pic_object).flatten()
                train_data.append(each_pic_object)
                train_label.append(each_label)
        logger.debug('data ready')
        self._model.fit(train_data, train_label)
        logger.debug('train finished')
    def predict(self, pic_path: str) -> str:
        """
        predict a single picture
        :param pic_path: path to an image readable by cv2.imread
        :return: predicted stage label
        """
        pic_object = cv2.imread(pic_path)
        return self.predict_with_object(pic_object)
    def predict_with_object(self, pic_object: np.ndarray) -> str:
        """
        predict a single object
        :param pic_object: picture as a numpy array
        :return: predicted stage label
        """
        pic_object = self.feature_func(pic_object)
        # Reshape to the (1, n_features) shape sklearn expects.
        pic_object = pic_object.reshape(1, -1)
        return self._model.predict(pic_object)[0]
    def _classify_frame(self,
                        frame_id: int,
                        frame: np.ndarray,
                        *_, **__) -> str:
        # Per-frame hook -- presumably invoked by BaseClassifier's frame
        # loop (see the base class for the contract).
        return self.predict_with_object(frame)
| [
"178894043@qq.com"
] | 178894043@qq.com |
48f895d6d0a9b8dfa8df8e2750cdb4c53e168f84 | 2a2ce1246252ef6f59e84dfea3888c5a98503eb8 | /examples/tutorials/09_flip.py | 9c8409019a6c692430f2e24ac884f78b28cd478d | [
"BSD-3-Clause"
] | permissive | royqh1979/PyEasyGraphics | c7f57c1fb5a829287e9c462418998dcc0463a772 | 842121e461be3273f845866cf1aa40c312112af3 | refs/heads/master | 2021-06-11T10:34:03.001842 | 2021-04-04T10:47:52 | 2021-04-04T10:47:52 | 161,438,503 | 8 | 4 | BSD-3-Clause | 2021-04-04T10:47:53 | 2018-12-12T05:43:31 | Python | UTF-8 | Python | false | false | 376 | py | """
Draw a bus, then use reflect() to draw its mirror image across a diagonal line
"""
from easygraphics import *
import draw_bus
def main():
    """Draw the bus, a dashed guide line, and the bus's mirror image."""
    init_graph(500, 300)
    draw_bus.draw_bus()              # the original bus
    set_color("gray")
    set_line_style(LineStyle.DASH_LINE)
    line(0, 300, 500, 0)             # dashed guide: the mirror axis
    set_line_style(LineStyle.SOLID_LINE)
    reflect(0, 300, 500, 0)          # flip the coordinate system across it
    draw_bus.draw_bus()              # same code now draws the mirror image
    pause()                          # wait for user input before closing
    close_graph()
easy_run(main) | [
"royqh1979@gmail.com"
] | royqh1979@gmail.com |
e687132d147ed7ba43628a5af04b87969ca6ed6a | ed114d6111f9f75bdf74de9687f1ec6145ddcae2 | /25_file_IO.py | c1e548d18301d51ec1b2960066b36dee4f3a4281 | [] | no_license | tayyabmalik4/Python-With-Tayyab | 495b6074c7a75bcb5eced34bdec3053b2103c78c | 5f566e4036bfe17f2a7879a7a7a5d70f259ec6d0 | refs/heads/main | 2023-06-29T01:25:57.827350 | 2021-08-02T15:27:52 | 2021-08-02T15:27:52 | 387,209,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | # File IO Basics
"""
"r" => open file for reading----this is a default
"w" => open a file for writing
"x" => Creates a file if not exits
"a" => add more content to a file
"t" => text mode -----this is a defaut
"b" => binary mode
"r+" => read and write
"""
| [
"mtayyabmalik99@gmail.com"
] | mtayyabmalik99@gmail.com |
e90a761365c11c6ac9cab953da8b10fbd1b4b195 | 8a51e947e11d37fc2937a81cc02e9901e06b4291 | /envfh/bin/chardetect | 16d09ac1b0de412a80954ca6956a7e69b839da7d | [] | no_license | Deepakchawla/flask_mysql_heroku | 107fa4ad1074901cd5cdd4a762e8bc960b7b90ee | 5ff5186291a461f9eaedd6a009f4a5e1a08f3a62 | refs/heads/main | 2022-12-26T01:34:41.957981 | 2020-10-05T15:41:57 | 2020-10-05T15:41:57 | 301,443,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | #!/home/deepakchawala/PycharmProjects/flask_heroku/envfh/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
    # pip-generated console entry point: strip setuptools' "-script.pyw" /
    # ".exe" suffix from argv[0] so usage/help output shows the clean name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"deepakchawla35@gmail.com"
] | deepakchawla35@gmail.com | |
9bcb3781b5e49ae5f845098c5dd06f91187d1435 | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-elb/setup.py | 78a2480f61627fbccd1b215889f9405423ab312f | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,654 | py | # coding: utf-8
from os import path
from setuptools import setup, find_packages
# Package metadata for the HuaweiCloud ELB SDK distribution.
NAME = "huaweicloudsdkelb"
VERSION = "3.0.52"
AUTHOR = "HuaweiCloud SDK"
AUTHOR_EMAIL = "hwcloudsdk@huawei.com"
URL = "https://github.com/huaweicloud/huaweicloud-sdk-python-v3"
DESCRIPTION = "ELB"
# Use README_PYPI.md (next to this setup.py) as the PyPI long description.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README_PYPI.md'), encoding='utf-8') as f:
    LONG_DESCRIPTION = f.read()
REQUIRES = ["huaweicloudsdkcore"]
# Build universal (py2/py3) wheels.
OPTIONS = {
    'bdist_wheel': {
        'universal': True
    }
}
setup(
    name=NAME,
    version=VERSION,
    options=OPTIONS,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    license="Apache LICENSE 2.0",
    url=URL,
    keywords=["huaweicloud", "sdk", "ELB"],
    packages=find_packages(exclude=["tests*"]),
    install_requires=REQUIRES,
    python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*",
    include_package_data=True,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: Software Development'
    ]
)
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
8fc4b9be4d5be7229044d9a004d39710886fc693 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R2/benchmark/startQiskit58.py | be45e1e64e6e2f380c154fe1c6637b6004f64340 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,324 | py | # qubit number=3
# total number=9
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings character-wise; the combined
    result is emitted in reversed order (the benchmark's convention)."""
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(bits[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Inner product of two bit strings over GF(2), returned as '0'/'1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build the bit-flip oracle O_f on n control qubits plus 1 target:
    for every n-bit input where f(input) == "1", the target is flipped.

    Brute-force construction: one multi-controlled Toffoli per satisfying
    input, with X gates temporarily mapping that input's 0-bits to 1 so
    the MCT fires exactly on that bit pattern.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Surround the MCT with X on the 0-bits of `rep` so the gate
            # triggers only for this specific input pattern.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename=(kernel + '-oracle.png'))
    return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Assemble the Bernstein-Vazirani circuit for oracle *f* on n input
    qubits plus one ancilla, including this benchmark's extra gates."""
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    b = f(zero)  # f(0...0) yields the constant bit b
    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])
    # circuit begin
    # The "number=k" tags index extra gates -- apparently mutations
    # injected by the benchmark generator on top of the textbook circuit.
    prog.h(input_qubit[1]) # number=1
    prog.x(input_qubit[2]) # number=2
    prog.cx(input_qubit[2],input_qubit[1]) # number=6
    prog.cx(input_qubit[2],input_qubit[1]) # number=4
    prog.z(input_qubit[2]) # number=3
    prog.y(input_qubit[2]) # number=5
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    """Simulate *prog* on the statevector simulator and return its state
    as a dict mapping basis-state labels (e.g. "|010>") to amplitudes."""
    state_backend = Aer.get_backend('statevector_simulator')
    statevec = execute(prog, state_backend).result()
    quantum_state = statevec.get_statevector()
    # The amplitude vector has length 2**qubits; recover the qubit count.
    qubits = round(log2(len(quantum_state)))
    quantum_state = {
        "|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
        for i in range(2 ** qubits)
    }
    return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    """Run *prog* on the named simulator backend and summarise the result.

    Returns the raw counts, the ideal statevector, the most frequent
    measured bitstring (as ``a``) and the expected constant ``b``.
    """
    # Q: which backend should we use?
    # ideal statevector for comparison
    quantum_state = get_statevector(prog)
    # (IBMQ hardware path intentionally left disabled.)
    sim = Aer.get_backend(backend_str)
    # transpile/schedule -> assemble -> backend.run
    counts = execute(prog, sim, shots=shots).result().get_counts()
    # most frequent bitstring, reversed to undo qiskit's little-endian order
    a = Counter(counts).most_common(1)[0][0][::-1]
    return {
        "measurements": counts,
        "quantum_state": quantum_state,
        "a": a,
        "b": b
    }
def bernstein_test_1(rep: str):
    """011 . x + 1"""
    secret, offset = "011", "1"
    return bitwise_xor(bitwise_dot(secret, rep), offset)
def bernstein_test_2(rep: str):
    """000 . x + 0"""
    secret, offset = "000", "0"
    return bitwise_xor(bitwise_dot(secret, rep), offset)
def bernstein_test_3(rep: str):
    """111 . x + 1"""
    secret, offset = "111", "1"
    return bitwise_xor(bitwise_dot(secret, rep), offset)
if __name__ == "__main__":
    # Secret string a and constant b for f(x) = a.x XOR b (Bernstein-Vazirani).
    n = 2
    a = "11"
    b = "1"
    f = lambda rep: \
        bitwise_xor(bitwise_dot(a, rep), b)
    prog = build_circuit(n, f)
    sample_shot =4000
    # Results are written to a CSV file rather than printed to stdout.
    writefile = open("../data/startQiskit58.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    backend = BasicAer.get_backend('qasm_simulator')
    # Transpile against a fake 5-qubit device, then append extra gates
    # (generated mutation) and a measurement of every qubit.
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.h(qubit=2)
    circuit1.x(qubit=3)
    circuit1.measure_all()
    info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
    # Dump counts, a marker line, the circuit depth, and the circuit drawing.
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
63feb400bba4f58c6678073156ad61cd583474db | 3a891a79be468621aae43defd9a5516f9763f36e | /desktop/core/ext-py/defusedxml-0.5.0/other/python_genshi.py | 183d1fec94405e9cea10ac4388123eb295e70391 | [
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | oyorooms/hue | b53eb87f805063a90f957fd2e1733f21406269aa | 4082346ef8d5e6a8365b05752be41186840dc868 | refs/heads/master | 2020-04-15T20:31:56.931218 | 2019-01-09T19:02:21 | 2019-01-09T19:05:36 | 164,998,117 | 4 | 2 | Apache-2.0 | 2019-01-10T05:47:36 | 2019-01-10T05:47:36 | null | UTF-8 | Python | false | false | 172 | py | #!/usr/bin/python
import sys
from pprint import pprint
from genshi.input import XMLParser
# Parse the XML document named on the command line with genshi's XMLParser
# and pretty-print the resulting parse-event stream.
with open(sys.argv[1]) as f:
    parser = XMLParser(f)
    pprint(list(parser))
| [
"yingchen@cloudera.com"
] | yingchen@cloudera.com |
267f6e8d8777be378029593fbbaa43b7117edfb9 | 4d6975caece0acdc793a41e8bc6d700d8c2fec9a | /leetcode/990.verifying-an-alien-dictionary/990.verifying-an-alien-dictionary.py | 14b6ba9f83d6ebf0cbaa4e0b377b09982bcb1a90 | [] | no_license | guiconti/workout | 36a3923f2381d6e7023e127100409b3a2e7e4ccb | 5162d14cd64b720351eb30161283e8727cfcf376 | refs/heads/master | 2021-08-03T10:32:02.108714 | 2021-07-26T04:38:14 | 2021-07-26T04:38:14 | 221,025,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | class Solution:
def isAlienSorted(self, words: List[str], order: str) -> bool:
| [
"guibasconti@gmail.com"
] | guibasconti@gmail.com |
a0fb9bec08517382b82887234d38d017db71fc27 | 3b6cc22708e02e056cbd3175f63e08fc330836c6 | /server.py | 797b7d51a61de3a5e1a67b968e8a2e3e8ad3a160 | [] | no_license | janinemartinez/rpg-web-app | 4884a27f50bc06169d2f52d1ee99087801049bad | 01cba5999634194e02971bf57ed6258253b3f2ed | refs/heads/master | 2021-07-11T07:42:37.292396 | 2020-03-18T08:56:08 | 2020-03-18T08:56:08 | 239,904,803 | 0 | 0 | null | 2021-03-20T02:54:06 | 2020-02-12T01:51:29 | HTML | UTF-8 | Python | false | false | 28,331 | py | """Movie Ratings."""
from jinja2 import StrictUndefined
import json
from flask import (Flask, render_template, redirect, request, flash, session, jsonify)
from flask_debugtoolbar import DebugToolbarExtension
from model import (
User,
Character,
Attribute,
Skill,
Template,
Spell,
Char_spell,
Char_species,
Char_skill,
Class_spell,
connect_to_db,
db,
)
import random
# import helper
app = Flask(__name__)
# NOTE(review): secret key is hard-coded in source control -- move it to an
# environment variable or config file before deploying.
app.secret_key = "tiger&luna"
# Fail loudly (instead of rendering blanks) when a template uses an
# undefined variable.
app.jinja_env.undefined = StrictUndefined
def add_attribute_inc(attrib1, attrib2, stre, dex, con, inte, wis, cha, attribid):
    """Apply two ability-score increases and persist them.

    *attrib1* / *attrib2* are attribute names ("strength", "dexterity", ...);
    the remaining arguments are the character's current six scores and the
    primary key of its Attribute row.  Any unrecognised name falls through
    to charisma (preserved from the original).

    BUG FIX: the original loop iterated over ``[attrib1, attrib2]`` but
    tested ``attrib1`` on every pass, so the first choice was applied twice
    and the second ignored.  The loop now inspects the loop variable.
    """
    attributes = Attribute.query.filter_by(attributes_id=attribid).first()
    for bonus in (attrib1, attrib2):
        if bonus == "strength":
            stre += 1
        elif bonus == "dexterity":
            dex += 1
        elif bonus == "constitution":
            con += 1
        elif bonus == "intelligence":
            inte += 1
        elif bonus == "wisdom":
            wis += 1
        else:
            cha += 1
    attributes.strength = stre
    attributes.dexterity = dex
    attributes.constitution = con
    attributes.wisdom = wis
    attributes.intelligence = inte
    attributes.charisma = cha
    db.session.add(attributes)
    db.session.commit()
def convert(lst):
    """Parse a Python-literal string (e.g. ``"[1, 2, 3]"``) into its value.

    SECURITY FIX: the original used ``eval``, which executes arbitrary code
    when the string comes from an untrusted source.  ``ast.literal_eval``
    accepts only Python literals and raises ValueError for anything else.
    """
    import ast  # local import keeps the module's import block untouched
    return ast.literal_eval(lst)
def retrieve_char_spells(char_id):
    """Return the ids of every spell linked to character *char_id*."""
    pairs = set(db.session.query(Char_spell.char_id, Char_spell.spell_id))
    return [spell_id for owner_id, spell_id in pairs if owner_id == char_id]
def retrieve_spells(spells_array):
    """Resolve spell ids to ``(spell_desc, spell_name)`` tuples.

    ROBUSTNESS FIX: the original iterated ``.all()`` but appended *after*
    the inner loop, so an id that matched no row raised NameError (or
    silently reused the previous row's values).  A spell id maps to at most
    one row, so ``.first()`` is used and unknown ids are skipped.
    """
    spells = []
    for spell_id in spells_array:
        spell = Spell.query.filter_by(spell_id=spell_id).first()
        if spell is not None:
            spells.append((spell.spell_desc, spell.spell_name))
    return spells
def char_query(charid):
    # Fetch one Character row by primary key; None if it does not exist.
    return Character.query.filter_by(char_id=charid).first()
def template_query(tempid):
    # Fetch one class Template row by primary key; None if it does not exist.
    return Template.query.filter_by(template_id=tempid).first()
def retrieve_user_characters(charlst, user):
    """Filter a list of character rows down to those owned by *user*.

    Each row is ordered: char_id, char_name, user_id, template_id, spec_id,
    experience_points, character_level, attributes_id -- index 2 is the
    owning user's id.
    """
    return [row for row in charlst if row[2] == user]
def append_user_characters(chrlst):
    """Extend each character row with its template name and species name."""
    expanded = []
    for row in chrlst:
        record = list(row)
        template = Template.query.filter_by(template_id=row[3]).first()
        record.append(template.template_name)
        species = Char_species.query.filter_by(spec_id=row[4]).first()
        record.append(species.spec_type)
        expanded.append(record)
    # row order: char_id, char_name, user_id, template_id, spec_id,
    # experience_points, character_level, attributes_id, template_name, spec_type
    return expanded
def retrieve_char_skills(char_id):
    """Return the ids of every skill linked to character *char_id*."""
    pairs = set(db.session.query(Char_skill.char_id, Char_skill.skill_id))
    return [skill_id for owner_id, skill_id in pairs if owner_id == char_id]
def retrieve_skills(skills_array):
    """Resolve skill ids to ``(skill_desc, skill_name)`` tuples.

    ROBUSTNESS FIX (same defect as ``retrieve_spells``): the original
    appended after the inner loop over ``.all()``, raising NameError or
    reusing stale values when an id matched no row.  Unknown ids are now
    skipped.
    """
    skills = []
    for skill_id in skills_array:
        skill = Skill.query.filter_by(skill_id=skill_id).first()
        if skill is not None:
            skills.append((skill.skill_desc, skill.skill_name))
    return skills
def retrieve_race(spec_id):
    """Return ``[species name, base movement speed]`` for a species id."""
    row = Char_species.query.filter_by(spec_id=spec_id).first()
    return [row.spec_type, row.speed]
def retrieve_attributes(attributes_id):
    """Return the six ability scores of an Attribute row as a list.

    Order: strength, dexterity, constitution, wisdom, intelligence,
    charisma -- note wisdom precedes intelligence; templates index on this.
    """
    row = Attribute.query.filter_by(attributes_id=attributes_id).first()
    return [row.strength, row.dexterity, row.constitution,
            row.wisdom, row.intelligence, row.charisma]
def retrieve_character(char_id):
    """Fetch a character and unpack its display fields.

    Returns ``(spec_id, character_object, summary, level)`` where summary is
    [name, alignment, flavor text, hit points, age, xp, level].
    """
    char = Character.query.filter_by(char_id=char_id).first()
    summary = [char.char_name, char.char_align, char.flavor_txt,
               char.hit_points, char.age, char.experience_points,
               char.character_level]
    return char.spec_id, char, summary, char.character_level
def prof_bon(lvl):
    """Return the D&D 5e proficiency bonus for character level *lvl*.

    GENERALIZATION: the bonus rises by one every four levels (2 at levels
    1-4 up to 6 at 17-20), so the 20-entry lookup table is replaced by the
    closed form ``(lvl - 1) // 4 + 2``.  Identical for levels 1-20, and it
    keeps working past level 20 instead of raising KeyError.
    """
    return (lvl - 1) // 4 + 2
def level_up(xp, level):
    """True when *xp* reaches the experience threshold for *level*."""
    thresholds = (0, 300, 900, 2700, 6500, 14000, 23000, 34000, 48000,
                  64000, 85000, 100000, 120000, 140000, 165000, 195000,
                  225000, 265000, 305000, 355000, 120000000)
    return xp >= thresholds[level]
def attribute_incr(level):
    """True at the levels that grant an ability score increase."""
    return level in {3, 7, 11, 15, 18}
def modifiers(attribute):
    """Ability modifier for a score: floor((score - 10) / 2)."""
    return (attribute - 10) // 2
def if_spells(dct):
    """True if *dct* maps any key '0'..'20' to a truthy value.

    Tolerates non-mapping inputs (e.g. None): an AttributeError from
    ``.get`` is treated as "no spells at this level".
    """
    for key in map(str, range(21)):
        try:
            if dct.get(key):
                return True
        except AttributeError:
            continue
    return False
def how_many_spells(lst):
    """Count distinct spell levels among (level, ...) pairs.

    Returns ``(list(range(n)), levels)`` where *levels* holds the distinct
    first elements in first-seen order and n is their count.
    """
    distinct = []
    for entry in lst:
        if entry[0] not in distinct:
            distinct.append(entry[0])
    return list(range(len(distinct))), distinct
def growth_lst(table):
    """Flatten a {level-string: count} mapping into (index, level) pairs.

    Each key contributes ``count`` pairs of a running index and the key as
    an int, in the mapping's iteration order.

    IDIOM FIX: the parameter was named ``dict``, shadowing the builtin; it
    is renamed (the only in-file caller passes it positionally).
    """
    pairs = []
    index = 0
    for key in table:
        for _ in range(table[key]):
            pairs.append((index, int(key)))
            index += 1
    return pairs
def compliment_list(last, num):
    """Return the integers strictly between last[-1] and *num*.

    (Name kept for compatibility; "complement" is the intended word.)
    """
    start = last[-1] + 1
    return list(range(start, num))
def commit_spell(spell_array, char):
    """Link every valid (id >= 1) spell in *spell_array* to character *char*.

    Non-positive ids are placeholders from unused form slots and are skipped.
    """
    for spell in spell_array:
        if int(spell) >= 1:
            db.session.add(Char_spell(char_id=char, spell_id=spell))
    db.session.commit()
def commit_skill(skill_array, char):
    """Link every valid (id >= 1) skill in *skill_array* to character *char*.

    Non-positive ids are placeholders from unused form slots and are skipped.
    """
    for skill in skill_array:
        if int(skill) >= 1:
            db.session.add(Char_skill(char_id=char, skill_id=skill))
    db.session.commit()
@app.route('/')
def index():
    """Homepage."""
    # Static landing page; navigation links live in the template.
    return render_template("homepage.html")
@app.route('/register', methods=["GET"])
def register_form():
    # Render the blank registration form.
    return render_template("register_form.html")
@app.route('/register', methods=["POST"])
def register_process():
    # Create a new account from the registration form.  Flashes and
    # redirects on mismatched passwords or an already-registered email.
    email = request.form.get('email')
    password = request.form.get('password')
    password2 = request.form.get('password2')
    if password2 != password:
        flash('Passwords do not match.')
        return redirect('/register')
    # No existing user with this email -> create one.
    if not User.query.filter_by(email=email).first():
        # NOTE(review): the password is stored in plaintext -- hash it
        # (e.g. werkzeug.security.generate_password_hash) before shipping.
        user = User(email=email, password=password)
        db.session.add(user)
        db.session.commit()
        flash('Registration successful')
        return redirect('/')
    else:
        flash('Email already in use.')
        return redirect('/register')
@app.route('/your_characters')
def find_characters():
    """List every character belonging to the logged-in user.

    Redirects to the homepage when nobody is logged in.
    """
    # BUG FIX: the original used ``try: assert ... except:`` with a bare
    # except and a no-op ``AssertionError`` expression; asserts are stripped
    # under ``python -O`` and the bare except swallowed every error.
    if 'user_id' not in session:
        flash('You must be logged in')
        return redirect('/')
    user_id = int(session.get('user_id'))
    all_characters = db.session.query(Character.char_id, Character.char_name, Character.user_id,
        Character.template_id, Character.spec_id, Character.experience_points, Character.character_level,
        Character.attributes_id).all()
    user_chars = retrieve_user_characters(all_characters, user_id)
    user_chars_expand = append_user_characters(user_chars)
    return render_template("your_characters.html", user_chars_expand=user_chars_expand)
@app.route('/upgrade_portal', methods=["POST"])
def xp_add():
    """Grant experience points to a character; start a level-up if earned.

    Re-renders the character list when no level is gained, otherwise shows
    the level-up form.
    """
    # BUG FIX: replaced ``try: assert ... except:`` (bare except + no-op
    # ``AssertionError`` expression; stripped under ``python -O``) with an
    # explicit session check.
    if 'user_id' not in session:
        flash('You must be logged in')
        return redirect('/')
    attributes_id = int(request.form.get('attributes_id'))
    char_id = int(request.form.get('char_id'))
    num_xp = int(request.form.get('num_xp'))
    template_id = int(request.form.get('template_id'))
    user_id = int(request.form.get('user_id'))
    # (Dead locals removed: the original also read 'character_level' twice --
    # once mislabelled as experience_points -- and an unused
    # 'user_chars_expand' form list; none were used before reassignment.)
    char = char_query(char_id)
    char.experience_points += num_xp
    db.session.add(char)
    db.session.commit()
    all_characters = db.session.query(Character.char_id, Character.char_name, Character.user_id,
        Character.template_id, Character.spec_id, Character.experience_points, Character.character_level,
        Character.attributes_id).all()
    user_chars = retrieve_user_characters(all_characters, user_id)
    user_chars_expand = append_user_characters(user_chars)
    char = char_query(char_id)
    # Capture the pre-level-up level; used for attrib_plus below.
    character_level = char.character_level
    upgrade = level_up(char.experience_points, char.character_level)
    attribute_names = ['strength', 'dexterity', 'constitution', 'wisdom', 'intelligence', 'charisma']
    if not upgrade:
        return render_template("your_characters.html", user_chars_expand=user_chars_expand)
    else:
        char.character_level += 1
        db.session.add(char)
        db.session.commit()
        char = char_query(char_id)
        return render_template("level_up.html", attributes=retrieve_attributes(attributes_id), character_level=char.character_level,
            conmod=modifiers(retrieve_attributes(attributes_id)[2]), hit_points=char.hit_points,
            hit_dice=template_query(template_id).hit_dice, attrib_plus=attribute_incr(character_level),
            attribute_names=attribute_names, user_id=user_id, template_id=template_id, experience_points=char.experience_points,
            spec_id=char.spec_id, flavor_txt=char.flavor_txt, char_align=char.char_align,
            age=char.age, char_name=char.char_name, attributes_id=char.attributes_id, char_id=char_id)
@app.route('/character_start')
def start_making_character():
    """Show the first character-creation form (class and species pickers)."""
    # BUG FIX: replaced ``try: assert ... except:`` (bare except + no-op
    # ``AssertionError`` expression) with an explicit session check.
    if 'user_id' not in session:
        flash('You must be logged in')
        return redirect('/')
    temp_nfo = db.session.query(Template.template_id, Template.template_name).all()
    spec_nfo = db.session.query(Char_species.spec_id, Char_species.spec_type).all()
    return render_template("character_start.html", temp_nfo=temp_nfo, spec_nfo=spec_nfo)
@app.route('/character_start', methods=["POST"])
def first_form():
    """Carry the basic character details forward to the attributes form."""
    # BUG FIX: replaced ``try: assert ... except:`` (bare except + no-op
    # ``AssertionError`` expression) with an explicit session check.
    if 'user_id' not in session:
        flash('You must be logged in')
        return redirect('/')
    char_name = request.form.get('char_name')
    template_id = request.form.get('template_id')
    spec_id = request.form.get('spec_id')
    flavor_txt = request.form.get('flavor_txt')
    age = request.form.get('age')
    user_id = session.get('user_id')
    char_align = request.form.get('char_align')
    return render_template("builds_character.html", user_id=user_id, template_id=template_id, spec_id=spec_id,
        char_name=char_name, flavor_txt=flavor_txt, age=age, char_align=char_align)
@app.route('/builds_character', methods=["POST"])
def add_attributes():
    """Derive hit points and ability modifiers from the submitted scores and
    show the dependencies (confirmation) form."""
    # BUG FIX: replaced ``try: assert ... except:`` (bare except + no-op
    # ``AssertionError`` expression) with an explicit session check.
    if 'user_id' not in session:
        flash('You must be logged in')
        return redirect('/')
    age = int(request.form.get('age'))
    char_align = request.form.get('char_align')
    character_level = int(request.form.get('character_level'))
    char_name = request.form.get('char_name')
    charisma = int(request.form.get('charisma'))
    constitution = int(request.form.get('constitution'))
    dexterity = int(request.form.get('dexterity'))
    experience_points = int(request.form.get('experience_points'))
    flavor_txt = request.form.get('flavor_txt')
    intelligence = int(request.form.get('intelligence'))
    spec_id = int(request.form.get('spec_id'))
    strength = int(request.form.get('strength'))
    template_id = int(request.form.get('template_id'))
    user_id = int(request.form.get('user_id'))
    wisdom = int(request.form.get('wisdom'))
    hitdice = Template.query.filter_by(template_id=template_id).first()
    hitdice = hitdice.hit_dice
    # Per-ability modifiers; starting HP = class hit die + CON modifier.
    mod = modifiers(constitution)
    stre_mod = modifiers(strength)
    dex_mod = modifiers(dexterity)
    inte_mod = modifiers(intelligence)
    wis_mod = modifiers(wisdom)
    cha_mod = modifiers(charisma)
    hit_points = hitdice + mod
    return render_template('/dependencies.html', user_id=user_id, template_id=template_id, spec_id=spec_id,
        char_name=char_name, flavor_txt=flavor_txt, age=age, strength=strength, char_align=char_align,
        dexterity=dexterity, constitution=constitution, intelligence=intelligence, hit_points=hit_points,
        wisdom=wisdom, charisma=charisma, experience_points=experience_points, character_level=character_level,
        mod=mod, hitdice=hitdice, stre_mod=stre_mod, dex_mod=dex_mod, inte_mod=inte_mod, wis_mod=wis_mod,
        cha_mod=cha_mod)
@app.route('/dependencies', methods=["POST"])
def commit_char_attr():
    """Persist a new level-1 character (or apply a level-up's attribute
    changes) and route to the next creation step: the skills form, the
    spells form, or the finished character sheet."""
    # BUG FIX: replaced ``try: assert ... except:`` (bare except + no-op
    # ``AssertionError`` expression; stripped under ``python -O``) with an
    # explicit session check.  All other logic is unchanged.
    if 'user_id' not in session:
        flash('You must be logged in')
        return redirect('/')
    age = int(request.form.get('age'))
    attributes_id = request.form.get('attributes_id')
    attr_inc_1 = request.form.get('attr_inc_1')
    attr_inc_2 = request.form.get('attr_inc_2')
    char_align = request.form.get('char_align')
    char_id = request.form.get('char_id')
    character_level = int(request.form.get('character_level'))
    char_name = request.form.get('char_name')
    charisma = int(request.form.get('charisma'))
    constitution = int(request.form.get('constitution'))
    dexterity = int(request.form.get('dexterity'))
    experience_points = int(request.form.get('experience_points'))
    flavor_txt = request.form.get('flavor_txt')
    hit_points = int(request.form.get('hit_points'))
    intelligence = int(request.form.get('intelligence'))
    spec_id = int(request.form.get('spec_id'))
    strength = int(request.form.get('strength'))
    template_id = int(request.form.get('template_id'))
    user_id = int(request.form.get('user_id'))
    wisdom = int(request.form.get('wisdom'))
    if character_level == 1:
        # creates attribute object
        attributes = Attribute(strength=strength, dexterity=dexterity, constitution=constitution, wisdom=wisdom, intelligence=intelligence, charisma=charisma)
        # adds attribute object to the session and flushes
        db.session.add(attributes)
        db.session.flush()
        # searches attribute in session to retrieve id
        # NOTE(review): fetching the newest row is race-prone under concurrent
        # requests -- ``attributes.attributes_id`` is valid after the flush.
        attrib = db.session.query(Attribute).order_by(Attribute.attributes_id.desc()).first()
        attributes_id = attrib.attributes_id
        # creates character object
        character = Character(user_id=user_id, char_align=char_align, hit_points=hit_points, template_id=template_id,
            spec_id=spec_id, char_name=char_name, flavor_txt=flavor_txt, age=age, experience_points=experience_points,
            character_level=character_level, attributes_id=attributes_id)
        # commits character object and attribute object to database
        db.session.add(character)
        db.session.commit()
    else:
        # Level-up path: apply any chosen ability increases and new HP.
        attributes_id = int(attributes_id)
        char_id = int(char_id)
        if attr_inc_1:
            add_attribute_inc(attr_inc_1, attr_inc_2, strength, dexterity, constitution, intelligence,
                wisdom, charisma, attributes_id)
        character = char_query(char_id)
        character.hit_points = hit_points
        db.session.add(character)
        db.session.commit()
    # retrieves character id (newest row; see race-condition note above)
    char = db.session.query(Character).order_by(Character.char_id.desc()).first()
    this_template = Template.query.filter_by(template_id=template_id).first()
    # retrieves the jsons which shows the characters accumulation of spells and special abilities
    growth_item = json.loads(this_template.growth_table)
    growth_item = growth_item[f'{character_level}']
    spells = if_spells(growth_item)
    char_id = char.char_id
    # Strip bookkeeping keys so only castable spell slots remain.
    if "spells_known" in growth_item:
        del growth_item["spells_known"]
    if "bonus_spell" in growth_item:
        del growth_item["bonus_spell"]
    if "Additional Spells" in growth_item:
        del growth_item["Additional Spells"]
    if this_template.spell_ability == "null" or spells == False:
        if character_level != 1:
            # Non-caster level-up: render the finished character sheet.
            this_template = Template.query.filter_by(template_id=template_id).first()
            template_name = this_template.template_name
            hitdice = this_template.hit_dice
            attributes = retrieve_attributes(attributes_id)
            spec_id, character_object, character, character_level = retrieve_character(char_id)
            race = retrieve_race(spec_id)
            skills_obj = retrieve_char_skills(char_id)
            skills_obj = retrieve_skills(skills_obj)
            spells_obj = retrieve_char_spells(char_id)
            spells_obj = retrieve_spells(spells_obj)
            growth_items = json.loads(this_template.growth_table)
            growth_item = growth_items[f'{character_level}']
            feats = json.loads(this_template.features_table)
            prof = prof_bon(character[6])
            sneak = growth_item.get("sneak_attack", "POOP")
            rages = sum([item.get("rages", 0) for lvl, item in growth_items.items() if int(lvl) <= character_level])
            rage_damage = sum([item.get("rage_damage", 0) for lvl, item in growth_items.items() if int(lvl) <= character_level])
            barbarian = [rages, rage_damage]
            spells_known = sum([item.get("spells_known", 0) for lvl, item in growth_items.items() if int(lvl) <= character_level])
            feat_list = [item for lvl, item in feats.items() if int(lvl) <= character_level and item != "null"]
            monk = growth_item.get("Ki Points", 0), growth_item.get("Unarmored Movement", 0)
            return render_template("commit_char_true.html", attributes_id=attributes_id, template_id=template_id, this_template=this_template,
                attributes=attributes, hitdice=hitdice, character=character, race=race, skills_obj=skills_obj, char_id=char_id,
                spells_obj=spells_obj, sneak=sneak, barbarian=barbarian, spells_known=spells_known, feat_list=feat_list, template_name=template_name,
                monk=monk, prof=prof)
        else:
            # Non-caster level 1: pick class skills next.
            skills = []
            skill_nfo, skills_num = this_template.skill_choices, this_template.num_skills
            skill_info = skill_nfo.rsplit(', ')
            for i in skill_info:
                i = int(i)
                skill_obj = Skill.query.get(i)
                skills.append((skill_obj.skill_id, skill_obj.skill_name))
            num_skills = list(range(1, skills_num+1))
            other_list = compliment_list(num_skills, 6)
            return render_template("add_skills.html", template_id=template_id, skills=skills, num_skills=num_skills, other_list=other_list, char_id=char_id,
                user_id=user_id, attributes_id=attributes_id)
    else:
        # Caster: gather the spells this class can learn at this level.
        char = char_query(char_id)
        growth_list = growth_lst(growth_item)
        # flash('Character successfully saved')
        spell_objects = db.session.query(Class_spell.template_id, Class_spell.spell_id).all()
        rel_spells = []
        specific_spells = []
        for i in spell_objects:
            if i[0] == template_id:
                rel_spells.append(i[1])
        for i in rel_spells:
            x = Spell.query.get(i)
            if int(x.int_requirement) <= character_level:
                specific_spells.append((x.int_requirement, x.spell_name, x.spell_id))
        no_spells=list(range(len(growth_list)))
        unvariety = compliment_list(no_spells, 8)
        return render_template("add_spells.html", growth_list=growth_list, spell_objects=spell_objects, specific_spells=specific_spells,
            unvariety=unvariety, template_id=template_id, user_id=user_id, char_id=char_id, attributes_id=attributes_id, character_level=character_level)
@app.route('/add_skills_after_spells', methods=["POST"])
def skills_after_spells():
    """Save the chosen spells, then either show the skills form (level 1)
    or render the finished character sheet."""
    # BUG FIX: replaced ``try: assert ... except:`` (bare except + no-op
    # ``AssertionError`` expression; stripped under ``python -O``) with an
    # explicit session check.
    if 'user_id' not in session:
        flash('You must be logged in')
        return redirect('/')
    attributes_id = int(request.form.get('attributes_id'))
    char_id = int(request.form.get('char_id'))
    character_level = int(request.form.get('character_level'))
    spell_id_0 = int(request.form.get('spell_id_0'))
    spell_id_1 = int(request.form.get('spell_id_1'))
    spell_id_2 = int(request.form.get('spell_id_2'))
    spell_id_3 = int(request.form.get('spell_id_3'))
    spell_id_4 = int(request.form.get('spell_id_4'))
    spell_id_5 = int(request.form.get('spell_id_5'))
    spell_id_6 = int(request.form.get('spell_id_6'))
    spell_id_7 = int(request.form.get('spell_id_7'))
    template_id = int(request.form.get('template_id'))
    user_id = int(request.form.get('user_id'))
    this_template = Template.query.filter_by(template_id=template_id).first()
    # Build the selectable class-skill list for the next form.
    skills = []
    skill_nfo, skills_num = this_template.skill_choices, this_template.num_skills
    skill_info = skill_nfo.rsplit(', ')
    for i in skill_info:
        i = int(i)
        skill_obj = Skill.query.get(i)
        skills.append((skill_obj.skill_id, skill_obj.skill_name))
    num_skills = list(range(1, skills_num+1))
    other_list = compliment_list(num_skills, 6)
    spell_ids = [spell_id_0, spell_id_1, spell_id_2, spell_id_3, spell_id_4, spell_id_5, spell_id_6, spell_id_7]
    commit_spell(spell_ids, char_id)
    if character_level == 1:
        return render_template("add_skills_after_spells.html", template_id=template_id, user_id=user_id,
            char_id=char_id, other_list=other_list, num_skills=num_skills, skills=skills, attributes_id=attributes_id)
    else:
        # Level-up complete: assemble and render the character sheet.
        this_template = Template.query.filter_by(template_id=template_id).first()
        template_name = this_template.template_name
        hitdice = this_template.hit_dice
        attributes = retrieve_attributes(attributes_id)
        spec_id, character_object, character, character_level = retrieve_character(char_id)
        race = retrieve_race(spec_id)
        skills_obj = retrieve_char_skills(char_id)
        skills_obj = retrieve_skills(skills_obj)
        spells_obj = retrieve_char_spells(char_id)
        spells_obj = retrieve_spells(spells_obj)
        growth_items = json.loads(this_template.growth_table)
        growth_item = growth_items[f'{character_level}']
        feats = json.loads(this_template.features_table)
        prof = prof_bon(character[6])
        sneak = growth_item.get("sneak_attack", "POOP")
        rages = sum([item.get("rages", 0) for lvl, item in growth_items.items() if int(lvl) <= character_level])
        rage_damage = sum([item.get("rage_damage", 0) for lvl, item in growth_items.items() if int(lvl) <= character_level])
        barbarian = [rages, rage_damage]
        spells_known = sum([item.get("spells_known", 0) for lvl, item in growth_items.items() if int(lvl) <= character_level])
        feat_list = [item for lvl, item in feats.items() if int(lvl) <= character_level and item != "null"]
        monk = growth_item.get("Ki Points", 0), growth_item.get("Unarmored Movement", 0)
        return render_template("commit_char_true.html", attributes_id=attributes_id, template_id=template_id, this_template=this_template,
            attributes=attributes, hitdice=hitdice, character=character, race=race, skills_obj=skills_obj, char_id=char_id,
            spells_obj=spells_obj, sneak=sneak, barbarian=barbarian, spells_known=spells_known, feat_list=feat_list, template_name=template_name,
            monk=monk, prof=prof)
@app.route('/commit_char_true', methods=["POST"])
def commit_show_char():
    """Save the chosen skills and render the finished character sheet."""
    # BUG FIX: replaced ``try: assert ... except:`` (bare except + no-op
    # ``AssertionError`` expression; stripped under ``python -O``) with an
    # explicit session check.
    if 'user_id' not in session:
        flash('You must be logged in')
        return redirect('/')
    attributes_id = int(request.form.get('attributes_id'))
    char_id = int(request.form.get('char_id'))
    skill_id_1 = int(request.form.get('skill_id_1'))
    skill_id_2 = int(request.form.get('skill_id_2'))
    skill_id_3 = int(request.form.get('skill_id_3'))
    skill_id_4 = int(request.form.get('skill_id_4'))
    skill_id_5 = int(request.form.get('skill_id_5'))
    template_id = int(request.form.get('template_id'))
    user_id = int(request.form.get('user_id'))
    skill_ids = [skill_id_1, skill_id_2, skill_id_3, skill_id_4, skill_id_5]
    commit_skill(skill_ids, char_id)
    # Assemble everything the character-sheet template needs.
    this_template = Template.query.filter_by(template_id=template_id).first()
    template_name = this_template.template_name
    hitdice = this_template.hit_dice
    attributes = retrieve_attributes(attributes_id)
    spec_id, character_object, character, character_level = retrieve_character(char_id)
    race = retrieve_race(spec_id)
    skills_obj = retrieve_char_skills(char_id)
    skills_obj = retrieve_skills(skills_obj)
    spells_obj = retrieve_char_spells(char_id)
    spells_obj = retrieve_spells(spells_obj)
    growth_items = json.loads(this_template.growth_table)
    growth_item = growth_items[f'{character_level}']
    feats = json.loads(this_template.features_table)
    prof = prof_bon(character[6])
    sneak = growth_item.get("sneak_attack", "POOP")
    rages = sum([item.get("rages", 0) for lvl, item in growth_items.items() if int(lvl) <= character_level])
    rage_damage = sum([item.get("rage_damage", 0) for lvl, item in growth_items.items() if int(lvl) <= character_level])
    barbarian = [rages, rage_damage]
    spells_known = sum([item.get("spells_known", 0) for lvl, item in growth_items.items() if int(lvl) <= character_level])
    feat_list = [item for lvl, item in feats.items() if int(lvl) <= character_level and item != "null"]
    monk = growth_item.get("Ki Points", 0), growth_item.get("Unarmored Movement", 0)
    return render_template("commit_char_true.html", attributes_id=attributes_id, template_id=template_id, this_template=this_template,
        attributes=attributes, hitdice=hitdice, character=character, race=race, skills_obj=skills_obj, char_id=char_id,
        spells_obj=spells_obj, sneak=sneak, barbarian=barbarian, spells_known=spells_known, feat_list=feat_list, template_name=template_name,
        monk=monk, prof=prof)
@app.route('/log_in', methods=["GET"])
def log_in():
    # Render the blank login form.
    return render_template("log_in.html")
@app.route('/log_in', methods=["POST"])
def log_in_form():
    """Authenticate the submitted email/password and start a session.

    BUG FIX: the original dereferenced ``db_user.password`` before checking
    that the query found a user, so an unknown email crashed with
    AttributeError (HTTP 500).  Unknown emails now get the same
    "Incorrect username or password." message as a wrong password.
    """
    email = request.form.get('email')
    password = request.form.get('password')
    db_user = User.query.filter_by(email=email).first()
    # NOTE(review): passwords are stored and compared in plaintext -- hash
    # them (e.g. werkzeug.security.check_password_hash) before shipping.
    if db_user is not None and password == db_user.password:
        session['user_id'] = db_user.user_id
        flash('You were successfully logged in.')
        return redirect('/')
    else:
        flash('Incorrect username or password.')
        return redirect('/')
@app.route('/log_out', methods=["GET"])
def log_out():
    """Clear the logged-in user from the session and return home.

    BUG FIX: the original used ``del session['user_id']``, which raised
    KeyError when nobody was logged in; ``pop`` with a default is safe.
    The leftover ``print(session)`` debug statement is removed.
    """
    session.pop('user_id', None)
    flash('User logged out.')
    return redirect('/')
if __name__ == "__main__":
    # Development entry point: debug mode with template auto-reload.
    app.debug = True
    app.jinja_env.auto_reload = app.debug
    # Bind SQLAlchemy to this app (see model.connect_to_db).
    connect_to_db(app)
    # Let redirects pass through the debug toolbar; silence the
    # SQLAlchemy modification-tracking deprecation warning.
    app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    DebugToolbarExtension(app)
    # 0.0.0.0 exposes the dev server on all interfaces (Vagrant/VM use).
    app.run(host="0.0.0.0", debug=True)
"vagrant@vagrant.vm"
] | vagrant@vagrant.vm |
a2d0655c2ac073270dcb83ddf25f82ba36e77b75 | 1a2ca64839723ede3134a0781128b0dc0b5f6ab8 | /ExtractFeatures/Data/mrafayaleem/test_constraints.py | 7e1fc758c05afc8ac61c487f1d1ed22cae0f23a3 | [] | no_license | vivekaxl/LexisNexis | bc8ee0b92ae95a200c41bd077082212243ee248c | 5fa3a818c3d41bd9c3eb25122e1d376c8910269c | refs/heads/master | 2021-01-13T01:44:41.814348 | 2015-07-08T15:42:35 | 2015-07-08T15:42:35 | 29,705,371 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 10,411 | py | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from test.unit import MockTrue
from swift.common.swob import HTTPBadRequest, HTTPLengthRequired, \
HTTPRequestEntityTooLarge, Request
from swift.common.http import HTTP_REQUEST_ENTITY_TOO_LARGE, \
HTTP_BAD_REQUEST, HTTP_LENGTH_REQUIRED
from swift.common import constraints
class TestConstraints(unittest.TestCase):
def test_check_metadata_empty(self):
headers = {}
self.assertEquals(constraints.check_metadata(Request.blank('/',
headers=headers), 'object'), None)
def test_check_metadata_good(self):
headers = {'X-Object-Meta-Name': 'Value'}
self.assertEquals(constraints.check_metadata(Request.blank('/',
headers=headers), 'object'), None)
def test_check_metadata_empty_name(self):
headers = {'X-Object-Meta-': 'Value'}
self.assert_(constraints.check_metadata(Request.blank('/',
headers=headers), 'object'), HTTPBadRequest)
def test_check_metadata_name_length(self):
name = 'a' * constraints.MAX_META_NAME_LENGTH
headers = {'X-Object-Meta-%s' % name: 'v'}
self.assertEquals(constraints.check_metadata(Request.blank('/',
headers=headers), 'object'), None)
name = 'a' * (constraints.MAX_META_NAME_LENGTH + 1)
headers = {'X-Object-Meta-%s' % name: 'v'}
self.assertEquals(constraints.check_metadata(Request.blank('/',
headers=headers), 'object').status_int, HTTP_BAD_REQUEST)
def test_check_metadata_value_length(self):
value = 'a' * constraints.MAX_META_VALUE_LENGTH
headers = {'X-Object-Meta-Name': value}
self.assertEquals(constraints.check_metadata(Request.blank('/',
headers=headers), 'object'), None)
value = 'a' * (constraints.MAX_META_VALUE_LENGTH + 1)
headers = {'X-Object-Meta-Name': value}
self.assertEquals(constraints.check_metadata(Request.blank('/',
headers=headers), 'object').status_int, HTTP_BAD_REQUEST)
def test_check_metadata_count(self):
headers = {}
for x in xrange(constraints.MAX_META_COUNT):
headers['X-Object-Meta-%d' % x] = 'v'
self.assertEquals(constraints.check_metadata(Request.blank('/',
headers=headers), 'object'), None)
headers['X-Object-Meta-Too-Many'] = 'v'
self.assertEquals(constraints.check_metadata(Request.blank('/',
headers=headers), 'object').status_int, HTTP_BAD_REQUEST)
    def test_check_metadata_size(self):
        # Fill headers up to (but not over) MAX_META_OVERALL_SIZE, which must
        # pass; then push past the limit, which must yield 400.
        headers = {}
        size = 0
        # Worst-case bytes contributed by one header: max name + max value.
        chunk = constraints.MAX_META_NAME_LENGTH + \
            constraints.MAX_META_VALUE_LENGTH
        x = 0
        while size + chunk < constraints.MAX_META_OVERALL_SIZE:
            headers['X-Object-Meta-%04d%s' %
                    (x, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \
                'v' * constraints.MAX_META_VALUE_LENGTH
            size += chunk
            x += 1
        self.assertEquals(constraints.check_metadata(Request.blank('/',
            headers=headers), 'object'), None)
        # add two more headers in case adding just one falls exactly on the
        # limit (eg one header adds 1024 and the limit is 2048)
        headers['X-Object-Meta-%04d%s' %
                (x, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \
            'v' * constraints.MAX_META_VALUE_LENGTH
        headers['X-Object-Meta-%04d%s' %
                (x + 1, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \
            'v' * constraints.MAX_META_VALUE_LENGTH
        self.assertEquals(constraints.check_metadata(Request.blank('/',
            headers=headers), 'object').status_int, HTTP_BAD_REQUEST)
    def test_check_object_creation_content_length(self):
        # Content-Length equal to MAX_FILE_SIZE is allowed.
        headers = {'Content-Length': str(constraints.MAX_FILE_SIZE),
                   'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(Request.blank('/',
            headers=headers), 'object_name'), None)
        # One byte over MAX_FILE_SIZE -> 413 Request Entity Too Large.
        headers = {'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
                   'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name').status_int,
            HTTP_REQUEST_ENTITY_TOO_LARGE)
        # Chunked transfer encoding does not require a Content-Length.
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(Request.blank('/',
            headers=headers), 'object_name'), None)
        # Neither Content-Length nor chunked encoding -> 411 Length Required.
        headers = {'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name').status_int,
            HTTP_LENGTH_REQUIRED)
    def test_check_object_creation_name_length(self):
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain'}
        # A name of exactly MAX_OBJECT_NAME_LENGTH is accepted ...
        name = 'o' * constraints.MAX_OBJECT_NAME_LENGTH
        self.assertEquals(constraints.check_object_creation(Request.blank('/',
            headers=headers), name), None)
        # ... one character longer is a 400 Bad Request.
        name = 'o' * (constraints.MAX_OBJECT_NAME_LENGTH + 1)
        self.assertEquals(constraints.check_object_creation(
            Request.blank('/', headers=headers), name).status_int,
            HTTP_BAD_REQUEST)
    def test_check_object_creation_content_type(self):
        # A Content-Type header is mandatory for object creation.
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(Request.blank('/',
            headers=headers), 'object_name'), None)
        # Omitting it yields a 400 Bad Request.
        headers = {'Transfer-Encoding': 'chunked'}
        self.assertEquals(constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name').status_int,
            HTTP_BAD_REQUEST)
    def test_check_object_creation_bad_content_type(self):
        # A non-UTF-8 Content-Type value is rejected, and the error body
        # names the offending header.
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': '\xff\xff'}
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name')
        self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
        self.assert_('Content-Type' in resp.body)
    def test_check_object_manifest_header(self):
        # Valid manifest value: '<container>/<prefix>' with no leading
        # slash and no query characters.
        resp = constraints.check_object_creation(Request.blank('/',
            headers={'X-Object-Manifest': 'container/prefix', 'Content-Length':
            '0', 'Content-Type': 'text/plain'}), 'manifest')
        self.assert_(not resp)
        # Missing the prefix component.
        resp = constraints.check_object_creation(Request.blank('/',
            headers={'X-Object-Manifest': 'container', 'Content-Length': '0',
            'Content-Type': 'text/plain'}), 'manifest')
        self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
        # Leading slash is not allowed.
        resp = constraints.check_object_creation(Request.blank('/',
            headers={'X-Object-Manifest': '/container/prefix',
            'Content-Length': '0', 'Content-Type': 'text/plain'}), 'manifest')
        self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
        # '?' and '&' characters are rejected.
        resp = constraints.check_object_creation(Request.blank('/',
            headers={'X-Object-Manifest': 'container/prefix?query=param',
            'Content-Length': '0', 'Content-Type': 'text/plain'}), 'manifest')
        self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
        resp = constraints.check_object_creation(Request.blank('/',
            headers={'X-Object-Manifest': 'container/prefix&query=param',
            'Content-Length': '0', 'Content-Type': 'text/plain'}), 'manifest')
        self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
        # Absolute URLs are rejected as well.
        resp = constraints.check_object_creation(Request.blank('/',
            headers={'X-Object-Manifest': 'http://host/container/prefix',
            'Content-Length': '0', 'Content-Type': 'text/plain'}), 'manifest')
        self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
    def test_check_mount(self):
        self.assertFalse(constraints.check_mount('', ''))
        # Replace the os module inside constraints so every filesystem
        # check succeeds; only device-name validation is exercised below.
        constraints.os = MockTrue()  # mock os module
        self.assertTrue(constraints.check_mount('/srv', '1'))
        self.assertTrue(constraints.check_mount('/srv', 'foo-bar'))
        self.assertTrue(constraints.check_mount('/srv', '003ed03c-242a-4b2f-bee9-395f801d1699'))
        # Whitespace and path/query characters are invalid in device names.
        self.assertFalse(constraints.check_mount('/srv', 'foo bar'))
        self.assertFalse(constraints.check_mount('/srv', 'foo/bar'))
        self.assertFalse(constraints.check_mount('/srv', 'foo?bar'))
        reload(constraints)  # put it back
    def test_check_float(self):
        # The empty string is rejected; a numeric string is accepted.
        self.assertFalse(constraints.check_float(''))
        self.assertTrue(constraints.check_float('0'))
    def test_check_utf8(self):
        unicode_sample = u'\uc77c\uc601'
        valid_utf8_str = unicode_sample.encode('utf-8')
        # Reversing the bytes of a multi-byte sequence produces invalid
        # UTF-8.
        invalid_utf8_str = unicode_sample.encode('utf-8')[::-1]
        # NUL characters are rejected even in otherwise valid strings.
        unicode_with_null = u'abc\u0000def'
        utf8_with_null = unicode_with_null.encode('utf-8')

        for false_argument in [None,
                               '',
                               invalid_utf8_str,
                               unicode_with_null,
                               utf8_with_null]:
            self.assertFalse(constraints.check_utf8(false_argument))

        for true_argument in ['this is ascii and utf-8, too',
                              unicode_sample,
                              valid_utf8_str]:
            self.assertTrue(constraints.check_utf8(true_argument))
    def test_validate_bad_meta(self):
        # A single metadata header larger than MAX_HEADER_SIZE is enough
        # to fail validation with 400 Bad Request.
        req = Request.blank(
            '/v/a/c/o',
            headers={'x-object-meta-hello':
                     'ab' * constraints.MAX_HEADER_SIZE})
        self.assertEquals(constraints.check_metadata(req, 'object').status_int,
                          HTTP_BAD_REQUEST)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"vivekaxl@gmail.com"
] | vivekaxl@gmail.com |
578dd5df4eda97c5cd637742a9711502dace842e | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Database/AthenaPOOL/AthenaPoolExample/AthenaPoolExampleAlgorithms/share/AthenaPoolExample_WMetaJobOptions.py | 3af9e8794b0385996c0820cd6b3c8079ce383f32 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,236 | py | ## @file AthenaPoolExample_WriteJobOptions.py
## @brief Example job options file to illustrate how to write event data to Pool.
## @author Peter van Gemmeren <gemmeren@anl.gov>
## $Id: AthenaPoolExample_WMetaJobOptions.py,v 1.8 2008-03-25 22:38:54 gemmeren Exp $
###############################################################
#
# This Job option:
# ----------------
# 1. Writes a SimplePoolFile5.root file with ExampleHit
# using WriteData algorithm
# ------------------------------------------------------------
# Expected output file (20 events):
# -rw-r--r-- 1 gemmeren zp 36266 Dec 8 19:08 SimplePoolFile5.root
#
#==============================================================
import AthenaCommon.AtlasUnixGeneratorJob
## get a handle on the default top-level algorithm sequence
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
## get a handle on the ServiceManager
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
#--------------------------------------------------------------
# Event related parameters
#--------------------------------------------------------------
from AthenaCommon.AppMgr import theApp
theApp.EvtMax = 20
#--------------------------------------------------------------
# Load POOL support
#--------------------------------------------------------------
import AthenaPoolCnvSvc.WriteAthenaPool
#Explicitly specify the output file catalog
svcMgr.PoolSvc.WriteCatalog = "xmlcatalog_file:Catalog2.xml"
# NOTE(review): the trailing semicolon below is redundant in Python.
svcMgr.AthenaPoolCnvSvc.CommitInterval = 10;
from AthenaPoolCnvSvc.WriteAthenaPool import AthenaPoolOutputStream
Stream1 = AthenaPoolOutputStream( "Stream1", "ROOTTREE:SimplePoolFile5.root" )
Stream1.ItemList += [ "ExampleHitContainer#MyHits" ]
Stream1.MetadataItemList += [ "ExampleHitContainer#PedestalWriteData" ]
# The triple-quoted block below is deliberately disabled (dead) code kept
# for reference: it would add EventBookkeeper metadata to the stream.
"""
Stream1.MetadataItemList += [ "EventBookkeeperCollection#EventBookkeepers" ]
from EventBookkeeperTools.BookkeepingInfoWriter import EventBookkeepersWriter
EBWriter1 = EventBookkeepersWriter()
EBWriter1.setDoMC( False )
EBWriter1.setCycle( 0 )
EBWriter1.OutputCollectionName = "EventBookkeepers"
topSequence += EBWriter1
"""
#--------------------------------------------------------------
# Private Application Configuration options
#--------------------------------------------------------------
# Load "user algorithm" top algorithms to be run, and the libraries that house them
from AthenaPoolExampleAlgorithms.AthenaPoolExampleAlgorithmsConf import AthPoolEx__WriteData,AthPoolEx__WriteCond
topSequence += AthPoolEx__WriteData( "WriteData" )
from StoreGate.StoreGateConf import StoreGateSvc
# WriteCond records the pedestal conditions object in the MetaDataStore so
# it is written out with the stream metadata.
topSequence += AthPoolEx__WriteCond( "WriteCond", DetStore = StoreGateSvc( "MetaDataStore" ),
                                     ConditionName = "PedestalWriteData" )
#--------------------------------------------------------------
# Set output level threshold (2=DEBUG, 3=INFO, 4=WARNING, 5=ERROR, 6=FATAL)
#--------------------------------------------------------------
svcMgr.MessageSvc.OutputLevel = 3
svcMgr.PoolSvc.OutputLevel = 2
svcMgr.AthenaPoolCnvSvc.OutputLevel = 2
topSequence.WriteData.OutputLevel = 2
Stream1.OutputLevel = 2
#
# End of job options file
#
###############################################################
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
5b52f7f306a5a2aaec29752ff685f73d5f23ce39 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_4/glnrus002/boxes.py | e4fe66cfc48ac2de962374281b9b70b12248e4d0 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | #boxes.p
#glnrus002
def print_square ():
print("*"*5)
for i in range (3):
print("*"," ","*" )
print("*"*5)
def print_rectangle (width, height):
print("*"*width)
for i in range(height-2):
print("*"," "*(width-2),"*",sep="")
print("*"*width)
def get_rectangle (width, height):
box="*"*width+"\n"
for i in range(height-2):
box=box+ "*"+" "*(width-2)+"*"+"\n"
box=box+("*"*width)+"\n"
return box
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
230653d44e2ef94bab9b0d8fe6e66616e3e15c15 | 370c40babd60df6ef84b339a31fb7365ebe2400f | /toolkit/crashreporter/google-breakpad/src/tools/windows/dump_syms/moz.build | 6220ae3cf6b64ac4db3561aa3ca94cafa06230bd | [
"BSD-3-Clause",
"LicenseRef-scancode-unicode-mappings",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | roytam1/gecko-kaios | b676a1c9ae1c1412274a2327419660803f214ab9 | 2c1b5fe198e12edc4cb17a34ecbcceedc642c78e | refs/heads/master | 2023-05-10T16:50:43.590202 | 2019-01-11T07:55:14 | 2019-10-17T09:15:16 | 215,687,205 | 5 | 0 | NOASSERTION | 2023-05-07T02:37:27 | 2019-10-17T02:34:23 | null | UTF-8 | Python | false | false | 752 | build | # -*- Mode: python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 40 -*-
# vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Build the host-side dump_syms tool.
HostProgram('dump_syms')

HOST_SOURCES += [
    '../../../common/windows/dia_util.cc',
    '../../../common/windows/guid_string.cc',
    '../../../common/windows/omap.cc',
    '../../../common/windows/pdb_source_line_writer.cc',
    '../../../common/windows/string_utils.cc',
    'dump_syms.cc',
]

# MSVC flags: optimize, enable C++ exceptions, link against the dynamic CRT.
HOST_CXXFLAGS += [
    '-O2',
    '-EHsc',
    '-MD'
]

# diaguids is the DIA SDK library used for PDB access; imagehlp provides
# the Windows image helper API.
HOST_OS_LIBS += [
    'diaguids',
    'imagehlp'
]

LOCAL_INCLUDES += [
    '../../..'
]
| [
"roytam@gmail.com"
] | roytam@gmail.com |
2cccf78706623f5fbda15d33d057348aedad483f | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_12801.py | 234f131a3965b2e8605641bd7146db4924dc7a1d | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | # how to add up the data with variable numbers in MySQL with Python?
SELECT id, name, users_count+1 as users_count FROM data;
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
5a49851c993afa4acec0a0a96ccb39f922f00904 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_hazed.py | d38bc8abb99b6e274b895bcfa1cd01cd638c6422 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
from xai.brain.wordbase.verbs._haze import _HAZE
#class header
class _HAZED(_HAZE, ):
    # Entry for the inflected form "HAZED"; all behaviour beyond these
    # attributes comes from the base-form class _HAZE.
    def __init__(self,):
        # Initialise the shared machinery from the base form first.
        _HAZE.__init__(self)
        self.name = "HAZED"    # surface form of the word
        self.specie = 'verbs'  # word category
        self.basic = "haze"    # base / dictionary form
        self.jsondata = {}     # no extra metadata for this form
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
8ac6d3cdb82c433b8d428c5e8a2d3cada473a505 | e982c42888da91e957aef4a67e339135918d25ec | /venv/Lib/site-packages/jinja2/testsuite/__init__.py | db816dfe9054cf235a1bc8af3306fa9a761cedb0 | [] | no_license | nikhil9856/kisan | c88f890d88e96dd718bd9cfaef41f3d40eb7b72d | 556e57427a2b9a91fcc4a44ca25706c49e790d73 | refs/heads/master | 2020-03-15T01:59:09.825264 | 2018-05-03T18:08:02 | 2018-05-03T18:08:02 | 131,904,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,797 | py | # -*- coding: utf-8 -*-
"""
jinja2.testsuite
~~~~~~~~~~~~~~~~
All the unittests of Jinja2. These tests can be executed by
either running run-tests.py using multiple Python versions at
the same time.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import re
import sys
import unittest
from traceback import format_exception
from jinja2 import loaders
from jinja2._compat import PY2
# Directory containing this test package; used to locate template fixtures.
here = os.path.dirname(os.path.abspath(__file__))

# One loader of each flavour, shared by the individual test modules.
dict_loader = loaders.DictLoader({
    'justdict.html': 'FOO'
})
package_loader = loaders.PackageLoader('jinja2.testsuite.res', 'templates')
filesystem_loader = loaders.FileSystemLoader(here + '/res/templates')
function_loader = loaders.FunctionLoader({'justfunction.html': 'FOO'}.get)
# choice_loader falls back from the dict loader to the package loader;
# prefix_loader routes 'a/...' and 'b/...' names to different loaders.
choice_loader = loaders.ChoiceLoader([dict_loader, package_loader])
prefix_loader = loaders.PrefixLoader({
    'a': filesystem_loader,
    'b': dict_loader
})
class JinjaTestCase(unittest.TestCase):
    """Base class for the Jinja2 test suite.

    Tests should use only the lowercase helper methods below; if a
    standard unittest method is needed, wrap it here instead of calling
    it directly.
    """

    def setup(self):
        pass

    def teardown(self):
        pass

    def setUp(self):
        self.setup()

    def tearDown(self):
        self.teardown()

    def assert_equal(self, a, b):
        return self.assertEqual(a, b)

    def assert_raises(self, *args, **kwargs):
        return self.assertRaises(*args, **kwargs)

    def assert_traceback_matches(self, callback, expected_tb):
        try:
            callback()
        except Exception:
            rendered = ''.join(format_exception(*sys.exc_info()))
            # The expected traceback is treated as a regular expression
            # and searched for inside the rendered traceback.
            if re.search(expected_tb.strip(), rendered) is None:
                raise self.fail('Traceback did not match:\n\n%s\nexpected:\n%s'
                                % (rendered, expected_tb))
        else:
            self.fail('Expected exception')
def find_all_tests(suite):
    """Yields all the tests and their names from a given suite."""
    pending = [suite]
    while pending:
        candidate = pending.pop()
        try:
            # Suites are iterable; flatten them into the work list.
            pending.extend(candidate)
        except TypeError:
            # Not iterable -> an individual test case; yield it together
            # with its dotted "module.Class.method" name.
            yield candidate, '%s.%s.%s' % (
                candidate.__class__.__module__,
                candidate.__class__.__name__,
                candidate._testMethodName,
            )
class BetterLoader(unittest.TestLoader):
    """Test loader with two conveniences over the default one.

    The suite here is assembled programmatically from several sources,
    which breaks unittest's default loading logic, so a custom loader is
    required anyway.  On top of that, this loader resolves partial test
    names, so ``run-tests.py ViewTestCase`` just works.
    """

    def getRootSuite(self):
        return suite()

    def loadTestsFromName(self, name, module=None):
        root = self.getRootSuite()
        if name == 'suite':
            return root
        # Collect every test whose dotted name matches the request as an
        # exact name, a suffix, an infix component or a prefix.
        matches = []
        for testcase, testname in find_all_tests(root):
            if (testname == name
                    or testname.endswith('.' + name)
                    or ('.' + name + '.') in testname
                    or testname.startswith(name + '.')):
                matches.append(testcase)
        if not matches:
            raise LookupError('could not find test case for "%s"' % name)
        if len(matches) == 1:
            return matches[0]
        bundle = unittest.TestSuite()
        for test in matches:
            bundle.addTest(test)
        return bundle
def suite():
    """Assemble and return the full Jinja2 test suite."""
    from jinja2.testsuite import ext, filters, tests, core_tags, \
         loader, inheritance, imports, lexnparse, security, api, \
         regression, debug, utils, bytecode_cache, doctests
    combined = unittest.TestSuite()
    for module in (ext, filters, tests, core_tags, loader, inheritance,
                   imports, lexnparse, security, api, regression, debug,
                   utils, bytecode_cache):
        combined.addTest(module.suite())

    # doctests will not run on python 3 currently.  Too many issues
    # with that, do not test that on that platform.
    if PY2:
        combined.addTest(doctests.suite())
    return combined
def main():
    """Runs the testsuite as command line application."""
    try:
        # Resolve test names through BetterLoader; default to the full suite.
        unittest.main(testLoader=BetterLoader(), defaultTest='suite')
    except Exception as e:
        print('Error: %s' % e)
| [
"kumar.nikhil110@gmail.com"
] | kumar.nikhil110@gmail.com |
36f6b770385698e68c30bc1c36d2fb9bf80814a6 | 9f2445e9a00cc34eebcf3d3f60124d0388dcb613 | /2019-11-27-Parametersearch_again/MOOSEModel.py | 028bd96d9a3b9c8f7b6929e864f3aac0a67af60a | [] | no_license | analkumar2/Thesis-work | 7ee916d71f04a60afbd117325df588908518b7d2 | 75905427c2a78a101b4eed2c27a955867c04465c | refs/heads/master | 2022-01-02T02:33:35.864896 | 2021-12-18T03:34:04 | 2021-12-18T03:34:04 | 201,130,673 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,307 | py | #exec(open('MOOSEModel.py').read())
#Has been modified heavily to be used in Parametersearch_again folder. Do not use as an api
import moose
import rdesigneur as rd
import numpy as np
import matplotlib.pyplot as plt
import xmltodict
def parameterfile_parser(Parameterfile):
"""
Parses Parameterfile and returns rdesigneur function parameters
Arguements:
Parameterfile -- A valid parameterfile address, string
"""
depth = 0.1
F = 96485.3329
with open(Parameterfile) as fd:
Model = xmltodict.parse(fd.read())
Parameters = {}
cellProto = [['somaProto', 'soma', float(Model['Model']['segment']['Morphology']['@sm_diam']), float(Model['Model']['segment']['Morphology']['@sm_len'])]]
chanProto = []
chanDistrib = []
chd = Model['Model']['segment']['Channels']
for channel in chd.keys():
chanProto.append([ chd[channel]['@kinetics'][:-2]+channel+'()' , channel ])
chanDistrib.append([ channel, 'soma', 'Gbar', chd[channel]['@gbar'] ])
chanProto.append([ Model['Model']['segment']['Ca_Conc']['@kinetics'][:-2]+'Ca_Conc()' , 'Ca_conc' ])
chanDistrib.append([ 'Ca_conc', 'soma', 'CaBasal', Model['Model']['segment']['Ca_Conc']['@Ca_inf'], 'tau', Model['Model']['segment']['Ca_Conc']['@Ca_tau'] ])
passiveDistrib = [['soma', 'RM', Model['Model']['segment']['Passive']['@RM'], 'CM', Model['Model']['segment']['Passive']['@CM'], 'initVm', str(-0.065), 'Em', Model['Model']['segment']['Passive']['@Em']]]
Parameters['cellProto'] = cellProto
Parameters['chanProto'] = chanProto
Parameters['chanDistrib'] = chanDistrib
Parameters['passiveDistrib'] = passiveDistrib
Parameters['Ca_B'] = float(Model['Model']['segment']['Ca_Conc']['@Ca_B'])
return Parameters
def generateModel(Parameterfile, CurrInjection):
"""
Returns in-silico model current clamp
Arguements:
Parameterfile -- A valid parameterfile address, string
CurrInjection -- Current clamp level, float
"""
Parameters = parameterfile_parser(Parameterfile)
elecPlotDt = 0.0001
elecDt = 0.00005
depth = 0.1
preStimTime = 1
injectTime = 0.5
postStimTime = 1
try:
# [moose.delete(x) for x in ['/model', '/library']]
moose.delete('/model')
except:
pass
rdes = rd.rdesigneur(
elecPlotDt = elecPlotDt,
elecDt = elecDt,
cellProto = Parameters['cellProto'],
chanProto = Parameters['chanProto'],
passiveDistrib = Parameters['passiveDistrib'],
chanDistrib = Parameters['chanDistrib'],
stimList = [['soma', '1', '.', 'inject', f'(t>={preStimTime} && t<={preStimTime+injectTime}) ? {CurrInjection} : 0']],
plotList = [
['soma', '1', '.', 'Vm', 'Soma Membrane potential MOOSE'],
],
)
rdes.buildModel()
#Setup clock table to record time
clk = moose.element('/clock')
plott = moose.Table('/model/graphs/plott')
moose.connect(plott, 'requestOut', clk, 'getCurrentTime')
#Setting Ca_conc B value
try:
moose.element('/model/elec/soma/Ca_conc').B = Parameters['Ca_B']
# moose.element('/model/elec/soma/Ca_conc').B *= 2
# moose.element('/model/elec/soma/Ca_conc').B = 0
except:
pass
print('MOOSE Model generated')
return rdes
def runModel(Parameterfile, CurrInjection):
    """Build the model, run the current-clamp protocol and return the trace.

    Returns [time_vector, Vm_vector] taken from the MOOSE tables.
    """
    # Protocol timing in seconds: baseline, current step, recovery.
    preStimTime, injectTime, postStimTime = 1, 0.5, 1
    generateModel(Parameterfile, CurrInjection)
    moose.reinit()
    moose.start(preStimTime + injectTime + postStimTime)
    voltage_trace = moose.element('/model/graphs/plot0').vector
    time_trace = moose.element('/model/graphs/plott').vector
    return [time_trace, voltage_trace]
def plotModel(Parameterfile, CurrInjection):
    """Build the model, run the current-clamp protocol, then display the
    rdesigneur plots and return the rdesigneur object.

    Arguments:
    Parameterfile -- A valid parameterfile address, string
    CurrInjection -- Current clamp level, float
    """
    # Same protocol timing as runModel (seconds).
    preStimTime, injectTime, postStimTime = 1, 0.5, 1
    rdes = generateModel(Parameterfile, CurrInjection)
    moose.reinit()
    moose.start(preStimTime + injectTime + postStimTime)
    rdes.display()
    return rdes
# Smoke test: build, plot and re-run the dummy model with a 150 pA step.
if __name__ == '__main__':
    rdes = plotModel('Modelparameters/dummyModel.xml', 150e-12)
    tvec, Vmvec = runModel('Modelparameters/dummyModel.xml', 150e-12)
| [
"analkumar2@gmail.com"
] | analkumar2@gmail.com |
80dd01870ac9e112e4604f30ffbae9f5908ff053 | 0b9622c6d67ddcb252a7a4dd9b38d493dfc9a25f | /HackerRank/30daysChallenge/Day17.py | 8901c15a41dedb01041ed65199ea7db65b6992e5 | [] | no_license | d80b2t/python | eff2b19a69b55d73c4734fb9bc115be1d2193e2d | 73603b90996221e0bcd239f9b9f0458b99c6dc44 | refs/heads/master | 2020-05-21T20:43:54.501991 | 2017-12-24T12:55:59 | 2017-12-24T12:55:59 | 61,330,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,767 | py | """
Objective: Yesterday's challenge taught you to manage exceptional situations by using try and catch blocks. In today's challenge, you're going to practice throwing and propagating an exception. Check out the Tutorial tab for learning materials and an instructional video!
Task: Write a Calculator class with a single method: int power(int,int). The power method takes two integers, n and p, as parameters and returns the integer result of n^p. If either n or p is negative, then the method must throw an exception with the message: n and p should be non-negative.
Note: Do not use an access modifier (e.g.: public) in the declaration for your Calculator class.
Input Format: Input from stdin is handled for you by the locked stub code in your editor. The first line contains an integer, , the number of test cases. Each of the subsequent lines describes a test case in space-separated integers denoting and , respectively.
Constraints: No Test Case will result in overflow for correctly written code.
Output Format: Output to stdout is handled for you by the locked stub code in your editor. There are lines of output, where each line contains the result of as calculated by your Calculator class' power method.
Sample Input:
4
3 5
2 4
-1 -2
-1 3
Sample Output
243
16
n and p should be non-negative
n and p should be non-negative
"""
class Calculator:
    """Calculator exposing integer exponentiation (HackerRank Day 17)."""

    def power(self, n, p):
        """Return n raised to the power p.

        Raises:
            ValueError: if n or p is negative.  The message text is exactly
                the one the challenge requires; ValueError is a subclass of
                Exception, so existing ``except Exception`` callers keep
                working.  (The original raised a bare Exception and also
                stored n/p on the instance for no reason.)
        """
        if n < 0 or p < 0:
            raise ValueError("n and p should be non-negative")
        return n ** p
# Driver: read T test cases of "n p" from stdin and print n**p, or the
# exception message when either operand is negative.
myCalculator=Calculator()
T=int(input())
for i in range(T):
    n,p = map(int, input().split())
    try:
        ans=myCalculator.power(n,p)
        print(ans)
    except Exception as e:
        print(e)
| [
"npross@lbl.gov"
] | npross@lbl.gov |
00fa5669f3518c900044e1e8337d4c59875d531f | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part008087.py | 760f0f0fb7dcdf93745f9ceafbd5db722a6e9fff | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,219 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher17046(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i4.1.0', 1, 1, S(1)), Mul)
]),
1: (1, Multiset({1: 1}), [
(VariableWithCount('i4.1.0', 1, 1, S(1)), Mul)
]),
2: (2, Multiset({2: 1}), [
(VariableWithCount('i4.1.0', 1, 1, S(1)), Mul)
]),
3: (3, Multiset({3: 1}), [
(VariableWithCount('i4.1.0', 1, 1, S(1)), Mul)
]),
4: (4, Multiset({}), [
(VariableWithCount('i4.1.0_1', 1, 1, S(1)), Mul),
(VariableWithCount('i4.1.1', 1, 1, None), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher17046._instance is None:
CommutativeMatcher17046._instance = CommutativeMatcher17046()
return CommutativeMatcher17046._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 17045
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i4.1.1.0', S(0))
except ValueError:
pass
else:
pass
# State 17047
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i4.1.1.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 17048
if len(subjects) >= 1:
tmp3 = subjects.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i4.1.1.1.0', tmp3)
except ValueError:
pass
else:
pass
# State 17049
if len(subjects) == 0:
pass
# 0: c + x*d
yield 0, subst3
subjects.appendleft(tmp3)
if len(subjects) >= 1 and isinstance(subjects[0], Mul):
tmp5 = subjects.popleft()
associative1 = tmp5
associative_type1 = type(tmp5)
subjects6 = deque(tmp5._args)
matcher = CommutativeMatcher17051.get()
tmp7 = subjects6
subjects6 = []
for s in tmp7:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp7, subst1):
pass
if pattern_index == 0:
pass
# State 17052
if len(subjects) == 0:
pass
# 0: c + x*d
yield 0, subst2
subjects.appendleft(tmp5)
if len(subjects) >= 1 and isinstance(subjects[0], Add):
tmp8 = subjects.popleft()
associative1 = tmp8
associative_type1 = type(tmp8)
subjects9 = deque(tmp8._args)
matcher = CommutativeMatcher17054.get()
tmp10 = subjects9
subjects9 = []
for s in tmp10:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp10, subst0):
pass
if pattern_index == 0:
pass
# State 17060
if len(subjects) == 0:
pass
# 0: c + x*d
yield 0, subst1
subjects.appendleft(tmp8)
if len(subjects) >= 1 and isinstance(subjects[0], Pow):
tmp11 = subjects.popleft()
subjects12 = deque(tmp11._args)
# State 17142
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i4.1.2.0', S(0))
except ValueError:
pass
else:
pass
# State 17143
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i4.1.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 17144
if len(subjects12) >= 1:
tmp15 = subjects12.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i4.1.2.1.0', tmp15)
except ValueError:
pass
else:
pass
# State 17145
if len(subjects12) >= 1 and subjects12[0] == Integer(2):
tmp17 = subjects12.popleft()
# State 17146
if len(subjects12) == 0:
pass
# State 17147
if len(subjects) == 0:
pass
# 1: (c + x*d)**2
yield 1, subst3
subjects12.appendleft(tmp17)
if len(subjects12) >= 1:
tmp18 = subjects12.popleft()
subst4 = Substitution(subst3)
try:
subst4.try_add_variable('i4.1.2', tmp18)
except ValueError:
pass
else:
pass
# State 17203
if len(subjects12) == 0:
pass
# State 17204
if len(subjects) == 0:
pass
# 2: (c + x*d)**n
yield 2, subst4
subjects12.appendleft(tmp18)
subjects12.appendleft(tmp15)
if len(subjects12) >= 1 and isinstance(subjects12[0], Mul):
tmp20 = subjects12.popleft()
associative1 = tmp20
associative_type1 = type(tmp20)
subjects21 = deque(tmp20._args)
matcher = CommutativeMatcher17149.get()
tmp22 = subjects21
subjects21 = []
for s in tmp22:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp22, subst1):
pass
if pattern_index == 0:
pass
# State 17150
if len(subjects12) >= 1 and subjects12[0] == Integer(2):
tmp23 = subjects12.popleft()
# State 17151
if len(subjects12) == 0:
pass
# State 17152
if len(subjects) == 0:
pass
# 1: (c + x*d)**2
yield 1, subst2
subjects12.appendleft(tmp23)
if len(subjects12) >= 1:
tmp24 = []
tmp24.append(subjects12.popleft())
while True:
if len(tmp24) > 1:
tmp25 = create_operation_expression(associative1, tmp24)
elif len(tmp24) == 1:
tmp25 = tmp24[0]
else:
assert False, "Unreachable"
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i4.1.2', tmp25)
except ValueError:
pass
else:
pass
# State 17205
if len(subjects12) == 0:
pass
# State 17206
if len(subjects) == 0:
pass
# 2: (c + x*d)**n
yield 2, subst3
if len(subjects12) == 0:
break
tmp24.append(subjects12.popleft())
subjects12.extendleft(reversed(tmp24))
subjects12.appendleft(tmp20)
if len(subjects12) >= 1:
tmp27 = subjects12.popleft()
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i4.1.1', tmp27)
except ValueError:
pass
else:
pass
# State 17677
if len(subjects12) >= 1 and subjects12[0] == Integer(2):
tmp29 = subjects12.popleft()
# State 17678
if len(subjects12) == 0:
pass
# State 17679
if len(subjects) == 0:
pass
# 3: x**2
yield 3, subst1
subjects12.appendleft(tmp29)
subjects12.appendleft(tmp27)
if len(subjects12) >= 1 and isinstance(subjects12[0], Add):
tmp30 = subjects12.popleft()
associative1 = tmp30
associative_type1 = type(tmp30)
subjects31 = deque(tmp30._args)
matcher = CommutativeMatcher17154.get()
tmp32 = subjects31
subjects31 = []
for s in tmp32:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp32, subst0):
pass
if pattern_index == 0:
pass
# State 17160
if len(subjects12) >= 1 and subjects12[0] == Integer(2):
tmp33 = subjects12.popleft()
# State 17161
if len(subjects12) == 0:
pass
# State 17162
if len(subjects) == 0:
pass
# 1: (c + x*d)**2
yield 1, subst1
subjects12.appendleft(tmp33)
if len(subjects12) >= 1:
tmp34 = []
tmp34.append(subjects12.popleft())
while True:
if len(tmp34) > 1:
tmp35 = create_operation_expression(associative1, tmp34)
elif len(tmp34) == 1:
tmp35 = tmp34[0]
else:
assert False, "Unreachable"
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i4.1.2', tmp35)
except ValueError:
pass
else:
pass
# State 17207
if len(subjects12) == 0:
pass
# State 17208
if len(subjects) == 0:
pass
# 2: (c + x*d)**n
yield 2, subst2
if len(subjects12) == 0:
break
tmp34.append(subjects12.popleft())
subjects12.extendleft(reversed(tmp34))
subjects12.appendleft(tmp30)
subjects.appendleft(tmp11)
return
yield
from .generated_part008088 import *
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from .generated_part008091 import *
from .generated_part008089 import *
from matchpy.utils import VariableWithCount
from multiset import Multiset
from .generated_part008092 import * | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
7a5991c57222dafdfee32c8d59345b334061e4ce | 423f5eb4cf319ea11701ad2c84c045eeeb4e261c | /class-29/demo/custom_user/users/views.py | 643b390ee821ab3200991c48ec7ea530854d7bea | [] | no_license | MsDiala/amman-python-401d2 | 4031899d0a8d70f1ecd509e491b2cb0c63c23a06 | bb9d102da172f51f6df7371d2208146bbbee72fb | refs/heads/master | 2023-06-14T18:46:04.192199 | 2021-07-12T12:48:44 | 2021-07-12T12:48:44 | 315,765,421 | 1 | 0 | null | 2020-11-24T22:16:10 | 2020-11-24T22:16:09 | null | UTF-8 | Python | false | false | 350 | py | from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic.edit import CreateView
from .forms import CustomUserCreationForm
# Create your views here.
class SignUpView(CreateView):
    """Registration view: renders and processes the custom user sign-up form."""
    # Form that validates input and creates the custom user model instance.
    form_class = CustomUserCreationForm
    # After a successful sign-up, send the new user to the login page.
    success_url = reverse_lazy('login')
    template_name = 'registration/signup.html'
| [
"ahmad.alawad.sf@gmail.com"
] | ahmad.alawad.sf@gmail.com |
1f81c51bf9b448533ea259031e5a3032efc5929b | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2009.1/kernel/default/drivers/module-alsa-driver/actions.py | fad2a69a934d6867aa230a8c09e48cc680620014 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,133 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2009 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
from pisi.actionsapi import kerneltools
# Version string of the kernel these modules are configured and built against.
KDIR = kerneltools.getKernelVersion()
# Exclude everything under / from binary stripping
# (NOTE(review): presumably to keep kernel-module symbols intact -- confirm).
NoStrip = ["/"]
if "_" in get.srcVERSION():
    # Snapshot versions (containing "_") unpack into an unversioned directory.
    WorkDir = "alsa-driver"
else:
    # Upstream tarball unpacks into a versioned directory.
    WorkDir = "alsa-driver-%s" % get.srcVERSION()
def setup():
    """Configure the ALSA driver sources against the current kernel's build tree."""
    autotools.configure("--with-oss \
                         --with-kernel=/lib/modules/%s/build \
                         --with-isapnp=yes \
                         --with-sequencer=yes \
                         --with-card-options=all \
                         --disable-verbose-printk \
                         --with-cards=all" % KDIR)
    # Needed for V4L stuff
    shelltools.sym("%s/alsa-driver/include/config.h" % get.workDIR(), "%s/alsa-driver/sound/include/config.h" % get.workDIR())
    shelltools.sym("%s/alsa-driver/include/config1.h" % get.workDIR(), "%s/alsa-driver/sound/include/config1.h" % get.workDIR())
def build():
    """Build the ALSA modules, then the V4L drivers out-of-tree against KDIR."""
    autotools.make()
    # Build v4l drivers (needs the just-produced Module.symvers for symbol versions).
    shelltools.copy("Module.symvers", "v4l/")
    autotools.make("-C /lib/modules/%s/build M=%s/v4l V=1 modules" % (KDIR, get.curDIR()))
def install():
    """Install kernel modules, V4L drivers, the symvers file, alsa-info and docs."""
    autotools.rawInstall("DESTDIR=%s" % get.installDIR(), "install-modules")
    # Install v4l drivers
    for d in ["saa7134", "cx88", "cx231xx", "em28xx"]:
        pisitools.insinto("/lib/modules/%s/kernel/sound/drivers" % KDIR, "v4l/%s/*.ko" % d)
    # Copy symvers file for external module building like saa7134-alsa, cx2388-alsa, etc.
    pisitools.insinto("/lib/modules/%s/kernel/sound" % KDIR, "Module.symvers", "Module.symvers.alsa")
    # Install alsa-info
    pisitools.insinto("/usr/bin", "utils/alsa-info.sh", "alsa-info")
    for f in shelltools.ls("alsa-kernel/Documentation/*txt"):
        pisitools.dodoc(f)
    pisitools.dodoc("doc/serialmidi.txt")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
dfcf294e0b1aa993aa455bf04d608367573ead1d | 9dea14a0818dfd318b238b6c98c0e753d162896d | /venv/Scripts/pip-script.py | 8bf9870477e1fde5d687039f19bd2a794435a9c4 | [] | no_license | CatchTheDog/data_visualization | a665094ef2e64502992d0de65ddd09859afb756b | 326ecd421a7945e9566bec17bd4db18b86a6e5a4 | refs/heads/master | 2020-03-31T08:08:22.544009 | 2018-10-09T05:36:02 | 2018-10-09T05:36:02 | 152,047,286 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 438 | py | #!C:\马俊强\软件安装\pycharm\workspace\data_visualization\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Drop the '-script.py'/'.exe' wrapper suffix so pip sees its plain name in argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Resolve pip's console_scripts entry point and exit with its return code.
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
| [
"1924528451@qq.com"
] | 1924528451@qq.com |
4e8bf5c534c923a03e4af6dd7d56b15d1dc3a6cb | 6710c52d04e17facbc9fb35a7df313f7a2a7bd53 | /0137. Single Number II.py | b870ddfce211866e34fd0523d542c8aa1ff14c5a | [] | no_license | pwang867/LeetCode-Solutions-Python | 535088fbe747a453360457728cc22cf336020bd2 | 188befbfb7080ba1053ee1f7187b177b64cf42d2 | refs/heads/master | 2022-11-13T16:20:28.211707 | 2020-06-28T06:01:14 | 2020-06-28T06:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | # https://leetcode.com/problems/single-number-ii/discuss/43295
# /Detailed-explanation-and-generalization-of-the-bitwise-operation-method-for-single-numbers
class Solution(object):
    def singleNumber(self, nums):
        """Return the value occurring once when every other value occurs 3 times.

        Bitwise modulo-3 counter: ``ones`` accumulates bits seen 1 (mod 3)
        times, ``twos`` bits seen 2 (mod 3) times. Any bit present in both
        has now been seen three times, so it is cleared from both; after the
        loop only the singleton's bits survive.

        :type nums: List[int]
        :rtype: int
        """
        ones = 0
        twos = 0
        for value in nums:
            twos ^= ones & value
            ones ^= value
            keep = ~(ones & twos)   # mask clearing bits seen three times
            ones &= keep
            twos &= keep
        return ones | twos
"""
Given a non-empty array of integers, every element appears three times except for one, which appears exactly once. Find that single one.
Note:
Your algorithm should have a linear runtime complexity. Could you implement it without using extra memory?
Example 1:
Input: [2,2,3,2]
Output: 3
Example 2:
Input: [0,1,0,1,0,1,99]
Output: 99
"""
| [
"wzhou007@ucr.edu"
] | wzhou007@ucr.edu |
d9dcba660bcd03f1948e39ddf267d4040d3cd0c9 | 0eeeb14c1f5952a9d9c7b3bc13e708d2bf2a17f9 | /algos/make_graph.py | 5a5e1869f8eb8d02385257c0dcb4982bf31b0611 | [] | no_license | mbhushan/pydev | 6cc90140e1103c5d5e52e55287d02ed79d1a5c36 | bdf84710da87f58253cfec408c728f6a9134a2ea | refs/heads/master | 2020-04-25T07:22:03.281719 | 2015-12-22T16:28:42 | 2015-12-22T16:28:42 | 26,382,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py |
def make_link(graph, n1, n2):
    """Insert an undirected edge n1<->n2, creating adjacency dicts as needed."""
    graph.setdefault(n1, {})[n2] = 1
    graph.setdefault(n2, {})[n1] = 1
    return graph
def main():
    """Build a 5-node ring and print its node count, edge count and adjacency
    (Python 2 print statements)."""
    aring = {}
    n = 5
    for i in range(n):
        make_link(aring, i, (i+1) % n)
    print len(aring)
    # Each undirected edge is stored once per endpoint, hence the division by 2.
    print sum([len(aring[node]) for node in aring.keys()])/2
    print aring
if __name__ == '__main__':
main()
| [
"manibhushan.cs@gmail.com"
] | manibhushan.cs@gmail.com |
67af6746f4006cdd0dbaf50e42e3b97229cf2e3d | f016dd6fd77bb2b135636f904748dbbab117d78b | /day9/异常处理.py | c6560f0dae933a2be1434adeda9c293a01f7fbb5 | [
"Apache-2.0"
] | permissive | w7374520/Coursepy | b3eddfbeeb475ce213b6f627d24547a1d36909d8 | ac13f8c87b4c503135da51ad84c35c745345df20 | refs/heads/master | 2020-04-26T23:57:42.882813 | 2018-05-24T07:54:13 | 2018-05-24T07:54:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | #!/usr/bin/python
# -*- coding utf8 -*-
try:
    int('xx')
except ValueError as e:
    # 'xx' is not a valid integer literal, so this ValueError handler runs.
    print(e)
except NameError:
    print('NameError')
# Catch-all handler (original comment: "universal exception").
except Exception as e:
    print(e)
else:
    # Runs only when the try block raised nothing.
    print('木有异常')
finally:
    # Always executed, raised or not; finally is typically used to release resources.
    print('有无异常都会执行,finally通常用于回收资源')
'''
'''
| [
"windfishing5@gmail.com"
] | windfishing5@gmail.com |
f4e017906f223cb2f03aaf41d50c5683a986308a | 48832d27da16256ee62c364add45f21b968ee669 | /res_bw/scripts/common/lib/encodings/undefined.py | 033836eb5d544ca83ab1beb3ac718d1751bd755f | [] | no_license | webiumsk/WOT-0.9.15.1 | 0752d5bbd7c6fafdd7f714af939ae7bcf654faf7 | 17ca3550fef25e430534d079876a14fbbcccb9b4 | refs/heads/master | 2021-01-20T18:24:10.349144 | 2016-08-04T18:08:34 | 2016-08-04T18:08:34 | 64,955,694 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,550 | py | # 2016.08.04 19:59:24 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/encodings/undefined.py
""" Python 'undefined' Codec
This codec will always raise a ValueError exception when being
used. It is intended for use by the site.py file to switch off
automatic string to Unicode coercion.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
class Codec(codecs.Codec):
    # Stateless codec that deliberately refuses both directions of conversion.
    def encode(self, input, errors = 'strict'):
        """Always raise UnicodeError: the 'undefined' encoding is unusable by design."""
        raise UnicodeError('undefined encoding')
    def decode(self, input, errors = 'strict'):
        """Always raise UnicodeError: the 'undefined' encoding is unusable by design."""
        raise UnicodeError('undefined encoding')
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final = False):
        """Always raise UnicodeError; incremental encoding is likewise disabled."""
        raise UnicodeError('undefined encoding')
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final = False):
        """Always raise UnicodeError; incremental decoding is likewise disabled."""
        raise UnicodeError('undefined encoding')
class StreamWriter(Codec, codecs.StreamWriter):
    # Inherits the always-raising encode from Codec; no extra behaviour needed.
    pass
class StreamReader(Codec, codecs.StreamReader):
    # Inherits the always-raising decode from Codec; no extra behaviour needed.
    pass
def getregentry():
    """Return the CodecInfo entry used to register the 'undefined' codec."""
    return codecs.CodecInfo(name='undefined', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader)
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\encodings\undefined.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:59:24 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
5ded482fc4add564cdac998bf4060c588c0ea0d7 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/coverage-big-3087.py | 960b9779450a1b71cea4865ee75db3be32fd7930 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,347 | py | count:int = 0
count2:int = 0
count3:int = 0
count4:int = 0
count5:int = 0
def foo(s: str) -> int:
return len(s)
def foo2(s: str, s2: str) -> int:
return len(s)
def foo3(s: str, s2: str, s3: str) -> int:
return len(s)
def foo4(s: str, s2: str, s3: str, s4: str) -> int:
return len(s)
def foo5(s: str, s2: str, s3: str, s4: str, s5: str) -> int:
return len(s)
class bar(object):
p: bool = True
def baz(self:"bar", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar2(object):
p: bool = True
p2: bool = True
def baz(self:"bar2", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar2", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar3(object):
p: bool = True
p2: bool = True
p3: bool = True
def baz(self:"bar3", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar3", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar3", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar4(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
def baz(self:"bar4", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar4", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar4", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar4", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
$ID.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar5(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
p5: bool = True
def baz(self:"bar5", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar5", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar5", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz5(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int], xx5: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
x5:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
y5:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
def qux5(y: int, y2: int, y3: int, y4: int, y5: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
nonlocal x5
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
print(bar().baz([1,2]))
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
62564c3fc592a5f84af5bf01c6625f58d819f220 | 153c943de6095aaf5803b8f452af362c4a9c66e8 | /blogger-business/business/migrations/0002_business_image.py | 2ed7550f54199dd1a44e6e2e81c8d315fb5b8d5d | [] | no_license | 3asyPe/blogger-business | 428427a0832f77c45914e737d6408d9051b57b42 | 6b9b1edefd2700b554a5a26d29dfe5158ca4861b | refs/heads/master | 2023-03-03T11:09:39.813603 | 2021-01-29T11:29:45 | 2021-01-29T11:29:45 | 304,942,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | # Generated by Django 3.1 on 2020-10-18 15:03
import business.utils
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional `image` field (custom upload path) to the Business model."""
    dependencies = [
        ('business', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='business',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to=business.utils.upload_image_path_business),
        ),
    ]
| [
"alex.kvasha228@gmail.com"
] | alex.kvasha228@gmail.com |
6d0e67e73ed90c5669a471a8ac43e88ab9540de0 | f3a1c2ab0935902a2ca1f97e0979676ff1636cc8 | /generator_images.py | 07debe829e2c07bd982006fe125d84410b83fc09 | [] | no_license | kerzhao/pySpark-Hive-sql | ddcdf85d61cd6d992a74548330b6798bbefbf2a8 | da0d73d6e8ad3ac3ca23e90d287c0e730df99696 | refs/heads/master | 2020-07-15T07:48:47.106065 | 2017-06-28T02:48:26 | 2017-06-28T02:48:26 | 94,305,331 | 0 | 0 | null | 2017-06-14T08:01:23 | 2017-06-14T08:01:23 | null | UTF-8 | Python | false | false | 9,090 | py | #coding:utf-8
import random
import os
from itertools import product
from PIL import Image, ImageDraw, ImageFont
import cv2
import numpy as np
"""
基本:
1 图片size
2 字符个数
3 字符区域(重叠、等分)
4 字符位置(固定、随机)
5 字符size(所占区域大小的百分比)
6 字符fonts
7 字符 type (数字、字母、汉字、数学符号)
8 字符颜色
9 背景颜色
高级:
10 字符旋转
11 字符扭曲
12 噪音(点、线段、圈)
"""
chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabdefghijlmnqrtuwxy"
def decode(y):
    """Map a per-position one-hot prediction array back to its character string."""
    best = np.argmax(np.array(y), axis=2)[:, 0]
    return ''.join(chars[idx] for idx in best)
#----------------------------------------------------------------------
def sin(x, height):
    """Return integer y-coordinates of a randomly parameterised sine curve over *x*.

    Period, amplitude divisor, phase and vertical offset are all drawn at
    random, so each call produces a different curve (used as image noise).
    NOTE(review): under Python 2 without a __future__ division import,
    ``height / d`` floors -- confirm intended semantics when porting.
    """
    a = float(random.choice([5, 12, 24, 48, 128]))
    d = random.choice([2, 4])
    c = random.randint(1, 100)
    b = 2 * random.random()
    # list(...) keeps this correct on Python 3, where map() is a lazy iterator
    # and np.array(map(...)) would silently build a useless 0-d object array.
    return np.array(list(map(int, height / d * (np.sin((x+c)/a) + b))))
def randRGB():
    """Return a random RGB triple with every channel drawn from [40, 110]."""
    return tuple(random.randint(40, 110) for _ in range(3))
def cha_draw(cha, text_color, font, rotate,size_cha, max_angle=45):
    """Render one character on a transparent RGBA tile, optionally rotated.

    The tile is oversized (2x the character size) so a random rotation of up
    to ``max_angle`` degrees cannot clip the glyph; the result is cropped to
    the glyph's bounding box before being returned.
    """
    im = Image.new(mode='RGBA', size=(size_cha*2, size_cha*2))
    drawer = ImageDraw.Draw(im)
    drawer.text(xy=(0, 0), text=cha, fill=text_color, font=font) # text = content, fill = colour, font = typeface (incl. size)
    if rotate:
        #max_angle = 45 # to be tuned
        angle = random.randint(-max_angle, max_angle)
        im = im.rotate(angle, Image.BILINEAR, expand=1)
    im = im.crop(im.getbbox())
    return im
def captcha_draw(size_im, nb_cha, set_cha, fonts=None, overlap=0.1,
        rd_bg_color=False, rd_text_color=False, rd_text_pos=False, rd_text_size=False,
        rotate=False, noise=None, dir_path=''):
    """Render one captcha image; return (pixel array, list of drawn characters).

    overlap: fraction by which adjacent character cells may overlap; the
        visible effect depends on image width vs character width.
    Character cells are treated as square and all characters share one size.
    Distortion is not implemented yet.
    noise options: 'point', 'line', 'circle' (and 'sin').
    fonts: dict of font paths (e.g. key 'eng' for English fonts).
    Labels live in label.txt; line i holds the label of "i.jpg", i from 1.
    """
    rate_cha = 1.3 # rate to be tuned
    rate_noise = 0.25 # rate of noise
    cnt_noise = random.randint(10, 20)
    width_im, height_im = size_im
    width_cha = int(width_im / max(nb_cha-overlap, 1)) # width of one character cell
    height_cha = height_im # height of one character cell
    width_noise = width_im
    height_noise = height_im
    bg_color = 'white'
    text_color = 'black'
    derx = 0
    dery = 0
    if rd_text_size:
        rate_cha = random.uniform(rate_cha-0.1, rate_cha+0.1) # to be tuned
    size_cha = int(rate_cha*min(width_cha, height_cha)) # character size
    size_noise = int(rate_noise*height_noise) # noise glyph size
    #if rd_bg_color:
        #bg_color = randRGB()
    bg_color = (random.randint(165, 176), random.randint(165, 176), random.randint(165, 176))
    im = Image.new(mode='RGB', size=size_im, color=bg_color) # color = background colour, size = image size
    drawer = ImageDraw.Draw(im)
    contents = []
    # Scatter small randomly rotated glyphs across the canvas as background noise.
    for i in range(cnt_noise):
        text_color = (random.randint(120, 140), random.randint(120, 140), random.randint(120, 140))
        #text_color = (random.randint(14, 50), random.randint(14, 50), random.randint(14, 50))
        derx = random.randint(0, max(width_noise-size_noise, 0))
        dery = random.randint(0, max(height_noise-size_noise, 0))
        cha_noise = random.choice(set_cha)
        font_noise = ImageFont.truetype(fonts['eng'], size_noise)
        im_noise = cha_draw(cha_noise, text_color, font_noise, rotate, size_noise, max_angle=180)
        im.paste(im_noise,
            (derx+random.randint(0, 10), dery++random.randint(0, 10)),
            im_noise) # top-left corner of the pasted glyph
    # Draw the actual captcha characters, one per (possibly overlapping) cell.
    for i in range(nb_cha):
        if rd_text_color:
            text_color = randRGB()
        if rd_text_pos:
            derx = random.randint(0, max(width_cha-size_cha-5, 0))
            dery = random.randint(0, max(height_cha-size_cha-5, 0))
        # font = ImageFont.truetype("arial.ttf", size_cha)
        cha = random.choice(set_cha)
        font = ImageFont.truetype(fonts['eng'], size_cha)
        contents.append(cha)
        im_cha = cha_draw(cha, text_color, font, rotate, size_cha)
        im.paste(im_cha,
            (int(max(i-overlap, 0)*width_cha)+derx+random.randint(0, 10), dery++random.randint(0, 10)),
            im_cha) # top-left corner of the pasted glyph
    if 'point' in noise:
        nb_point = 30
        color_point = randRGB()
        for i in range(nb_point):
            x = random.randint(0, width_im)
            y = random.randint(0, height_im)
            drawer.point(xy=(x, y), fill=color_point)
    if 'sin' in noise:
        img = np.asarray(im)
        color_sine = randRGB()
        x = np.arange(0, width_im)
        y = sin(x, height_im)
        # Draw the sine curve 4 pixels thick, only over untouched background pixels.
        for k in range(4):
            for i, j in zip(x, y+k):
                if j >= 0 and j < height_im and all(img[j, i]==bg_color):
                    drawer.point(xy=(i, j), fill=color_sine)
    if 'line' in noise:
        nb_line = 10
        for i in range(nb_line):
            color_line = randRGB()
            sx = random.randint(0, width_im)
            sy = random.randint(0, height_im)
            ex = random.randint(0, width_im)
            ey = random.randint(0, height_im)
            drawer.line(xy=(sx, sy, ex, ey), fill=color_line)
    if 'circle' in noise:
        nb_circle = 5
        color_circle = randRGB()
        for i in range(nb_circle):
            sx = random.randint(0, width_im-50)
            sy = random.randint(0, height_im-20)
            ex = sx+random.randint(15, 25)
            ey = sy+random.randint(10, 15)
            drawer.arc((sx, sy, ex, ey), 0, 360, fill=color_circle)
    return np.asarray(im), contents
def captcha_generator(width,
                      height,
                      batch_size=64,
                      set_cha=chars,
                      font_dir='/home/ubuntu/fonts/english'
                      ):
    """Infinite generator yielding (X, y) captcha training batches.

    X is a (batch_size, height, width, 3) uint8 image array; y is a list of
    n_len arrays, each one-hot encoding one character position of the label.
    NOTE: the same X/y buffers are reused and re-yielded on every iteration.
    """
    size_im = (width, height)
    # Candidate style options; one value is drawn at random per sample.
    overlaps = [0.0, 0.3, 0.6]
    rd_text_poss = [True, True]
    rd_text_sizes = [True, True]
    rd_text_colors = [True, True] # False would mean all glyphs share one colour (black).
    rd_bg_color = True
    noises = [['line', 'point']]
    rotates = [True, True]
    nb_chas = [4, 6]
    font_paths = []
    # Collect every font file found anywhere under font_dir.
    for dirpath, dirnames, filenames in os.walk(font_dir):
        for filename in filenames:
            filepath = dirpath + os.sep + filename
            font_paths.append({'eng':filepath})
    n_len = 6
    n_class = len(set_cha)
    X = np.zeros((batch_size, height, width, 3), dtype=np.uint8)
    y = [np.zeros((batch_size, n_class), dtype=np.uint8) for i in range(n_len)]
    while True:
        for i in range(batch_size):
            overlap = random.choice(overlaps)
            rd_text_pos = random.choice(rd_text_poss)
            rd_text_size = random.choice(rd_text_sizes)
            rd_text_color = random.choice(rd_text_colors)
            noise = random.choice(noises)
            rotate = random.choice(rotates)
            nb_cha = 6
            font_path = random.choice(font_paths)
            dir_name = 'all'
            dir_path = 'img_data/'+dir_name+'/'
            im, contents = captcha_draw(size_im=size_im, nb_cha=nb_cha, set_cha=set_cha,
                    overlap=overlap, rd_text_pos=rd_text_pos, rd_text_size=False,
                    rd_text_color=rd_text_color, rd_bg_color=rd_bg_color, noise=noise,
                    rotate=rotate, dir_path=dir_path, fonts=font_path)
            contents = ''.join(contents)
            X[i] = im
            # One-hot encode each character position of the label string.
            for j, ch in enumerate(contents):
                y[j][i, :] = 0
                y[j][i, set_cha.find(ch)] = 1
        yield X, y
#----------------------------------------------------------------------
def captcha_save():
    """Pull one batch from captcha_generator and save each image as <id>.jpg
    under img_data/all/, continuing the existing numbering (Python 2 code)."""
    a = captcha_generator(140, 44)
    dir_path = 'img_data/all/'
    X, y = a.next()
    for x in X:
        if os.path.exists(dir_path) == False: # Create the output folder if it does not exist yet.
            os.makedirs(dir_path)
            pic_id = 1
        else:
            pic_names = map(lambda x: x.split('.')[0], os.listdir(dir_path))
            #pic_names.remove('label')
            pic_id = max(map(int, pic_names))+1 # Continue numbering after the highest existing image id.
        img_name = str(pic_id) + '.jpg'
        img_path = dir_path + img_name
        label_path = dir_path + 'label.txt'
        #with open(label_path, 'a') as f:
            #f.write(''.join(pic_id)+'\n') # Append the new image's label text to the label file.
        print img_path
        img = Image.fromarray(np.uint8(x))
        img.save(img_path)
if __name__ == "__main__":
    # test()
    #captcha_generator(140, 44)
    # Script entry point: write 100 batches of captcha images to img_data/all/.
    for _ in range(100):
        captcha_save()
| [
"kerzhao@163.com"
] | kerzhao@163.com |
79ef43737c6204f12ad8e05708929563a5c2dca0 | cb6b1aa2d61b80cba29490dfe8755d02c7b9a79f | /sakura/scripting/mathops.py | 34da3e965be7c239e56fd4fa19d2a3961dbc79dc | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | piotrmaslanka/Ninja-Tower | c127a64888bc3306046e4b400ce3a8c6764b5481 | 7eca86e23513a8805dd42c3c542b7fae0499576b | refs/heads/master | 2021-12-06T07:56:13.796922 | 2015-10-15T08:10:35 | 2015-10-15T08:10:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | from __future__ import division
from math import hypot
def vector_towards(sx, sy, tx, ty, ln):
    """Return the vector of length *ln* pointing from (sx, sy) towards (tx, ty).

    Returns None when source and target coincide (no direction to scale).
    """
    dx = tx - sx
    dy = ty - sy
    distance = hypot(dx, dy)
    if distance == 0:
        return None  # cannot accelerate nowhere
    scale = ln / distance
    return dx * scale, dy * scale
| [
"piotr.maslanka@henrietta.com.pl"
] | piotr.maslanka@henrietta.com.pl |
29f06f178db222814e1a5e134ac9f3129211a536 | 8683aed87d25177f9a0a026c5c48fb412745ff89 | /ledger/payments/bpoint/models.py | ad000b68b8caecf613dc23fca0b703ea2b22e7b5 | [
"Apache-2.0"
] | permissive | gaiaresources/ledger | 2b1e9bfee14abbfbc993c5c62f60589a6eaf0a86 | b87c4bf4226e3e2b2f0bc24303f5d107f94f134e | refs/heads/master | 2020-04-05T20:50:06.727970 | 2017-07-18T02:16:33 | 2017-07-18T02:16:33 | 51,276,638 | 2 | 0 | null | 2017-07-13T07:09:15 | 2016-02-08T02:51:55 | Python | UTF-8 | Python | false | false | 4,127 | py | from __future__ import unicode_literals
import datetime
from django.db import models
from ledger.payments.bpoint import settings as bpoint_settings
from django.utils.encoding import python_2_unicode_compatible
from oscar.apps.order.models import Order
from ledger.accounts.models import EmailUser
class BpointTransaction(models.Model):
    """One transaction processed through the BPOINT payment gateway."""
    # Valid gateway actions for a transaction.
    ACTION_TYPES = (
        ('payment','payment'),
        ('refund','refund'),
        ('reversal','reversal'),
        ('preauth', 'preauth'),
        ('capture','capture')
    )
    # Card scheme codes as reported by the gateway.
    CARD_TYPES = (
        ('AX','American Express'),
        ('DC','Diners Club'),
        ('JC','JCB Card'),
        ('MC','MasterCard'),
        ('VC','Visa')
    )
    SUB_TYPES = (
        ('single','single'),
        ('recurring','recurring')
    )
    # Channel through which the transaction was made.
    TRANSACTION_TYPES = (
        ('callcentre','callcentre'),
        ('cardpresent','cardpresent'),
        ('ecommerce','ecommerce'),
        ('internet', 'internet'),
        ('ivr','ivr'),
        ('mailorder','mailorder'),
        ('telephoneorder','telephoneorder')
    )
    created = models.DateTimeField(auto_now_add=True)
    action = models.CharField(max_length=20, choices=ACTION_TYPES)
    amount = models.DecimalField(decimal_places=2,max_digits=12)
    amount_original = models.DecimalField(decimal_places=2,max_digits=12)
    amount_surcharge = models.DecimalField(default=0,decimal_places=2,max_digits=12)
    cardtype = models.CharField(max_length=2, choices=CARD_TYPES, blank=True, null=True)
    crn1 = models.CharField(max_length=50, help_text='Reference for the order that the transaction was made for')
    response_code = models.CharField(max_length=50)
    response_txt = models.CharField(max_length=128)
    receipt_number = models.CharField(max_length=50)
    processed = models.DateTimeField()
    settlement_date = models.DateField(blank=True, null=True)
    type = models.CharField(max_length=50, choices=TRANSACTION_TYPES)
    # store the txn number from Bpoint
    txn_number = models.CharField(unique=True, max_length=128, help_text='Transaction number used by BPOINT to identify a transaction')
    original_txn = models.ForeignKey('self', to_field='txn_number', blank=True, null=True, help_text='Transaction number stored \
                                                    if current transaction depends on a previous transaction \
                                                    in the case where the action is a refund, reversal or capture')
    class Meta:
        ordering = ('-created',)
    def __unicode__(self):
        return self.txn_number
    @property
    def approved(self):
        """True when the gateway response code signals success ("0")."""
        return self.response_code == "0"
    @property
    def order(self):
        """Look up the oscar Order via the Invoice referenced by crn1."""
        from ledger.payments.models import Invoice
        return Order.objects.get(number=Invoice.objects.get(reference=self.crn1).order_number)
class TempBankCard(object):
    """Lightweight in-memory bank card built from a token and an 'MMYY' expiry."""

    def __init__(self, card_number, expiry_date, ccv=None):
        # Parse the 'MMYY' expiry string into a date (first day of that month).
        parsed = datetime.datetime.strptime(expiry_date, '%m%y')
        self.number = card_number
        self.expiry_date = parsed.date()
        self.ccv = ccv
class BpointToken(models.Model):
    """A stored card: a BPOINT DVToken linked to a user, with masked card details."""
    # Card scheme codes as reported by the gateway.
    CARD_TYPES = (
        ('AX','American Express'),
        ('DC','Diners Club'),
        ('JC','JCB Card'),
        ('MC','MasterCard'),
        ('VC','Visa')
    )
    user = models.ForeignKey(EmailUser, related_name='stored_cards')
    DVToken = models.CharField(max_length=128)
    masked_card = models.CharField(max_length=50)
    expiry_date = models.DateField()
    card_type = models.CharField(max_length=2, choices=CARD_TYPES, blank=True, null=True)
    class Meta:
        unique_together = ('user', 'masked_card','expiry_date','card_type')
    @property
    def last_digits(self):
        """Last four characters of the masked card number."""
        return self.masked_card[-4:]
    @property
    def bankcard(self):
        """Wrap the stored token and 'MMYY' expiry in a TempBankCard for payment calls."""
        return TempBankCard(
            self.DVToken,
            self.expiry_date.strftime("%m%y")
        )
    def delete(self):
        """Record the DVToken in UsedBpointToken before removing this row."""
        UsedBpointToken.objects.create(DVToken=self.DVToken)
        super(BpointToken,self).delete()
class UsedBpointToken(models.Model):
    """Record of a DVToken whose BpointToken was deleted (see BpointToken.delete)."""
    added = models.DateTimeField(auto_now_add=True)
    DVToken = models.CharField(max_length=128)
"ndwigabrian@gmail.com"
] | ndwigabrian@gmail.com |
620633147ca9c1b734abc81c4bb02677d7ee96b3 | 6ef4df3f0ecdfd4b880c49e0fc057a98931f9b2e | /marketsim/gen/_out/observable/Cumulative/_StdDev.py | 95ae75ca2166e6b0d23dd434bec57674b4d5975e | [] | no_license | Courvoisier13/marketsimulator | b8be5e25613d8c4f3ede1c47276cb6ad94637ca8 | 3ab938e2b3c980eaba8fc0dba58a141041125291 | refs/heads/master | 2021-01-18T02:17:54.153078 | 2013-12-19T15:09:32 | 2013-12-19T15:09:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | from marketsim import registry
from marketsim.ops._function import Function
from marketsim import IObservable
from marketsim.gen._out.mathops._Sqrt import Sqrt
from marketsim.gen._out.observable.Cumulative._Var import Var
from marketsim import context
@registry.expose(["Statistics", "StdDev"])
class StdDev(Function[float]):
    """Cumulative standard deviation of *source*: the square root of the
    cumulative variance (see getImpl: Sqrt(Var(source)))."""
    def __init__(self, source = None):
        from marketsim.gen._out._const import const
        # Fall back to a constant observable when no source is supplied.
        self.source = source if source is not None else const()
        self.impl = self.getImpl()
    @property
    def label(self):
        return repr(self)
    _properties = {
        'source' : IObservable
    }
    def __repr__(self):
        return "\\sqrt{\\sigma^2_{cumul}_{%(source)s}}" % self.__dict__
    _internals = ['impl']
    @property
    def attributes(self):
        return {}
    def getImpl(self):
        # The actual computation graph: sqrt of the cumulative variance of source.
        return Sqrt(Var(self.source))
    def bind(self, ctx):
        self._ctx = ctx.clone()
    def reset(self):
        # Rebuild the implementation and re-bind it to the stored context, if any.
        self.impl = self.getImpl()
        ctx = getattr(self, '_ctx', None)
        if ctx: context.bind(self.impl, ctx)
    def __call__(self, *args, **kwargs):
        return self.impl()
| [
"anton.kolotaev@gmail.com"
] | anton.kolotaev@gmail.com |
89bbdcd7dd4f1ba5699d55df1f50ae760994fedd | 45c3624f0fd45167357c37aaf3912d77e83aaffc | /baseApp/migrations/0007_auto_20200112_1937.py | 2f97576659fbd3cffc1de19653ec90f8106d4d61 | [] | no_license | kaustubh619/AllianceDjango | c15b959d3362b4f2e7fec7bb68b09e86d7fc9e1f | 61d33e8b8ee8a8245d1a9227d5f8ff8d39507450 | refs/heads/master | 2020-12-10T14:58:50.785990 | 2020-01-13T17:27:17 | 2020-01-13T17:27:17 | 233,626,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | # Generated by Django 2.2.4 on 2020-01-12 14:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('baseApp', '0006_packagequery'),
]
operations = [
migrations.AlterField(
model_name='packagequery',
name='package',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='baseApp.Packages'),
),
]
| [
"kaustubhkrishna9031@gmail.com"
] | kaustubhkrishna9031@gmail.com |
eae4f825aadcfc2d20c7253b128ee2ce99d0841b | 37e66cf57cdf67d4d910d58e949f35422ddd52bc | /trimesh/scene/scene.py | 80f73067f155538985c7aeeb5d66c8a6f1fb6528 | [
"MIT"
] | permissive | heidtn/trimesh | f62f626c9d10c9f041b9ad1b509427e652a5f485 | 7bb5eb823366155bc6949258e5b6caf734c79fed | refs/heads/master | 2021-09-06T00:42:32.030504 | 2018-02-01T01:27:47 | 2018-02-01T01:27:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,660 | py | import numpy as np
import collections
import copy
from .. import util
from .. import units
from .. import convex
from .. import grouping
from .. import transformations
from .. import bounds as bounds_module
from ..io import gltf
from .transforms import TransformForest
class Scene:
'''
A simple scene graph which can be rendered directly via pyglet/openGL,
or through other endpoints such as a raytracer.
Meshes and lights are added by name, which can then be moved by updating
transform in the transform tree.
'''
def __init__(self,
geometry=None,
base_frame='world',
metadata={}):
# mesh name : Trimesh object
self.geometry = collections.OrderedDict()
# graph structure of instances
self.graph = TransformForest(base_frame=base_frame)
self._cache = util.Cache(id_function=self.md5)
self.add_geometry(geometry)
self.set_camera()
self.metadata = {}
self.metadata.update(metadata)
def add_geometry(self,
geometry,
node_name=None):
'''
Add a geometry to the scene.
If the mesh has multiple transforms defined in its metadata, they will
all be copied into the TransformForest of the current scene automatically.
Parameters
----------
geometry: Trimesh, Path3D, or list of same
node_name: name in the scene graph
Returns
----------
node_name: str, name of node in self.graph
'''
if geometry is None:
return
# if passed a sequence call add_geometry on all elements
if util.is_sequence(geometry):
return [self.add_geometry(i) for i in geometry]
# default values for transforms and name
transforms = np.eye(4).reshape((-1, 4, 4))
geometry_name = 'geometry_' + str(len(self.geometry))
# if object has metadata indicating different transforms or name
# use those values
if hasattr(geometry, 'metadata'):
if 'name' in geometry.metadata:
geometry_name = geometry.metadata['name']
if 'transforms' in geometry.metadata:
transforms = np.asanyarray(geometry.metadata['transforms'])
transforms = transforms.reshape((-1, 4, 4))
# save the geometry reference
self.geometry[geometry_name] = geometry
for i, transform in enumerate(transforms):
# if we haven't been passed a name to set in the graph
# use the geometry name plus an index
if node_name is None:
node_name = geometry_name + '_' + str(i)
self.graph.update(frame_to=node_name,
matrix=transform,
geometry=geometry_name,
geometry_flags={'visible': True})
    def md5(self):
        '''
        MD5 of scene, which will change when meshes or transforms are changed

        Returns
        --------
        hashed: str, MD5 hash of scene
        '''
        # get the MD5 of geometry and graph
        data = [i.md5() for i in self.geometry.values()]
        # fold the graph hash in with the per-geometry hashes
        hashed = util.md5_object(np.append(data, self.graph.md5()))
        return hashed
@property
def is_empty(self):
'''
Does the scene have anything in it.
Returns
----------
is_empty: bool, True if nothing is in the scene
'''
is_empty = len(self.geometry) == 0
return is_empty
    @util.cache_decorator
    def bounds(self):
        '''
        Return the overall bounding box of the scene.

        Returns
        --------
        bounds: (2,3) float points for min, max corner
        '''
        corners = collections.deque()
        for node_name in self.graph.nodes_geometry:
            # access the transform and geometry name for every node
            transform, geometry_name = self.graph[node_name]
            # not all nodes have associated geometry
            if geometry_name is None:
                continue
            # geometry objects have bounds properties, which are (2,3) or (2,2)
            current_bounds = self.geometry[geometry_name].bounds.copy()
            # find the 8 corner vertices of the axis aligned bounding box
            current_corners = bounds_module.corners(current_bounds)
            # transform those corners into where the geometry is located
            corners.extend(transformations.transform_points(current_corners,
                                                            transform))
        # NOTE(review): an empty scene leaves corners empty and min() below
        # will raise — presumably callers guard with is_empty; confirm
        corners = np.array(corners)
        bounds = np.array([corners.min(axis=0),
                           corners.max(axis=0)])
        return bounds
@util.cache_decorator
def extents(self):
'''
Return the axis aligned box size of the current scene.
Returns
----------
extents: (3,) float, bounding box sides length
'''
return np.diff(self.bounds, axis=0).reshape(-1)
@util.cache_decorator
def scale(self):
'''
The approximate scale of the mesh
Returns
-----------
scale: float, the mean of the bounding box edge lengths
'''
scale = (self.extents ** 2).sum() ** .5
return scale
@util.cache_decorator
def centroid(self):
'''
Return the center of the bounding box for the scene.
Returns
--------
centroid: (3) float point for center of bounding box
'''
centroid = np.mean(self.bounds, axis=0)
return centroid
    @util.cache_decorator
    def triangles(self):
        '''
        Return a correctly transformed polygon soup of the current scene.

        Returns
        ----------
        triangles: (n,3,3) float, triangles in space
        '''
        triangles = collections.deque()
        triangles_node = collections.deque()
        for node_name in self.graph.nodes_geometry:
            transform, geometry_name = self.graph[node_name]
            geometry = self.geometry[geometry_name]
            # skip geometry with no triangles (e.g. 2D/3D paths)
            if not hasattr(geometry, 'triangles'):
                continue
            # move the triangles into the node's frame
            triangles.append(transformations.transform_points(
                geometry.triangles.copy().reshape((-1, 3)),
                transform))
            # remember which node every one of this geometry's triangles came from
            triangles_node.append(np.tile(node_name, len(geometry.triangles)))
        # stash the node mapping in the cache as a side effect, for the
        # triangles_node property to pick up
        self._cache['triangles_node'] = np.hstack(triangles_node)
        triangles = np.vstack(triangles).reshape((-1, 3, 3))
        return triangles
    @util.cache_decorator
    def triangles_node(self):
        '''
        Which node of self.graph does each triangle come from.

        Returns
        ---------
        triangles_index: (len(self.triangles),) node name for each triangle
        '''
        # accessing self.triangles populates 'triangles_node' in the
        # cache as a side effect, so the lookup below is guaranteed to hit
        populate = self.triangles
        return self._cache['triangles_node']
@util.cache_decorator
def geometry_identifiers(self):
'''
Look up geometries by identifier MD5
Returns
---------
identifiers: dict, identifier md5: key in self.geometry
'''
identifiers = {mesh.identifier_md5: name
for name, mesh in self.geometry.items()}
return identifiers
    @util.cache_decorator
    def duplicate_nodes(self):
        '''
        Return a sequence of node keys of identical meshes.

        Will combine meshes duplicated by copying in space with different keys in
        self.geometry, as well as meshes repeated by self.nodes.

        Returns
        -----------
        duplicates: (m) sequence of keys to self.nodes that represent
                     identical geometry
        '''
        # if there is no geometry we can have no duplicate nodes
        if len(self.geometry) == 0:
            return []
        # geometry name : md5 of mesh
        # identifier_md5 is a hex string, so parse it to an int for grouping
        mesh_hash = {k: int(m.identifier_md5, 16)
                     for k, m in self.geometry.items()}
        # the name of nodes in the scene graph with geometry
        node_names = np.array(self.graph.nodes_geometry)
        # the geometry names for each node in the same order
        node_geom = np.array([self.graph[i][1] for i in node_names])
        # the mesh md5 for each node in the same order
        node_hash = np.array([mesh_hash[v] for v in node_geom])
        # indexes of identical hashes
        node_groups = grouping.group(node_hash)
        # sequence of node names, where each sublist has identical geometry
        duplicates = [np.sort(node_names[g]).tolist() for g in node_groups]
        return duplicates
    def set_camera(self, angles=None, distance=None, center=None):
        '''
        Add a transform to self.graph for 'camera'

        If arguments are not passed sane defaults will be figured out.

        Parameters
        -----------
        angles:   (3,) float, initial euler angles in radians
        distance: float, distance away camera should be
        center:   (3,) float, point camera should center on
        '''
        # nothing in the scene: nothing sensible to frame
        if len(self.geometry) == 0:
            return
        if center is None:
            center = self.centroid
        if distance is None:
            # for a 60.0 degree horizontal FOV
            distance = ((self.extents.max() / 2) /
                        np.tan(np.radians(60.0) / 2.0))
        if angles is None:
            angles = np.zeros(3)
        translation = np.eye(4)
        translation[0:3, 3] = center
        # offset by a distance set by the model size
        # the FOV is set for the Y axis, we multiply by a lightly
        # padded aspect ratio to make sure the model is in view initially
        translation[2][3] += distance * 1.35
        # rotate about X then Y, both around the requested center point
        transform = np.dot(transformations.rotation_matrix(angles[0],
                                                           [1, 0, 0],
                                                           point=center),
                           transformations.rotation_matrix(angles[1],
                                                           [0, 1, 0],
                                                           point=center))
        transform = np.dot(transform, translation)
        self.graph.update(frame_from='camera',
                          frame_to=self.graph.base_frame,
                          matrix=transform)
    def rezero(self):
        '''
        Move the current scene so that the AABB of the whole scene is centered
        at the origin.

        Does this by changing the base frame to a new, offset base frame.
        '''
        if self.is_empty or np.allclose(self.centroid, 0.0):
            # early exit since what we want already exists
            return
        # the transformation to move the overall scene to the AABB centroid
        matrix = np.eye(4)
        matrix[:3, 3] = -self.centroid
        # we are going to change the base frame
        # new frame name: old base frame with an '_I' suffix
        new_base = str(self.graph.base_frame) + '_I'
        self.graph.update(frame_from=new_base,
                          frame_to=self.graph.base_frame,
                          matrix=matrix)
        self.graph.base_frame = new_base
def dump(self):
'''
Append all meshes in scene to a list of meshes.
Returns
----------
dumped: (n,) list, of Trimesh objects transformed to their
location the scene.graph
'''
result = collections.deque()
for node_name in self.graph.nodes_geometry:
transform, geometry_name = self.graph[node_name]
current = self.geometry[geometry_name].copy()
current.apply_transform(transform)
result.append(current)
return np.array(result)
@util.cache_decorator
def convex_hull(self):
'''
The convex hull of the whole scene
Returns
---------
hull: Trimesh object, convex hull of all meshes in scene
'''
points = util.vstack_empty([m.vertices for m in self.dump()])
hull = convex.convex_hull(points)
return hull
    @util.cache_decorator
    def bounding_box(self):
        '''
        An axis aligned bounding box for the current scene.

        Returns
        ----------
        aabb: trimesh.primitives.Box object with transform and extents defined
              to represent the axis aligned bounding box of the scene
        '''
        # local import to avoid a circular dependency at module load
        from .. import primitives
        # the box is centered on the scene AABB center
        center = self.bounds.mean(axis=0)
        aabb = primitives.Box(
            transform=transformations.translation_matrix(center),
            extents=self.extents,
            mutable=False)
        return aabb
    @util.cache_decorator
    def bounding_box_oriented(self):
        '''
        An oriented bounding box for the current mesh.

        Returns
        ---------
        obb: trimesh.primitives.Box object with transform and extents defined
             to represent the minimum volume oriented bounding box of the mesh
        '''
        # local import to avoid a circular dependency at module load
        from .. import primitives
        # oriented_bounds returns the transform TO the origin, so the
        # box's placement is the inverse of that transform
        to_origin, extents = bounds_module.oriented_bounds(self)
        obb = primitives.Box(transform=np.linalg.inv(to_origin),
                             extents=extents,
                             mutable=False)
        return obb
    def export(self, file_type=None):
        '''
        Export a snapshot of the current scene.

        Parameters
        ----------
        file_type: what encoding to use for meshes
                   ie: dict, dict64, stl

        Returns
        ----------
        export: dict with keys:
                meshes: list of meshes, encoded as per file_type
                transforms: edge list of transforms, eg:
                           ((u, v, {'matrix' : np.eye(4)}))
        '''
        # the GLTF exporters serialize the whole scene themselves
        if file_type == 'gltf':
            return gltf.export_gltf(self)
        elif file_type == 'glb':
            return gltf.export_glb(self)
        export = {'graph': self.graph.to_edgelist(),
                  'geometry': {},
                  'scene_cache': {'bounds': self.bounds.tolist(),
                                  'extents': self.extents.tolist(),
                                  'centroid': self.centroid.tolist(),
                                  'scale': self.scale}}
        if file_type is None:
            file_type = {'Trimesh': 'ply',
                         'Path2D': 'dxf'}
        # if the mesh has an export method use it, otherwise put the mesh
        # itself into the export object
        for geometry_name, geometry in self.geometry.items():
            if hasattr(geometry, 'export'):
                if isinstance(file_type, dict):
                    # case where we have export types that are different
                    # for different classes of objects.
                    # NOTE(review): if no query_class matches, export_type
                    # keeps its value from a previous iteration (or is
                    # unbound on the first) — confirm intended behavior
                    for query_class, query_format in file_type.items():
                        if util.is_instance_named(geometry, query_class):
                            export_type = query_format
                            break
                else:
                    # if file_type is not a dict, try to export everything in the
                    # scene as that value (probably a single string, like
                    # 'ply')
                    export_type = file_type
                exported = {'data': geometry.export(file_type=export_type),
                            'file_type': export_type}
                export['geometry'][geometry_name] = exported
            else:
                # case where mesh object doesn't have exporter
                # might be that someone replaced the mesh with a URL
                export['geometry'][geometry_name] = geometry
        return export
    def save_image(self, resolution=(1024, 768), **kwargs):
        '''
        Get a PNG image of a scene.

        Parameters
        -----------
        resolution: (2,) int, resolution to render image at
        **kwargs:   passed to SceneViewer constructor

        Returns
        -----------
        png: bytes, render of scene in PNG form
        '''
        # deferred import: rendering needs pyglet/OpenGL, which may
        # not be installed
        from .viewer import render_scene
        png = render_scene(scene=self,
                           resolution=resolution,
                           **kwargs)
        return png
    def convert_units(self, desired, guess=False):
        '''
        If geometry has units defined, convert them to new units.

        Returns a new scene with geometries and transforms scaled.

        Parameters
        ----------
        desired: str, target unit system. EG 'inches', 'mm', etc
        guess:   bool, if True and units are not defined, guess them
                 from the overall scene scale
        '''
        # if there is no geometry do nothing
        if len(self.geometry) == 0:
            return self
        existing = [i.units for i in self.geometry.values()]
        if any(existing[0] != e for e in existing):
            # if all of our geometry doesn't have the same units already
            # this function will only do some hot nonsense
            raise ValueError('Models in scene have inconsistent units!')
        current = existing[0]
        if current is None:
            if guess:
                current = units.unit_guess(self.scale)
            else:
                raise ValueError('units not defined and not allowed to guess!')
        # exit early if our current units are the same as desired units
        if current == desired:
            return self
        # numeric factor taking current units to desired units
        scale = units.unit_conversion(current=current,
                                      desired=desired)
        result = self.scaled(scale=scale)
        # stamp the new unit system onto every geometry of the copy
        for geometry in result.geometry.values():
            geometry.units = desired
        return result
def explode(self, vector=None, origin=None):
'''
Explode a scene around a point and vector.
Parameters
-----------
vector: (3,) float, or float, explode in a direction or spherically
origin: (3,) float, point to explode around
'''
if origin is None:
origin = self.centroid
if vector is None:
vector = self.scale / 25.0
vector = np.asanyarray(vector, dtype=np.float64)
origin = np.asanyarray(origin, dtype=np.float64)
centroids = collections.deque()
for node_name in self.graph.nodes_geometry:
transform, geometry_name = self.graph[node_name]
centroid = self.geometry[geometry_name].centroid
# transform centroid into nodes location
centroid = np.dot(transform,
np.append(centroid, 1))[:3]
if vector.shape == ():
# case where our vector is a single number
offset = (centroid - origin) * vector
elif np.shape(vector) == (3,):
projected = np.dot(vector, (centroid - origin))
offset = vector * projected
else:
raise ValueError('explode vector wrong shape!')
transform[0:3, 3] += offset
self.graph[node_name] = transform
def scaled(self, scale):
'''
Return a copy of the current scene, with meshes and scene graph
transforms scaled to the requested factor.
Parameters
-----------
scale: float, factor to scale meshes and transforms by
'''
scale = float(scale)
scale_matrix = np.eye(4) * scale
transforms = np.array([self.graph[i][0]
for i in self.graph.nodes_geometry])
geometries = np.array([self.graph[i][1]
for i in self.graph.nodes_geometry])
result = self.copy()
result.graph.clear()
for group in grouping.group(geometries):
geometry = geometries[group[0]]
original = transforms[group[0]]
new_geom = np.dot(scale_matrix, original)
inv_geom = np.linalg.inv(new_geom)
result.geometry[geometry].apply_transform(new_geom)
for node, t in zip(self.graph.nodes_geometry[group],
transforms[group]):
transform = util.multi_dot([scale_matrix,
t,
np.linalg.inv(new_geom)])
transform[:3, 3] *= scale
result.graph.update(frame_to=node,
matrix=transform,
geometry=geometry)
return result
def copy(self):
'''
Return a deep copy of the current scene
Returns
----------
copied: trimesh.Scene, copy of the current scene
'''
copied = copy.deepcopy(self)
return copied
    def show(self, **kwargs):
        '''
        Open a pyglet window to preview the current scene

        Parameters
        -----------
        smooth: bool, turn on or off automatic smooth shading
        **kwargs: passed through to the SceneViewer constructor
        '''
        # this imports pyglet, and will raise an ImportError
        # if pyglet is not available
        from .viewer import SceneViewer
        SceneViewer(self, **kwargs)
def split_scene(geometry):
    '''
    Given a possible sequence of geometries, decompose them into parts.

    Parameters
    ----------
    geometry: splittable

    Returns
    ---------
    scene: trimesh.Scene
    '''
    # already a scene: hand it straight back
    if util.is_instance_named(geometry, 'Scene'):
        return geometry
    # a sequence of geometries: wrap as-is
    if util.is_sequence(geometry):
        return Scene(geometry)
    # a single geometry: split it into its parts
    parts = collections.deque()
    for current in util.make_sequence(geometry):
        parts.extend(current.split())
    return Scene(parts)
| [
"mik3dh@gmail.com"
] | mik3dh@gmail.com |
9bbca24acbdb33b693c0cfe366ca75c6eda5c2cb | ecd2aa3d12a5375498c88cfaf540e6e601b613b3 | /Facebook/Pro102. Binary Tree Level Order Traversal.py | f3281376de4d813a8b2c1a50aaab5967a6e63165 | [] | no_license | YoyinZyc/Leetcode_Python | abd5d90f874af5cd05dbed87f76885a1ca480173 | 9eb44afa4233fdedc2e5c72be0fdf54b25d1c45c | refs/heads/master | 2021-09-05T17:08:31.937689 | 2018-01-29T21:57:44 | 2018-01-29T21:57:44 | 103,157,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from collections import deque
class Solution(object):
    def levelOrder(self, root):
        """
        Breadth-first traversal of a binary tree, returning node
        values grouped by depth.

        Replaces the original manual two-deque swap with the idiomatic
        level-by-level frontier; behavior is identical (empty list for
        an empty tree).

        :type root: TreeNode
        :rtype: List[List[int]]
        """
        ans = []
        # current BFS frontier; empty when root is None
        level = [root] if root is not None else []
        while level:
            # record this depth's values
            ans.append([node.val for node in level])
            # next frontier: all non-None children, left to right
            level = [child
                     for node in level
                     for child in (node.left, node.right)
                     if child is not None]
        return ans
"yoyinzyc@gmail.com"
] | yoyinzyc@gmail.com |
a5664b4f4645595f91b6599c4663995f72bf40e4 | 553b34a101c54090e68f540d96369ac7d5774d95 | /python/python_koans/python2/koans/about_class_attributes.py | 7764eabd335da8b297a8638233b84f4d0ea2f652 | [
"MIT"
] | permissive | topliceanu/learn | fd124e1885b5c0bfea8587510b5eab79da629099 | 1c5b1433c3d6bfd834df35dee08607fcbdd9f4e3 | refs/heads/master | 2022-07-16T19:50:40.939933 | 2022-06-12T15:40:20 | 2022-06-12T15:40:20 | 21,684,180 | 26 | 12 | MIT | 2020-03-26T20:51:35 | 2014-07-10T07:22:17 | JavaScript | UTF-8 | Python | false | false | 5,195 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutClassMethods in the Ruby Koans
#
from runner.koan import *
class AboutClassAttributes(Koan):
    """Koan exercises about classes, instances and attribute lookup.

    NOTE(review): filled-in answers such as len(dir(...)) == 18 and the
    ex[0] exception indexing are specific to the Python 2 interpreter
    these koans were solved against.
    """
    class Dog(object):
        pass
    def test_new_style_class_objects_are_objects(self):
        # Note: Old style class instances are not objects but they are being
        # phased out in Python 3.
        fido = self.Dog()
        self.assertEqual(True, isinstance(fido, object))
    def test_classes_are_types(self):
        self.assertEqual(True, self.Dog.__class__ == type)
    def test_classes_are_objects_too(self):
        self.assertEqual(True, issubclass(self.Dog, object))
    def test_objects_have_methods(self):
        fido = self.Dog()
        self.assertEqual(18, len(dir(fido)))
    def test_classes_have_methods(self):
        self.assertEqual(18, len(dir(self.Dog)))
    def test_creating_objects_without_defining_a_class(self):
        singularity = object()
        self.assertEqual(15, len(dir(singularity)))
    def test_defining_attributes_on_individual_objects(self):
        fido = self.Dog()
        fido.legs = 4
        self.assertEqual(4, fido.legs)
    def test_defining_functions_on_individual_objects(self):
        fido = self.Dog()
        fido.wag = lambda: 'fidos wag'
        self.assertEqual('fidos wag', fido.wag())
    def test_other_objects_are_not_affected_by_these_singleton_functions(self):
        fido = self.Dog()
        rover = self.Dog()
        def wag():
            return 'fidos wag'
        fido.wag = wag
        try:
            rover.wag()
        except Exception as ex:
            self.assertMatch("'Dog' object has no attribute 'wag'", ex[0])
    # ------------------------------------------------------------------
    class Dog2(object):
        def wag(self):
            return 'instance wag'
        def bark(self):
            return "instance bark"
        def growl(self):
            return "instance growl"
        # the static/class methods below shadow the instance methods
        # of the same name defined above
        @staticmethod
        def bark():
            return "staticmethod bark, arg: None"
        @classmethod
        def growl(cls):
            return "classmethod growl, arg: cls=" + cls.__name__
    def test_like_all_objects_classes_can_have_singleton_methods(self):
        self.assertMatch("classmethod growl, arg: cls=Dog2", self.Dog2.growl())
    def test_classmethods_are_not_independent_of_instance_methods(self):
        fido = self.Dog2()
        self.assertMatch("classmethod growl, arg: cls=Dog2", fido.growl())
        self.assertMatch("classmethod growl, arg: cls=Dog2", self.Dog2.growl())
    def test_staticmethods_are_unbound_functions_housed_in_a_class(self):
        self.assertMatch("staticmethod bark, arg: None", self.Dog2.bark())
    def test_staticmethods_also_overshadow_instance_methods(self):
        fido = self.Dog2()
        self.assertMatch("staticmethod bark, arg: None", fido.bark())
    # ------------------------------------------------------------------
    class Dog3(object):
        def __init__(self):
            self._name = None
        def get_name_from_instance(self):
            return self._name
        def set_name_from_instance(self, name):
            self._name = name
        @classmethod
        def get_name(cls):
            return cls._name
        @classmethod
        def set_name(cls, name):
            cls._name = name
        # two properties: one backed by classmethods, one by instance methods
        name = property(get_name, set_name)
        name_from_instance = property(
            get_name_from_instance, set_name_from_instance)
    def test_classmethods_can_not_be_used_as_properties(self):
        fido = self.Dog3()
        try:
            fido.name = "Fido"
        except Exception as ex:
            self.assertMatch("'classmethod' object is not callable", ex[0])
    def test_classes_and_instances_do_not_share_instance_attributes(self):
        fido = self.Dog3()
        fido.set_name_from_instance("Fido")
        fido.set_name("Rover")
        self.assertEqual('Fido', fido.get_name_from_instance())
        self.assertEqual('Rover', self.Dog3.get_name())
    def test_classes_and_instances_do_share_class_attributes(self):
        fido = self.Dog3()
        fido.set_name("Fido")
        self.assertEqual('Fido', fido.get_name())
        self.assertEqual('Fido', self.Dog3.get_name())
    # ------------------------------------------------------------------
    class Dog4(object):
        def a_class_method(cls):
            return 'dogs class method'
        def a_static_method():
            return 'dogs static method'
        # pre-decorator style: wrap the functions after definition
        a_class_method = classmethod(a_class_method)
        a_static_method = staticmethod(a_static_method)
    def test_you_can_define_class_methods_without_using_a_decorator(self):
        self.assertEqual('dogs class method', self.Dog4.a_class_method())
    def test_you_can_define_static_methods_without_using_a_decorator(self):
        self.assertEqual('dogs static method', self.Dog4.a_static_method())
    # ------------------------------------------------------------------
    def test_you_can_explicitly_call_class_methods_from_instance_methods(self):
        fido = self.Dog4()
        self.assertEqual('dogs class method', fido.__class__.a_class_method())
"alexandru.topliceanu@gmail.com"
] | alexandru.topliceanu@gmail.com |
82c9712218f271eea11ef452affd3917de8e6229 | 52a32a93942b7923b7c0c6ca5a4d5930bbba384b | /unittests/tools/test_mobsfscan_parser.py | 76e805852a9ecf97e3cb18161944979530a92095 | [
"MIT-open-group",
"GCC-exception-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LGPL-3.0-only",
"GPL-3.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-3.0-or-later",
"IJG",
"Zlib",
"LicenseRef-scancode-proprietary-license",
"PSF-2.0",
"LicenseRef-scancode-python-cwi... | permissive | DefectDojo/django-DefectDojo | 43bfb1c728451335661dadc741be732a50cd2a12 | b98093dcb966ffe972f8719337de2209bf3989ec | refs/heads/master | 2023-08-21T13:42:07.238370 | 2023-08-14T18:00:34 | 2023-08-14T18:00:34 | 31,028,375 | 2,719 | 1,666 | BSD-3-Clause | 2023-09-14T19:46:49 | 2015-02-19T17:53:47 | HTML | UTF-8 | Python | false | false | 6,947 | py | from ..dojo_test_case import DojoTestCase
from dojo.tools.mobsfscan.parser import MobsfscanParser
from dojo.models import Test
class TestMobsfscanParser(DojoTestCase):
    """Tests for the mobsfscan report parser.

    The two 'many findings' fixtures (normal and lower-cased CWE keys)
    must parse to identical findings, so the shared expectations live in
    one table checked by a single helper instead of two near-identical
    copy-pasted test bodies.
    """

    # (title, severity, cwe) for the 7 findings both fixtures contain,
    # in parser output order
    EXPECTED_MANY = [
        ("android_certificate_transparency", "Low", 295),
        ("android_kotlin_hardcoded", "Medium", 798),
        ("android_prevent_screenshot", "Low", 200),
        ("android_root_detection", "Low", 919),
        ("android_safetynet", "Low", 353),
        ("android_ssl_pinning", "Low", 295),
        ("android_tapjacking", "Low", 200),
    ]

    def _parse(self, path):
        # parse a scan fixture, closing the file even on failure
        with open(path) as testfile:
            parser = MobsfscanParser()
            return parser.get_findings(testfile, Test())

    def _check_many_findings(self, findings):
        # assertions shared by both 'many findings' fixtures
        self.assertEqual(7, len(findings))
        for i, (title, severity, cwe) in enumerate(self.EXPECTED_MANY):
            with self.subTest(i=i):
                finding = findings[i]
                self.assertEqual(title, finding.title)
                self.assertEqual(severity, finding.severity)
                self.assertEqual(1, finding.nb_occurences)
                self.assertIsNotNone(finding.description)
                self.assertEqual(cwe, finding.cwe)
                self.assertIsNotNone(finding.references)
                if i == 1:
                    # only the hardcoded-secret finding carries a location
                    self.assertEqual("app/src/main/java/com/routes/domain/analytics/event/Signatures.kt", finding.file_path)
                    self.assertEqual(10, finding.line)

    def test_parse_no_findings(self):
        findings = self._parse("unittests/scans/mobsfscan/no_findings.json")
        self.assertEqual(0, len(findings))

    def test_parse_many_findings(self):
        self._check_many_findings(
            self._parse("unittests/scans/mobsfscan/many_findings.json"))

    def test_parse_many_findings_cwe_lower(self):
        self._check_many_findings(
            self._parse("unittests/scans/mobsfscan/many_findings_cwe_lower.json"))
| [
"noreply@github.com"
] | DefectDojo.noreply@github.com |
a7b04ac6451fd8e623987f821b95ad4564fed2a5 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /travelport/models/booking_end_req_session_activity.py | ed703010be55ccae9fe7bcbab08770bacac0b6cd | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 238 | py | from __future__ import annotations
from enum import Enum
__NAMESPACE__ = "http://www.travelport.com/schema/sharedBooking_v52_0"
class BookingEndReqSessionActivity(Enum):
    """Schema-generated enum: action to take on the booking session at end."""
    END = "End"
    END_QUEUE = "EndQueue"
    IGNORE = "Ignore"
| [
"chris@komposta.net"
] | chris@komposta.net |
66940ed8359ce0693b72954371aec6f4d8f3b6d7 | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-2/dn9 - minobot/M-17105-1054.py | ba11db24d90c1085ae689b63e7fddc49161b02db | [] | no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,785 | py | def premik(ukaz, x, y, smer):
smeri = "NESW"
premiki = [(0, -1), (1, 0), (0, 1), (-1, 0)]
ismer = smeri.index(smer)
if ukaz == "R":
smer = smeri[(ismer + 1) % 4]
elif ukaz == "L":
smer = smeri[(ismer - 1) % 4]
else:
dx, dy = premiki[ismer]
x += dx * ukaz
y += dy * ukaz
return x, y, smer
def izvedi(ime_datoteke):
    """Run commands from the named file and return the list of all
    states (x, y, smer), starting from (0, 0, 'N')."""
    vrtenja = {"DESNO": "R", "LEVO": "L"}
    stanja = [(0, 0, 'N')]
    with open(ime_datoteke) as datoteka:
        for vrstica in datoteka:
            deli = vrstica.split()
            x, y, smer = stanja[-1]
            # one word: a turn; two words: "NAPREJ n", a forward move
            if len(deli) == 1:
                ukaz = vrtenja[deli[0]]
            else:
                ukaz = int(deli[1])
            stanja.append(premik(ukaz, x, y, smer))
    return stanja
def opisi_stanje(x, y, smer):
    """Return the state drawn as 'x:y arrow' with fixed-width fields:
    x right-aligned to 3, y left-aligned to 3, then the direction arrow."""
    puscica = "^>v<"["NESW".index(smer)]
    return "{:3}:{:<3} {}".format(x, y, puscica)
def prevedi(ime_vhoda, ime_izhoda):
    """Translate a command file into a file of formatted robot states.

    Executes `ime_vhoda` with izvedi() and writes one opisi_stanje()
    line per state to `ime_izhoda`.  Returns None.
    """
    with open(ime_izhoda, 'w') as izhod:
        for x, y, smer in izvedi(ime_vhoda):
            izhod.write(opisi_stanje(x, y, smer) + "\n")
def opisi_stanje_2(x, y, smer):
    """Render a state as '<glyph> (x:y)' with '(x' right-aligned to 5."""
    # Reuse the glyph that opisi_stanje puts after the coordinates.
    glif = opisi_stanje(x, y, smer).split()[1]
    levi = "(" + str(x)
    desni = str(y) + ")"
    return "{}{:>5}:{}".format(glif, levi, desni)
import unittest
class TestObvezna(unittest.TestCase):
    """Mandatory-part tests for izvedi, opisi_stanje and prevedi.

    NOTE(review): the expected strings below are re-aligned to match
    opisi_stanje's fixed-width format ("{0:3}:{1:<3} {2}"); the
    checked-in copy had its whitespace runs collapsed, which made the
    string assertions impossible to satisfy.
    """

    def test_branje(self):
        # izvedi returns the full trace of (x, y, heading) states,
        # starting with the initial (0, 0, 'N').  Requires the fixture
        # files primer.txt and ukazi.txt next to the test.
        self.assertEqual(
            izvedi("primer.txt"),
            [(0, 0, 'N'), (0, 0, 'E'), (12, 0, 'E'), (12, 0, 'S'), (12, 2, 'S'),
             (12, 2, 'E'), (15, 2, 'E'), (15, 2, 'N'), (15, 2, 'W')]
        )
        self.assertEqual(
            izvedi("ukazi.txt"),
            [(0, 0, 'N'), (0, 0, 'E'), (1, 0, 'E'), (1, 0, 'S'), (1, 0, 'W'),
             (0, 0, 'W'), (0, 0, 'S'), (0, 0, 'E'), (1, 0, 'E'), (1, 0, 'S'),
             (1, 3, 'S'), (1, 3, 'E'), (2, 3, 'E'), (2, 3, 'S'), (2, 3, 'W')]
        )

    def test_opisi_stanje(self):
        # x right-aligned to width 3, y left-aligned to width 3, then
        # a space and the heading glyph.
        self.assertEqual(opisi_stanje(0, 12, "N"), "  0:12  ^")
        self.assertEqual(opisi_stanje(111, 0, "E"), "111:0   >")
        self.assertEqual(opisi_stanje(-2, 111, "S"), " -2:111 v")
        self.assertEqual(opisi_stanje(0, 0, "W"), "  0:0   <")

    def test_prevedi(self):
        from random import randint
        import os
        ime = "izhod{:05}.txt".format(randint(0, 99999))
        try:
            # prevedi returns None and writes one formatted state per line.
            self.assertIsNone(prevedi("primer.txt", ime))
            self.assertEqual(open(ime).read().rstrip(), """  0:0   ^
  0:0   >
 12:0   >
 12:0   v
 12:2   v
 12:2   >
 15:2   >
 15:2   ^
 15:2   <""")
            self.assertIsNone(prevedi("ukazi.txt", ime))
            self.assertEqual(open(ime).read().rstrip(), """  0:0   ^
  0:0   >
  1:0   >
  1:0   v
  1:0   <
  0:0   <
  0:0   v
  0:0   >
  1:0   >
  1:0   v
  1:3   v
  1:3   >
  2:3   >
  2:3   v
  2:3   <""")
        finally:
            os.remove(ime)
        vime = "vhod{:05}.txt".format(randint(0, 99999))
        open(vime, "wt").write("NAPREJ 23\nLEVO\nNAPREJ 17\n")
        try:
            self.assertIsNone(prevedi(vime, ime))
            self.assertEqual(open(ime).read().rstrip(), """  0:0   ^
  0:-23 ^
  0:-23 <
-17:-23 <""")
        finally:
            os.remove(ime)
            os.remove(vime)
class TestDodatna(unittest.TestCase):
    """Bonus-part tests for opisi_stanje_2.

    NOTE(review): expected strings re-aligned to the
    "{stanje[1]}{xk:>5}:{yk}" format (glyph first, then "(x"
    right-aligned to width 5, ":", "y)"); the checked-in copy had its
    whitespace runs collapsed.
    """

    def test_opisi_stanje(self):
        self.assertEqual(opisi_stanje_2(0, 12, "N"), "^   (0:12)")
        self.assertEqual(opisi_stanje_2(111, 0, "E"), "> (111:0)")
        self.assertEqual(opisi_stanje_2(-2, 111, "S"), "v  (-2:111)")
        self.assertEqual(opisi_stanje_2(0, 0, "W"), "<   (0:0)")
if __name__ == "__main__":
    # Run the unittest suite when the file is executed directly.
    unittest.main()
| [
"benjamin.fele@gmail.com"
] | benjamin.fele@gmail.com |
39bf297a2b22b2f1e698994ab979b7182ee0c3ef | 965fc2f0e22f0d0a14ac2b65fb7ae1a94371c9d1 | /harvester/tests/integration/test_molecularmatch_trials_convert.py | b6c4f52ad36535fea889a4722dc066bbf2686458 | [] | no_license | ohsu-comp-bio/g2p-aggregator | 9a2ca19612905f7d7aca950aa81cb7a1d83dd56c | 2a0d57c31bdad67b759322407b2c9c91a59c615b | refs/heads/v0.12 | 2022-12-11T14:07:34.994534 | 2019-06-25T21:13:27 | 2019-06-25T21:13:27 | 88,804,628 | 46 | 16 | null | 2022-08-23T17:12:16 | 2017-04-20T00:51:34 | HTML | UTF-8 | Python | false | false | 36,958 | py | import molecularmatch_trials
import json
from collections import namedtuple
import evidence_label as el
EVIDENCE = json.loads("""
{"status": "Recruiting", "startDate": "June 2016", "title": "Hyperpolarized Xenon-129 Functional Magnetic Resonance Imaging of Healthy Volunteers and Participants With Alzheimer's Disease", "molecularAlterations": [], "_score": -7, "interventions": [{"intervention_name": "Traditional Proton fMRI", "description": "Conventional proton fMRI will be performed. During the functional imaging acquisitions, the participant will be asked by the research team to perform simple tasks. These tasks will be completed while the participant breathes air (normal breathing).", "arm_group_label": ["Healthy Volunteers", "Alzheimer's Disease Participants"], "intervention_type": "Other"}, {"intervention_name": "Hyperpolarized Xenon-129 fMRI", "other_name": "HP 129Xe fMRI", "description": "HP 129Xe fMRI data will be acquired from all participants. Hyperpolarized xenon (NeuroXene) is expected to produce images that provide more clinically relevant information than traditional proton scans. Simple tasks will be performed by participants while breathing NeuroXene according to several inhalation procedures.", "arm_group_label": ["Healthy Volunteers", "Alzheimer's Disease Participants"], "intervention_type": "Other"}, {"intervention_name": "Hyperpolarized Xenon-129", "other_name": "NeuroXene", "description": "Participants will be asked to inhale specified amounts of NeuroXene according to several inhalation procedures. NeuroXene is the trade name for hyperpolarization of xenon-129 balanced with oxygen and nitrogen using a Xemed LLC polarizer. Hyperpolarization does not change the chemical or physical properties of xenon gas.", "arm_group_label": ["Healthy Volunteers", "Alzheimer's Disease Participants"], "intervention_type": "Drug"}, {"intervention_name": "1H-129Xe Dual-Tuned Quadrature Head Coil", "description": "A 1H-129Xe dual-tuned quadrature head coil (Clinical MR Solutions, LLC) will be used in this study. 
The RF coil will be used to acquire MRI images of the human brain after inhalation of hyperpolarized xenon-129 gas, and permits the acquisition of both conventional proton and HP xenon gas images.", "arm_group_label": ["Healthy Volunteers", "Alzheimer's Disease Participants"], "intervention_type": "Device"}], "locations": [{"status": "Recruiting", "city": "Thunder Bay", "last_name": "Mitchell S Albert, PhD", "phone_backup": "807-684-6958", "zip": "P7B 6V4", "country": "Canada", "last_name_backup": "Jennifer Plata, HBSc", "email": "albertmi@tbh.net", "phone": "807-684-7270", "state": "Ontario", "email_backup": "plataj@tbh.net", "geo": {"lat": 48.416, "lon": -89.267}, "id": "thunderbayregionalhealthsciencescentre_thunderbay_ontario_ca_p7b6v4", "name": "Thunder Bay Regional Health Sciences Centre"}], "briefTitle": "HP Xenon-129 fMRI of Healthy Volunteers and Participants With Alzheimer's Disease", "overallContact": {"phone": "807-684-7270", "last_name": "Mitchell Albert, Ph.D.", "email": "albertmi@tbh.net"}, "phase": "Early Phase 1", "tags": [{"facet": "DRUG", "compositeKey": "Oxygen compressedDRUGinclude", "suppress": false, "filterType": "include", "term": "Oxygen compressed", "custom": false, "priority": 1, "alias": "Oxygen compressed"}, {"facet": "DRUG", "compositeKey": "NitrogenDRUGinclude", "suppress": false, "filterType": "include", "term": "Nitrogen", "custom": false, "priority": 1, "alias": "Nirtogen"}, {"facet": "DRUG", "compositeKey": "Xenon xe-133DRUGinclude", "suppress": false, "filterType": "include", "term": "Xenon xe-133", "custom": false, "priority": 1, "alias": "Xenon xe-133"}, {"facet": "DRUGCLASS", "compositeKey": "Inhalation Diagnostic AgentDRUGCLASSinclude", "suppress": false, "generatedBy": "DRUG", "filterType": "include", "term": "Inhalation Diagnostic Agent", "custom": false, "priority": 0, "alias": "Inhalation Diagnostic Agent", "generatedByTerm": "Xenon xe-133"}, {"facet": "DRUGCLASS", "compositeKey": "Radiopharmaceutical 
ActivityDRUGCLASSinclude", "suppress": false, "generatedBy": "DRUG", "filterType": "include", "term": "Radiopharmaceutical Activity", "custom": false, "priority": 0, "alias": "Radiopharmaceutical Activity", "generatedByTerm": "Xenon xe-133"}, {"facet": "DRUG", "compositeKey": "NirtogenDRUGinclude", "suppress": false, "filterType": "include", "term": "Nirtogen", "custom": false, "priority": 1, "alias": "Nirtogen"}, {"facet": "CONDITION", "compositeKey": "DementiaCONDITIONexclude", "suppress": false, "filterType": "exclude", "term": "Dementia", "custom": false, "priority": 1, "alias": "Dementia"}, {"facet": "CONDITION", "compositeKey": "Infective disorderCONDITIONexclude", "suppress": false, "filterType": "exclude", "term": "Infective disorder", "custom": false, "priority": 1, "alias": "Infection"}, {"facet": "PHASE", "compositeKey": "Phase 1PHASEinclude", "suppress": false, "filterType": "include", "term": "Phase 1", "custom": false, "priority": 1, "alias": "Phase 1"}, {"facet": "CONDITION", "compositeKey": "AD - Alzheimer's diseaseCONDITIONinclude", "suppress": false, "filterType": "include", "term": "AD - Alzheimer's disease", "custom": false, "priority": 1, "alias": "AD - Alzheimer's disease"}, {"facet": "SITE", "compositeKey": "CerebralSITEinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Cerebral", "custom": false, "priority": 0, "alias": "Cerebral", "generatedByTerm": "AD - Alzheimer's disease"}, {"facet": "FINDING", "compositeKey": "Pregnancy confirmedFINDINGexclude", "suppress": false, "filterType": "exclude", "term": "Pregnancy confirmed", "custom": false, "priority": 1, "alias": "Pregnant"}, {"facet": "GENE", "compositeKey": "HPGENEinclude", "suppress": false, "filterType": "include", "term": "HP", "custom": false, "priority": 1, "alias": "HP"}, {"facet": "GENE", "compositeKey": "PTPRFGENEinclude", "suppress": false, "filterType": "include", "term": "PTPRF", "custom": false, "priority": 2, "alias": "PTPRF"}, 
{"facet": "STATUS", "compositeKey": "RecruitingSTATUSinclude", "suppress": false, "filterType": "include", "term": "Recruiting", "custom": false, "priority": 1, "alias": "Recruiting"}, {"facet": "TRIALTYPE", "compositeKey": "DiagnosticTRIALTYPEinclude", "suppress": false, "filterType": "include", "term": "Diagnostic", "custom": false, "priority": 1, "alias": "Diagnostic"}, {"facet": "TRIALTYPE", "compositeKey": "InterventionalTRIALTYPEinclude", "suppress": false, "filterType": "include", "term": "Interventional", "custom": false, "priority": 1, "alias": "Interventional"}], "id": "NCT02638519", "studyType": "Interventional"}
""") # NOQA
PHENOTYPE = json.loads("""
{"status": "Recruiting", "startDate": "April 2016", "interventions": [{"intervention_name": "Trabectedin", "arm_group_label": "Trabectedin", "other_name": "Yondelis", "intervention_type": "Drug"}, {"intervention_name": "Pegylated Liposomal Doxorubicin", "arm_group_label": "Standard Treatment", "intervention_type": "Drug"}, {"intervention_name": "Topotecan", "arm_group_label": "Standard Treatment", "intervention_type": "Drug"}, {"intervention_name": "Gemcitabine", "arm_group_label": "Standard Treatment", "intervention_type": "Drug"}, {"intervention_name": "Weekly Paclitaxel", "arm_group_label": "Standard Treatment", "intervention_type": "Drug"}, {"intervention_name": "Carboplatin", "arm_group_label": "Standard Treatment", "intervention_type": "Drug"}], "molecularAlterations": [], "_score": 24, "title": "Randomized Phase III Trial on Trabectedin (ET-743) vs Clinician's Choice Chemotherapy in Recurrent Ovarian, Primary Peritoneal or Fallopian Tube Cancers of BRCA Mutated or BRCAness Phenotype patients_MITO-23", "locations": [{"status": "Recruiting", "city": "Milan", "last_name": "Domenica Lorusso, MD", "phone_backup": "+390223903818", "zip": "20133", "created": "2016-09-18T00:42:50.831Z", "_validMessage": "geocoder failed", "country": "Italy", "last_name_backup": "Elisa Grassi", "lastUpdated": "2017-12-29T12:51:07.590Z", "email": "domenica.lorusso@istitutotumori.mi.it", "phone": "+390223903697", "state": "", "failedGeocode": true, "email_backup": "elisa.grassi@istitutotumori.mi.it", "id": "domenicalorusso_milan_it_20133", "_valid": false, "name": "Domenica Lorusso"}], "briefTitle": "Trial on Trabectedin (ET-743) vs Clinician's Choice Chemotherapy in Recurrent Ovarian, Primary Peritoneal or Fallopian Tube Cancers of BRCA Mutated or BRCAness Phenotype Patients _MITO-23", "overallContact": {"phone": "+390223903697", "last_name": "Domenica Lorusso, MD", "email": "domenica.lorusso@istitutotumori.mi.it"}, "link": "http://clinicaltrials.gov/ct2/show/NCT02903004", 
"studyType": "Interventional", "phase": "Phase 3", "id": "NCT02903004", "tags": [{"priority": 1, "compositeKey": "Ovarian CaCONDITIONinclude", "suppress": false, "generatedBy": "", "filterType": "include", "term": "Ovarian Ca", "custom": true, "facet": "CONDITION", "alias": "Ovarian cancer", "manualSuppress": 0, "generatedByTerm": ""}, {"priority": 1, "compositeKey": "Carcinoma of ovaryCONDITIONinclude", "suppress": false, "generatedBy": "", "filterType": "include", "term": "Carcinoma of ovary", "custom": true, "facet": "CONDITION", "alias": "Carcinoma of ovary", "manualSuppress": 0, "generatedByTerm": ""}, {"priority": 1, "compositeKey": "Carcinoma of peritoneumCONDITIONinclude", "suppress": false, "generatedBy": "", "filterType": "include", "term": "Carcinoma of peritoneum", "custom": true, "facet": "CONDITION", "alias": "Primary Peritoneal Carcinoma", "manualSuppress": 0, "generatedByTerm": ""}, {"priority": 1, "compositeKey": "Tumour of fallopian tubeCONDITIONinclude", "suppress": false, "generatedBy": "", "filterType": "include", "term": "Tumour of fallopian tube", "custom": true, "facet": "CONDITION", "alias": "Fallopian tube cancer", "manualSuppress": 0, "generatedByTerm": ""}, {"priority": 2, "compositeKey": "PARP1GENEinclude", "suppress": false, "filterType": "include", "term": "PARP1", "custom": false, "facet": "GENE", "alias": "PARP1", "manualSuppress": 0}, {"priority": 3, "compositeKey": "BRCA1GENEinclude", "manualPriority": 3, "suppress": false, "filterType": "include", "term": "BRCA1", "custom": false, "facet": "GENE", "alias": "BRCA1", "manualSuppress": 0}, {"priority": 3, "compositeKey": "BRCAGENEinclude", "manualPriority": 3, "suppress": false, "filterType": "include", "term": "BRCA", "custom": false, "facet": "GENE", "alias": "BRCA", "manualSuppress": 0}, {"priority": 3, "compositeKey": "BRCA2GENEinclude", "manualPriority": 3, "suppress": false, "filterType": "include", "term": "BRCA2", "custom": false, "facet": "GENE", "alias": "BRCA2", 
"manualSuppress": 0}, {"priority": 1, "compositeKey": "Infective disorderCONDITIONexclude", "suppress": false, "filterType": "exclude", "term": "Infective disorder", "custom": false, "facet": "CONDITION", "alias": "Infection", "manualSuppress": 0}, {"priority": 1, "compositeKey": "Carcinoma of cervixCONDITIONexclude", "suppress": false, "filterType": "exclude", "term": "Carcinoma of cervix", "custom": false, "facet": "CONDITION", "alias": "Carcinoma of cervix", "manualSuppress": 0}, {"priority": 1, "compositeKey": "Platinum-based Drug resistanceRESISTANCEinclude", "suppress": false, "generatedBy": "DRUGCLASS", "filterType": "include", "term": "Platinum-based Drug resistance", "custom": false, "facet": "RESISTANCE", "alias": "Platinum-based Drug resistance", "manualSuppress": 0, "generatedByTerm": "Platinum-based Drug"}, {"priority": 1, "compositeKey": "Congestive cardiac failureCONDITIONexclude", "suppress": false, "filterType": "exclude", "term": "Congestive cardiac failure", "custom": false, "facet": "CONDITION", "alias": "Congestive cardiac failure", "manualSuppress": 0}, {"priority": 1, "compositeKey": "MI - Myocardial infarctionCONDITIONexclude", "suppress": false, "filterType": "exclude", "term": "MI - Myocardial infarction", "custom": false, "facet": "CONDITION", "alias": "Heart attack", "manualSuppress": 0}, {"priority": 1, "compositeKey": "HF - Heart failureCONDITIONexclude", "suppress": false, "filterType": "exclude", "term": "HF - Heart failure", "custom": false, "facet": "CONDITION", "alias": "Heart failure", "manualSuppress": 0}, {"priority": 1, "compositeKey": "Carcinoma in situCONDITIONexclude", "suppress": false, "filterType": "exclude", "term": "Carcinoma in situ", "custom": false, "facet": "CONDITION", "alias": "Carcinoma in situ", "manualSuppress": 0}, {"priority": 0, "compositeKey": "Secondary malignant neoplastic diseaseCONDITIONexclude", "suppress": false, "filterType": "exclude", "term": "Secondary malignant neoplastic disease", "custom": 
false, "facet": "CONDITION", "alias": "Metastases", "manualSuppress": 0}, {"priority": 1, "compositeKey": "Gemcitabine hydrochlorideDRUGinclude", "suppress": false, "filterType": "include", "term": "Gemcitabine hydrochloride", "custom": false, "facet": "DRUG", "alias": "Gemcitabine hydrochloride", "manualSuppress": 0}, {"priority": 0, "compositeKey": "Nucleic Acid Synthesis InhibitorsDRUGCLASSinclude", "suppress": false, "generatedBy": "DRUG", "filterType": "include", "term": "Nucleic Acid Synthesis Inhibitors", "custom": false, "facet": "DRUGCLASS", "alias": "Nucleic Acid Synthesis Inhibitors", "manualSuppress": 0, "generatedByTerm": "Gemcitabine hydrochloride"}, {"priority": 0, "compositeKey": "Nucleoside Metabolic InhibitorDRUGCLASSinclude", "suppress": false, "generatedBy": "DRUG", "filterType": "include", "term": "Nucleoside Metabolic Inhibitor", "custom": false, "facet": "DRUGCLASS", "alias": "Nucleoside Metabolic Inhibitor", "generatedByTerm": "Gemcitabine hydrochloride"}, {"priority": 0, "compositeKey": "AdjuvantDRUGCLASSinclude", "suppress": false, "generatedBy": "DRUG", "filterType": "include", "term": "Adjuvant", "custom": false, "facet": "DRUGCLASS", "alias": "Adjuvant", "manualSuppress": 0, "generatedByTerm": "Paclitaxel"}, {"priority": 0, "compositeKey": "ChemotherapyDRUGCLASSinclude", "suppress": false, "generatedBy": "DRUG", "filterType": "include", "term": "Chemotherapy", "custom": false, "facet": "DRUGCLASS", "alias": "Chemotherapy", "manualSuppress": 0, "generatedByTerm": "Trabectedin"}, {"priority": 2, "compositeKey": "MUC16GENEinclude", "suppress": false, "filterType": "include", "term": "MUC16", "custom": false, "facet": "GENE", "alias": "CA-125", "manualSuppress": 0}, {"priority": 1, "compositeKey": "Phase 3PHASEinclude", "suppress": false, "filterType": "include", "term": "Phase 3", "custom": false, "facet": "PHASE", "alias": "Phase 3", "manualSuppress": 0}, {"priority": 1, "compositeKey": "RecruitingSTATUSinclude", "suppress": false, 
"filterType": "include", "term": "Recruiting", "custom": false, "facet": "STATUS", "alias": "Recruiting", "manualSuppress": 0}, {"priority": 1, "compositeKey": "Doxorubicin hydrochlorideDRUGinclude", "suppress": false, "filterType": "include", "term": "Doxorubicin hydrochloride", "custom": false, "facet": "DRUG", "alias": "Doxorubicin hydrochloride", "manualSuppress": 0}, {"priority": 0, "compositeKey": "Topoisomerase InhibitorDRUGCLASSinclude", "suppress": false, "generatedBy": "DRUG", "filterType": "include", "term": "Topoisomerase Inhibitor", "custom": false, "facet": "DRUGCLASS", "alias": "Topoisomerase Inhibitor", "generatedByTerm": "Topotecan hydrochloride"}, {"priority": 0, "compositeKey": "TOP1 inhibitorDRUGCLASSinclude", "suppress": false, "generatedBy": "DRUG", "filterType": "include", "term": "TOP1 inhibitor", "custom": false, "facet": "DRUGCLASS", "alias": "TOP1 inhibitor", "generatedByTerm": "Topotecan hydrochloride"}, {"priority": 0, "compositeKey": "Topoisomerase inhibitorDRUGCLASSinclude", "suppress": false, "generatedBy": "DRUG", "filterType": "include", "term": "Topoisomerase inhibitor", "custom": false, "facet": "DRUGCLASS", "alias": "Topoisomerase Inhibitor", "manualSuppress": 0, "generatedByTerm": "Topotecan hydrochloride"}, {"priority": 0, "compositeKey": "Topoisomerase InhibitorsDRUGCLASSinclude", "suppress": false, "generatedBy": "DRUG", "filterType": "include", "term": "Topoisomerase Inhibitors", "custom": false, "facet": "DRUGCLASS", "alias": "Topoisomerase Inhibitors", "generatedByTerm": "Topotecan hydrochloride"}, {"priority": 0, "compositeKey": "Vinca AlkaloidDRUGCLASSinclude", "suppress": false, "generatedBy": "DRUG", "filterType": "include", "term": "Vinca Alkaloid", "custom": false, "facet": "DRUGCLASS", "alias": "Vinca Alkaloid", "manualSuppress": 0, "generatedByTerm": "Doxorubicin hydrochloride"}, {"priority": 0, "compositeKey": "Anthracycline Topoisomerase InhibitorDRUGCLASSinclude", "suppress": false, "generatedBy": "DRUG", 
"filterType": "include", "term": "Anthracycline Topoisomerase Inhibitor", "custom": false, "facet": "DRUGCLASS", "alias": "Anthracycline Topoisomerase Inhibitor", "generatedByTerm": "Doxorubicin hydrochloride"}, {"priority": 1, "compositeKey": "Topotecan hydrochlorideDRUGinclude", "suppress": false, "filterType": "include", "term": "Topotecan hydrochloride", "custom": false, "facet": "DRUG", "alias": "Topotecan hydrochloride", "manualSuppress": 0}, {"priority": 1, "compositeKey": "PaclitaxelDRUGinclude", "suppress": false, "filterType": "include", "term": "Paclitaxel", "custom": false, "facet": "DRUG", "alias": "Paclitaxel", "manualSuppress": 0}, {"priority": 0, "compositeKey": "Microtubule InhibitorDRUGCLASSinclude", "suppress": false, "generatedBy": "DRUG", "filterType": "include", "term": "Microtubule Inhibitor", "custom": false, "facet": "DRUGCLASS", "alias": "Microtubule Inhibitor", "generatedByTerm": "Paclitaxel"}, {"priority": 0, "compositeKey": "Microtubule inhibitorDRUGCLASSinclude", "suppress": false, "generatedBy": "DRUG", "filterType": "include", "term": "Microtubule inhibitor", "custom": false, "facet": "DRUGCLASS", "alias": "Microtubule inhibitor", "manualSuppress": 0, "generatedByTerm": "Paclitaxel"}, {"priority": 1, "compositeKey": "CarboplatinDRUGinclude", "suppress": false, "filterType": "include", "term": "Carboplatin", "custom": false, "facet": "DRUG", "alias": "Carboplatin", "manualSuppress": 0}, {"priority": 1, "compositeKey": "Superficial basal cell carcinomaCONDITIONexclude", "suppress": false, "filterType": "exclude", "term": "Superficial basal cell carcinoma", "custom": false, "facet": "CONDITION", "alias": "Basal cell carcinoma", "manualSuppress": 0}, {"priority": 1, "compositeKey": "Jns002DRUGinclude", "suppress": false, "filterType": "include", "term": "Jns002", "custom": false, "facet": "DRUG", "alias": "Jns002", "manualSuppress": 0}, {"priority": 1, "compositeKey": "TrabectedinDRUGinclude", "suppress": false, "filterType": "include", 
"term": "Trabectedin", "custom": false, "facet": "DRUG", "alias": "Yondelis", "manualSuppress": 0}, {"priority": 0, "compositeKey": "Alkylating DrugDRUGCLASSinclude", "suppress": false, "generatedBy": "DRUG", "filterType": "include", "term": "Alkylating Drug", "custom": false, "facet": "DRUGCLASS", "alias": "Alkylating Drug", "generatedByTerm": "Trabectedin"}, {"priority": 1, "compositeKey": "RandomizedTRIALTYPEinclude", "suppress": false, "filterType": "include", "term": "Randomized", "custom": false, "facet": "TRIALTYPE", "alias": "Randomized", "manualSuppress": 0}, {"priority": 1, "compositeKey": "InterventionalTRIALTYPEinclude", "suppress": false, "filterType": "include", "term": "Interventional", "custom": false, "facet": "TRIALTYPE", "alias": "Interventional", "manualSuppress": 0}, {"priority": 1, "compositeKey": "Carcinoma, NOSCONDITIONexclude", "suppress": false, "filterType": "exclude", "term": "Carcinoma, NOS", "custom": false, "facet": "CONDITION", "alias": "carcinoma of unknown primary site", "manualSuppress": 0}, {"priority": 1, "compositeKey": "DexamethasoneDRUGexclude", "suppress": false, "filterType": "exclude", "term": "Dexamethasone", "custom": false, "facet": "DRUG", "alias": "Dexamethasone", "manualSuppress": 0}, {"priority": 0, "compositeKey": "Malignant tumor of abdomenCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Malignant tumor of abdomen", "custom": false, "facet": "CONDITION", "alias": "Malignant tumor of abdomen", "manualSuppress": 0, "generatedByTerm": "Carcinoma of ovary"}, {"priority": 0, "compositeKey": "Malignant tumor of soft tissue of abdomenCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Malignant tumor of soft tissue of abdomen", "custom": false, "facet": "CONDITION", "alias": "Malignant tumor of soft tissue of abdomen", "manualSuppress": 0, "generatedByTerm": "Carcinoma of peritoneum"}, {"priority": 0, "compositeKey": 
"Neoplasm of the peritoneumCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Neoplasm of the peritoneum", "custom": false, "facet": "CONDITION", "alias": "Neoplasm of the peritoneum", "manualSuppress": 0, "generatedByTerm": "Carcinoma of peritoneum"}, {"priority": 0, "compositeKey": "Malignant neoplastic diseaseCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Malignant neoplastic disease", "custom": false, "facet": "CONDITION", "alias": "Cancer", "manualSuppress": 0, "generatedByTerm": "Carcinoma of ovary"}, {"priority": 0, "compositeKey": "Peritoneal cancerCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Peritoneal cancer", "custom": false, "facet": "CONDITION", "alias": "Peritoneal cancer", "manualSuppress": 0, "generatedByTerm": "Carcinoma of peritoneum"}, {"priority": 0, "compositeKey": "Neoplasm of soft tissues of abdomenCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Neoplasm of soft tissues of abdomen", "custom": false, "facet": "CONDITION", "alias": "Neoplasm of soft tissues of abdomen", "manualSuppress": 0, "generatedByTerm": "Carcinoma of peritoneum"}, {"priority": 0, "compositeKey": "Neoplasm of soft tissueCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Neoplasm of soft tissue", "custom": false, "facet": "CONDITION", "alias": "Soft tissues cancer", "manualSuppress": 0, "generatedByTerm": "Carcinoma of peritoneum"}, {"priority": 0, "compositeKey": "Neoplasm of connective tissuesCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Neoplasm of connective tissues", "custom": false, "facet": "CONDITION", "alias": "Connective tissue cancer", "manualSuppress": 0, "generatedByTerm": "Carcinoma of peritoneum"}, {"priority": 0, "compositeKey": "Neoplasm of soft 
tissues of trunkCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Neoplasm of soft tissues of trunk", "custom": false, "facet": "CONDITION", "alias": "Neoplasm of soft tissues of trunk", "manualSuppress": 0, "generatedByTerm": "Carcinoma of peritoneum"}, {"priority": 0, "compositeKey": "NeoplasiaCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Neoplasia", "custom": false, "facet": "CONDITION", "alias": "Neoplasia", "manualSuppress": 0, "generatedByTerm": "Carcinoma of ovary"}, {"priority": 0, "compositeKey": "Malignant tumour of soft tissueCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Malignant tumour of soft tissue", "custom": false, "facet": "CONDITION", "alias": "Malignant tumour of soft tissue", "manualSuppress": 0, "generatedByTerm": "Carcinoma of peritoneum"}, {"priority": 0, "compositeKey": "Solid tumorCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Solid tumor", "custom": false, "facet": "CONDITION", "alias": "Solid tumor", "manualSuppress": 0, "generatedByTerm": "Carcinoma of ovary"}, {"priority": 0, "compositeKey": "PeritoneumSITEinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Peritoneum", "custom": false, "facet": "SITE", "alias": "Peritoneum", "manualSuppress": 0, "generatedByTerm": "Carcinoma of peritoneum"}, {"priority": 0, "compositeKey": "Malignant neoplasm of genital structureCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Malignant neoplasm of genital structure", "custom": false, "facet": "CONDITION", "alias": "Malignant neoplasm of genital structure", "manualSuppress": 0, "generatedByTerm": "Carcinoma of ovary"}, {"priority": 0, "compositeKey": "Tumour of ovaryCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", 
"term": "Tumour of ovary", "custom": false, "facet": "CONDITION", "alias": "Tumour of ovary", "manualSuppress": 0, "generatedByTerm": "Carcinoma of ovary"}, {"priority": 0, "compositeKey": "Malignant tumor of pelvisCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Malignant tumor of pelvis", "custom": false, "facet": "CONDITION", "alias": "Malignant tumor of pelvis", "manualSuppress": 0, "generatedByTerm": "Carcinoma of ovary"}, {"priority": 0, "compositeKey": "Neoplasm of uterine adnexaCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Neoplasm of uterine adnexa", "custom": false, "facet": "CONDITION", "alias": "Uterine adnexa cancer", "manualSuppress": 0, "generatedByTerm": "Carcinoma of ovary"}, {"priority": 0, "compositeKey": "Malignant neoplasm of uterine adnexaCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Malignant neoplasm of uterine adnexa", "custom": false, "facet": "CONDITION", "alias": "Malignant neoplasm of uterine adnexa", "manualSuppress": 0, "generatedByTerm": "Carcinoma of ovary"}, {"priority": 0, "compositeKey": "Malignant tumour of female genital organCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Malignant tumour of female genital organ", "custom": false, "facet": "CONDITION", "alias": "Malignant tumour of female genital organ", "manualSuppress": 0, "generatedByTerm": "Carcinoma of ovary"}, {"priority": 0, "compositeKey": "Malignant neoplasm of genitourinary organCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Malignant neoplasm of genitourinary organ", "custom": false, "facet": "CONDITION", "alias": "Malignant neoplasm of genitourinary organ", "manualSuppress": 0, "generatedByTerm": "Carcinoma of ovary"}, {"priority": 0, "compositeKey": "OvarySITEinclude", "suppress": false, "generatedBy": 
"CONDITION", "filterType": "include", "term": "Ovary", "custom": false, "facet": "SITE", "alias": "Ovary", "manualSuppress": 0, "generatedByTerm": "Carcinoma of ovary"}, {"priority": 0, "compositeKey": "Salpinx uterinaSITEinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Salpinx uterina", "custom": false, "facet": "SITE", "alias": "Salpinx uterina", "manualSuppress": 0, "generatedByTerm": "Tumour of fallopian tube"}, {"priority": 0, "compositeKey": "Epithelial tumour of ovaryCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Epithelial tumour of ovary", "custom": false, "facet": "CONDITION", "alias": "Epithelial tumour of ovary", "manualSuppress": 0, "generatedByTerm": "Carcinoma of ovary"}]}
""") # NOQA
phenotype2 = """
{"status": "Unknown status", "startDate": "August 2009", "interventions": [{"intervention_name": "endostar and CHOP", "arm_group_label": "treatment", "description": "endostar, 7.5m g/m2, intravenous, Day 2-15, repeat every 3 weeks\r\nCHOP: Cyclophosphamide, Hydroxydaunorubicin, Oncovin, Prednisone", "other_name": "endostar", "intervention_type": "Drug"}], "molecularAlterations": [], "_score": 18, "title": "Phase 2 Study of Endostar Combined With CHOP Regimen as the First Line Chemotherapy for Untreated Peripheral T Cell Lymphoma", "locations": [{"status": "Recruiting", "city": "Shanghai", "last_name": "Haiyi Guo, MD", "_valid": true, "zip": "200032", "country": "China", "id": "fudanuniversitycancerhospital_shanghai_shanghai_cn_200032", "phone": "86(021)64175590", "state": "Shanghai", "location": {"type": "Point", "coordinates": [121.474, 31.23]}, "phone_ext": "5008", "geo": {"lat": 31.23, "lon": 121.474}, "email": "guohaiyi@csco.org.cn", "name": "Fudan University Cancer Hospital"}], "briefTitle": "Endostar Combined With CHOP Regimen as First Line Chemotherapy for Peripheral T Cell Lymphoma", "overallContact": {"phone": "86(021)64175590", "last_name": "Haiyi Guo, MD", "email": "guohaiyi@csco.org.cn", "phone_ext": "5008"}, "link": "http://clinicaltrials.gov/ct2/show/NCT00974324", "studyType": "Interventional", "phase": "Phase 2", "id": "NCT00974324", "tags": [{"priority": 2, "compositeKey": "ALKGENEinclude", "suppress": false, "filterType": "include", "term": "ALK", "custom": false, "facet": "GENE", "alias": "ALK"}, {"priority": 0, "compositeKey": "Lymphoreticular tumourCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Lymphoreticular tumour", "custom": false, "facet": "CONDITION", "alias": "Lymphoreticular tumour", "generatedByTerm": "Malignant lymphoma"}, {"priority": 0, "compositeKey": "Hematologic malignancyCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": 
"Hematologic malignancy", "custom": false, "facet": "CONDITION", "alias": "Hematologic malignancy", "generatedByTerm": "Malignant lymphoma"}, {"priority": 0, "compositeKey": "Malignant neoplastic diseaseCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Malignant neoplastic disease", "custom": false, "facet": "CONDITION", "alias": "Cancer", "generatedByTerm": "Malignant lymphoma"}, {"priority": 0, "compositeKey": "Lymphoproliferative disorderCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Lymphoproliferative disorder", "custom": false, "facet": "CONDITION", "alias": "Lymphoproliferative disorder", "generatedByTerm": "Malignant lymphoma"}, {"priority": 0, "compositeKey": "NeoplasiaCONDITIONinclude", "suppress": false, "generatedBy": "CONDITION", "filterType": "include", "term": "Neoplasia", "custom": false, "facet": "CONDITION", "alias": "Neoplasia", "generatedByTerm": "Malignant lymphoma"}, {"priority": 1, "compositeKey": "Normal breast feedingFINDINGexclude", "suppress": false, "filterType": "exclude", "term": "Normal breast feeding", "custom": false, "facet": "FINDING", "alias": "Breast feeding"}, {"priority": 1, "compositeKey": "Pregnancy confirmedFINDINGexclude", "suppress": false, "filterType": "exclude", "term": "Pregnancy confirmed", "custom": false, "facet": "FINDING", "alias": "Pregnant"}, {"priority": 1, "compositeKey": "VEGFGENEinclude", "suppress": false, "filterType": "include", "term": "VEGF", "custom": false, "facet": "GENE", "alias": "Vascular endothelial growth factor"}, {"priority": 1, "compositeKey": "Congestive cardiac failureCONDITIONexclude", "suppress": false, "filterType": "exclude", "term": "Congestive cardiac failure", "custom": false, "facet": "CONDITION", "alias": "Congestive cardiac failure"}, {"priority": 1, "compositeKey": "MI - Myocardial infarctionCONDITIONexclude", "suppress": false, "filterType": "exclude", "term": "MI - Myocardial 
infarction", "custom": false, "facet": "CONDITION", "alias": "Heart attack"}, {"priority": 1, "compositeKey": "HF - Heart failureCONDITIONexclude", "suppress": false, "filterType": "exclude", "term": "HF - Heart failure", "custom": false, "facet": "CONDITION", "alias": "Heart failure"}, {"priority": 1, "compositeKey": "Phase 2PHASEinclude", "suppress": false, "filterType": "include", "term": "Phase 2", "custom": false, "facet": "PHASE", "alias": "Phase 2"}, {"priority": 1, "compositeKey": "EndostarDRUGinclude", "suppress": false, "filterType": "include", "term": "Endostar", "custom": false, "facet": "DRUG", "alias": "Endostar"}, {"priority": 1, "compositeKey": "Vincristine sulfateDRUGinclude", "suppress": false, "filterType": "include", "term": "Vincristine sulfate", "custom": false, "facet": "DRUG", "alias": "Vincristine sulfate"}, {"priority": 0, "compositeKey": "Vinca AlkaloidDRUGCLASSinclude", "suppress": false, "generatedBy": "DRUG", "filterType": "include", "term": "Vinca Alkaloid", "custom": false, "facet": "DRUGCLASS", "alias": "Vinca Alkaloid", "generatedByTerm": "Vincristine sulfate"}, {"priority": 0, "compositeKey": "ChemotherapyDRUGCLASSinclude", "suppress": false, "generatedBy": "DRUG", "filterType": "include", "term": "Chemotherapy", "custom": false, "facet": "DRUGCLASS", "alias": "Chemotherapy", "generatedByTerm": "Cyclophosphamide"}, {"priority": 1, "compositeKey": "CyclophosphamideDRUGinclude", "suppress": false, "filterType": "include", "term": "Cyclophosphamide", "custom": false, "facet": "DRUG", "alias": "Cyclophosphamide"}, {"priority": 0, "compositeKey": "Alkylating DrugDRUGCLASSinclude", "suppress": false, "generatedBy": "DRUG", "filterType": "include", "term": "Alkylating Drug", "custom": false, "facet": "DRUGCLASS", "alias": "Alkylating Drug", "generatedByTerm": "Cyclophosphamide"}, {"priority": 1, "compositeKey": "Squamous cell carcinoma of skinCONDITIONexclude", "suppress": false, "filterType": "exclude", "term": "Squamous cell carcinoma 
of skin", "custom": false, "facet": "CONDITION", "alias": "Squamous cell carcinoma of skin"}, {"priority": 1, "compositeKey": "Malignant neoplasm of skinCONDITIONexclude", "suppress": false, "filterType": "exclude", "term": "Malignant neoplasm of skin", "custom": false, "facet": "CONDITION", "alias": "Malignant neoplasm of skin"}, {"priority": 1, "compositeKey": "CombinationTRIALTYPEinclude", "suppress": false, "filterType": "include", "term": "Combination", "custom": false, "facet": "TRIALTYPE", "alias": "Combination"}, {"priority": 1, "compositeKey": "InterventionalTRIALTYPEinclude", "suppress": false, "filterType": "include", "term": "Interventional", "custom": false, "facet": "TRIALTYPE", "alias": "Interventional"}, {"priority": 1, "compositeKey": "RadiationDRUGCLASSexclude", "suppress": false, "filterType": "exclude", "term": "Radiation", "custom": false, "facet": "DRUGCLASS", "alias": "Radiation"}, {"priority": 1, "compositeKey": "VEGFDGENEinclude", "suppress": false, "generatedBy": "GENE", "filterType": "include", "term": "VEGFD", "custom": false, "facet": "GENE", "alias": "VEGFD", "generatedByTerm": "VEGF"}, {"priority": 1, "compositeKey": "PIGFGENEinclude", "suppress": false, "generatedBy": "GENE", "filterType": "include", "term": "PIGF", "custom": false, "facet": "GENE", "alias": "PIGF", "generatedByTerm": "VEGF"}, {"priority": 1, "compositeKey": "VEGFAGENEinclude", "suppress": false, "generatedBy": "GENE", "filterType": "include", "term": "VEGFA", "custom": false, "facet": "GENE", "alias": "VEGFA", "generatedByTerm": "VEGF"}, {"priority": 1, "compositeKey": "VEGFBGENEinclude", "suppress": false, "generatedBy": "GENE", "filterType": "include", "term": "VEGFB", "custom": false, "facet": "GENE", "alias": "VEGFB", "generatedByTerm": "VEGF"}, {"priority": 1, "compositeKey": "VEGFCGENEinclude", "suppress": false, "generatedBy": "GENE", "filterType": "include", "term": "VEGFC", "custom": false, "facet": "GENE", "alias": "VEGFC", "generatedByTerm": "VEGF"}]}
""" # NOQA
# strict=False: the fixture above embeds raw control characters (\r\n) inside
# JSON string values, which the default strict decoder would reject.
PHENOTYPE2 = json.loads(phenotype2, strict=False)  # control character
def convert(dictionary):
    """Wrap *dictionary* in an ad-hoc namedtuple so keys read as attributes.

    A throwaway 'GenericDict' type is built from the dict's keys, so every
    key must be a valid Python identifier.
    """
    GenericDict = namedtuple('GenericDict', dictionary.keys())
    return GenericDict(**dictionary)
def test_convert_association():
    """The first evidence converted from EVIDENCE carries evidence label 'C'."""
    # molecularmatch_trials.convert yields lazily, so materialise it first.
    converted = list(molecularmatch_trials.convert(EVIDENCE))
    association = convert(convert(converted[0]).association)
    assert association.evidence_label == 'C'
def test_convert_association2():
    """evidence_label maps a 'Phase 3' trial onto evidence label 'B'."""
    evidence = {'phase': 'Phase 3'}
    result = el.evidence_label(evidence['phase'], {}, na=False)
    assert result['evidence_label'] == 'B'
def test_convert_phenotype():
    """Converting PHENOTYPE yields the fallopian-tube phenotype description."""
    # molecularmatch_trials.convert yields lazily, so materialise it first.
    converted = list(molecularmatch_trials.convert(PHENOTYPE))
    association = convert(convert(converted[0]).association)
    description = association.phenotype['description']
    assert description, 'association.phenotype.description empty'
    assert description == 'Tumour of fallopian tube'
def test_convert_phenotype2():
    """Converting PHENOTYPE2 yields the 'Malignant lymphoma' description."""
    # molecularmatch_trials.convert yields lazily, so materialise it first.
    converted = list(molecularmatch_trials.convert(PHENOTYPE2))
    association = convert(convert(converted[0]).association)
    description = association.phenotype['description']
    assert description, 'association.phenotype.description empty'
    assert description == 'Malignant lymphoma'
| [
"brian@bwalsh.com"
] | brian@bwalsh.com |
868d9325e4d3169cb0219450aed0e49a34dc2b90 | 851f7fde684774ca0388a28cb7035aa1e95f5de0 | /Ercess/settings.py | 57b1e7e01b4a74cf330b883c7aad6860f19d7c65 | [] | no_license | aditya2222/django-tickets | 01451a724cf97c8f2f338ba85a704e85ae57b008 | 3c2ecd252479fc5821873823cdbbb4641268a2d2 | refs/heads/master | 2022-12-16T17:17:07.821446 | 2019-05-12T02:58:05 | 2019-05-12T02:58:05 | 186,204,071 | 0 | 0 | null | 2022-11-22T03:22:25 | 2019-05-12T02:55:47 | JavaScript | UTF-8 | Python | false | false | 6,147 | py | """
Django settings for Ercess project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Template/static/media locations resolved relative to the project root.
TEMPLATES_DIR1 = os.path.join(BASE_DIR , 'templates/dashboard')
TEMPLATES_DIR2 = os.path.join(BASE_DIR , 'templates/Ercesscorp')
TEMPLATES_DIR3 = os.path.join(BASE_DIR , 'templates')
STATIC_DIR = os.path.join(BASE_DIR, 'templates/static')
MEDIA_DIR = os.path.join(BASE_DIR ,'media')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control - rotate it and load
# it from the environment (like EMAIL_HOST_PASSWORD below) before deploying.
SECRET_KEY = '$%kn8r7jbv#*&)=vyq$q9dg=*kwj!zhuvcu#re@v$%y*g6elc$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['178.128.11.7', '127.0.0.1']
# Application definition
# Django contrib apps, then third-party (debug_toolbar, DRF + authtoken,
# ckeditor), then project apps (dashboard, Ercesscorp).
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'debug_toolbar',
    'rest_framework',
    'ckeditor',
    'rest_framework.authtoken',
    'dashboard',
    'Ercesscorp.apps.ErcesscorpConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # debug_toolbar shows its panel only for addresses in INTERNAL_IPS below.
    'debug_toolbar.middleware.DebugToolbarMiddleware',
]
ROOT_URLCONF = 'Ercess.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Extra search dirs on top of per-app template dirs (APP_DIRS=True).
        'DIRS': [TEMPLATES_DIR1,TEMPLATES_DIR2,TEMPLATES_DIR3,] ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'Ercess.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
#
# Previous sqlite configuration, kept for reference:
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#     }
# }
# NOTE(review): root credentials committed in source, no HOST/PORT (defaults
# to local socket) - move credentials to the environment before deploying.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'testSchema',
        'USER': 'root' ,
        'PASSWORD': 'tiktik123',
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Secure-cookie flags are disabled (no HTTPS assumed in development).
#SESSION_COOKIE_SECURE = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
#CSRF_COOKIE_SECURE = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [STATIC_DIR,]
# Outgoing mail via Gmail over implicit SSL; credentials come from the
# environment (EMAIL_HOST_USER / EMAIL_HOST_PASSWORD may be None if unset).
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_USE_SSL = True
EMAIL_PORT = 465
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
# Addresses allowed to see the debug toolbar.
INTERNAL_IPS = ('127.0.0.1',)
MEDIA_ROOT = 'media/'
MEDIA_URL = '/media/'
'''
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
) ,
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
)
}
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
)
}
'''
# cors-headers (currently disabled)
#CORS_ORIGIN_ALLOW_ALL = True
#CORS_ALLOW_CREDENTIALS = True
# django-ckeditor: custom toolbar layout for the rich-text widget.
CKEDITOR_CONFIGS = {
    'default': {
        'skin': 'moono-lisa',
        #'skin': 'office2013',
        'toolbar':'custom',
        'width':700,
        'height': 300,
        'toolbar_custom': [
            ['Preview'],
            ['Bold', 'Italic', 'Underline', 'Strike', 'Subscript', 'Superscript', '-', 'RemoveFormat'],
            ['Cut', 'Copy', 'Paste', 'PasteText', 'PasteFromWord', '-', 'Undo', 'Redo'],
            ['TextColor', 'BGColor'],
            ['Link', 'Unlink', 'Anchor'],
            ['Styles', 'Format', 'Font', 'FontSize'],
            ['Flash', 'Table', 'HorizontalRule', 'Smiley', 'SpecialChar', 'PageBreak', 'Iframe'],
            ['Find', 'Replace', '-', 'SelectAll'],
            ['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Blockquote', 'CreateDiv', '-',
             'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock', '-', 'BidiLtr', 'BidiRtl',
             'Language'],
        ],
    }
}
| [
"adityasingh222247@gmail.com"
] | adityasingh222247@gmail.com |
0e2200d7cc05f631a298302b7d0012d4fc312b33 | 7e9bfbcd31faa30cdad64dff947e80cfe3f3a526 | /Backend/apps/users/adminx.py | 3aa920ca8f60c5eaeb3da689a605c911d57a03f2 | [
"Apache-2.0"
] | permissive | skyroot/Dailyfresh-B2C | 6c02578b5b39b8746c4bf9ebb2288775ffeabf33 | 7c94e9a4428e5116c91bf27cf696e6eee430748a | refs/heads/master | 2023-01-04T09:47:26.834763 | 2019-02-25T02:38:37 | 2019-02-25T02:38:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | # encoding: utf-8
import xadmin
from xadmin import views
from .models import VerifyCode
class BaseSetting(object):
    """xadmin base view settings: enable the theme switcher with bootswatch themes."""
    enable_themes = True
    use_bootswatch = True
class GlobalSettings(object):
    """Global xadmin chrome: site title ("Daily Fresh") and footer text."""
    site_title = "天天生鲜"
    site_footer = "github.com/BeanWei"
    # menu_style = "accordion"
class VerifyCodeAdmin(object):
    """Admin list view for SMS verification codes."""
    list_display = ['code', 'mobile', "add_time"]
# Register the model admin and hook the settings classes into xadmin's views.
xadmin.site.register(VerifyCode, VerifyCodeAdmin)
xadmin.site.register(views.BaseAdminView, BaseSetting)
xadmin.site.register(views.CommAdminView, GlobalSettings) | [
"157318439@qq.com"
] | 157318439@qq.com |
0b2a2046c0731579d937a1254246088e4dca3a45 | 94d70c1c19cf115aa415a04cd85ff687a17f5eca | /exp_classif.py | 4f8a6d76d95e2f51fec470bb4570b20bb22a9af1 | [
"MIT"
] | permissive | RandallBalestriero/PMASO | 50ca98b2ea918f38fed4cc442562a6322c80409f | 780b06f8d8496000f3ecda04a49c8eda72393b5d | refs/heads/master | 2021-04-15T12:20:19.250244 | 2019-01-28T21:16:33 | 2019-01-28T21:16:33 | 126,507,496 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,710 | py | from pylab import *
import tensorflow as tf
from sklearn.datasets import make_moons
from sklearn.datasets import load_digits
# NOTE(review): duplicate of the tensorflow import above.
import tensorflow as tf
from layers import *
from utils import *
import cPickle
# Python 2 script. CLI: python exp_classif.py ... <sigma> <nonlinearity>
DATASET = 'MNIST'
neuronsss=10
# sys is presumably re-exported by one of the star imports -- TODO confirm.
sigmass=sys.argv[-2]
nonlinearity = int(sys.argv[-1])
print nonlinearity
# Load the dataset and move channels last: (N, 28, 28, 1).
x_train,y_train,x_test,y_test = load_data(DATASET)
x_train = transpose(x_train,[0,2,3,1])
x_test = transpose(x_test,[0,2,3,1])
# Shuffle the training set and add light Gaussian noise (std 0.05).
pp = permutation(x_train.shape[0])
x_train = x_train[pp]+randn(len(pp),28,28,1)*0.05
y_train = y_train[pp]
# Only the first 2000 samples are used to fit the layered model.
XX = x_train[:2000]
YY = y_train[:2000]
input_shape = XX.shape
# Two dense layers whose widths scale with the CLI multiplier, then a final
# layer with R=neuronsss (=10) components.
layers1 = [InputLayer(input_shape)]
layers1.append(DenseLayer(layers1[-1],K=32*nonlinearity,R=2,nonlinearity=None,sparsity_prior=0.,sigma=sigmass,learn_pi=1,p_drop=0.,bn=BN(0,0),U=0))
layers1.append(DenseLayer(layers1[-1],K=16*nonlinearity,R=2,nonlinearity=None,sparsity_prior=0.,sigma=sigmass,learn_pi=1,p_drop=0.,bn=BN(0,0),U=0))
layers1.append(FinalLayer(layers1[-1],R=neuronsss,sparsity_prior=0.00,sigma=sigmass,bn=BN(0,0)))
model1 = model(layers1)
model1.init_dataset(XX,YY)
LOSSE=train_layer_model(model1,rcoeff_schedule=schedule(0.000000000001,'linear'),CPT=132,random=0,fineloss=0)
# Evaluate on the remaining 2000-sample batches: run E-steps with decreasing
# arguments (10, 1, 0.001) and print the batch accuracy after each.
for i in xrange(1,10):
    model1.init_dataset(x_train[2000*i:2000*(i+1)])
    model1.init_thetaq()
    model1.E_step(10)
    y_hat = argmax(model1.predict(),1)
    print mean((y_hat==y_train[2000*i:2000*(i+1)]).astype('float32'))
    model1.E_step(1)
    y_hat = argmax(model1.predict(),1)
    print mean((y_hat==y_train[2000*i:2000*(i+1)]).astype('float32'))
    model1.E_step(0.001)
    y_hat = argmax(model1.predict(),1)
    print mean((y_hat==y_train[2000*i:2000*(i+1)]).astype('float32'))
| [
"randallbalestriero@gmail.com"
] | randallbalestriero@gmail.com |
62b067d9e926d110db5569bf37336152e61ec68f | 9867cb1c684aa1087f6320ad28b4e718b2879a70 | /examples/basics/location.py | 24066f16dd61b7b576f3e0b01007bc856f289836 | [
"MIT"
] | permissive | mcauser/Python_MQTT | 0c90a2264e523129ea1db15904dd2b66c85aaa28 | c5795d64f12c576433a3d59b528351f02aa84036 | refs/heads/master | 2023-01-22T03:14:49.070321 | 2018-07-15T19:58:00 | 2018-07-15T19:58:00 | 319,333,029 | 0 | 0 | NOASSERTION | 2020-12-07T13:45:36 | 2020-12-07T13:45:36 | null | UTF-8 | Python | false | false | 1,930 | py | """
'location.py'
====================================
Example of sending GPS data points
to an Adafruit IO Feed using the API
Author(s): Brent Rubell, Todd Treece
"""
# Import standard python modules
import time
# Import Adafruit IO REST client.
from Adafruit_IO import Client, Feed, RequestError
# Set to your Adafruit IO key.
# Remember, your key is a secret,
# so make sure not to publish it when you publish this code!
ADAFRUIT_IO_KEY = 'YOUR_AIO_KEY'
# Set to your Adafruit IO username.
# (go to https://accounts.adafruit.com to find your username)
ADAFRUIT_IO_USERNAME = 'YOUR_AIO_USERNAME'
# Create an instance of the REST client.
aio = Client(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)
# Assign a location feed, if one exists already
try:
    location = aio.feeds('location')
except RequestError: # Doesn't exist, create a new feed
    feed = Feed(name="location")
    location = aio.create_feed(feed)
# limit feed updates (loop_delay seconds between iterations) to avoid the
# Adafruit IO rate limit
loop_delay = 5
# We don't have a GPS hooked up, but let's fake it for the example/test:
# (replace this data with values from a GPS hardware module)
value = 0
lat = 40.726190
lon = -74.005334
ele = 6 # elevation above sea level (meters)
# Runs forever: send a point, read it back, drift the fake coordinates.
while True:
    print('\nSending Values to location feed...\n')
    print('\tValue: ', value)
    print('\tLat: ', lat)
    print('\tLon: ', lon)
    print('\tEle: ', ele)
    # Send location data to Adafruit IO
    aio.send_location_data(location.key, value, lat, lon, ele)
    # shift all values (for test/demo purposes)
    value += 1
    lat -= 0.01
    lon += -0.02
    ele += 1
    # Read the location data back from IO
    print('\nData Received by Adafruit IO Feed:\n')
    data = aio.receive(location.key)
    print('\tValue: {0}\n\tLat: {1}\n\tLon: {2}\n\tEle: {3}'
          .format(data.value, data.lat, data.lon, data.ele))
    # wait loop_delay seconds to avoid api throttle
    time.sleep(loop_delay)
| [
"robots199@me.com"
] | robots199@me.com |
43167f9749dfd64a1b65c4c7780c913464dc1586 | f211cc8b7129523fb466c67af7e5d06c60d5eb1c | /python/algorithms/warmup/a very big sum.py | 6dd91cf25d9da0e1a0dca886602330bb1c60c2f8 | [] | no_license | JayWelborn/HackerRank-problems | ebef5fe4c78eba12fa21ff6ff99be7d11b4345b4 | f9386ca970055a52824304aa55a7fd6e392efe26 | refs/heads/master | 2021-06-16T20:06:13.118415 | 2018-10-03T12:51:04 | 2018-10-03T12:51:04 | 96,048,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | n = int(input().strip())
# The element count was read above; this line holds the space-separated values.
ar = list(map(int, input().strip().split(' ')))
# Python ints are arbitrary precision, so summing "very big" values is safe.
print(sum(ar))
| [
"jesse.welborn@gmail.com"
] | jesse.welborn@gmail.com |
afca54c190d92048b7f0dc6a287b5d5f3d152993 | c47d0e1a9e256a7cade218ccf4173c82183f67b4 | /codedining/urls.py | 8b35f409571998b5bec9e4657e0754f8a5e2def0 | [] | no_license | nagyistge/FoodDuk | 9e85c3e077a24ad8daa3e2f587bf40913d4eebcc | 2c89b2e99e3ec1d8833f422a4a9f1c372a350b5e | refs/heads/master | 2021-01-20T11:30:03.271597 | 2014-09-30T14:44:50 | 2014-09-30T14:44:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
# Populate the admin registry from each installed app's admin module.
admin.autodiscover()
# Legacy patterns() API (removed in Django 1.10); root URLs are delegated to
# the core app and /admin/ serves the admin site.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'codedining.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^', include('core.urls', namespace='core')),
    url(r'^admin/', include(admin.site.urls)),
)
| [
"carpedm20@gmail.com"
] | carpedm20@gmail.com |
d4dd0cc6c611da8ad78ddfa152f67b1c6c957144 | 7434ef0a0840da62c449b73a3810d11bcf300644 | /fms/migrations/0005_auto_20171019_1201.py | af961ed9df24aca650ddd579956428bce93c07ec | [] | no_license | webmalc/maxibooking-billing-django | 40f497dc794e0b29868a01b482b5865764b35fd3 | d5ca86c3701b86e359b0648a5b76b0b71faa7810 | refs/heads/master | 2022-12-17T21:50:42.210125 | 2019-08-14T09:33:02 | 2019-08-14T09:33:02 | 202,347,569 | 2 | 0 | null | 2022-12-09T22:25:58 | 2019-08-14T12:36:22 | Python | UTF-8 | Python | false | false | 779 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-10-19 12:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter fms.end_date and kpp.end_date to indexed CharFields.

    The one-off defaults only back-fill existing rows;
    preserve_default=False drops them from the model afterwards.
    """
    dependencies = [
        ('fms', '0004_auto_20171019_1145'),
    ]
    operations = [
        migrations.AlterField(
            model_name='fms',
            name='end_date',
            # NOTE(review): integer default on a CharField -- confirm that
            # back-filling existing rows with 1 (coerced to text) is intended.
            field=models.CharField(db_index=True, default=1, max_length=100, verbose_name='end date'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='kpp',
            name='end_date',
            field=models.CharField(db_index=True, default='test', max_length=100, verbose_name='end date'),
            preserve_default=False,
        ),
    ]
| [
"m@webmalc.pw"
] | m@webmalc.pw |
27b07d65edf49577d7ee69214610b12ac1107ae3 | dd1fa9020beb9b0205a5d05e0026ccae1556d14b | /gongmo/smote.py | b9cce4e206cdacd3f801b0f1984e2c4bf7a26b53 | [] | no_license | kimjieun6307/itwill | 5a10250b6c13e6be41290e37320b15681af9ad9a | 71e427bccd82af9f19a2a032f3a08ff3e1f5911d | refs/heads/master | 2022-11-13T11:55:12.502959 | 2020-07-15T08:14:21 | 2020-07-15T08:14:21 | 267,373,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,817 | py | # -*- coding: utf-8 -*-
"""
Created on Thu May 28 18:01:47 2020
@author: user
"""
# -*- coding: utf-8 -*-
"""
Created on Wed May 27 11:23:29 2020
@author: user
"""
#################
## plant_a_df.csv
#################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression # 로지스틱 회귀분석
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from xgboost import plot_importance # 중요변수 시각화
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_validate
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score , classification_report
# 히트맵 : 분류정확도 결과를 시각화
import seaborn as sn # heatmap - Accuracy Score
import matplotlib.pyplot as plt
# tree 시각화 관련
from sklearn.tree.export import export_text # ==> print(export_text(model))
from sklearn import tree # ==> tree.plot_tree(model)
# Plant A: load the sensor log and keep columns 1..7 (drops the CSV index column).
plant_a_df = pd.read_csv('plant_a_df.csv')
plant_a_df=plant_a_df.iloc[:, 1:8]
plant_a_df.info()
'''
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 18815 entries, 0 to 18814
Data columns (total 7 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 plant1_train_mea_ddhr 18815 non-null object
1 plant1_train_tem_in_loc1 18815 non-null float64
2 plant1_train_hum_in_loc1 18815 non-null float64
3 plant1_train_tem_coil_loc1 18815 non-null float64
4 plant1_train_tem_out_loc1 18815 non-null float64
5 plant1_train_hum_out_loc1 18815 non-null float64
6 24hour_cond_loc1
'''
# Rename to short names; the last column ('24hour_cond') is the binary target.
plant_a_df.columns = ['ddhr', 'tem_in', 'hum_in', 'tem_coil', 'tem_out', 'hum_out', '24hour_cond']
col = plant_a_df.columns
# Features are the five sensor readings; the ddhr timestamp is excluded.
x_col = col[1:6]
y_col = col[-1]
X=plant_a_df[x_col]
y=plant_a_df[y_col]
X.shape # (18815, 5)
y.value_counts()
'''
0.0 18700
1.0 115
'''
###############################
### SMOTE: oversampling to handle the imbalanced target
#############################
# pip install -U imbalanced-learn  (install from the Anaconda Prompt)
from imblearn.over_sampling import SMOTE
## default sampling_strategy='auto': oversample the minority class to parity ##
sm = SMOTE(k_neighbors=5, random_state=71)
# NOTE(review): fit_sample was renamed fit_resample in imbalanced-learn 0.4+.
X_data, y_data = sm.fit_sample(X, y)
X_data.shape # (37400, 5)
y_data.shape # (37400,)
18815-37400 # -18585
y_data.value_counts()
'''
1.0 18700
0.0 18700
'''
# NOTE(review): no stratify/random_state, so the split is not reproducible.
x_train, x_test, y_train, y_test = train_test_split(X_data, y_data, test_size =0.3)
##########
# XGB
##########
xgb = XGBClassifier(random_state=123)
model_xgb = xgb.fit(x_train, y_train)
y_pred = model_xgb.predict(x_test)
acc = accuracy_score(y_test, y_pred)
acc # 0.9884135472370766
report = classification_report(y_test, y_pred)
print(report)
'''
precision recall f1-score support
0.0 1.00 0.98 0.99 5558
1.0 0.98 1.00 0.99 5662
accuracy 0.99 11220
macro avg 0.99 0.99 0.99 11220
weighted avg 0.99 0.99 0.99 11220
'''
# Confusion table: rows are predictions, columns are true labels.
pd.crosstab(y_pred, y_test)
'''
24hour_cond 0.0 1.0
row_0
0.0 5443 15
1.0 115 5647
'''
# Class balance of the (resampled) test fold.
y_test.value_counts() #11,220
'''
1.0 5662
0.0 5558
'''
# Plant B: a second plant's log used to check how the plant-A model transfers.
plant_b_df = pd.read_csv('plant_b_df.csv')
plant_b_df=plant_b_df.iloc[:, 1:8]
plant_b_df.info()
'''
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 19406 entries, 0 to 19405
Data columns (total 7 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 plant1_train_mea_ddhr 19406 non-null object
1 plant1_train_tem_in_loc2 19406 non-null float64
2 plant1_train_hum_in_loc2 19406 non-null float64
3 plant1_train_tem_coil_loc2 19406 non-null float64
4 plant1_train_tem_out_loc1 19406 non-null float64
5 plant1_train_hum_out_loc1 19406 non-null float64
6 24hour_cond_loc2 19406 non-null float64
'''
# Reuse the plant-A column names so x_col/y_col apply to plant B as well.
plant_b_df.columns = ['ddhr', 'tem_in', 'hum_in', 'tem_coil', 'tem_out', 'hum_out', '24hour_cond']
col
'''
Index(['ddhr', 'tem_in', 'hum_in', 'tem_coil', 'tem_out', 'hum_out',
'24hour_cond'],
'''
# Plant-B features/target with the same column selection as plant A.
X_b = plant_b_df[x_col]
y_b = plant_b_df[y_col]
X_b.shape # (19406, 5)
y_b.value_counts()
'''
0.0 19199
1.0 207
'''
# Score the plant-A XGB model on the unseen plant-B data.
y_b_pred = model_xgb.predict(X_b)
acc = accuracy_score(y_b, y_b_pred)
acc # 0.9685148923013501
report=classification_report(y_b, y_b_pred)
print(report)
#########
# svm
##########
# Grid search over kernel/gamma/C.
# NOTE(review): hyper-parameters are selected on the test fold, which leaks
# information; a validation split or cross-validation would be sounder.
params = [0.001, 0.01, 0.1, 1, 10, 100]
kernel = ['linear', 'rbf']
best_score = 0
best_params={}
for k in kernel:
    for g in params :
        for c in params:
            svc = SVC(kernel=k, gamma=g, C=c)
            model = svc.fit(x_train, y_train)
            score = model.score(x_test, y_test)
            if score > best_score :
                best_score = score
                best_params = {'kernel': k, 'gamma' : g, 'C' : c}
print('best score : ', best_score)
print('best parameter : ', best_params)
# Refit with the best parameters found above.
svc = SVC( C =10, gamma=0.01, kernel ='rbf')
model = svc.fit(x_train, y_train)
y_pred = model.predict(x_test)
y_true = y_test
acc = accuracy_score(y_true, y_pred)
acc #0.9834224598930481
report = classification_report(y_test, y_pred)
print(report)
##################
### RandomForest
##################
# Default-parameter random forest baseline on the same split.
rf = RandomForestClassifier()
model = rf.fit(X=x_train, y=y_train)
y_pred = model.predict(x_test)
y_true = y_test
acc = accuracy_score(y_true, y_pred)
acc # 0.9939393939393939
report = classification_report(y_true, y_pred)
print(report)
'''
precision recall f1-score support
0.0 1.00 0.99 0.99 5597
1.0 0.99 1.00 0.99 5623
accuracy 0.99 11220
macro avg 0.99 0.99 0.99 11220
weighted avg 0.99 0.99 0.99 11220
'''
# Per-feature importances; order follows x_col.
model.feature_importances_
# array([0.20234667, 0.13226774, 0.35807944, 0.20498742, 0.10231872])
import matplotlib.pyplot as plt
plt.barh(x_col, model.feature_importances_ )
'''
plant1_train_tem_coil_loc1 >> 0.35807944
plant1_train_tem_out_loc1 >> 0.20498742
plant1_train_tem_in_loc1 >> 0.20234667
'''
#######################################################
## sampling_strategy=0.5: minority resampled to half the majority count ##
sm2 = SMOTE(0.5, k_neighbors=5, random_state=71)
X_data2, y_data2 = sm2.fit_sample(X, y)
X_data2.shape # (28050, 5)
y_data2.shape
28050-18815 #9235
y_data2.value_counts()
'''
0.0 18700
1.0 9350
'''
| [
"kofj2000@gmail.com"
] | kofj2000@gmail.com |
e4a7ea7735a2385fa442e00ebb8e4cf93689e97b | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_212/ch50_2020_04_12_18_47_48_906877.py | ced60abc8d98108d8be1a72830cb9967001ff5bf | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | def junta_nome_sobrenome (a,b):
    # Pair names positionally: nomes[i] == [a[i], b[i]].
    nomes=[]
    i=0
    # Iterates over the first-name list; assumes len(b) >= len(a) -- TODO confirm.
    while i< len(a):
        junto=[a[i], b[i]]
        nomes.append(junto)
        i += 1
    return nomes
| [
"you@example.com"
] | you@example.com |
f8551e4ac31de6d0d2cc5bd77cce8f2222ffd7f3 | 1bf9f6b0ef85b6ccad8cb029703f89039f74cedc | /src/spring/azext_spring/vendored_sdks/appplatform/v2022_03_01_preview/aio/_configuration.py | 3d2b5f478c605e719aca28c70de7f710418b88c2 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | VSChina/azure-cli-extensions | a1f4bf2ea4dc1b507618617e299263ad45213add | 10b7bfef62cb080c74b1d59aadc4286bd9406841 | refs/heads/master | 2022-11-14T03:40:26.009692 | 2022-11-09T01:09:53 | 2022-11-09T01:09:53 | 199,810,654 | 4 | 2 | MIT | 2020-07-13T05:51:27 | 2019-07-31T08:10:50 | Python | UTF-8 | Python | false | false | 3,692 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class AppPlatformManagementClientConfiguration(Configuration):  # pylint: disable=too-many-instance-attributes
    """Configuration for AppPlatformManagementClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Gets subscription ID which uniquely identify the Microsoft Azure
     subscription. The subscription ID forms part of the URI for every service call. Required.
    :type subscription_id: str
    :keyword api_version: Api Version. Default value is "2022-03-01-preview". Note that overriding
     this default value may result in unsupported behavior.
    :paramtype api_version: str
    """
    def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None:
        super(AppPlatformManagementClientConfiguration, self).__init__(**kwargs)
        # Pop the override so the remaining kwargs can be forwarded to policies.
        api_version = kwargs.pop("api_version", "2022-03-01-preview")  # type: str
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = api_version
        # Default OAuth scope targets the public-cloud ARM endpoint.
        self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
        kwargs.setdefault("sdk_moniker", "mgmt-appplatform/{}".format(VERSION))
        self._configure(**kwargs)
    def _configure(self, **kwargs: Any) -> None:
        # Every pipeline policy may be supplied by the caller via kwargs;
        # otherwise the SDK default for that policy is constructed here.
        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get("authentication_policy")
        # Fall back to ARM challenge authentication when a credential was
        # given but no explicit authentication policy was provided.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
                self.credential, *self.credential_scopes, **kwargs
            )
| [
"noreply@github.com"
] | VSChina.noreply@github.com |
6ba35c246673106ae94ae50591a6cd0547b333af | 0178e6a705ee8aa6bb0b0a8512bf5184a9d00ded | /Sungjin/Math/n17626/17626.py | ce114cdc116413798b61bd317e01f0b80f045652 | [] | no_license | comojin1994/Algorithm_Study | 0379d513abf30e3f55d6a013e90329bfdfa5adcc | 965c97a9b858565c68ac029f852a1c2218369e0b | refs/heads/master | 2021-08-08T14:55:15.220412 | 2021-07-06T11:54:33 | 2021-07-06T11:54:33 | 206,978,984 | 0 | 1 | null | 2020-05-14T14:06:46 | 2019-09-07T14:23:31 | Python | UTF-8 | Python | false | false | 590 | py | import sys
input = sys.stdin.readline


def min_square_terms(n):
    """Return the minimum number of perfect squares (1..4) summing to n (n >= 1).

    By Lagrange's four-square theorem the answer is never more than 4.
    The original inline triple loop over square candidates was hard to
    verify and impossible to test; this version checks the three cases
    directly and uses Legendre's three-square theorem for the 3-vs-4 split.
    """
    from math import isqrt
    root = isqrt(n)
    if root * root == n:
        return 1
    # Two squares: n = a^2 + b^2 for some 1 <= a <= sqrt(n).
    for a in range(1, root + 1):
        rem = n - a * a
        b = isqrt(rem)
        if b * b == rem:
            return 2
    # Three squares are possible unless n has the form 4^k * (8m + 7)
    # (Legendre's three-square theorem); otherwise four are required.
    m = n
    while m % 4 == 0:
        m //= 4
    return 4 if m % 8 == 7 else 3


if __name__ == '__main__':
    n = int(input())
    print(min_square_terms(n))
"comojin1994@gmail.com"
] | comojin1994@gmail.com |
a723e981b85752bf424cb2f10500a05148ebd07f | 76e931912629c37beedf7c9b112b53e7de5babd7 | /1-mouth01/day06/exe03.py | 2edcd5342b7f4bc8001dc0ad5d233dcae9b416ca | [
"Apache-2.0"
] | permissive | gary-gggggg/gary | c59ac21d8e065f296ff986d11a0e4cbf186a1bc4 | d8ba30ea4bc2b662a2d6a87d247f813e5680d63e | refs/heads/main | 2023-02-23T06:54:34.500683 | 2021-02-01T10:17:02 | 2021-02-01T10:17:02 | 334,905,744 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | """在终端中打印香港的现有人数
在终端中打印上海的新增和现有人数
新疆新增人数增加 1
"""
# COVID-19 regional statistics: daily new cases, currently-active cases
# ("now have"), cumulative total, cured count, and deaths ("死亡").
# Fix vs. original: "region": "hk" had been copy-pasted into all three
# dicts; shinfo and xjinfo now carry their own region codes.
hkinfo = {"region": "hk", "new": 15, "now have": 39, "total": 4801,
          "cured": 4320, "死亡": 88}
shinfo = {"region": "sh", "new": 6, "now have": 61, "total": 903,
          "cured": 835, "死亡": 7}
xjinfo = {"region": "xj", "new": 0, "now have": 49, "total": 902,
          "cured": 850, "死亡": 3}
# Hong Kong: active case count.
print(hkinfo["now have"])
# Shanghai: new and active case counts.
print(shinfo["new"])
print(shinfo["now have"])
# Xinjiang: one newly reported case, then show the whole record.
xjinfo["new"] = 1
print(xjinfo)
"673248932@qq.com"
] | 673248932@qq.com |
2fa9bde1a5bb7a7c232b3647aeecd183a2c840f8 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_299/ch39_2020_04_19_01_30_42_281299.py | 02bfb76937bd86b9878c58fa8cf5282fe1d1fcc3 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | m = 2
# Longest Collatz ("hailstone") chain for starting values 2..1000.
# Fix vs. original: the term counter was re-initialized to 1 on every
# iteration of the inner while-loop, so every chain length came out as 2
# and the script always printed 2. It is now initialized once per m.
termomax = 1
numrespon = 1
termos_n_p_m = {}
for m in range(2, 1001):
    n = m
    termos = 1  # the starting value itself is the first term of the chain
    while n != 1:
        if n % 2 == 0:
            n //= 2  # integer division keeps n an int (original used float /)
        else:
            n = 3 * n + 1
        termos += 1
    termos_n_p_m[m] = termos
# Pick the starting value with the longest chain (first one wins on ties).
for num, termos in termos_n_p_m.items():
    if termos > termomax:
        termomax = termos
        numrespon = num
print(numrespon)
| [
"you@example.com"
] | you@example.com |
6060e0d04713e19612bcae4d6aefb8ec0cc87fd5 | 0aa0f7c36cab7580a33383ff07a1f1434811ea96 | /gzbj/optimus_2.1/optimus/backend/myBluePrint/ericic_v2/base_dao/cpu_layout_dao.py | b2ee6d955f879b4e41400b5a7e03ef4d36c1ebbc | [] | no_license | yanliangchen/ware_house | a1146b23e76bce39be67e9d65a240270e0f08c10 | 44780726924f9a398fb6197645b04cdcfb0305e7 | refs/heads/main | 2023-06-15T13:52:45.890814 | 2021-07-20T10:09:22 | 2021-07-20T10:09:22 | 387,731,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,473 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : cpu_layout_dao.py
# @Author: gaofzhan
# @Email: gaofeng.a.zhang@ericssoin.com
# @Date : 2020/12/21 11:04
# @Desc :
from sqlalchemy import and_
from backend.Model.connection import SESSION
from backend.myBluePrint.ericic_v2.model.nova_service_table import NovaServiceModel
from backend.myBluePrint.ericic_v2.model.nova_table import NovaModel
from backend.myBluePrint.ericic_v2.model.data_center_table import DataCenterModel
class CpuLayoutDao:
    """Read-only database lookups for data-center, host, and VM records."""

    @classmethod
    def get_dc_host_info(cls, cid, host_id):
        """Fetch the data-center row for ``cid`` and the nova-service (host)
        row for ``host_id`` within that data center.

        Returns a dict with keys ``dc_entity`` and ``host_entity``; either
        value is None when no matching row exists.
        """
        session = SESSION()
        try:
            dc = session.query(DataCenterModel).filter(
                DataCenterModel.id == cid).one_or_none()
            host = session.query(NovaServiceModel).filter(
                and_(NovaServiceModel.dc_id == cid,
                     NovaServiceModel.id == host_id)).one_or_none()
        finally:
            # Always release the session, even if a query raises.
            session.close()
        return dict(dc_entity=dc, host_entity=host)

    @classmethod
    def get_vm_info(cls, cid, instance_name_list):
        """Map each instance name in ``instance_name_list`` to its NovaModel
        row (or None) within data center ``cid``.
        """
        session = SESSION()
        try:
            return {
                name: session.query(NovaModel).filter(
                    and_(NovaModel.dc_id == cid,
                         NovaModel.instance_name == name)).one_or_none()
                for name in instance_name_list
            }
        finally:
            session.close()
| [
"yanliang.li@ericsson.com"
] | yanliang.li@ericsson.com |
a2b0931d4d13460355caaeefdee16c122ece5713 | 46ae8264edb9098c9875d2a0a508bc071201ec8b | /res/scripts/client/gui/scaleform/daapi/view/metaretraincrewwindowmeta.py | 6fbc9747b2f0ba83a498c613a60f73314c7e2603 | [] | no_license | Difrex/wotsdk | 1fc6156e07e3a5302e6f78eafdea9bec4c897cfb | 510a34c67b8f4c02168a9830d23f5b00068d155b | refs/heads/master | 2021-01-01T19:12:03.592888 | 2016-10-08T12:06:04 | 2016-10-08T12:06:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,437 | py | # Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/RetrainCrewWindowMeta.py
from gui.Scaleform.framework.entities.abstract.AbstractWindowView import AbstractWindowView
class RetrainCrewWindowMeta(AbstractWindowView):
"""
DO NOT MODIFY!
Generated with yaml.
__author__ = 'yaml_processor'
@extends AbstractWindowView
"""
def submit(self, operationId):
self._printOverrideError('submit')
def changeRetrainType(self, retrainTypeIndex):
self._printOverrideError('changeRetrainType')
def as_setCrewDataS(self, data):
"""
:param data: Represented by RetrainCrewBlockVO (AS)
"""
if self._isDAAPIInited():
return self.flashObject.as_setCrewData(data)
def as_setVehicleDataS(self, data):
"""
:param data: Represented by RetrainVehicleBlockVO (AS)
"""
if self._isDAAPIInited():
return self.flashObject.as_setVehicleData(data)
def as_setCrewOperationDataS(self, data):
"""
:param data: Represented by RetrainCrewOperationVO (AS)
"""
if self._isDAAPIInited():
return self.flashObject.as_setCrewOperationData(data)
def as_setAllCrewDataS(self, data):
"""
:param data: Represented by RetrainCrewBlockVOBase (AS)
"""
if self._isDAAPIInited():
return self.flashObject.as_setAllCrewData(data) | [
"m4rtijn@gmail.com"
] | m4rtijn@gmail.com |
b0586cf5ec5ec204813d6e8f17ba8d4d356447cc | 314d628f165973451977f31e3860d015af00b4de | /dictionary/BaseSearcher.py | b1384d90228eae3dd638e19df05ad6f53ef08ef5 | [] | no_license | michaelliu03/py-seg | c76a2788ea1dbbf415825bb094cc0776d1873ea4 | 0230e32f0c066ad329a7e972166a61a7a7979569 | refs/heads/master | 2020-06-20T01:45:53.060309 | 2019-08-29T00:14:03 | 2019-08-29T00:14:03 | 196,948,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | #!/usr/bin/env python
#-*-coding:utf-8-*-
# @File:Segment.py
# @Author: Michael.liu
# @Date:2019/2/12
# @Desc: NLP Segmentation ToolKit - Hanlp Python Version
from abc import ABCMeta,abstractmethod
class BaseSearcher(object):
    """Base class for dictionary-driven tokenizers that walk a text buffer.

    NOTE(review): ABCMeta is imported at module level but never installed as
    this class's metaclass, so @abstractmethod on next_item is not actually
    enforced at instantiation time -- confirm whether that is intentional.
    """
    def __init__(self):
        # Characters of the text to be segmented.
        self.c = []
        # Start position of the substring currently being processed
        # (everything before this offset has already been segmented).
        self.offset = int()
    def init1(self, c):
        # Bind an already-decoded character sequence; returns self for chaining.
        self.c = c
        return self
    def init2(self, text):
        # NOTE(review): str.decode() exists on Python 2 str / Python 3 bytes
        # only; under Python 3 this raises AttributeError for str input --
        # verify the intended runtime.
        return self.init1(text.decode())
    @abstractmethod
    def next_item(self):
        """
        Produce the next segmented word.
        :return:
        """
        pass
    def getOffset(self):
        """
        Return the current offset into the text.
        :return:
        """
        return self.offset
"liuyu5@liepin.com"
] | liuyu5@liepin.com |
class Solution(object):
    def exist(self, board, word):
        """
        :type board: List[List[str]]
        :type word: str
        :rtype: bool
        """
        # Launch a depth-first search from every cell until one spells word.
        return any(
            self.findNext(row, col, board, word)
            for row in range(len(board))
            for col in range(len(board[0]))
        )

    def findNext(self, i, j, board, word):
        # Consume word[0] at (i, j), then recurse on the four neighbours
        # with the remaining suffix. An empty suffix means a full match.
        if len(word) == 0:
            return True
        inside = 0 <= i < len(board) and 0 <= j < len(board[0])
        if not inside or board[i][j] != word[0]:
            return False
        saved = board[i][j]
        board[i][j] = "#"  # mark visited so a cell is not reused in one path
        found = any(
            self.findNext(i + di, j + dj, board, word[1:])
            for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1))
        )
        board[i][j] = saved  # restore before unwinding
        return found
| [
"noreply@github.com"
] | Iansdfg.noreply@github.com |
9d7f2e04641bfbd7f08ff5845fc6ef11252821f2 | bf25182c7288ed020d323635619d801f6544a196 | /GreatestExampleusingelif.py | a7ef9658c068c1775f0e151f1f063eb417528e5f | [] | no_license | shivaconceptsolution/pythononlineclass | d84c19f9730e29738f788099a72c1b0bdf0f2f30 | 03b6b729140450c0ea5f6849b2882334b1f8546b | refs/heads/master | 2020-05-23T22:21:44.122672 | 2019-06-01T07:38:26 | 2019-06-01T07:38:26 | 186,972,067 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | a= int(input("enter first number")) #5
def greatest_label(a, b, c, d):
    """Return the label ('a'..'d') of the greatest of four numbers.

    Ties fall through to later branches exactly like the original
    elif-chain (e.g. a == b == max yields 'b'), so script output is
    unchanged; extracting the function only makes the logic testable.
    """
    if a > b and a > c and a > d:
        return "a"
    if b > c and b > d:
        return "b"
    if c > d:
        return "c"
    return "d"


if __name__ == "__main__":
    # Preserve the original interactive behavior when run as a script.
    a = int(input("enter first number"))  # 5
    b = int(input("enter second number"))  # 7
    c = int(input("enter third number"))  # 19
    d = int(input("enter fourth number"))  # 45
    print(greatest_label(a, b, c, d))
| [
"noreply@github.com"
] | shivaconceptsolution.noreply@github.com |
e7988e0c5bd222216752a575b4d3569b990cdd64 | 4e926970702605867b6c57635de09944d1edd7ef | /settings.py | dc8d4e7809aa0b6d435393fc69d2fd80244c6c47 | [] | no_license | kod3r/djangopatterns.com | 79bc4eb7892926e225a73639797c676eb5fd3918 | e05e47668ec15aeb2e3781598f86dd27714af5af | refs/heads/master | 2021-01-24T04:29:31.637764 | 2011-08-13T00:46:22 | 2011-08-13T00:46:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,203 | py | # Django settings for project project.
import calloway
import os
import sys
CALLOWAY_ROOT = os.path.abspath(os.path.dirname(calloway.__file__))
sys.path.insert(0, os.path.join(CALLOWAY_ROOT, 'apps'))
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(PROJECT_ROOT, 'apps'))
sys.path.insert(0, os.path.join(PROJECT_ROOT, 'lib'))
SITE_ROOT = PROJECT_ROOT
try:
from local_settings import DEBUG as LOCAL_DEBUG
DEBUG = LOCAL_DEBUG
except ImportError:
DEBUG = False
TEMPLATE_DEBUG = DEBUG
from calloway.settings import *
ADMINS = (
('coordt', 'webmaster@djangopatterns.com'),
)
MANAGERS = ADMINS
DEFAULT_FROM_EMAIL='webmaster@djangopatterns.com'
SERVER_EMAIL='webmaster@djangopatterns.com'
SECRET_KEY = 'ipviu=n(t&27lxc+-a=nuoiw_1pn0gmik%=%c2nr@upyeu=gv_'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'dev.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
TIME_ZONE = 'America/New_York'
LANGUAGE_CODE = 'en-us'
USE_I18N = False
MIDDLEWARE_CLASSES = (
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.middleware.http.ConditionalGetMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
)
CACHE_MIDDLEWARE_ANONYMOUS_ONLY = False
CACHE_MIDDLEWARE_KEY_PREFIX = 'djptrns'
try:
from local_settings import MEDIA_URL_PREFIX
except ImportError:
MEDIA_URL_PREFIX = "/media/"
try:
from local_settings import MEDIA_ROOT_PREFIX
except ImportError:
MEDIA_ROOT_PREFIX = os.path.join(PROJECT_ROOT, 'media')
try:
from local_settings import MEDIA_ROOT
except ImportError:
MEDIA_ROOT = os.path.join(MEDIA_ROOT_PREFIX, 'uploads')
try:
from local_settings import STATIC_ROOT
except ImportError:
STATIC_ROOT = os.path.join(MEDIA_ROOT_PREFIX, 'static')
MEDIA_URL = '%suploads/' % MEDIA_URL_PREFIX
STATIC_URL = "%sstatic/" % MEDIA_URL_PREFIX
MMEDIA_DEFAULT_STORAGE = 'media_storage.MediaStorage'
MMEDIA_IMAGE_UPLOAD_TO = 'image/%Y/%m/%d'
AUTH_PROFILE_MODULE = ''
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates'),
) + CALLOWAY_TEMPLATE_DIRS
CACHE_BACKEND = 'memcached://localhost:11211/'
INSTALLED_APPS = APPS_DJANGO_BASE + \
APPS_MESSAGES + \
APPS_ADMIN + \
APPS_STAFF + \
APPS_REVERSION + \
APPS_STORIES + \
APPS_CALLOWAY_DEFAULT + \
APPS_TINYMCE + \
APPS_CACHING + \
APPS_MPTT + \
APPS_CATEGORIES + \
APPS_MEDIA + \
APPS_REGISTRATION + \
APPS_REVERSION + \
APPS_TINYMCE + (
'viewpoint',
'staticfiles',
'calloway',
'debug_toolbar',
'hiermenu',
'google_analytics',
'robots',
'native_tags',
'positions',
'doc_builder',
'django.contrib.redirects',
'disqus',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader'
)
##########################
# Viewpoint settings
##########################
VIEWPOINT_SETTINGS = {
'ENTRY_RELATION_MODELS': [
'massmedia.audio', 'massmedia.image', 'massmedia.document',
'massmedia.video', 'massmedia.collection',
'viewpoint.entry', ],
'DEFAULT_STORAGE': 'media_storage.MediaStorage',
'AUTHOR_MODEL': 'staff.StaffMember',
'USE_CATEGORIES': True,
'USE_TAGGING': False,
'STAFF_ONLY': True,
'USE_APPROVAL': False,
'DEFAULT_BLOG': 'default',
'MONTH_FORMAT': r"%b",
'URL_REGEXES': {
'blog': r'^(?P<blog_slug>[-\w]+)/$',
'year': r'^(?P<blog_slug>[-\w]+)/(?P<year>\d{4})/$',
'month': r'^(?P<blog_slug>[-\w]+)/(?P<year>\d{4})/(?P<month>%b)/$',
'day': r'^(?P<blog_slug>[-\w]+)/(?P<year>\d{4})/(?P<month>%b)/(?P<day>\d{1,2})/$',
'entry': r'^(?P<blog_slug>[-\w]+)/(?P<year>\d{4})/(?P<month>%b)/(?P<day>\d{1,2})/(?P<slug>[-\w]+)/$',
}
}
##########################
# Massmedia settings
##########################
MMEDIA_DEFAULT_STORAGE = 'media_storage.MediaStorage'
MMEDIA_IMAGE_UPLOAD_TO = 'image/%Y/%m/%d'
MASSMEDIA_STORAGE = {
'DEFAULT': 'media_storage.MediaStorage',
'IMAGE': 'media_storage.MediaStorage',
'VIDEO': 'media_storage.MediaStorage',
'AUDIO': 'media_storage.MediaStorage',
'FLASH': 'media_storage.MediaStorage',
'DOC': 'media_storage.MediaStorage',
}
MASSMEDIA_UPLOAD_TO = {
'IMAGE': 'image/%Y/%m/%d'
}
MASSMEDIA_SERVICES = {
'YOUTUBE': {
'EMAIL': 'twtweb@gmail.com',
'USERNAME': 'washingtontimes',
'PASSWORD': 'timesweb10',
}
}
##########################
# Tiny MCE settings
##########################
TINYMCE_JS_URL = '/media/static/js/tiny_mce/tiny_mce.js'
TINYMCE_JS_ROOT = '/media/static/js/tiny_mce'
TINYMCE_DEFAULT_CONFIG = {
'theme': "advanced",
'relative_urls': False,
'plugins': "safari,paste,advimage,advlink,preview,fullscreen,searchreplace",
'theme_advanced_toolbar_location' : "top",
'theme_advanced_toolbar_align' : "left",
'theme_advanced_buttons1' : "bold,italic,underline,strikethrough,blockquote,justifyleft,justifycenter,justifyright|,bullist,numlist,|,link,unlink,|,charmap,image,media,pastetext,pasteword,|,code,preview",
'theme_advanced_buttons2' : "",
'theme_advanced_buttons3' : "",
'theme_advanced_statusbar_location' : "bottom",
'width': "600",
'height': "600",
'gecko_spellcheck': True,
'valid_elements' : "@[id|class|title|dir<ltr?rtl|lang|xml::lang|onclick|"
"ondblclick|onmousedown|onmouseup|onmouseover|onmousemove|onmouseout|"
"onkeypress|onkeydown|onkeyup],"
"a[rel|rev|charset|hreflang|tabindex|accesskey|type|name|href|target|"
"onfocus|onblur],"
"strong/b,em/i,strike,u,#p,-ol[type|compact],-ul[type|compact],-li,br,"
"img[longdesc|usemap|src|border|alt=|title|hspace|vspace|width|height|align],"
"-sub,-sup,-blockquote,"
"-table[border=0|cellspacing|cellpadding|width|frame|rules|height|"
"align|summary|bgcolor|background|bordercolor],"
"-tr[rowspan|width|height|align|valign|bgcolor|background|bordercolor],"
"tbody,thead,tfoot,#td[colspan|rowspan|width|height|align|valign|"
"bgcolor|background|bordercolor|scope],"
"#th[colspan|rowspan|width|height|align|valign|scope],"
"caption,-div,-span,-code,-pre,address,-h1,-h2,-h3,-h4,-h5,-h6,"
"hr[size|noshade]|size|color],dd,dl,dt,cite,abbr,acronym,"
"del[datetime|cite],ins[datetime|cite],"
"object[classid|width|height|codebase|*],"
"param[name|value|_value],embed[type|width|height|src|*],"
"script[src|type],map[name],area[shape|coords|href|alt|target],"
"bdo,button,col[align|char|charoff|span|valign|width],"
"colgroup[align|char|charoff|span|valign|width],dfn,fieldset,"
"form[action|accept|accept-charset|enctype|method],"
"input[accept|alt|checked|disabled|maxlength|name|readonly|size|src|type|value],"
"kbd,label[for],legend,noscript,optgroup[label|disabled],"
"option[disabled|label|selected|value],q[cite],samp,"
"select[disabled|multiple|name|size],small,"
"textarea[cols|rows|disabled|name|readonly],tt,var,big,"
"iframe[align<bottom?left?middle?right?top|frameborder|height"
"|longdesc|marginheight|marginwidth|name|scrolling<auto?no?yes|src|style"
"|width]",
}
NATIVE_TAGS = (
'viewpoint.template',
'native_tags.contrib.comparison',
'native_tags.contrib.generic_content',
)
ADMIN_TOOLS_THEMING_CSS = 'calloway/admin/css/theming.css'
ADMIN_TOOLS_MENU = 'menu.CustomMenu'
TINYMCE_JS_URL = '%scalloway/js/tiny_mce/tiny_mce.js' % STATIC_URL
TINYMCE_JS_ROOT = os.path.join(STATIC_ROOT, 'js/tiny_mce')
DOC_GIT = "git://github.com/coordt/djangopatterns.git"
DOC_SOURCE = os.path.join(PROJECT_ROOT, 'doc_src')
STATICFILES_FINDERS = (
'staticfiles.finders.FileSystemFinder',
'staticfiles.finders.AppDirectoriesFinder',
'staticfiles.finders.LegacyAppDirectoriesFinder',
)
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
GOOGLE_ANALYTICS_TRACK_PAGE_LOAD_TIME = True
VIEWPOINT_SETTINGS = {
'DEFAULT_BLOG': 'default'
}
TINYMCE_JS_ROOT = os.path.join(STATIC_ROOT, 'js', 'tiny_mce')
TINYMCE_JS_URL = "%sjs/tiny_mce/tiny_mce.js" % STATIC_URL
try:
from local_settings import *
except ImportError:
pass
| [
"coordt@washingtontimes.com"
] | coordt@washingtontimes.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.