blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9ee34a36c6cbeac87b5646c5dbbb11eab6be70e7 | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/xia2/Wrappers/XDS/XDSIdxrefHelpers.py | 1ece3814e6d03fb1f8d8c1c98da05f3ab386cc0f | [
"BSD-3-Clause"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 4,598 | py | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function
from xia2.Experts.LatticeExpert import ApplyLattice
def _parse_idxref_lp_distance_etc(lp_file_lines):
"""Parse the LP file for refined distance, beam centre and so on..."""
beam = None
diatance = None
i = 0
while i < len(lp_file_lines):
line = lp_file_lines[i]
i += 1
if "DETECTOR COORDINATES" in line and "DIRECT BEAM" in line:
beam = tuple(map(float, line.split()[-2:]))
if "CRYSTAL TO DETECTOR" in line:
distance = float(line.split()[-1])
if distance < 0:
distance *= -1
return beam, distance
def _parse_idxref_index_origin(lp_file_lines):
"""Parse the LP file for the possible index origin etc."""
origins = {}
i = 0
while i < len(lp_file_lines):
line = lp_file_lines[i]
i += 1
if "INDEX_" in line and "QUALITY" in line and "DELTA" in line:
while not "SELECTED" in line:
line = lp_file_lines[i]
i += 1
try:
hkl = tuple(map(int, line.split()[:3]))
quality, delta, xd, yd = tuple(map(float, line.split()[3:7]))
origins[hkl] = quality, delta, xd, yd
except Exception:
pass
return origins
raise RuntimeError("should never reach this point")
def _parse_idxref_lp(lp_file_lines):
    """Parse the list of lines from idxref.lp.

    Returns a dict mapping lattice character number (int) to a dict with
    keys 'lattice', 'fit', 'cell' (constrained cell from ApplyLattice),
    'mosaic' and 'reidx'.
    """
    lattice_character_info = {}
    i = 0
    mosaic = 0.0
    while i < len(lp_file_lines):
        line = lp_file_lines[i]
        i += 1

        # get the mosaic information
        if "CRYSTAL MOSAICITY" in line:
            mosaic = float(line.split()[-1])

        # get the lattice character information - coding around the
        # non-standard possibility of mI, by simply ignoring it!
        # bug # 2355
        if "CHARACTER LATTICE OF FIT a b c" in line:
            # NOTE: j = i + 1 deliberately skips one row after the header
            # (i already points past the header line) before the table
            # starts -- presumably a separator row; confirm against a
            # real IDXREF.LP.
            # example line (note potential lack of white space between b and c cell parameters):
            #  9 hR 999.0 3966.3 5324.610528.6 85.6 64.6 132.0
            j = i + 1
            # Rows are fixed-column formatted, so slices (not split) are
            # used; '*' markers are blanked first.  Loop ends at the
            # first empty line.
            while lp_file_lines[j].strip() != "":
                l = lp_file_lines[j].replace("*", " ")
                character = int(l[:12].strip())
                lattice = l[12:23].strip()
                fit = float(l[23:32].strip())
                # Six cell parameters in 7/6-character wide columns
                # (a, b, c, alpha, beta, gamma).
                cell = tuple(
                    float(c)
                    for c in (
                        l[32:39],
                        l[39:46],
                        l[46:53],
                        l[53:59],
                        l[59:65],
                        l[65:71],
                    )
                )

                # FIXME need to do something properly about this...
                # bug # 2355
                if lattice == "mI":
                    j += 1
                    continue

                # reindex_card = tuple(map(int, record[9:]))
                reindex_card = ()  # XXX need example where this is present in the IDXREF.LP

                # Constrain the raw cell to the symmetry of the lattice.
                constrained_cell = ApplyLattice(lattice, cell)[0]

                lattice_character_info[character] = {
                    "lattice": lattice,
                    "fit": fit,
                    "cell": constrained_cell,
                    "mosaic": mosaic,
                    "reidx": reindex_card,
                }
                j += 1

    return lattice_character_info
def _parse_idxref_lp_subtree(lp_file_lines):
subtrees = {}
i = 0
while i < len(lp_file_lines):
line = lp_file_lines[i]
i += 1
if line.split() == ["SUBTREE", "POPULATION"]:
j = i + 1
line = lp_file_lines[j]
while line.strip():
subtree, population = tuple(map(int, line.split()))
subtrees[subtree] = population
j += 1
line = lp_file_lines[j]
return subtrees
def _parse_idxref_lp_quality(lp_file_lines):
fraction = None
rmsd = None
rmsphi = None
for record in lp_file_lines:
if "OUT OF" in record and "SPOTS INDEXED" in record:
fraction = float(record.split()[0]) / float(record.split()[3])
if "STANDARD DEVIATION OF SPOT POSITION" in record:
rmsd = float(record.split()[-1])
if "STANDARD DEVIATION OF SPINDLE POSITION" in record:
rmsphi = float(record.split()[-1])
return fraction, rmsd, rmsphi
| [
"jorge7soccer@gmail.com"
] | jorge7soccer@gmail.com |
154c06a1de6e9daa5b49b9d632fe6d9e1f3aca12 | f549367629d0a7cb04a7b39e5e1231a0cb9facd1 | /meter_mount/cnc/drill.py | a8ccf00b40ca4744656a641e5fdee5ffc29b43f0 | [] | no_license | iorodeo/lasercutter | 9d0a64e549a688eb7efa93d765dab5ed1b753110 | f99dddd183bdd200b2367ef11b4b72fefe82bbbe | refs/heads/master | 2022-11-05T06:59:56.153523 | 2016-01-17T02:25:38 | 2016-01-17T02:25:38 | 273,791,961 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | from __future__ import print_function
import os
import sys
from py2gcode import gcode_cmd
from py2gcode import cnc_dxf
# Machining parameters --------------------------------------------------
feedrate = 50.0  # drilling feed rate
fileName = 'meter_mount.dxf'  # input DXF drawing
stockThickness = 0.25  # stock thickness (drawing units - confirm inches)
drillMargin = 0.125  # extra depth past the stock to break through cleanly
startZ = 0.0  # top surface of the stock
stopZ = -(stockThickness + drillMargin)  # final drill depth
safeZ = 0.3  # retract height for rapid moves between holes
stepZ = 0.05  # peck-drilling depth increment
startDwell = 0.5  # dwell before starting each hole

# Build the g-code program: standard preamble, then the feed rate.
prog = gcode_cmd.GCodeProg()
prog.add(gcode_cmd.GenericStart())
prog.add(gcode_cmd.Space())
prog.add(gcode_cmd.FeedRate(feedrate))

# Drill at every CIRCLE entity found on the 4-40 through-hole layer.
param = {
    'fileName' : fileName,
    'layers' : ['4-40_THROUGH_HOLE'],
    'dxfTypes' : ['CIRCLE'],
    'startZ' : startZ,
    'stopZ' : stopZ,
    'safeZ' : safeZ,
    'stepZ' : stepZ,
    'startDwell' : startDwell,
}
drill = cnc_dxf.DxfDrill(param)
prog.add(drill)

# Program epilogue, then write the .ngc file named after this script.
prog.add(gcode_cmd.Space())
prog.add(gcode_cmd.End(),comment=True)

baseName, dummy = os.path.splitext(__file__)
fileName = '{0}.ngc'.format(baseName)
print('generating: {0}'.format(fileName))
prog.write(fileName)
| [
"will@iorodeo.com"
] | will@iorodeo.com |
d6e362aeef9e06deff41345d07bc7e077179895f | c6ed09339ff21fa70f154f34328e869f0dd8e394 | /python/PIL/img_resize.py | df58e1a603cfe03dbd1eaf110ceb6417453cdd19 | [] | no_license | fits/try_samples | f9b15b309a67f7274b505669db4486b17bd1678b | 0986e22d78f35d57fe1dd94673b68a4723cb3177 | refs/heads/master | 2023-08-22T14:35:40.838419 | 2023-08-07T12:25:07 | 2023-08-07T12:25:07 | 642,078 | 30 | 19 | null | 2022-12-28T06:31:24 | 2010-05-02T02:23:55 | Java | UTF-8 | Python | false | false | 177 | py |
import sys
from PIL import Image
# Command-line arguments: input image, target width, target height,
# output path.  No validation: wrong argument count raises IndexError.
img_file = sys.argv[1]
w = int(sys.argv[2])  # target width in pixels
h = int(sys.argv[3])  # target height in pixels
dest_file = sys.argv[4]

# Resize to exactly (w, h) -- aspect ratio is NOT preserved -- and save.
Image.open(img_file).resize((w, h)).save(dest_file)
| [
"wadays_wozx@nifty.com"
] | wadays_wozx@nifty.com |
bb624387b9809c5be48f30160e3823420ebc7d8c | 11763b1150a3a05db89c13dcd6152f8fcca87eaa | /designs/nonlinear/permutation/multipermutation.py | 4eadfbacef2804804aa094c272ce232c834a6bb7 | [] | no_license | acad2/crypto | 343c32fa25aaec73e169290579fc3d02c4b226f6 | cb283df4101fcd618a0478a0018273f00d0734ae | refs/heads/master | 2021-08-19T06:36:26.068033 | 2017-11-25T00:41:03 | 2017-11-25T00:41:03 | 113,048,326 | 2 | 0 | null | 2017-12-04T13:49:02 | 2017-12-04T13:49:01 | null | UTF-8 | Python | false | false | 1,866 | py | from crypto.utilities import random_bytes
# NOTE: despite the historical "64" in the mask name, words are 32 bits
# wide (the names are kept because they appear in public default args).
WORD_SIZE_BITS = 32
MASK64 = (2 ** WORD_SIZE_BITS) - 1
STATE_LENGTH = 8


def generate_state(length=STATE_LENGTH):
    """Return the identity state: the integers 0 .. length - 1."""
    return range(0, length)


def generate_key(length=STATE_LENGTH, mask=MASK64):
    """Return a random affine key: ``length`` multipliers (forced odd, so
    they are invertible mod 2**WORD_SIZE_BITS) followed by ``length``
    addends, each reduced by ``mask``."""
    multipliers = [(byte & mask) | 1 for byte in bytearray(random_bytes(length))]
    addends = [byte & mask for byte in bytearray(random_bytes(length))]
    return multipliers + addends


def permute_columns(state, key, mask=MASK64):
    """Apply, in place, the affine map
    state[i] = (state[i] * key[i] + key[STATE_LENGTH + i]) & mask."""
    state[:] = [
        ((word * key[column]) + key[STATE_LENGTH + column]) & mask
        for column, word in enumerate(state)
    ]
def permute_row(state, key):
    """For every index pair (i, j) with i < j, conditionally exchange the
    bits of state[i] and state[j] according to ``key`` (in place)."""
    size = len(state)
    for i in range(size - 1):
        for j in range(i + 1, size):
            state[i], state[j] = choice_swap(key, state[i], state[j])


def choice(a, b, c):
    """Bitwise select: for each bit position take ``b`` where ``a`` has a
    1 bit and ``c`` where it has a 0 bit (the SHA-2 "Ch" function)."""
    return c ^ (a & (b ^ c))


def choice_swap(key, word1, word2):
    """Exchange, bit-by-bit, the bits of ``word1`` and ``word2`` selected
    by the ZERO bits of ``key``; an all-ones key leaves both unchanged."""
    selected_first = choice(key, word1, word2)
    selected_second = choice(key, word2, word1)
    return selected_first, selected_second
# Composition of affine maps a*x + b (why repeated permute_columns rounds
# stay a single affine map):
#   a1*x + b1
#   a2*(a1*x + b1) + b2 = a1*a2*x + a2*b1 + b2
#   a3*(a1*a2*x + a2*b1 + b2) + b3 = a1*a2*a3*x + a2*a3*b1 + a3*b2 + b3
# Lazily created key shared by every call that does not pass its own key.
# Previously this was a ``key=generate_key()`` default argument, which drew
# from the RNG at import time (merely importing the module consumed
# entropy, and import failed if the RNG was unavailable).  The lazy cache
# keeps the old "one shared key per process" semantics.
_DEFAULT_KEY = None


def permutation(state, key=None):
    """Return ``state`` permuted under ``key``; the input is not modified.

    The row permutation shifts each row relative to the previous one:

        permutation /\  4 1 2 3
             |          3 4 1 2
             \/         2 3 4 1
                        -------
        permutation <-> 1 2 3 4

    Args:
        state: iterable of words.
        key: optional key as produced by :func:`generate_key`; when
            omitted a single process-wide random key is generated on
            first use and reused for subsequent calls.
    """
    global _DEFAULT_KEY
    if key is None:
        if _DEFAULT_KEY is None:
            _DEFAULT_KEY = generate_key()
        key = _DEFAULT_KEY
    state = list(state)
    # permute_columns(state, key)  # disabled in the original design
    permute_row(state, key)
    return state
def visualize_permutation():
    """Render the permutation with the project's 8x32 visualizer."""
    # Local import: the visualization dependency is only needed when this
    # module is run directly.
    from crypto.analysis.visualization import test_8x32_function
    test_8x32_function(lambda *args: permutation(args), generate_state())


if __name__ == "__main__":
    visualize_permutation()
| [
"python_pride@protonmail.com"
] | python_pride@protonmail.com |
07f4280c356519b82898efa367c3a2e25905248c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03033/s453850284.py | f13a451bed63db4a6bf67d8ade18d71786d676fd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | def main():
n,q = map(int, input().split())
tl = []
que = []
d = dict()
for _ in range(n):
s,t,x = map(int, input().split())
tl.append((s-x,1,x))
tl.append((t-x,0,x))
for _ in range(q):
t = int(input())
tl.append((t,2))
tl.sort()
wor = set()
wcur = 0
cur = -1
flg = 0
for x in tl:
if x[1] == 1:
wcur += 1
wor.add(x[2])
if cur < 0 or x[2] < cur:
cur = x[2]
flg = 0
elif x[1] == 0:
wcur -= 1
wor.remove(x[2])
if x[2] == cur:
flg = 1
if not wcur:
cur = -1
flg = 0
else:
if flg:
cur = min(wor)
flg = 0
print(cur)
if __name__ == "__main__":
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
78ece2afb6dc8d9e7775dd7cae8618c12045b454 | 49e1b436eaeb7064b674d611aa33d70ed8138cb5 | /examples/composing_pdf.py | e06a4c6f02e07f766bbd9ac4cf061e11f0852073 | [
"BSD-3-Clause"
] | permissive | aburke1605/zfit | ee810cf786b5121eee3cc2770d0d1b3c02ff86ac | d49fb5513b61b653cf0ca5b5720d4210862b2a70 | refs/heads/master | 2023-09-05T05:08:59.214839 | 2021-09-20T11:16:42 | 2021-09-20T11:16:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | # Copyright (c) 2020 zfit
import zfit
# create space: one observable "x" on the interval [-10, 10]
obs = zfit.Space("x", limits=(-10, 10))

# parameters: zfit.Parameter(name, initial_value, lower_limit, upper_limit)
mu = zfit.Parameter("mu", 1., -4, 6)          # Gaussian mean
sigma = zfit.Parameter("sigma", 1., 0.1, 10)  # Gaussian width
lambd = zfit.Parameter("lambda", -1., -5., 0) # exponential slope
frac = zfit.Parameter("fraction", 0.5, 0., 1.)  # mixture fraction of the Gaussian

# pdf creation: a Gaussian plus an exponential, mixed with weight `frac`
gauss = zfit.pdf.Gauss(mu=mu, sigma=sigma, obs=obs)
exponential = zfit.pdf.Exponential(lambd, obs=obs)
sum_pdf = zfit.pdf.SumPDF([gauss, exponential], fracs=frac)
| [
"mayou36@jonas.eschle.com"
] | mayou36@jonas.eschle.com |
ffb5aae78efbd94d9112b4913b0759fb644ffd30 | d50bf972c9e4321eb77aad8a0126b27d70432779 | /apps/person/api/attribute/views.py | e350811058631c777e41b4a0d4984b9b8ffe0072 | [] | no_license | PUYUP/kawalmedia | 1778b3473220ff64e2f5c998649fc0f637787976 | ffff74b94f111bb17d7a290ba57a13c63e32e5fa | refs/heads/master | 2022-10-20T19:36:45.413061 | 2019-11-07T08:02:31 | 2019-11-07T08:02:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,487 | py | from uuid import UUID
from itertools import chain
from django.db.models import F, Subquery, OuterRef
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
from django.utils.decorators import method_decorator
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.csrf import csrf_protect, ensure_csrf_cookie
from django.views.decorators.cache import never_cache
from django.contrib.contenttypes.models import ContentType
# THIRD PARTY
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.parsers import (
FormParser, FileUploadParser, MultiPartParser)
from rest_framework import status as response_status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import NotFound, NotAcceptable
# SERIALIZERS
from .serializers import AttributeSerializer, AttributeValueSerializer
# PERMISSIONS
from ..permissions import IsOwnerOrReject, IsEntityOwnerOrReject
# LOCAL UTILS
from ...utils.attributes import update_attribute_values
# GET MODELS FROM GLOBAL UTILS
from utils.validators import get_model
# Resolve the models through the project-level helper (keeps this module
# decoupled from direct model imports at app-loading time).
Attribute = get_model('person', 'Attribute')
AttributeValue = get_model('person', 'AttributeValue')
class AttributeApiView(viewsets.ViewSet):
    """ Get attribute options for persons
    Read only...

    Endpoints:
      list    -- attributes (with current values) for the authenticated
                 person, filtered by the ``identifiers`` query parameter.
      update  -- write attribute values for the authenticated person.
      destroy -- delete a single AttributeValue by its uuid.
    """
    lookup_field = 'uuid'
    permission_classes = (IsAuthenticated,)
    parser_class = (FormParser, FileUploadParser, MultiPartParser,)
    # Per-action permission overrides; anything not listed falls back to
    # ``permission_classes`` (see get_permissions).
    permission_action = {
        # Disable update if not owner
        'update': [IsOwnerOrReject],
        'partial_update': [IsOwnerOrReject],
        'destroy': [IsEntityOwnerOrReject],
    }

    def get_permissions(self):
        """
        Instantiates and returns
        the list of permissions that this view requires.
        """
        try:
            # return permission_classes depending on `action`
            return [permission() for permission in self.permission_action
                    [self.action]]
        except KeyError:
            # action is not set return default permission_classes
            return [permission() for permission in self.permission_classes]

    def list(self, request, format=None):
        """Return the requested attributes for request.user's person.

        ``identifiers`` is a comma-separated query parameter; attributes
        that already have a value for the person are annotated with that
        value, the remaining (unsecured) ones are appended bare.
        """
        context = {'request': self.request}
        identifiers = request.GET.get('identifiers', None)

        # Attributes
        if hasattr(request.user, 'person') and identifiers:
            person = getattr(request.user, 'person', None)
            identifiers = identifiers.split(',')

            # ContentType for the entity (model) the values attach to.
            entity_type = ContentType.objects.get_for_model(person)

            # Get roles from person
            roles = person.roles.filter(is_active=True) \
                .values_list('id', flat=True)

            # Attributes (by role) that already carry a value for this
            # person.
            queryset = Attribute.objects \
                .prefetch_related('option_group', 'content_type', 'roles') \
                .select_related('option_group') \
                .filter(
                    content_type=entity_type,
                    roles__in=roles,
                    identifier__in=identifiers,
                    attributevalue__object_id=person.pk) \
                .distinct()

            if queryset.exists():
                # Drop the identifiers already covered, so the second
                # query below only fetches the still-missing ones.
                for qs in queryset:
                    identifiers.remove(qs.identifier)

                # Annotate each attribute with its stored value column,
                # named after the field type (value_<field_type>).
                annotate = dict()
                for q in queryset:
                    field = 'value_' + q.field_type
                    if q.field_type == 'multi_option':
                        annotate[field] = F('attributevalue')
                    else:
                        annotate[field] = F('attributevalue__%s' % field)

                annotate['value_uuid'] = F('attributevalue__uuid')

                # Call value each field
                queryset = queryset.annotate(**annotate)

            # Here we get all attributes
            # But filter by empty attributevalue
            queryset_all = Attribute.objects \
                .prefetch_related('option_group', 'content_type', 'roles') \
                .select_related('option_group') \
                .filter(
                    content_type=entity_type,
                    roles__in=roles,
                    identifier__in=identifiers,
                    secured=False) \
                .distinct()

            # Combine two or more queryset
            queryset = list(chain(queryset, queryset_all))

            # JSON Api
            serializer = AttributeSerializer(
                queryset, many=True, context=context)
            return Response(serializer.data, status=response_status.HTTP_200_OK)
        raise NotAcceptable(detail=_("Data tidak valid."))

    # Update person attributes
    @method_decorator(csrf_protect)
    @transaction.atomic
    def update(self, request, uuid=None):
        """Update attribute values
        UUID used is Person identifier

        NOTE(review): the uuid is validated but never used to scope the
        update -- the person always comes from request.user; confirm
        whether matching uuid against the person was intended.
        """
        context = {'request': self.request}
        if type(uuid) is not UUID:
            try:
                uuid = UUID(uuid)
            except ValueError:
                raise NotFound()

        person = getattr(request.user, 'person', None)
        if person and request.data:
            # Append uploaded files so the helper can store them too.
            if request.FILES:
                setattr(request.data, 'files', request.FILES)

            # Delegate the actual writes to the shared helper.
            update_attribute_values(
                person, identifiers=None, values=request.data)

            # Return the most recently created value for this person.
            entity_type = ContentType.objects.get_for_model(person)
            attribute_value = AttributeValue.objects \
                .filter(object_id=person.pk, content_type=entity_type) \
                .order_by('date_created') \
                .last()

            serializer = AttributeValueSerializer(
                attribute_value, many=False, context=context)
            return Response(serializer.data, status=response_status.HTTP_200_OK)
        raise NotAcceptable()

    # Delete...
    @method_decorator(csrf_protect)
    @method_decorator(never_cache)
    @transaction.atomic
    def destroy(self, request, uuid=None):
        """uuid used uuid from attribute value

        Deleting a missing uuid is a silent no-op (still returns 204).
        NOTE(review): a 204 response should carry no body; DRF drops the
        ``detail`` payload here -- confirm clients do not rely on it.
        """
        queryset = AttributeValue.objects.filter(uuid=uuid)
        if queryset.exists():
            queryset.delete()
        return Response(
            {'detail': _("Berhasil dihapus.")},
            status=response_status.HTTP_204_NO_CONTENT)
| [
"hellopuyup@gmail.com"
] | hellopuyup@gmail.com |
b7270e1e061dbd90bf3c7a6898118fd3d3223cfd | 222dbb2f43dccbd4538ef76798a26457edffe07c | /utils/plot_utils.py | 87ac03b4a10ee2e0494afbd36ccfe9a6ea1c7221 | [] | no_license | MJHutchinson/PytorchBayes | 9699351822416deeb61e95a34653580fdfbbb5ae | e95a9bd308c595b9603bdfb799288a0ed50cc7c6 | refs/heads/master | 2020-04-09T18:39:57.643468 | 2019-01-15T16:06:05 | 2019-01-15T16:06:05 | 160,519,698 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,693 | py | import matplotlib.pyplot as plt
import matplotlib
import numpy as np
# plt.style.use('ggplot')
# matplotlib.rcParams['text.usetex'] = True
def plot_training_curves(input, val='accuracies', legend=None):
    """Plot the per-epoch ``val`` series of every result in ``input`` on a
    single axis, labelling each curve by its hyper-parameters."""
    _, axis = plt.subplots(1, 1)
    axis.set_xlabel('Epoch')
    axis.set_ylabel(val)
    axis.set_title(val)
    labels = [] if legend is None else legend
    for entry in input:
        axis.plot(entry['results'][val])
        labels.append(f'{entry["hidden_size"]} lr: {entry["lr"]} prior width: {entry["prior_var"]}')
    axis.legend(labels)
def plot_training_curves_rv(input, legend=None, rolling_av_len=5):
    """Plot rolling-average accuracy curves (window ``rolling_av_len``)
    for every run of every results dict in ``input``."""
    _, axis = plt.subplots(1, 1)
    axis.set_xlabel('Epoch')
    axis.set_ylabel('Accuracy')
    labels = [] if legend is None else legend
    for results in input:
        for key in results:
            series = results[key]['results']['accuracies']
            window = rolling_av_len
            smoothed = [
                sum(value / window for value in series[start:start + window])
                for start in range(len(series) - window)
            ]
            axis.plot(smoothed)
    axis.legend(labels)
def plot_cost_curves(*input, legend=None, key='rmse'):
    """Plot the per-epoch cost curve of every result in ``input``.

    Bug fix: the ``legend`` parameter used to be unconditionally
    overwritten with an empty list; a caller-supplied legend is now the
    starting label list (one ``key`` label is appended per curve), as
    the signature always advertised.
    """
    _, ax = plt.subplots(1, 1)
    ax.set_xlabel('Epoch')
    ax.set_ylabel('Cost')
    if legend is None:
        legend = []
    for results in input:
        result = results['results']
        ax.plot(result['costs'])
        legend.append(key)
    ax.legend(legend)
def plot_min_vs_first(input, val='costs', legend=None):
    """Scatter the first-epoch ``val`` against the minimum ``val`` reached,
    one point per result, plus a least-squares trend line."""
    _, axis = plt.subplots(1, 1)
    axis.set_xlabel(f'First epoch {val}')
    axis.set_ylabel(f'Minimum {val}')
    series_list = [result['results'][val] for result in input]
    firsts = [series[0] for series in series_list]
    bests = [min(series) for series in series_list]
    axis.scatter(firsts, bests)
    xs = np.unique(firsts)
    trend = np.poly1d(np.polyfit(firsts, bests, 1))
    axis.plot(xs, trend(xs))
    if legend is not None:
        axis.legend(legend)
def plot_min_vs_i(input, i, val='costs', legend=None):
    """Scatter the ``val`` at epoch ``i`` against the minimum ``val``
    reached, one point per result (no trend line)."""
    _, axis = plt.subplots(1, 1)
    axis.set_xlabel(f'Epoch {i+1} {val}')
    axis.set_ylabel(f'Minimum {val}')
    axis.set_title(f'Plot of epoch {i+1} {val} vs minimum {val}')
    at_epoch = []
    minima = []
    for result in input:
        series = result['results'][val]
        at_epoch.append(series[i])
        minima.append(min(series))
    axis.scatter(at_epoch, minima)
    if legend is not None:
        axis.legend(legend)
def plot_max_vs_first(input, val='costs', legend=None):
    """Scatter the first-epoch ``val`` against the maximum ``val`` reached,
    one point per result, plus a least-squares trend line."""
    _, axis = plt.subplots(1, 1)
    axis.set_xlabel(f'First epoch {val}')
    axis.set_ylabel(f'Maximum {val}')
    series_list = [result['results'][val] for result in input]
    firsts = [series[0] for series in series_list]
    bests = [max(series) for series in series_list]
    axis.scatter(firsts, bests)
    xs = np.unique(firsts)
    trend = np.poly1d(np.polyfit(firsts, bests, 1))
    axis.plot(xs, trend(xs))
    if legend is not None:
        axis.legend(legend)
def plot_max_vs_i(input, i, val='costs', legend=None):
    """Scatter the ``val`` at epoch ``i`` against the maximum ``val``
    reached, one scatter call (and one legend entry) per result.

    Bug fix: a caller-supplied ``legend`` used to be silently discarded
    (the parameter was immediately overwritten with ``[]``); it is now
    used as the starting label list.
    """
    _, ax = plt.subplots(1, 1)
    ax.set_xlabel(f'Epoch {i+1} {val}')
    ax.set_ylabel(f'Maximum {val}')
    ax.set_title(f'Plot of epoch {i+1} {val} vs maximum {val}')
    if legend is None:
        legend = []
    for result in input:
        r = result['results'][val]
        # One scatter call per run so each gets its own colour/label.
        ax.scatter(r[i], max(r))
        legend.append(f'{result["hidden_size"]} lr: {result["lr"]} prior width: {result["prior_var"]}')
    ax.legend(legend)
def plot_last_vs_first(input, val='costs', legend=None):
    """Scatter the first-epoch ``val`` against the final-epoch ``val``,
    one point per result, plus a least-squares trend line."""
    _, axis = plt.subplots(1, 1)
    axis.set_xlabel(f'First epoch {val}')
    axis.set_ylabel(f'Final epoch {val}')
    series_list = [result['results'][val] for result in input]
    firsts = [series[0] for series in series_list]
    finals = [series[-1] for series in series_list]
    axis.scatter(firsts, finals)
    xs = np.unique(firsts)
    trend = np.poly1d(np.polyfit(firsts, finals, 1))
    axis.plot(xs, trend(xs))
    if legend is not None:
        axis.legend(legend)
def plot_last_vs_i(input, i, val='costs', legend=None):
    """Scatter the ``val`` at epoch ``i`` against the final-epoch ``val``,
    one point per result, plus a least-squares trend line.

    Bug fix: the x values were taken from epoch 0 (a copy-paste of
    plot_last_vs_first) even though the axis label promises epoch ``i``;
    they now come from ``r[i]``.
    """
    _, ax = plt.subplots(1, 1)
    ax.set_xlabel(f'{i} epoch {val}')
    ax.set_ylabel(f'Final epoch {val}')
    epoch_i_vals = []
    final_vals = []
    for result in input:
        r = result['results'][val]
        epoch_i_vals.append(r[i])
        final_vals.append(r[-1])
    ax.scatter(epoch_i_vals, final_vals)
    ax.plot(np.unique(epoch_i_vals),
            np.poly1d(np.polyfit(epoch_i_vals, final_vals, 1))(np.unique(epoch_i_vals)))
    if legend is not None:
        ax.legend(legend)
def plot_xy(x, y, x_lablel='', y_label='', legend=None):
    """Simple scatter of ``y`` against ``x``.  (The misspelt parameter
    name ``x_lablel`` is kept for interface compatibility.)"""
    _, axis = plt.subplots(1, 1)
    axis.set_xlabel(x_lablel)
    axis.set_ylabel(y_label)
    axis.scatter(x, y)
    if legend is not None:
        axis.legend(legend)
def plot_dict(x_dict, y_dict, x_lablel='', y_label='', log_scale=False, legend=None):
    """Scatter one series per key of ``x_dict``/``y_dict``.

    Bug fix: the ``legend`` parameter used to be unconditionally
    overwritten with the dict keys; a caller-supplied legend is now
    respected (the keys remain the default labels).
    """
    _, ax = plt.subplots(1, 1)
    ax.set_xlabel(x_lablel)
    ax.set_ylabel(y_label)
    if log_scale:
        ax.set_xscale('log')
    if legend is None:
        legend = list(x_dict.keys())
    # Plot every series regardless of the labels chosen above.
    for key in x_dict:
        ax.scatter(x_dict[key], y_dict[key])
    ax.legend(legend)
def rank_best_value(input, n=10, value='accuracies', minimum=False):
    """Print the runs ranked by the best value of ``value`` they reached
    (maximum by default, minimum when ``minimum`` is True), limited to
    the top ``n``.

    Bug fix: the ``n`` parameter was ignored -- the limit was hard-coded
    to 10.
    """
    print(f'{"Minimum" if minimum else "Maximum"} {value} (limited to {n})')
    pairs = []
    for results in input:
        series = results['results'][value]
        best = min(series) if minimum else max(series)
        pairs.append((results['hidden_size'], best))
    pairs.sort(key=lambda pair: pair[1], reverse=not minimum)
    for i, pair in enumerate(pairs):
        if i < n:
            print(f'{pair[0]}: {value}: {pair[1]}')
    print('\n')
def rank_final_value(*input, n=10, value='accuracies', minimum=False):
    """For each results list passed, print its runs ranked by the mean of
    the final 20 epochs of ``value`` (descending by default, ascending
    when ``minimum`` is True), limited to the top ``n``.

    Bug fix: the ``n`` parameter was ignored -- the limit was hard-coded
    to 10.
    """
    print(f'{"Minimum" if minimum else "Maximum"} final {value} (limited to {n})')
    for results in input:
        pairs = []
        for result in results:
            label = f'{result["hidden_size"]} lr: {result["lr"]} prior width: {result["prior_var"]}'
            pairs.append((label, np.mean(result['results'][value][-20:])))
        pairs.sort(key=lambda pair: pair[1], reverse=not minimum)
        for i, pair in enumerate(pairs):
            if i < n:
                print(f'{pair[0]}: {value}: {pair[1]}')
"hutchinson.michael.john@gmail.com"
] | hutchinson.michael.john@gmail.com |
f0541ee8e9970bfd430b5485a39c0009a9631e76 | 72d010d00355fc977a291c29eb18aeb385b8a9b0 | /pushbase/user_component.py | 4dbab9d5ed556ff989cde117595a4b3af7d19a17 | [] | no_license | maratbakirov/AbletonLive10_MIDIRemoteScripts | bf0749c5c4cce8e83b23f14f671e52752702539d | ed1174d9959b20ed05fb099f0461bbc006bfbb79 | refs/heads/master | 2021-06-16T19:58:34.038163 | 2021-05-09T11:46:46 | 2021-05-09T11:46:46 | 203,174,328 | 0 | 0 | null | 2019-08-19T13:04:23 | 2019-08-19T13:04:22 | null | UTF-8 | Python | false | false | 2,340 | py | # Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/pushbase/user_component.py
# Compiled at: 2018-04-23 20:27:04
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.base import listens, task
from ableton.v2.control_surface import Component
from . import sysex
class UserComponentBase(Component):
    """Switches the controller between Live mode and User mode by sending
    a one-byte mode sysex message through ``value_control``.

    Emits three events: 'mode' (hardware reported a mode change),
    'before_mode_sent' and 'after_mode_sent' (around outgoing changes).
    """
    __events__ = (u'mode', u'before_mode_sent', u'after_mode_sent')
    # When True, outgoing mode sysex is sent one task-tick later instead
    # of immediately.
    defer_sysex_sending = False

    def __init__(self, value_control=None, *a, **k):
        assert value_control is not None
        super(UserComponentBase, self).__init__(*a, **k)
        self._value_control = value_control
        # Listen for mode changes reported by the hardware.
        self.__on_value.subject = self._value_control
        self._selected_mode = sysex.LIVE_MODE
        # Mode requested while the component was disabled; applied on the
        # next update() once enabled.
        self._pending_mode_to_select = None
        return

    def toggle_mode(self):
        """Flip between LIVE_MODE and USER_MODE."""
        self.mode = sysex.LIVE_MODE if self.mode == sysex.USER_MODE else sysex.USER_MODE

    def _get_mode(self):
        return self._selected_mode

    def _set_mode(self, mode):
        self._do_set_mode(mode)

    # Current mode; assigning sends (or defers) the sysex change.
    mode = property(_get_mode, _set_mode)

    def _do_set_mode(self, mode):
        # While disabled, remember the request instead of sending it.
        if self.is_enabled():
            self._apply_mode(mode)
        else:
            self._pending_mode_to_select = mode

    def update(self):
        super(UserComponentBase, self).update()
        # Apply any mode change that was requested while disabled.
        if self.is_enabled() and self._pending_mode_to_select:
            self._apply_mode(self._pending_mode_to_select)
            self._pending_mode_to_select = None
        return

    def force_send_mode(self):
        """Resend the current mode even though it has not changed."""
        self._do_apply_mode(self._selected_mode)

    def _apply_mode(self, mode):
        # Only send when the mode actually changes.
        if mode != self._selected_mode:
            self._do_apply_mode(mode)

    def _do_apply_mode(self, mode):
        self.notify_before_mode_sent(mode)
        if self.defer_sysex_sending:
            # Delay one tick via the component task system.
            self._tasks.add(task.sequence(task.delay(1), task.run(lambda : self._send_mode_change(mode))))
        else:
            self._send_mode_change(mode)

    def _send_mode_change(self, mode):
        self._selected_mode = mode
        self._value_control.send_value((mode,))
        self.notify_after_mode_sent(mode)

    @listens('value')
    def __on_value(self, value):
        # Hardware-initiated mode change: track it and notify listeners.
        mode = value[0]
        self._selected_mode = mode
        self.notify_mode(mode)
| [
"julien@julienbayle.net"
] | julien@julienbayle.net |
70b37329ad3c3cce6622f4f307c71cffaad6359f | ab19b1e637109f6a6f32e99714ea1c7cbe1d5ec0 | /month/migrations/0003_theme_slug.py | 1fbf6c4bf241ba87a30afe5e9f364aafa2a4cbf7 | [] | no_license | devonwarren/totemag | daf05876cfe636c4dcfe83b764900a0bc4c9c29d | 304ab0e2f72b926e63de706a6e3dc0b043db36fd | refs/heads/master | 2021-01-17T20:48:48.671352 | 2016-06-02T00:57:11 | 2016-06-02T00:57:11 | 58,146,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-31 19:21
from __future__ import unicode_literals
import autoslug.fields
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: adds an auto-populated (from ``name``), unique
    # ``slug`` field to the Theme model.

    dependencies = [
        ('month', '0002_auto_20160218_1741'),
    ]

    operations = [
        migrations.AddField(
            model_name='theme',
            name='slug',
            # always_update=True regenerates the slug whenever the name
            # changes; null=True keeps existing rows valid.
            field=autoslug.fields.AutoSlugField(always_update=True, editable=False, null=True, populate_from='name', unique=True, verbose_name='URL'),
        ),
    ]
| [
"devon.warren@gmail.com"
] | devon.warren@gmail.com |
fcac3eef07e02c904d9de6b2316190c32ac0beb8 | 6550cc368f029b3955261085eebbddcfee0547e1 | /第6部分-Django(哪吒,肖锋)/django-2-进阶-肖锋/day69/day69/about_middleware/app01/views.py | 5de771df74370c060b8d9679eadaa846206f9934 | [] | no_license | vividyellow/oldboyeduPython14qi | d00c8f45326e16464c3d4e8df200d93779f68bd3 | de1e9f6efafa2846c068b3fe5ad6e1ca19f74a11 | refs/heads/master | 2022-09-17T21:03:17.898472 | 2020-01-31T10:55:01 | 2020-01-31T10:55:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | from django.shortcuts import render, HttpResponse
def index(request, num):
    """Teaching view for a Django middleware lesson.

    Returns a plain HttpResponse that has been given a callable
    ``render`` attribute -- presumably so the response is treated like a
    TemplateResponse and the ``process_template_response`` middleware
    hook fires (TODO confirm against the lesson's middleware code).
    """
    # print(id(request))
    print('这是index函数')
    print(num)
    # int('xxxx')  # leftover: uncomment to trigger process_exception
    ret = HttpResponse('ok')

    def xxxx():
        # Replacement render(): its return value becomes the final response.
        print('这是index中的xxxx')
        return HttpResponse('这是index中的xxxx')

    ret.render = xxxx
    print(id(ret))
    return ret
| [
"524991368@qq.com"
] | 524991368@qq.com |
e7f5999714364ca89bfd6d481fb3df9478301b51 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_fondue.py | 40fa3e508406b5f07e49c5433e9cbb0992d0ff0d | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py |
# class header
class _FONDUE():
def __init__(self,):
self.name = "FONDUE"
self.definitions = [u'a hot dish prepared by keeping a container of either hot oil or melted cheese over a flame at the table and putting pieces of meat in the oil to be cooked or pieces of bread into the cheese: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
cbf38e40373cc0ba6702a4cd35e5d42ac32433e4 | 2f989d067213e7a1e19904d482a8f9c15590804c | /lib/python3.4/site-packages/allauth/socialaccount/app_settings.py | feffe48c9996e4b546c0e9e2cfdecc3b368227bf | [
"MIT"
] | permissive | levabd/smart4-portal | beb1cf8847134fdf169ab01c38eed7e874c66473 | 2c18ba593ce7e9a1e17c3559e6343a14a13ab88c | refs/heads/master | 2023-02-18T05:49:40.612697 | 2022-08-02T09:35:34 | 2022-08-02T09:35:34 | 116,001,098 | 0 | 1 | MIT | 2023-02-15T21:34:01 | 2018-01-02T10:00:07 | Roff | UTF-8 | Python | false | false | 2,466 | py | class AppSettings(object):
def __init__(self, prefix):
self.prefix = prefix
def _setting(self, name, dflt):
from django.conf import settings
getter = getattr(settings,
'ALLAUTH_SETTING_GETTER',
lambda name, dflt: getattr(settings, name, dflt))
return getter(self.prefix + name, dflt)
@property
def QUERY_EMAIL(self):
"""
Request e-mail address from 3rd party account provider?
E.g. using OpenID AX
"""
from allauth.account import app_settings as account_settings
return self._setting("QUERY_EMAIL",
account_settings.EMAIL_REQUIRED)
@property
def AUTO_SIGNUP(self):
"""
Attempt to bypass the signup form by using fields (e.g. username,
email) retrieved from the social account provider. If a conflict
arises due to a duplicate e-mail signup form will still kick in.
"""
return self._setting("AUTO_SIGNUP", True)
@property
def PROVIDERS(self):
"""
Provider specific settings
"""
return self._setting("PROVIDERS", {})
@property
def EMAIL_REQUIRED(self):
"""
The user is required to hand over an e-mail address when signing up
"""
from allauth.account import app_settings as account_settings
return self._setting("EMAIL_REQUIRED", account_settings.EMAIL_REQUIRED)
@property
def EMAIL_VERIFICATION(self):
"""
See e-mail verification method
"""
from allauth.account import app_settings as account_settings
return self._setting("EMAIL_VERIFICATION",
account_settings.EMAIL_VERIFICATION)
@property
def ADAPTER(self):
return self._setting('ADAPTER',
'allauth.socialaccount.adapter'
'.DefaultSocialAccountAdapter')
@property
def FORMS(self):
return self._setting('FORMS', {})
@property
def STORE_TOKENS(self):
return self._setting('STORE_TOKENS', True)
@property
def UID_MAX_LENGTH(self):
return 191
# Ugly? Guido recommends this himself ...
# http://mail.python.org/pipermail/python-ideas/2012-May/014969.html
import sys  # noqa

# Module-as-singleton trick: replace this module in sys.modules with a
# configured AppSettings instance, so that
# ``from allauth.socialaccount import app_settings`` yields the instance
# and attribute access hits the lazy properties directly.
app_settings = AppSettings('SOCIALACCOUNT_')
app_settings.__name__ = __name__
sys.modules[__name__] = app_settings
| [
"levabd@gmail.com"
] | levabd@gmail.com |
43e93fad15954e6b80db34598f7a446137cb7af2 | f1614f3531701a29a33d90c31ab9dd6211c60c6b | /alembic/versions/a27b6e57783e_add_price_field_into_product.py | abe46cef631b593c42bf69355694e6b56fccead4 | [] | no_license | pfpacheco/menu-sun-api | 8a1e11543b65db91d606b2f3098847e3cc5f2092 | 9bf2885f219b8f75d39e26fd61bebcaddcd2528b | refs/heads/master | 2022-12-29T13:59:11.644409 | 2020-10-16T03:41:54 | 2020-10-16T03:41:54 | 304,511,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,193 | py | """add_price_field_into_product
Revision ID: a27b6e57783e
Revises: 7bbdc8a9d923
Create Date: 2020-05-15 16:10:48.441492
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'a27b6e57783e'        # this migration's id
down_revision = '7bbdc8a9d923'   # parent revision in the migration chain
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: convert ``customer.active`` from TINYINT(1) to
    Boolean and add the nullable price columns on ``product``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('customer', 'active',
               existing_type=mysql.TINYINT(display_width=1),
               type_=sa.Boolean(),
               existing_nullable=False)
    # New nullable float price fields on product.
    op.add_column('product', sa.Column('list_price', sa.Float(), nullable=True))
    op.add_column('product', sa.Column('sale_price', sa.Float(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the price columns and restore
    ``customer.active`` to MySQL TINYINT(1)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('product', 'sale_price')
    op.drop_column('product', 'list_price')
    op.alter_column('customer', 'active',
               existing_type=sa.Boolean(),
               type_=mysql.TINYINT(display_width=1),
               existing_nullable=False)
    # ### end Alembic commands ###
| [
"pfpacheco@gmail.com"
] | pfpacheco@gmail.com |
0e9112638e0087b9dbc8196f6bbc82002dfc1c6f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_104/257.py | 07a54af8721054894995ab319662d0fe7ec4ce8d | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | #!/usr/bin/env python
import sys
import multiprocessing
import itertools
log = sys.stderr  # trace output goes to stderr so stdout stays clean for answers
def doit(line):
    """Solve one case: return two distinct subsets of the case's numbers
    with equal sums as a pair of tuples, or () when none is found.
    """
    n = [int(i) for i in line.split()]
    n = n[1:n[0]+1]  # first field is the count; keep exactly that many values
    log.write("Case: {0}\n".format(n))
    # Try (size1, size2) pairs ordered by increasing total size, smallest first.
    # NOTE(review): combinations(..., 2) only yields pairs of *different*
    # sizes, so two equal-size subsets are never compared - confirm intended.
    for c in sorted([c for c in itertools.combinations(range(1, len(n)), 2)], key = lambda x: sum(x)):
        log.write("Sizes: {0}\n".format(c))
        for e1 in itertools.combinations(n, c[0]):
            sum_e1 = sum(e1)
            for e2 in itertools.combinations(n, c[1]):
                sum_e2 = sum(e2)
                if sum_e1 == sum_e2 and e1 != e2:
                    return (e1, e2)
    return ()
def main():
    """Read all cases from stdin, solve them in parallel, print results."""
    input = sys.stdin   # NOTE: shadows the builtin `input` inside this function
    output = sys.stdout
    # One worker process per CPU core; doit is pure enough to parallelize.
    worker = multiprocessing.Pool(multiprocessing.cpu_count())
    count = int(input.readline().strip())
    for caseno, result in enumerate(worker.map(doit,[line.strip() for line in input][:count])):
        output.write("Case #{0}:\n".format(caseno + 1))
        if len(result) > 0:
            # One subset per line, values space-separated.
            for rl in result:
                output.write("{0}\n".format(" ".join([str(i) for i in rl])))
        else:
            output.write("Impossible\n")
if __name__ == '__main__':
    main()  # run only when executed as a script, not on import
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
a6028b97150017982a67e819999bbc0d43e86378 | 61d248b587c6a89f30caa8bc92daeda4d30cbcd2 | /Crawler/tools/convert.py | cd292c49afd90d4afe1a0b67b660424dedb0e5b6 | [] | no_license | luckyyd/hackathon-ocw | 0f6aab5899c628246a6391fd94116245bc4505e4 | b03fbbe7cad9456c84093b00cb946f2d14de50c5 | refs/heads/master | 2021-01-17T10:26:05.793986 | 2016-04-27T05:47:42 | 2016-04-27T05:47:42 | 52,526,118 | 0 | 1 | null | 2016-04-27T05:47:43 | 2016-02-25T13:26:36 | Python | UTF-8 | Python | false | false | 1,458 | py | # 把爬下内容融合到items.json中
import json
import codecs
import io
from pprint import pprint
def get_duration(olist, url):
    """Return the duration of the first entry whose 'courselink' equals
    ``url``; return '' when no entry matches."""
    durations = (entry['duration'] for entry in olist
                 if entry['courselink'] == url)
    return next(durations, '')
#input_file = open(r'C:\Users\foamliu.FAREAST\Documents\GitHub\hackathon-ocw\FeedAPI\app\assets\jsons\items.json', "r")
#output_file = codecs.open(r'C:\Users\foamliu.FAREAST\Documents\GitHub\hackathon-ocw\FeedAPI\app\assets\jsons\output.json', "w", encoding="utf-8")
def downloaded(items, link):
    """Return True when any existing item already carries this link."""
    return any(entry['link'] == link for entry in items)
# Merge freshly crawled items (one JSON object per line) into the existing
# items.json feed, assigning new ids and writing the result to output.json.
input_file_1 = open(r'C:\Users\Foam\Documents\GitHub\hackathon-ocw\Crawler\infoqsub\out.json', "r", encoding="utf-8")
input_file_2 = open(r'C:\Users\Foam\Documents\GitHub\hackathon-ocw\FeedAPI\app\assets\jsons\items.json', "r", encoding="utf-8")
output_file = codecs.open(r'C:\Users\Foam\Documents\GitHub\hackathon-ocw\FeedAPI\app\assets\jsons\output.json', "w", encoding="utf-8")
# NOTE(review): json.load's `encoding` kwarg was removed in Python 3.9 -
# this call breaks on newer interpreters.
items = json.load(input_file_2, encoding='utf-8')
lines = input_file_1.readlines()
# Next free item id. NOTE(review): presumably one past the largest id already
# present in items.json - confirm before re-running.
i = 32964
for line in lines:
    # Escape lone backslashes so json.loads accepts Windows-style paths.
    line = line.replace('\\','\\\\')
    #print(line)
    item = json.loads(line)
    if not downloaded(items, item['link']):
        item['item_id'] = i
        item['duration'] = ''
        item['enabled'] = True
        items.append(item)
        i += 1
json.dump(items ,output_file, indent=4,ensure_ascii=False,sort_keys=True)
| [
"foamliu@yeah.net"
] | foamliu@yeah.net |
112ef319e46a7d047de72eaa19d85ade85d1b4c9 | f807e5aecbe175e493ea1c47304ceca2817e6083 | /logging_exam/bar_logging.py | 53b4272d799d6cd9d906feec5f680ea020fd6675 | [] | no_license | jbking/logging-custom-for-json | d2a22745488d44fd667cb59a011c0232f531550c | 28613ef67fb0d1a4f7a440dcd838638ef8f9ee78 | refs/heads/master | 2023-03-24T03:47:12.295311 | 2021-03-18T04:44:10 | 2021-03-18T04:53:33 | 348,305,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | import logging
from loguru import logger
_logger = logging.getLogger(__name__)
def log_out(msg, additional_context=None):
    """Emit one line through loguru and one through stdlib logging.

    When ``additional_context`` (a mapping) is given, the loguru line is
    emitted inside ``logger.contextualize`` so the extra fields travel
    with the record.
    """
    # NOTE(review): `msg` is never used - the log texts are hard-coded.
    # Confirm that is intentional (this looks like a logging demo).
    if additional_context is None:
        logger.info("logging out")
        _logger.info("logging.logger out")
    else:
        with logger.contextualize(**additional_context):
            logger.info("logging out with-in context")
            _logger.info("logging.logger out with-in context")
| [
"yusuke@jbking.org"
] | yusuke@jbking.org |
8e10ece00228060d469cc533d663c551c8f60b8d | 5942e3e75ef7dc22a67b04fb1f12e14658a2093d | /documentation_files/findertools.py | f6431928c62051fbea22ed0766b2c89ba6574eb3 | [] | no_license | the-factory/kdevelop-python | 9e94d2a4d4906a31a4d2a8a08300766e02d41a59 | 1e91f2cb4c94d9455a2ee22fef13df680aeed1ab | refs/heads/master | 2021-01-18T08:57:16.707711 | 2012-04-09T22:37:47 | 2012-04-09T22:37:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,122 | py | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
""":platform: Mac
:synopsis: Wrappers around the finder's Apple Events interface.
"""
def launch(file):
    """
    Tell the finder to launch *file*. What launching means depends on the file:
    applications are started, folders are opened and documents are opened in the
    correct application.
    """
    # Documentation stub only - the body is intentionally empty.
    pass
def Print(file):
    """
    Tell the finder to print a file. The behaviour is identical to selecting the
    file and using the print command in the finder's file menu.
    """
    # Documentation stub only - the body is intentionally empty.
    pass
def copy(file,destdir):
    """
    Tell the finder to copy a file or folder *file* to folder *destdir*. The
    function returns an :class:`Alias` object pointing to the new file.
    """
    # Documentation stub only - the body is intentionally empty.
    pass
def move(file,destdir):
    """
    Tell the finder to move a file or folder *file* to folder *destdir*. The
    function returns an :class:`Alias` object pointing to the new file.
    """
    # Documentation stub only - the body is intentionally empty.
    pass
def sleep():
    """
    Tell the finder to put the Macintosh to sleep, if your machine supports it.
    """
    # Documentation stub only - the body is intentionally empty.
    pass
def restart():
    """
    Tell the finder to perform an orderly restart of the machine.
    """
    # Documentation stub only - the body is intentionally empty.
    pass
| [
"svenbrauch@googlemail.com"
] | svenbrauch@googlemail.com |
ad3e0221462b158b16454706a9dd1f8ccf500736 | 7a649b4969eecc48a13924c610409f32502e945f | /workspace_tools/data/support.py | d46c99ee9bc24d6d2c330faaf52eccb1eb69432c | [
"Apache-2.0"
] | permissive | giapdangle/mbed | 3434cfa485220a3997653742e85a020ab6eb488a | 4a6e8aa5f6f6ee6749dbf7ff4dade7501f73c996 | refs/heads/master | 2020-12-29T02:07:13.522820 | 2013-04-11T16:45:02 | 2013-04-11T16:45:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | DEFAULT_SUPPORT = {
"LPC1768" : ["ARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"],
"LPC11U24": ["ARM", "uARM"],
"LPC2368" : ["ARM"],
"KL25Z" : ["ARM", "GCC_CW"],
"LPC812" : ["uARM"],
}
# Target -> toolchain lists restricted to the ARM compiler family.
# NOTE(review): meaning inferred from the name - confirm against consumers.
CORTEX_ARM_SUPPORT = {
    "LPC1768" : ["ARM"],
    "LPC11U24": ["ARM", "uARM"],
    "KL25Z"   : ["ARM"],
    "LPC812"  : ["uARM"],
}
"emilmont@gmail.com"
] | emilmont@gmail.com |
5c1c5c7f5bf537882d403059f0342a0d9cb50424 | b8b26feac86b66b0b534996cf9c3fbf7ec660240 | /aoc/2017/p3-2.py | b3003e4e3d06cb26545a197036c34205d9cd62a1 | [
"MIT"
] | permissive | neizod/problems | 775fffe32166c5b124d0e4c973b8d0aba7f3900b | 180aaf7d0ecfc3d0dd5f1d4345a7a4d83b1b884a | refs/heads/master | 2021-07-08T12:30:31.100320 | 2021-05-26T09:34:19 | 2021-05-26T09:34:19 | 6,245,523 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,205 | py | #!/usr/bin/env python3
# Spiral memory grid (AoC 2017 day 3 part 2), seeded with the centre value 1.
grid = [[1]]
def iter_ring(size):
    """Yield the (x, y) border cells of a size x size grid.

    The walk starts at (size-1, size-2), goes up the right edge, left
    along the top, down the left edge, right along the bottom, and ends
    on the bottom-right corner. In this module ``expand`` always calls
    with odd size >= 3 (grid starts 1x1 and grows by 2 each pass);
    the traversal matches the original for any size >= 2.
    """
    last = size - 1
    for row in range(size - 2, 0, -1):   # up the right edge
        yield last, row
    for col in range(last, 0, -1):       # left along the top edge
        yield col, 0
    for row in range(0, last):           # down the left edge
        yield 0, row
    for col in range(0, last):           # right along the bottom edge
        yield col, last
    yield last, last                     # bottom-right corner closes the ring
def iter_surround(x, y, size):
    """Yield the values of the up-to-8 in-bounds neighbours of (x, y),
    read from the module-level ``grid``."""
    offsets = [(dx, dy)
               for dx in (-1, 0, 1)
               for dy in (-1, 0, 1)
               if (dx, dy) != (0, 0)]
    for dx, dy in offsets:
        nx, ny = x + dx, y + dy
        if 0 <= nx < size and 0 <= ny < size:
            yield grid[ny][nx]
def expand(grid):
    """Grow ``grid`` by one ring of cells on every side and yield each
    newly computed ring value.

    Each new cell is the sum of its already-filled neighbours; '?'
    placeholders (the not-yet-computed part of the new ring) are skipped.
    Like the original, this is a generator: nothing happens until it is
    iterated.
    """
    # Pad every existing row with a placeholder at both ends.
    for row in grid:
        row.insert(0, '?')
        row.append('?')
    width = len(grid[0])
    # Add a placeholder row on top and on the bottom.
    grid.insert(0, ['?'] * width)
    grid.append(['?'] * width)
    size = len(grid)
    for x, y in iter_ring(size):
        # Fix: compare with != rather than `is not`; identity comparison
        # against a str literal is implementation-dependent and raises a
        # SyntaxWarning on modern CPython.
        grid[y][x] = sum(v for v in iter_surround(x, y, size) if v != '?')
        yield grid[y][x]
def find_larger(n):
    """Keep growing the spiral grid until a stored value exceeds ``n``,
    then return that value."""
    while True:
        for value in expand(grid):
            if value > n:
                return value
if __name__ == '__main__':
    print(find_larger(int(input())))
    # Dump the final grid, right-aligned in 6-character columns, for inspection.
    for line in grid:
        print(' '.join('{:>6}'.format(x) for x in line))
| [
"neizod@gmail.com"
] | neizod@gmail.com |
93bf28364394d57d3c7f4a0af0218e1624b63385 | 429d5ec5f3d4941391807f2a46582938698f82dc | /doc/Homeworks/Solutions/CodesPart2firstmidterm.py | 536c95a48aeffc6437934bd27ab13e3e29d4e9e0 | [
"CC0-1.0"
] | permissive | mhjensen/Physics321 | b24548bbe69633b4618f39ed0b0cf2eb94f10266 | 91970ed5502de694e4812dc77d886c02701f300e | refs/heads/master | 2023-08-04T06:29:22.148776 | 2023-07-24T20:38:14 | 2023-07-24T20:38:14 | 190,315,277 | 30 | 42 | CC0-1.0 | 2021-07-13T17:34:44 | 2019-06-05T02:52:53 | HTML | UTF-8 | Python | false | false | 1,248 | py | import numpy as np
from math import *
import matplotlib.pyplot as plt
# The acceleration a = F/m with F = -dV/dx
def acceleration(x):
    # dV/dx of potential_energy below: d/dx(-10/x + 3/x^2 + x) = 10/x^2 - 6/x^3 + 1,
    # so a = -(1/m) * dV/dx. Relies on the module-level mass `m`.
    return (-1/m)*((10/(x**2)) + (-2*3/(x**3)) + 1)
def potential_energy(x):
    """Return V(x) = -10/x + 3/x**2 + x, the 1-D potential this script
    integrates (the same form `acceleration` differentiates)."""
    attractive = -10.0 / x
    repulsive = 3.0 / (x ** 2)
    linear = 1.0 * x
    # Summed left-to-right exactly as in the original expression.
    return attractive + repulsive + linear
# initial time
t0 = 0
#final time
tf = 10.0
dt = 0.00001          # integration time step
# set up array for time steps
t = np.arange(t0,tf+dt,dt)
# mass and potential parameters part 2
m = 1.0
V0 = 0.1              # NOTE(review): V0 and d are defined but never used below - confirm.
d = 0.1
# initial values
v0 = 0.0
x0 = 2.0
x = np.zeros(len(t))
v = np.zeros(len(t))
v[0] = v0
x[0] = x0
# integrate v and x with the velocity-Verlet scheme:
#   x_{i+1} = x_i + v_i*dt + a_i*dt^2/2
#   v_{i+1} = v_i + (a_i + a_{i+1})*dt/2
for i in range(len(t)-1):
    a_i = acceleration(x[i])
    x[i+1] = x[i] + dt*v[i] + 0.5*a_i*(dt**2)
    a_ip1 = acceleration(x[i+1])
    v[i+1] = v[i] + 0.5*dt*(a_i + a_ip1)
plt.plot(t,x)
plt.show()
# now use the arrays of x and v to test energy conservation
# define potential, kinetic and total energies
Ekin = np.zeros(len(t))
Epot = np.zeros(len(t))
Etot = np.zeros(len(t))
# NOTE(review): kinetic energy is conventionally 0.5*m*v^2; the code computes
# v^2/(2m) - identical for m = 1 but confirm before changing the mass.
Ekin[0] = 0.5*v0*v0/m
Epot[0] = potential_energy(x0)
Etot[0] = Ekin[0]+Epot[0]
ekin = epot =0.0
# set up total energy as function of time (arrays start zeroed, so += fills them)
for i in range(1,len(t)):
    ekin = 0.5*v[i]*v[i]/m
    Ekin[i] += ekin
    epot = potential_energy(x[i])
    Epot[i] += epot
    Etot[i] += ekin+epot
plt.plot(t,Etot)
plt.show()
| [
"morten.hjorth-jensen@fys.uio.no"
] | morten.hjorth-jensen@fys.uio.no |
2bf0f812aba10ee36b3e812498abf89e69bdafa1 | 3b4e8cc46c1373d36150ad839d2f6539ea8f92b3 | /qmcpy/accumulate_data/_accumulate_data.py | 9b04e7fbd1ad2b0356f426d5bc5ce79ae29a46f9 | [
"Apache-2.0"
] | permissive | kachiann/QMCSoftware | a244efb085c95924ee80a5aa8b8480ea4a9f8e72 | 0ed9da2f10b9ac0004c993c01392b4c86002954c | refs/heads/master | 2023-01-11T12:01:19.527177 | 2020-10-29T03:49:47 | 2020-10-29T03:49:47 | 313,940,226 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,700 | py | from ..true_measure._true_measure import TrueMeasure
from ..util import ParameterError, MethodImplementationError, _univ_repr, DimensionError
class AccumulateData(object):
    """ Accumulated Data abstract class. DO NOT INSTANTIATE.

    Concrete subclasses must set the attributes validated below *before*
    delegating to this initializer; it only checks their presence.
    """

    def __init__(self):
        """ Initialize data instance (attribute-presence validation only). """
        prefix = 'A concrete implementation of AccumulateData must have '
        # Each check enforces one piece of the subclass contract; the error
        # text names the missing attribute and its expected type.
        if not hasattr(self,'stopping_criterion'):
            raise ParameterError(prefix + 'self.stopping_criterion (a StoppingCriterion)')
        if not hasattr(self,'integrand'):
            raise ParameterError(prefix + 'self.integrand (an Integrand)')
        if not hasattr(self,'measure'):
            raise ParameterError(prefix + 'self.measure (a TrueMeasure)')
        if not hasattr(self,'distribution'):
            raise ParameterError(prefix + 'self.distribution (a DiscreteDistribution)')
        if not hasattr(self, 'solution'):
            raise ParameterError(prefix + 'self.solution')
        if not hasattr(self, 'n_total'):
            raise ParameterError(prefix + 'self.n_total (total number of samples)')
        if not hasattr(self,'parameters'):
            self.parameters = []  # optional; defaults to no extra display fields

    def update_data(self):
        """ ABSTRACT METHOD to update the accumulated data."""
        raise MethodImplementationError(self, 'update_data')

    def __repr__(self):
        # Solution line first, then each collaborating QMC object's repr,
        # then the universal representation of this object's own fields.
        string = "Solution: %-15.4f\n" % (self.solution)
        for qmc_obj in [self.integrand, self.distribution, self.measure, self.stopping_criterion]:
            if qmc_obj:
                string += str(qmc_obj)+'\n'
        string += _univ_repr(self, 'AccumulateData', self.parameters + ['time_integrate'])
        return string
| [
"agsorokin3@gmail.com"
] | agsorokin3@gmail.com |
9dc8c6f47259ee29a2b53f1fabf5e6d90e53eb36 | ea8a9889534df7323b3d159ff4ba9563191b8eba | /phantomcli/scripts/util.py | 1dfa842bb230f1dcc79c38592d4b01c65ae2acaa | [
"MIT"
] | permissive | the16thpythonist/phantom-cli | 946b7fb69fd4917ba10f46fe2ecbd3884ee49513 | 921588dda66bf84bf79569493f4e4312b59cd56d | refs/heads/master | 2023-01-11T21:22:28.962704 | 2020-01-15T10:07:16 | 2020-01-15T10:07:16 | 171,679,615 | 2 | 2 | null | 2022-12-26T20:47:38 | 2019-02-20T13:37:21 | Python | UTF-8 | Python | false | false | 3,142 | py | # standard library imports
import logging
from collections import defaultdict
# local imports
from phantomcli.network import PhantomSocket
# ##############
# LOGGING CONFIG
# ##############
# This will be the translation table, which will be used to return the appropriate constant for defining the logging
# level based on the string passed through the command line option. We are using a default dict, so we do not have to
# deal with a if statement. In case an invalid string is given, the default dict will just return the constant for
# the debug mode, even though the given string isnt even one of its keys.
kwargs = {
    'DEBUG': logging.DEBUG,
    'INFO': logging.INFO,
    'WARNING': logging.WARNING,
    'ERROR': logging.ERROR
}
# Unknown level strings silently fall back to DEBUG (see the banner above).
logging_config = defaultdict(lambda: logging.DEBUG, **kwargs)
logging_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
###########################
# HANDLING TRANSFER FORMATS
###########################
# We are defining a default dict here to prevent the long if structure to sort out whether a passed format string is
# valid or not. This way, either a valid string is passed and it works or the default option is used anyways.
_formats = {
    'P16': 'P16',
    'P16R': 'P16R',
    'P10': 'P10',
    'P8': 'P8',
    '8': 'P8',      # bare numeric aliases map to the P-prefixed names
    'P8R': 'P8R',
    '8R': 'P8R',
}
# Any unrecognized format string falls back to the 16-bit default 'P16'.
formats = defaultdict(lambda: 'P16', **_formats)
##############################
# HANDLING ACQUISITION MODES #
##############################
# This is a mapping from the strings, the user can pass as identifiers for acquisition modes to the actual constants
# needed to be passed to the according method of the phantom socket object.
_modes = {
    'S': PhantomSocket.MODE_STANDARD,          # short and long spellings map
    'standard': PhantomSocket.MODE_STANDARD,   # to the same mode constant
    'SB': PhantomSocket.MODE_STANDARD_BINNED,
    'standard-binned': PhantomSocket.MODE_STANDARD_BINNED,
    'HS': PhantomSocket.MODE_HIGH_SPEED,
    'high-speed': PhantomSocket.MODE_HIGH_SPEED,
    'HSB': PhantomSocket.MODE_HIGH_SPEED_BINNED,
    'high-speed-binned': PhantomSocket.MODE_HIGH_SPEED_BINNED
}
# ##################
# COMMAND HELP TEXTS
# ##################
# Many of the commands use the same options, to is makes sense to define the help texts here for them all instead of
# copy pasting them for each of them...
# Shared option help strings (see the comment block above).
format_help = "The transfer format to be used, when transmitting image data. " \
              "The possible options are 'P10', 'P16' and 'P8'. Default is 'P16' with 16 bit per pixel"
log_help = "The level of logging to be displayed in the console output. The options are 'ERROR' for only displaying " \
           "error messages, 'INFO' for log messages marking important steps in the program execution or 'DEBUG' " \
           "for displaying all log messages. Default is 'ERROR'"
xnetwork_help = "Setting this flag will enable the transmission using the 10G interface. Make sure, that you are " \
                "indeed connected using the 10G ethernet interface before setting this flag."
| [
"jonseb1998@gmail.com"
] | jonseb1998@gmail.com |
578c8787ceceb57fd6f1b00ccf5a18c45bce3112 | d62863d049c0206bfa744ca4c9e886030bfce1ab | /core/sw_content/api/urls.py | bb16923771941697e7d045724f76f110488d8588 | [] | no_license | jurgeon018/box | 51738b99e640202936ed72357d3c67d2517e589b | 50b84a0afa73fab85a00eef54194f3c126d15397 | refs/heads/master | 2021-07-17T13:37:08.665292 | 2020-10-15T09:50:33 | 2020-10-15T09:50:33 | 232,013,297 | 0 | 1 | null | 2020-03-27T02:16:44 | 2020-01-06T03:01:34 | Python | UTF-8 | Python | false | false | 159 | py | from django.urls import path, include
from .views import *
# Content API routes; `contents_list` and `content` come from the star
# import of .views above.
urlpatterns = [
    path('contents/', contents_list),
    path('contents/<code>/', content),   # single entry addressed by <code>
]
| [
"jurgeon018@gmail.com"
] | jurgeon018@gmail.com |
fe51faa3136ef571cd703ca4d4e1d4e76927e009 | 3d0ae7c8693463faa11bacad8e6ea9d0d70b9eb1 | /pytools/map_partitioning/bin/logfile_utils/show_trajectory.py | d9f9db720d302afbb88b0a3504799fb9d6148ab7 | [] | no_license | stefie10/slu_hri | a76f79094bd1740676fec5d889411ba3b1d9dc26 | 50753379953e1ff822162eeab094cffe4a30f3e1 | refs/heads/master | 2022-12-14T01:07:51.522258 | 2020-08-31T00:50:12 | 2020-08-31T00:50:12 | 291,386,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,457 | py | from pyTklib import tklib_log_gridmap
import carmen_maptools
from sys import argv
from pylab import *
def load_logfile(filename):
    """Parse a whitespace-delimited logfile of ``label x y`` rows.

    Returns three parallel lists of floats: labels, x coordinates and
    y coordinates. Raises ValueError on malformed rows, like the original,
    but now closes the file even in that case (the original leaked the
    handle when parsing failed).
    """
    C = []
    X = []
    Y = []
    with open(filename, 'r') as filein:
        for line in filein:
            c, x, y = line.split()
            C.append(float(c))
            X.append(float(x))
            Y.append(float(y))
    return C, X, Y
def show_trajectory(map_file, log_file):
    """Plot the labelled trajectory from log_file on top of the carmen
    gridmap in map_file, then show the figure."""
    # load the map and plot it
    gridmap = tklib_log_gridmap()
    gridmap.load_carmen_map(map_file)
    themap = gridmap.to_probability_map_carmen()
    carmen_maptools.plot_map(themap, gridmap.x_size, gridmap.y_size)
    # marker shape/colour per integer label
    L, X, Y = load_logfile(log_file)
    pltypes = ['o','^','<','>','s','d','p','h','x', 'o']
    plcolors = ['r','g','b','m','k','y','c','r','g','b']
    # group the trajectory points by label
    XHash = {}
    YHash = {}
    for i in range(len(L)):
        try:
            XHash[L[i]].append(X[i])
            YHash[L[i]].append(Y[i])
        except KeyError:
            # Bug fix: seed the new label's lists with the current point.
            # The original created empty lists here, silently dropping the
            # first point of every label.
            XHash[L[i]] = [X[i]]
            YHash[L[i]] = [Y[i]]
    for key in XHash.keys():
        plot(XHash[key], YHash[key], plcolors[int(key)]+pltypes[int(key)])
    # mark trajectory start (green) and end (red)
    plot([X[0]], [Y[0]], 'go')
    plot([X[len(X)-1]], [Y[len(Y)-1]], 'ro')
    show()
if __name__=="__main__":
    if(len(argv)==3):
        show_trajectory(argv[1], argv[2])
    else:
        # Python 2 print statement - this script targets Python 2.
        print "usage:\n\t>>python show_trajectory.py map_file emma_logfile"
| [
"stefie10@alum.mit.edu"
] | stefie10@alum.mit.edu |
0725144ab0336ebe177e178f18b16ccb28c97f24 | ea5b878376318675931f21ffda41c5914ad0e382 | /keras/optimizers/optimizer_experimental/rmsprop.py | f0ae4683563940c788925ed6817093352ec74525 | [
"Apache-2.0"
] | permissive | Wajih-O/keras | 44089847c6f284b2c2150da8530c5fe05c2a8bb5 | 9628af85a0a2cb04cf433b1ad991017b70ae2005 | refs/heads/master | 2022-03-03T15:20:18.045765 | 2022-02-19T00:16:17 | 2022-02-19T00:16:55 | 125,854,516 | 0 | 0 | null | 2018-03-19T12:32:07 | 2018-03-19T12:32:07 | null | UTF-8 | Python | false | false | 7,968 | py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RMSprop optimizer implementation."""
from keras.optimizers.optimizer_experimental import optimizer
from keras.utils import generic_utils
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
@generic_utils.register_keras_serializable()
class RMSprop(optimizer.Optimizer):
    r"""Optimizer that implements the RMSprop algorithm.

    The gist of RMSprop is to:

    - Maintain a moving (discounted) average of the square of gradients
    - Divide the gradient by the root of this average

    This implementation of RMSprop uses plain momentum, not Nesterov momentum.

    The centered version additionally maintains a moving average of the
    gradients, and uses that average to estimate the variance.

    Args:
      learning_rate: Initial value for the learning rate:
        either a floating point value,
        or a `tf.keras.optimizers.schedules.LearningRateSchedule` instance.
        Defaults to 0.001.
      rho: float, defaults to 0.9. Discounting factor for the old gradients.
      momentum: float, defaults to 0.0. If not 0.0., the optimizer tracks the
        momentum value, with a decay rate equals to `1 - momentum`.
      epsilon: A small constant for numerical stability. This epsilon is
        "epsilon hat" in the Kingma and Ba paper (in the formula just before
        Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to
        1e-7.
      centered: Boolean. If `True`, gradients are normalized by the estimated
        variance of the gradient; if False, by the uncentered second moment.
        Setting this to `True` may help with training, but is slightly more
        expensive in terms of computation and memory. Defaults to `False`.
      clipnorm: see the `clipnorm` argument of `optimizer_experimental.Optimizer`.
      clipvalue: see the `clipvalue` argument of
        `optimizer_experimental.Optimizer`.
      global_clipnorm: see the `global_clipnorm` argument of
        `optimizer_experimental.Optimizer`.
      use_ema: see the `use_ema` argument of `optimizer_experimental.Optimizer`.
      ema_momentum: see the `ema_momentum` argument of
        `optimizer_experimental.Optimizer`.
      ema_overwrite_frequency: see the `ema_overwrite_frequency` argument of
        `optimizer_experimental.Optimizer`.
      jit_compile: see the `jit_compile` argument of
        `optimizer_experimental.Optimizer`.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to `"RMSprop"`.
      **kwargs: see the `**kwargs` argument of `optimizer_experimental.Optimizer`.

    Usage:

    >>> opt = tf.keras.optimizers.RMSprop(learning_rate=0.1)
    >>> var1 = tf.Variable(10.0)
    >>> loss = lambda: (var1 ** 2) / 2.0    # d(loss) / d(var1) = var1
    >>> step_count = opt.minimize(loss, [var1]).numpy()
    >>> var1.numpy()
    9.683772

    Reference:
      - [Hinton, 2012](
        http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
    """

    def __init__(self,
                 learning_rate=0.001,
                 rho=0.9,
                 momentum=0.0,
                 epsilon=1e-7,
                 centered=False,
                 clipnorm=None,
                 clipvalue=None,
                 global_clipnorm=None,
                 use_ema=False,
                 ema_momentum=0.99,
                 ema_overwrite_frequency=100,
                 jit_compile=False,
                 name='RMSprop',
                 **kwargs):
        super(RMSprop, self).__init__(
            clipnorm=clipnorm,
            clipvalue=clipvalue,
            global_clipnorm=global_clipnorm,
            use_ema=use_ema,
            ema_momentum=ema_momentum,
            ema_overwrite_frequency=ema_overwrite_frequency,
            jit_compile=jit_compile,
            name=name,
            **kwargs)
        self._learning_rate = self._build_learning_rate(learning_rate)
        self.rho = rho
        self.momentum = momentum
        self.epsilon = epsilon
        self.centered = centered

    def build(self, var_list):
        """Create one accumulator slot per trainable variable.

        `velocity` always exists; `momentum` and `average_gradient` slots
        are created only when the corresponding feature is enabled.
        """
        super().build(var_list)
        if hasattr(self, '_built') and self._built:
            return  # idempotent: slots were already created
        self._built = True
        self._velocities = []
        for var in var_list:
            self._velocities.append(
                self.add_variable_from_reference(var, 'velocity'))
        self._momentums = []
        if self.momentum > 0:
            for var in var_list:
                self._momentums.append(
                    self.add_variable_from_reference(var, 'momentum'))
        self._average_gradients = []
        if self.centered:
            for var in var_list:
                self._average_gradients.append(
                    self.add_variable_from_reference(var, 'average_gradient'))

    def update_step(self, gradient, variable):
        """Update step given gradient and the associated model variable."""
        if self._var_key(variable) not in self._index_dict:
            raise KeyError(f'Optimizer cannot recognize variable {variable.name}, '
                           f'this usually means you are calling an optimizer '
                           f'previously used on a different model. Please try '
                           f'creating a new optimizer instance.')
        lr = tf.cast(self.learning_rate, variable.dtype)

        var_key = self._var_key(variable)
        velocity = self._velocities[self._index_dict[var_key]]
        momentum = None
        if self.momentum > 0:
            momentum = self._momentums[self._index_dict[var_key]]
        average_grad = None
        if self.centered:
            average_grad = self._average_gradients[self._index_dict[var_key]]

        rho = self.rho
        if isinstance(gradient, tf.IndexedSlices):
            # Sparse gradients: decay every slot, then add the new squared
            # gradient contribution only at the touched indices.
            velocity.assign(rho * velocity)
            velocity.scatter_add(tf.IndexedSlices(
                tf.square(gradient.values) * (1 - rho), gradient.indices))
            if self.centered:
                # Bug fix: centered RMSprop keeps a moving average of the
                # *raw* gradients (not their squares) and uses the variance
                # estimate E[g^2] - E[g]^2 as the denominator. The previous
                # code averaged squared gradients and permanently subtracted
                # the estimate from `velocity` with assign_add, corrupting
                # the accumulator a little more on every step.
                average_grad.assign(rho * average_grad)
                average_grad.scatter_add(tf.IndexedSlices(
                    gradient.values * (1 - rho), gradient.indices))
                denominator = velocity - tf.square(average_grad)
            else:
                denominator = velocity
            denominator_slices = tf.gather(denominator, gradient.indices)
            transformed_grad = tf.IndexedSlices(
                gradient.values / (tf.sqrt(denominator_slices) + self.epsilon),
                gradient.indices)
            if self.momentum > 0:
                momentum.assign(self.momentum * momentum)
                momentum.scatter_add(transformed_grad)
                variable.assign_add(-lr * momentum)
            else:
                variable.scatter_add(
                    tf.IndexedSlices(-lr * transformed_grad.values,
                                     transformed_grad.indices))
        else:
            # Dense gradients.
            velocity.assign(rho * velocity + (1 - rho) * tf.square(gradient))
            if self.centered:
                # Same fix as in the sparse branch: average the raw gradient
                # and compute the denominator locally instead of mutating
                # `velocity` in place.
                average_grad.assign(rho * average_grad + (1 - rho) * gradient)
                denominator = velocity - tf.square(average_grad)
            else:
                denominator = velocity
            transformed_grad = gradient / (tf.sqrt(denominator) + self.epsilon)
            if self.momentum > 0:
                momentum.assign(self.momentum * momentum + transformed_grad)
                variable.assign_add(-lr * momentum)
            else:
                variable.assign_add(-lr * transformed_grad)

    def get_config(self):
        """Serialize hyperparameters so the optimizer can be re-created."""
        config = super(RMSprop, self).get_config()
        config.update({
            'learning_rate': self._serialize_hyperparameter(self._learning_rate),
            'rho': self.rho,
            'momentum': self.momentum,
            'epsilon': self.epsilon,
            'centered': self.centered,
        })
        return config
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
862419c52e382090fdc0f8bc88d9d9b50545d941 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /PwGFjiSG3kXzp8rjw_0.py | 2c3f3f4190281dfb4dc976c0f2a808f87fb5bbdb | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | """
Write a function that returns the number of users in a chatroom based on the
following rules:
1. If there is no one, return `"no one online"`.
2. If there is 1 person, return `"user1 online"`.
3. If there are 2 people, return `user1 and user2 online"`.
4. If there are `n>2` people, return the first two names and add `"and n-2 more online"`.
For example, if there are 5 users, return:
"user1, user2 and 3 more online"
### Examples
chatroom_status([]) ➞ "no one online"
chatroom_status(["paRIE_to"]) ➞ "paRIE_to online"
chatroom_status(["s234f", "mailbox2"]) ➞ "s234f and mailbox2 online"
chatroom_status(["pap_ier44", "townieBOY", "panda321", "motor_bike5", "sandwichmaker833", "violinist91"])
➞ "pap_ier44, townieBOY and 4 more online"
### Notes
N/A
"""
def chatroom_status(users):
    """Describe who is online according to the chatroom display rules
    spelled out in the module docstring."""
    count = len(users)
    if count == 0:
        return 'no one online'
    if count <= 2:
        # one name, or "a and b"
        return ' and '.join(users) + ' online'
    extra = count - 2
    return '{}, {} and {} more online'.format(users[0], users[1], extra)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
56701fd811a38660046b136f603979a0cef5719a | 964f2882117ff656d7a2757c233c6dd88226d975 | /services/catalog/src/simcore_service_catalog/core/application.py | 9738689b9d5b75e0d9567c716f0419f4c9c74665 | [
"MIT"
] | permissive | ignapas/osparc-simcore | a002dd47d7689af9c1c650eea33e31add2b182c1 | cb62e56b194265a907f260f3071c55a65f569823 | refs/heads/master | 2023-01-22T08:55:32.580775 | 2022-12-09T15:57:36 | 2022-12-09T15:57:36 | 170,852,656 | 0 | 0 | MIT | 2023-01-09T05:03:04 | 2019-02-15T11:12:34 | Python | UTF-8 | Python | false | false | 3,437 | py | import logging
import time
from typing import Callable, Optional
from fastapi import FastAPI, Request
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.gzip import GZipMiddleware
from servicelib.fastapi.openapi import override_fastapi_openapi_method
from starlette import status
from starlette.exceptions import HTTPException
from starlette.middleware.base import BaseHTTPMiddleware
from ..api.errors.http_error import (
http_error_handler,
make_http_error_handler_for_exception,
)
from ..api.errors.validation_error import http422_error_handler
from ..api.root import router as api_router
from ..api.routes.health import router as health_router
from ..meta import API_VERSION, API_VTAG, PROJECT_NAME, SUMMARY
from ..services.function_services import setup_function_services
from .events import (
create_start_app_handler,
create_stop_app_handler,
on_shutdown,
on_startup,
)
from .settings import AppSettings, BootModeEnum
logger = logging.getLogger(__name__)
def init_app(settings: Optional[AppSettings] = None) -> FastAPI:
    """Create and fully wire the catalog service's FastAPI application.

    When ``settings`` is None they are built from environment variables.
    """
    if settings is None:
        settings = AppSettings.create_from_envs()
    assert settings  # nosec

    # Configure the root logger from the service log-level setting.
    logging.basicConfig(level=settings.CATALOG_LOG_LEVEL.value)
    logging.root.setLevel(settings.CATALOG_LOG_LEVEL.value)
    logger.debug(settings.json(indent=2))

    app = FastAPI(
        debug=settings.SC_BOOT_MODE
        in [BootModeEnum.DEBUG, BootModeEnum.DEVELOPMENT, BootModeEnum.LOCAL],
        title=PROJECT_NAME,
        description=SUMMARY,
        version=API_VERSION,
        openapi_url=f"/api/{API_VTAG}/openapi.json",
        docs_url="/dev/doc",
        redoc_url=None,  # default disabled
    )
    override_fastapi_openapi_method(app)

    # Settings are kept on app.state so request handlers can reach them.
    app.state.settings = settings

    setup_function_services(app)

    # events
    app.add_event_handler("startup", on_startup)
    app.add_event_handler("startup", create_start_app_handler(app))
    app.add_event_handler("shutdown", on_shutdown)
    app.add_event_handler("shutdown", create_stop_app_handler(app))

    # exception handlers
    app.add_exception_handler(HTTPException, http_error_handler)
    app.add_exception_handler(RequestValidationError, http422_error_handler)

    # SEE https://docs.python.org/3/library/exceptions.html#exception-hierarchy
    app.add_exception_handler(
        NotImplementedError,
        make_http_error_handler_for_exception(
            status.HTTP_501_NOT_IMPLEMENTED, NotImplementedError
        ),
    )
    # Catch-all: any unhandled exception becomes a 500 response.
    app.add_exception_handler(
        Exception,
        make_http_error_handler_for_exception(
            status.HTTP_500_INTERNAL_SERVER_ERROR, Exception
        ),
    )

    # Routing
    # healthcheck at / and at /v0/
    app.include_router(health_router)
    # api under /v*
    app.include_router(api_router, prefix=f"/{API_VTAG}")

    # middleware to time requests (ONLY for development)
    if settings.SC_BOOT_MODE != BootModeEnum.PRODUCTION:

        async def _add_process_time_header(request: Request, call_next: Callable):
            # Wall-clock time of the downstream handler, exposed in the
            # X-Process-Time response header.
            start_time = time.time()
            response = await call_next(request)
            process_time = time.time() - start_time
            response.headers["X-Process-Time"] = str(process_time)
            return response

        app.add_middleware(BaseHTTPMiddleware, dispatch=_add_process_time_header)

    # gzip middleware
    app.add_middleware(GZipMiddleware)

    return app
| [
"noreply@github.com"
] | ignapas.noreply@github.com |
b413251bf658c5caebaf53a11df9e546d7b74c8d | fb6037de54380ef9776fa18b099df03129cef27b | /config.py | 0f52e7589bdb34f27ca16a8992c3e2cb5ee0941f | [] | no_license | webclinic017/newmainbucketssurver | 1385dffe0ea573bb9cb81a4eeb5ddd341aabe88c | 71f86ec7d52b7d68960ecd2fed6b11713b11622e | refs/heads/main | 2023-07-08T10:03:02.915075 | 2021-08-05T13:25:13 | 2021-08-05T13:25:13 | 395,801,207 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,735 | py | from flask_restful import Api
from flask import Flask
from flask_cors import CORS
from flask_mail import Mail
from dotenv import load_dotenv
from cryptography.fernet import Fernet
from flask_jwt_extended import JWTManager
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.mongodb import MongoDBJobStore
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from pymongo import MongoClient
import os
# Pull environment variables from a local .env file before reading any of them.
load_dotenv()

app = Flask(__name__)
# Core secrets and mail-server settings, all sourced from the environment.
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY')
app.config['MAIL_SERVER'] = os.environ.get('MAIL_SERVER')
app.config['MAIL_PORT'] = os.environ.get('MAIL_PORT')
app.config['MAIL_USERNAME'] = os.environ.get('MAIL_USERNAME')
app.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD')
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True  # implicit SSL rather than STARTTLS
# JWT is signed with the same SECRET_KEY; access tokens never expire.
app.config['JWT_SECRET_KEY'] = os.environ.get('SECRET_KEY')
app.config['JWT_ACCESS_TOKEN_EXPIRES'] = False
# Alpaca trading API credentials -- assumed to be present in the environment.
app.config['APCA_API_KEY_ID'] = os.environ.get('APCA_API_KEY_ID')
app.config['APCA_API_SECRET_KEY'] = os.environ.get('APCA_API_SECRET_KEY')
app.config['APCA_API_BASE_URL'] = os.environ.get('APCA_API_BASE_URL')
# APScheduler job store: persist scheduled jobs in MongoDB so they survive restarts.
jobstores = {
    'default': MongoDBJobStore(database='buckets', collection='scheduled_jobs', client=MongoClient(os.environ.get("DATABASE_URL")))
}
job_defaults = {
    'coalesce': False,  # run each missed execution instead of collapsing them
    'max_instances': 3
}
executors = {
    'default': ThreadPoolExecutor(20),
    'processpool': ProcessPoolExecutor(5)
}
# Symmetric encryption helper; FERNET_ENCRYPTION_KEY must be a valid Fernet key
# or this raises at import time.
fernet = Fernet(os.environ.get('FERNET_ENCRYPTION_KEY'))
CORS(app)
scheduler = BackgroundScheduler(jobstores=jobstores, job_defaults=job_defaults, executors=executors)
scheduler.start()  # NOTE: starts the scheduler as a side effect of importing this module
api = Api(app)
jwt = JWTManager(app)
mail = Mail(app)
"noreply@github.com"
] | webclinic017.noreply@github.com |
7f54b92aa08f3953e791158a06be7ed846bbe676 | 2029785d79244b601c978deb2617e88cc658dc9e | /config.py | f2cfc36ddde563db0c6ade78c57a0b1148b0b1fc | [] | no_license | JalexDooo/Pytorch_Learning | 025bcf422c5fb39b03a2a6521fc69502d899c37e | 622cddff30359763270fffa0b52dca79b02164bb | refs/heads/master | 2020-04-03T02:06:36.248827 | 2018-11-01T03:33:45 | 2018-11-01T03:33:45 | 154,947,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,139 | py | import torch as t
import warnings
class DefaultConfig(object):
    """Default training configuration.

    Hyper-parameters and runtime options are held as class attributes; call
    :meth:`_parse` with a dict to override any subset of them at run time.
    (Replaces the leftover template docstring.)
    """

    env = 'default'  # visdom
    vis_port = 8097
    model = 'AlexNet'  # name of the model class to instantiate

    train_data_root = './data/train/'
    test_data_root = './data/test'
    load_model_path = None  # path to a pre-trained checkpoint, or None

    batch_size = 32
    use_gpu = True
    num_workers = 4  # how many workers for loading data
    print_freq = 20  # print every N batch

    debug_file = './tmp/debug'
    result_file = './result/result.csv'

    max_epoch = 10
    lr = 0.1  # initial learning rate
    lr_decay = 0.95
    weight_decay = 1e-4

    device = t.device('cuda') if use_gpu else t.device('cpu')

    def _parse(self, kwargs):
        """Update the configuration from ``kwargs`` and print the result.

        Unknown keys are still set on the instance, but a warning is emitted
        first so typos do not fail silently.
        """
        for k, v in kwargs.items():
            if not hasattr(self, k):
                warnings.warn("Warning: opt has not attribute %s" % k)
            setattr(self, k, v)

        print('user config:')
        for k, v in self.__class__.__dict__.items():
            if not k.startswith('_'):
                print(k, getattr(self, k))
opt = DefaultConfig()
"""
opt = DefaultConfig()
new_config = {
'batch_size':20,
'use_gpu':False,
}
opt._parse(new_config)
print(opt.batch_size)
print(opt.use_gpu)
""" | [
"393351322@qq.com"
] | 393351322@qq.com |
9da4f23ce929496941a6e018e60fc0d53ce7f602 | d4f9a423353fe79cf8824a8407690655fc1379fe | /django/virtualenv/django/lib/python2.7/site-packages/ansible-2.2.0-py2.7.egg/ansible/modules/extras/system/modprobe.py | 94c1a70437b97af6e6cb0e52a67fbc5c237d1106 | [] | no_license | 007root/python | 9ab62d433d17c8bb57622fd1d24a3b17cb3d13ad | 16bf729e5824555eab0c9de61ce6b8b055551bd1 | refs/heads/master | 2020-06-23T09:43:05.308328 | 2020-06-09T08:31:20 | 2020-06-09T08:31:20 | 74,656,519 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,607 | py | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, David Stygstra <david.stygstra@gmail.com>
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: modprobe
short_description: Add or remove kernel modules
requirements: []
version_added: 1.4
author:
- "David Stygstra (@stygstra)"
- "Julien Dauphant"
- "Matt Jeffery"
description:
- Add or remove kernel modules.
options:
name:
required: true
description:
- Name of kernel module to manage.
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the module should be present or absent.
params:
required: false
default: ""
version_added: "1.6"
description:
- Modules parameters.
'''
EXAMPLES = '''
# Add the 802.1q module
- modprobe: name=8021q state=present
# Add the dummy module
- modprobe: name=dummy state=present params="numdummies=2"
'''
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
import shlex
def main():
    """Entry point of the modprobe Ansible module: load or unload a kernel
    module according to the 'state' parameter, honoring check mode."""
    module = AnsibleModule(
        argument_spec={
            'name': {'required': True},
            'state': {'default': 'present', 'choices': ['present', 'absent']},
            'params': {'default': ''},
        },
        supports_check_mode=True,
    )
    # Result skeleton returned through exit_json()/fail_json().
    args = {
        'changed': False,
        'failed': False,
        'name': module.params['name'],
        'state': module.params['state'],
        'params': module.params['params'],
    }
    # Check if module is present
    try:
        modules = open('/proc/modules')
        present = False
        # /proc/modules uses underscores; the trailing space prevents prefix
        # matches (e.g. 'dummy' matching the 'dummy2' line).
        module_name = args['name'].replace('-', '_') + ' '
        for line in modules:
            if line.startswith(module_name):
                present = True
                break
        modules.close()
    except IOError:
        e = get_exception()
        module.fail_json(msg=str(e), **args)
    # Check only; don't modify
    if module.check_mode:
        if args['state'] == 'present' and not present:
            changed = True
        elif args['state'] == 'absent' and present:
            changed = True
        else:
            changed = False
        module.exit_json(changed=changed)
    # Add/remove module as needed
    if args['state'] == 'present':
        if not present:
            command = [module.get_bin_path('modprobe', True), args['name']]
            command.extend(shlex.split(args['params']))
            rc, _, err = module.run_command(command)
            if rc != 0:
                module.fail_json(msg=err, **args)
            args['changed'] = True
    elif args['state'] == 'absent':
        if present:
            rc, _, err = module.run_command([module.get_bin_path('rmmod', True), args['name']])
            if rc != 0:
                module.fail_json(msg=err, **args)
            args['changed'] = True
    module.exit_json(**args)
main()
| [
"wangzhishuai@gstianfu.com"
] | wangzhishuai@gstianfu.com |
c6e6f419fb3b519d47880163dc872c60998281b1 | 845058c3434ff43c5f9bd48df13818bef74f04e3 | /tyler/cs301/spring19/materials/code/lec-08/sec3/battleship.py | d2e064aef38d1dee1842b8a4e1b5a7e87db4a51a | [] | no_license | tylerharter/caraza-harter-com | ad7d7f76a382dfd1d4ff4c05922ea57425d1be2b | 70b983a28d94d744b92c9f00dfb8ec6ca20e080d | refs/heads/master | 2023-08-18T23:16:21.588376 | 2023-08-09T17:03:18 | 2023-08-09T17:03:18 | 142,339,324 | 19 | 114 | null | 2023-02-21T18:28:00 | 2018-07-25T18:35:07 | HTML | UTF-8 | Python | false | false | 685 | py | def draw_map(x, y, character):
width = 10
height = 10
print(('.' * width + "\n") * y, end="")
print("." * x + character + "." * (width - (x + 1)))
print(('.' * width + "\n") * (height - (y + 1)))
def ship1_hit(x, y):
    """Return True when the guess (x, y) lands on ship 1, fixed at (5, 4)."""
    ship1_position = (5, 4)
    return (x, y) == ship1_position
def ship2_hit(x, y):
    """Return True when the guess (x, y) lands on ship 2, fixed at (8, 8)."""
    ship2_position = (8, 8)
    return (x, y) == ship2_position
def is_hit(x, y):
    """Return True when the guess (x, y) strikes either ship."""
    if ship1_hit(x, y):
        return True
    return ship2_hit(x, y)
def guess():
    """Prompt the player for x/y coordinates, report hit or miss, and redraw
    the board with the guess marked."""
    x = int(input("x: "))
    y = int(input("y: "))
    hit = is_hit(x, y)
    print("Hit? " + str(hit))
    symbol = str(int(hit))  # '1' marks a hit, '0' a miss
    # draw the map
    draw_map(x, y, symbol)
guess()
| [
"tylerharter@gmail.com"
] | tylerharter@gmail.com |
170b36e7ee43850c846de900bb40777b8a51c861 | c1eec99e798d71878b341cb016c4b1be193d5a68 | /tests/test_inplayserviceresources.py | 0ae060ae2704306a437f141d4dc57bad9881653c | [
"MIT"
] | permissive | KelvinVail/betfairlightweight | 7f7dc14ae80dc1778f8819d3990a5fe2f4d0703b | 3bcad61b2319e40c02fd41cd5179838e53e995ad | refs/heads/master | 2021-01-15T20:03:19.918425 | 2017-08-09T17:55:32 | 2017-08-09T17:55:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | from __future__ import print_function
import unittest
import datetime
from betfairlightweight import resources
from tests.tools import create_mock_json
class InPlayServiceTest(unittest.TestCase):
    """Tests for the in-play service resource wrappers (Scores, EventTimeline)."""

    def test_scores(self):
        payload = create_mock_json('tests/resources/scores.json').json()
        scores = resources.Scores(**payload)
        assert isinstance(scores, resources.Scores)
        assert scores.event_type_id == 1

    def test_event_timeline(self):
        payload = create_mock_json('tests/resources/eventtimeline.json').json()
        timeline = resources.EventTimeline(**payload)
        assert isinstance(timeline, resources.EventTimeline)
        assert timeline.event_type_id == 1
| [
"paulingliam@gmail.com"
] | paulingliam@gmail.com |
720f9ff24ca6f6b8bfcb316886af1ead65756ab9 | a2d13658503b9b921e27994152ab6adb554725bc | /store/migrations/0065_auto_20210205_1244.py | 4faf2433a9c2ce5bd22224da671c9cd04cea01fd | [] | no_license | avishkakavindu/sushi-chef-django | 40a1d7916d7f8c37ba1290cb717af517d2bce265 | 4c112d806720d903877822baaa26159c32704901 | refs/heads/master | 2023-03-18T11:12:41.721554 | 2021-03-11T08:22:52 | 2021-03-11T08:22:52 | 303,053,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | # Generated by Django 3.1.5 on 2021-02-05 07:14
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see header comment); normally regenerated
    # rather than edited by hand.

    dependencies = [
        ('store', '0064_auto_20210205_1227'),
    ]

    operations = [
        migrations.AlterField(
            model_name='coupon',
            name='valid_to',
            # NOTE(review): the default is the datetime frozen at
            # makemigrations time, not evaluated per row -- presumably
            # intentional for this coupon-expiry migration; confirm.
            field=models.DateTimeField(default=datetime.datetime(2021, 2, 15, 12, 44, 48, 516859)),
        ),
        migrations.AlterField(
            model_name='order',
            name='payment_method',
            field=models.CharField(choices=[('payhere', 'payhere'), ('cashondelivery', 'cashondelivery')], max_length=50),
        ),
    ]
| [
"avishkakavindud@gmail.com"
] | avishkakavindud@gmail.com |
7254d4913dabf70b27f3b77a544a4b9cbf1d9990 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2008-EOL/programming/libs/indilib/actions.py | 6bc591a0b0b40e7bbf3c3b128c1d8577fa1bfce1 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2008 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import cmaketools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir = "libindi0-%s" % get.srcVERSION()
def setup():
    # Configure the build with CMake inside the unpacked source tree (WorkDir).
    cmaketools.configure()
def build():
    # Compile with the CMake-generated makefiles.
    cmaketools.make()
def install():
    # Install into the PiSi package staging directory, not the live system,
    # then ship the standard documentation files.
    cmaketools.rawInstall("DESTDIR=%s" % get.installDIR())
    pisitools.dodoc("AUTHORS", "ChangeLog", "COPYING.LIB", "NEWS", "README*", "TODO")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
a20349f17c7be0c3eb620b4d2bc9924a7c4380e4 | ac9c04d564d781eab3a5a0e2d7fce8377047a6d5 | /obsoper/test/test_coordinates.py | 510fa864ce079dfd46ddaaecde821ccc965c12dc | [
"BSD-3-Clause"
] | permissive | met-office-ocean/obsoper | ee57fb4bc0f5f06e9126bbb161223aca4d535e45 | 15030dedc3cbdeb67407b940b4f923b054520fc3 | refs/heads/master | 2021-01-11T01:36:07.516059 | 2019-02-01T12:15:54 | 2019-02-01T12:15:54 | 70,684,510 | 0 | 0 | null | 2017-02-06T16:40:37 | 2016-10-12T09:28:36 | Python | UTF-8 | Python | false | false | 1,393 | py | # pylint: disable=missing-docstring, invalid-name
import unittest
import numpy as np
import obsoper
class TestCartesian(unittest.TestCase):
    """Tests for obsoper.cartesian, which converts longitude/latitude pairs
    into x/y/z coordinate arrays."""

    def test_cartesian_given_lists_returns_arrays(self):
        x, y, z = obsoper.cartesian([], [])
        self.assertIsInstance(x, np.ndarray)
        self.assertIsInstance(y, np.ndarray)
        self.assertIsInstance(z, np.ndarray)

    def test_cartesian_given_empty_arrays_returns_empty_arrays(self):
        result = obsoper.cartesian([], [])
        expect = [], [], []
        self.assertCoordinatesEqual(expect, result)

    def test_cartesian_given_greenwich_equator_returns_unit_x(self):
        # (lon 0, lat 0) maps to the unit x vector.
        self.check_cartesian(longitudes=[0], latitudes=[0],
                             x=[1], y=[0], z=[0])

    def test_cartesian_given_north_pole_returns_unit_z(self):
        # (lon 0, lat 90) maps to the unit z vector.
        self.check_cartesian(longitudes=[0], latitudes=[90],
                             x=[0], y=[0], z=[1])

    def check_cartesian(self, longitudes, latitudes, x, y, z):
        # Shared driver: run the conversion and compare to the expected triple.
        result = obsoper.cartesian(longitudes, latitudes)
        expect = x, y, z
        self.assertCoordinatesEqual(expect, result)

    @staticmethod
    def assertCoordinatesEqual(expect, result):
        # Element-wise comparison of each coordinate array (default ~6 dp).
        np.testing.assert_array_almost_equal(expect[0], result[0])
        np.testing.assert_array_almost_equal(expect[1], result[1])
        np.testing.assert_array_almost_equal(expect[2], result[2])
| [
"andrew.ryan@metoffice.gov.uk"
] | andrew.ryan@metoffice.gov.uk |
d1bc5e61fa3c11ca862dc136ea3dbb0de9ae534f | bf049dd5150794070fb816b665e626559b29d5ed | /code/docmodel/metadata_parser.py | 4cce43ffcdfd329bfbd82d879b8e3eeb39fa42ed | [] | no_license | mnscholz/ttk | af8cbaeb7e7a15e00757a1e65c4d8e36402fd372 | 07291e45512ad9a819016f8891a3bfa6f462eef0 | refs/heads/master | 2021-01-18T06:22:46.658707 | 2016-05-16T18:35:43 | 2016-05-16T18:35:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,675 | py | """Metadata Parsers.
This module contains metadata parsers, that is, parsers that pull out the
metadata and add it to a TarsqiDocument. The only requirements on each parser is
that it defines an __init__() method that takes a dictionary of options and a
parse() method that takes a TarsqiDocument instance.
Current parsers only deal with the DCT.
"""
import re, time, os, sqlite3
from docmodel.document import TarsqiDocument
import utilities.logger as logger
class MetadataParser:
    """This is the minimal metadata parser that is used as a default. It sets the
    DCT to today's date and provides some common functionality to subclasses."""

    def __init__(self, options):
        """At the moment, initialization does not use any of the options,
        but this could change."""
        self.options = options
        self.tarsqidoc = None  # added in by the parse() method

    def parse(self, tarsqidoc):
        """Adds metadata to the TarsqiDocument. The only thing it adds to the
        metadata dictionary is the DCT, which is set to today."""
        self.tarsqidoc = tarsqidoc
        self.tarsqidoc.metadata['dct'] = self.get_dct()

    def get_dct(self):
        """Return today's date in YYYYMMDD format."""
        return get_today()

    def get_source(self):
        """A convenience method to lift the SourceDoc out of the tarsqi
        instance."""
        return self.tarsqidoc.source

    def _get_tag_content(self, tagname):
        """Return the text content of the first tag with name tagname, return
        None if there is no such tag."""
        try:
            # IndexError from [0] means no tag with that name was found.
            tag = self.get_source().tags.find_tags(tagname)[0]
            content = self.get_source().text[tag.begin:tag.end].strip()
            return content
        except IndexError:
            logger.warn("Cannot get the %s tag in this document" % tagname)
            return None
class MetadataParserTTK(MetadataParser):
    """The metadata parser for the ttk format, simply copies the meta data."""

    def parse(self, tarsqidoc):
        """Adds metadata to the TarsqiDocument. The only thing it adds to the
        metadata dictionary is the DCT, which is copied from the metadata in the
        SourceDoc."""
        self.tarsqidoc = tarsqidoc
        self.tarsqidoc.metadata['dct'] = self.get_dct(tarsqidoc.source)

    def get_dct(self, sourcedoc):
        # NOTE(review): signature differs from the base class get_dct(self);
        # this is only called through this class's parse(), which supplies the
        # SourceDoc explicitly.
        return sourcedoc.metadata.get('dct')
class MetadataParserText(MetadataParser):
    """Parser for plain text input. For now this one adds nothing to the
    default metadata parser."""
class MetadataParserTimebank(MetadataParser):
    """The parser for Timebank documents. All it does is overwriting the
    get_dct() method."""

    def get_dct(self):
        """Extracts the document creation time, and returns it as a string of
        the form YYYYMMDD. Depending on the source, the DCT can be found in one
        of the following tags: DOCNO, DATE_TIME, PUBDATE or FILEID."""
        result = self._get_doc_source()
        if result is None:
            # dct defaults to today if we cannot find the DOCNO tag in the
            # document
            return get_today()
        source_identifier, content = result
        if source_identifier in ('ABC', 'CNN', 'PRI', 'VOA'):
            # DOCNO content starts with the 3-letter source followed by the
            # 8-digit date, e.g. "ABC19980101...".
            return content[3:11]
        elif source_identifier == 'AP':
            dct = self._parse_tag_content("(?:AP-NR-)?(\d+)-(\d+)-(\d+)",
                                          'FILEID')
            # the DCT format is YYYYMMDD or YYMMDD
            return dct if len(dct) == 8 else '19' + dct
        elif source_identifier in ('APW', 'NYT'):
            return self._parse_tag_content("(\d+)/(\d+)/(\d+)", 'DATE_TIME')
        elif source_identifier == 'SJMN':
            # PUBDATE carries a two-digit year, so prefix the century.
            pubdate_content = self._get_tag_content('PUBDATE')
            return '19' + pubdate_content
        elif source_identifier == 'WSJ':
            return '19' + content[3:9]
        elif source_identifier in ('ea', 'ed'):
            return '19' + content[2:8]

    def _get_doc_source(self):
        """Return the name of the content provider as well as the content of the DOCNO
        tag that has that information."""
        content = self._get_tag_content('DOCNO')
        content = str(content)  # in case the above returned None
        for source_identifier in ('ABC', 'APW', 'AP', 'CNN', 'NYT', 'PRI',
                                  'SJMN', 'VOA', 'WSJ', 'ea', 'ed'):
            if content.startswith(source_identifier):
                return (source_identifier, content)
        logger.warn("Could not determine document source from DOCNO tag")
        return None

    def _parse_tag_content(self, regexpr, tagname):
        """Return the DCT part of the tag content of tagname, requires a regular
        expression as one of the arguments."""
        content_string = self._get_tag_content(tagname)
        result = re.compile(regexpr).match(content_string)
        if result:
            # Groups are in source order (month, day, year); reassemble as
            # YYYYMMDD.
            (month, day, year) = result.groups()
            return "%s%s%s" % (year, month, day)
        else:
            logger.warn("Could not get date from %s tag" % tagname)
            return get_today()
class MetadataParserATEE(MetadataParser):
    """The parser for ATEE documents."""

    def get_dct(self):
        """All ATEE documents have a DATE tag with a value attribute, the value
        of that attribute is returned."""
        # Fix: the original read self.sourcedoc, which is never assigned
        # anywhere (the base parse() stores the document as self.tarsqidoc).
        # Use the base-class accessor, as MetadataParser._get_tag_content does.
        date_tag = self.get_source().tags.find_tag('DATE')
        return date_tag.attrs['value']
class MetadataParserRTE3(MetadataParser):
    """The parser for RTE3 documents, no differences with the default parser."""

    def get_dct(self):
        # Same behavior as the base class: today's date in YYYYMMDD format.
        return get_today()
class MetadataParserVA(MetadataParser):
    """A minimal example parser for VA data. It is identical to MetadataParser
    except for how it gets the DCT. This is done by lookup in a database. This
    here is the simplest possible case, and it is quite inefficient. It assumes
    there is an sqlite database at 'TTK_ROOT/code/data/in/va/dct.sqlite' which
    was created as follows:

    $ sqlite3 dct.sqlite
    sqlite> create table dct (filename TEXT, dct TEXT)
    sqlite> insert into dct values ("test.xml", "1999-12-31");

    The get_dct method uses this database. """

    def get_dct(self):
        """Look up the DCT for the current document's base file name.

        Fixes two defects in the original: it referenced the non-existent
        attribute self.sourcedoc (the base parse() stores the document as
        self.tarsqidoc), and it leaked the sqlite connection."""
        fname = os.path.basename(self.get_source().filename)
        db_connection = sqlite3.connect('data/in/va/dct.sqlite')
        try:
            db_cursor = db_connection.cursor()
            db_cursor.execute('SELECT dct FROM dct WHERE filename=?', (fname,))
            dct = db_cursor.fetchone()[0]
        finally:
            # sqlite3 connections are not closed by a with-block, so close
            # explicitly to avoid leaking file handles.
            db_connection.close()
        return dct
def get_today():
    """Return today's date as a YYYYMMDD string (local time)."""
    now = time.localtime()
    return time.strftime("%Y%m%d", now)
| [
"marc@cs.brandeis.edu"
] | marc@cs.brandeis.edu |
0147192e5e7915f56773a85531b0aa0143af88c2 | 8d6ae21b78b3b40382e21198c571a7957e055be5 | /Aug20/projectEuler/utilities/numeric.py | 8c0bf7fb94084c1085ac1ceaf2dfa7ca1596a545 | [] | no_license | vj-reddy/PythonBatch1 | 6c1a429e0ac57ea1db7b04af18187e84cd52f2d5 | b86a5a16b1004d1e4f855a57b019704c71425bbf | refs/heads/master | 2023-03-16T06:05:48.104363 | 2020-10-16T13:55:03 | 2020-10-16T13:55:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | def is_divisible_by(number, factor):
"""
This method will return true if the number is divisible by factor
:param number: number
:param factor: factor
:return: true if divisible false other wise
"""
return number % factor == 0
def is_prime(number):
    """
    Determine whether the number passed is prime.

    Bug fix: the original loop iterated ``range((number // 2) + 1)``, so it
    started at 0 (raising ZeroDivisionError in the modulo) and index 1 would
    have marked every number composite.  It also accepted numbers below 2.

    :param number: number on which check has to be performed
    :return: True if prime, False otherwise
    """
    if number < 2:
        # 0, 1 and negatives are not prime by definition.
        return False
    for index in range(2, (number // 2) + 1):
        if number % index == 0:
            return False
    return True
| [
"qtdevops@gmail.com"
] | qtdevops@gmail.com |
75f825c43426ee4c037bbc78f9ee08315259bbd0 | 2afb1095de2b03b05c8b96f98f38ddeca889fbff | /web_scrapping/try_beautifulSoup.py | c821555604b1369258a76b38f3042aaacfd6ecbb | [] | no_license | draganmoo/trypython | 187316f8823296b12e1df60ef92c54b7a04aa3e7 | 90cb0fc8626e333c6ea430e32aa21af7d189d975 | refs/heads/master | 2023-09-03T16:24:33.548172 | 2021-11-04T21:21:12 | 2021-11-04T21:21:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | # import requests
# from bs4 import BeautifulSoup
#
# url = "https://www.bilibili.com/video/av28951415?from=search&seid=12506065350425881349"
#
# r = requests.get(url)
# r_text = r.text
#
# soup = BeautifulSoup(r_text, "html.parser")
# ##查看第一个a标签
# a = soup.a
# print (a)
##查询a标签在bs4中的对象种类(tag,navigable string, beautifulsoup, comment)
# a_tage = soup.a
# print (type (a_tage))
##查询a标签下的href属性信息
# a_href = soup.a.attrs["href"]
# print (a_href)
##查看a标签的字符内容
# a_string = soup.a.string
# print (a_string)
##获取class为title所有a标签的title
# for string_content in soup.find_all ("a", class_="title"):
# print (string_content.get("title"))
##获取class为title所有a标签的href属性
# for link in soup.find_all ("a", class_="title"):
# print (link.get("href"))
##获取class为title所有a标签文本
# for string_content in soup.find_all ("a", class_="title"):
# print (string_content.get_text())
# Leftover demo snippet; everything above is commented-out scraping examples.
page = "50"
print('downloading page # '+page)
| [
"13701304462@163.com"
] | 13701304462@163.com |
6af7ce031ab72dbf0aff50709a14055d44efc7f8 | 6206ad73052b5ff1b6690c225f000f9c31aa4ff7 | /Code/Optimal Account Balancing.py | 1e1d342648e9e3f972af5beca08a82012254c012 | [] | no_license | mws19901118/Leetcode | 7f9e3694cb8f0937d82b6e1e12127ce5073f4df0 | 752ac00bea40be1e3794d80aa7b2be58c0a548f6 | refs/heads/master | 2023-09-01T10:35:52.389899 | 2023-09-01T03:37:22 | 2023-09-01T03:37:22 | 21,467,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,380 | py | class Solution:
def minTransfers(self, transactions: List[List[int]]) -> int:
balance = collections.defaultdict(int) #Initialize final balances for each person.
for x, y, amount in transactions: #Traverse transcations and update balance.
balance[x] += amount
balance[y] -= amount
unsettled = [amount for amount in balance.values() if amount] #Filter out unsettled balances.
@cache #Cache result.
def dfs(mask: int) -> int: #DFS to find the max number of subgroups whose balance can be settled in current group.
if not mask: #If mask is 0, means no balance in current group, return 0.
return 0
balanceSum, result = 0, 0 #Initialize balance sum of current group and result.
for i in range(len(unsettled)): #Traverse all unsettled balance.
currentMask = 1 << i #Calculate mask for current balance.
if mask & currentMask: #If mask & currentMask, current balance is in current group.
balanceSum += unsettled[i] #Add its balance to balanceSum.
result = max(result, dfs(mask ^ currentMask)) #Keep DFS to calculate the result of removing current balance from current group and update result.
return result + (balanceSum == 0) #If balanceSum is 0, increase 1 to result and return because current group is already settled and removing a non zero balance will break it so the removed balance must belongs to a settled subgroup.
return len(unsettled) - dfs((1 << len(unsettled)) - 1) #The reason is that a settled group x balances needs x - 1 transactions to settle, so overall minimum transactions needed is the length of unsettled balance substract DFS result of all balances in one group.
| [
"noreply@github.com"
] | mws19901118.noreply@github.com |
7ab3beaed3f6386264eb6ef9b7bfcc566325c83f | eef64f44003dff45287b487bc7a8da589d85d9cc | /chatbot/twitter_qa.py | 1ba8c671b2a055f4723ba61405dd3e29f7cbe936 | [
"Apache-2.0"
] | permissive | k8tems/TwitterQA | 387462c8e47e4c3dadeb9861e4009837e8b22f6b | 938fc29a050ab736d88446e9d794e2047850b4df | refs/heads/master | 2020-06-14T10:40:19.616309 | 2016-11-30T08:09:13 | 2016-11-30T08:09:13 | 75,195,384 | 0 | 1 | null | 2016-11-30T14:36:54 | 2016-11-30T14:36:53 | null | UTF-8 | Python | false | false | 3,924 | py | import json
import os
import re
import itertools
from TwitterAPI import TwitterAPI
# Load Twitter API credentials from disk and build a shared client at import
# time; importing this module therefore requires chatbot/credentials.json to
# exist and contain valid TwitterAPI keyword arguments.
with open("chatbot/credentials.json") as f:
    credentials = json.load(f)

api = TwitterAPI(**credentials)
def get_tweets(screen_name, max_tweets=None):
    """Download up to max_tweets recent tweets for screen_name by paging
    through statuses/user_timeline (Twitter caps history at 3200)."""
    show = api.request("users/show", {"screen_name": screen_name}).json()
    # Default to the account's full tweet count, clamped to the API maximum.
    max_tweets = max_tweets or show.get("statuses_count")
    max_tweets = min(max_tweets, 3200)
    print("Gathering {} tweets. Through API, 3200 is max possible".format(max_tweets))
    user_tweets = []
    query_params = {"screen_name": screen_name, "max_id": None, "count": 200}
    # Sentinel: True can never equal a tweet id, so the first page always
    # passes the "nothing new" check below.
    last_seen = True
    print("Gathering tweets for", screen_name)
    while True:
        try:
            r = api.request("statuses/user_timeline", query_params)
            timeline_tweets = r.json()
            if timeline_tweets[-1]['id'] == last_seen:
                # The page ends with the same tweet as last time: no progress.
                break
            last_seen = timeline_tweets[-1]['id']
            user_tweets.extend(timeline_tweets)
            # Page backwards from the oldest tweet collected so far.
            query_params['max_id'] = timeline_tweets[-1]['id']
            print("latest ID", query_params['max_id'],
                  "number of new tweets", len(timeline_tweets))
        except Exception as e:
            # Best-effort: log and retry/continue (e.g. transient API errors).
            print("ERROR", e)
        if len(user_tweets) >= max_tweets:
            break
    # De-duplicate (max_id pages overlap by one tweet) while preserving order.
    seen = set()
    tweets = []
    for x in user_tweets:
        if x['id'] not in seen:
            tweets.append(x)
            seen.add(x['id'])
    return tweets
def find_questions_for_tweets(tweets):
    """Given a user's tweets, fetch the tweets they replied to and return two
    parallel lists: questions (original texts) and answers (reply texts)."""
    # Map: id of the tweet that was replied to -> the reply tweet itself.
    origins = {tweet['in_reply_to_status_id']: tweet
               for tweet in tweets if tweet.get('in_reply_to_status_id')}
    origin_gen = (x for x in origins)
    questions = []
    answers = []
    print("Getting original tweets to which <user> replied")
    while True:
        # statuses/lookup accepts at most 100 ids per request.
        orig = list(itertools.islice(origin_gen, 100))
        if not orig:
            break
        id_query = ",".join([str(x) for x in orig])
        orig_tweets = api.request("statuses/lookup", {"id": id_query}).json()
        for ot in orig_tweets:
            if ot['id'] in origins:
                questions.append(ot['text'])
                answers.append(origins[ot['id']]['text'])
    print("collected question/answer pairs", len(questions), len(answers))
    return questions, answers
def normalize_tweet(x):
    """Normalize a raw tweet: lowercase, replace URLs/hashtags/mentions with
    the placeholders LINK/TAG/MENTION, space out punctuation, expand "n't",
    and drop a leading MENTION token."""
    x = " ".join(x.split())
    x = x.lower()
    x = re.sub("http[^ ]+", "LINK", x)
    x = re.sub("#[^ ]+", "TAG", x)
    x = re.sub("(@[^ ]+ )*@[^ ]+", "MENTION", x)
    for punc in [".", ",", "?", "!"]:
        # Collapse runs of the punctuation mark and put a space before it.
        x = re.sub("[{}]+".format(punc), " " + punc, x)
    x = x.replace("n't", " not")
    x = " ".join(x.split())
    # Bug fix: the original used x.lstrip("MENTION "), which strips any of the
    # characters {M,E,N,T,I,O,space} and could eat into a following uppercase
    # placeholder (e.g. "MENTION TAG hi" -> "AG hi").  Remove the literal
    # prefix instead.
    if x == "MENTION":
        x = ""
    elif x.startswith("MENTION "):
        x = x[len("MENTION "):]
    return x.strip()
def get_tweet_qa(twitter_username, max_tweets=None, normalize_tweets=True):
    """Return (questions, answers) lists for a user's reply pairs, normalized
    with normalize_tweet unless normalize_tweets is False."""
    tweets = get_tweets(twitter_username, max_tweets)
    questions, answers = find_questions_for_tweets(tweets)
    if normalize_tweets:
        questions = [normalize_tweet(x) for x in questions]
        answers = [normalize_tweet(x) for x in answers]
    return questions, answers
def get_rate_limits():
    """Print remaining quota for the two endpoints this module uses and return
    True while both still have requests available."""
    rates = api.request("application/rate_limit_status").json()
    timeline = rates['resources']['statuses']['/statuses/user_timeline']
    lookup = rates['resources']['users']['/users/lookup']
    print("lookup", lookup)
    print("timeline", timeline)
    return timeline['remaining'] != 0 and lookup['remaining'] != 0
def store_question_answers(username, max_number=None):
    """Fetch Q/A pairs for username and write them to data/tweets/ as two
    newline-separated text files; also return the pairs."""
    questions, answers = get_tweet_qa(username, max_number)
    # Resolve the output directory whether run from the repo root or from
    # one level below it.
    d = "data/tweets/" if os.path.isdir("data/tweets") else "../data/tweets/"
    d += "{}-{}.txt"
    with open(d.format(username, "questions"), "w") as f:
        f.write("\n".join(questions))
    print("Saved", d.format(username, "questions"))
    with open(d.format(username, "answers"), "w") as f:
        f.write("\n".join(answers))
    print("Saved", d.format(username, "answers"))
    return questions, answers
| [
"kootenpv@gmail.com"
] | kootenpv@gmail.com |
edeff8f4740317cae44fe79e7ea6e421a6b1a75a | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-cognitiveservices-language-luis/azure/cognitiveservices/language/luis/runtime/models/composite_entity_model_py3.py | 45ac985fffc41176922ca0c663519fdecff97333 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 1,607 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CompositeEntityModel(Model):
    """LUIS Composite Entity.

    All required parameters must be populated in order to send to Azure.

    :param parent_type: Required. Type/name of parent entity.
    :type parent_type: str
    :param value: Required. Value for composite entity extracted by LUIS.
    :type value: str
    :param children: Required. Child entities.
    :type children:
     list[~azure.cognitiveservices.language.luis.runtime.models.CompositeChildModel]
    """

    # msrest validation rules: all three fields are mandatory.
    _validation = {
        'parent_type': {'required': True},
        'value': {'required': True},
        'children': {'required': True},
    }

    # Maps Python attribute names to wire-format keys and msrest types.
    _attribute_map = {
        'parent_type': {'key': 'parentType', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
        'children': {'key': 'children', 'type': '[CompositeChildModel]'},
    }

    def __init__(self, *, parent_type: str, value: str, children, **kwargs) -> None:
        super(CompositeEntityModel, self).__init__(**kwargs)
        self.parent_type = parent_type
        self.value = value
        self.children = children
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
3593906c7a346a04d30bc4f47ee8e891762d98c1 | 5e01b849530ceac9f62ef2fb85497792bbe4c15a | /Jan10/naver_top10.py | dd1cf2fe92f9dd24cc756974a264eb7c6063d04f | [] | no_license | cheesecat47/ML_DL_Jan2020 | 2206599c0eb20eebdd152d1e3b27e72ffa2c6900 | 15bffd8c9c19d9ff2871aa7afe95607f95e491fe | refs/heads/master | 2021-07-16T18:10:20.609018 | 2021-01-19T00:48:01 | 2021-01-19T00:48:01 | 232,076,415 | 0 | 0 | null | 2020-01-28T04:58:03 | 2020-01-06T10:23:19 | Python | UTF-8 | Python | false | false | 478 | py | if __name__ == "__main__":
    import requests
    from bs4 import BeautifulSoup

    # Scrape the Naver front page and print the top 10 trending search terms
    # (the span.ah_k elements).
    url = 'https://www.naver.com/'
    keyword = 'span.ah_k'
    source = requests.get(url).text
    # print('source -> ', source)
    soup = BeautifulSoup(source, 'html.parser')
    hotkeys = soup.select(keyword)
    # print('hotkeys -> ', hotkeys)
    index = 0
    for key in hotkeys:
        index += 1
        print(str(index) + ': ' + key.text)
        if index >= 10:
            break
| [
"cheesecat47@gmail.com"
] | cheesecat47@gmail.com |
824b3247e9500207e27b7026be68e30c9b0945f5 | bbe7d6d59ef6d7364ff06377df9658367a19c425 | /minigame/ClerkPurchase.py | 11eb86228f3c0a79bcb7b11df4bc25c5c43d2f20 | [
"Apache-2.0"
] | permissive | DedMemez/ODS-August-2017 | 1b45c912ad52ba81419c1596644d8db2a879bd9b | 5d6214732e3245f63bfa250e3e9c881cc2dc28ad | refs/heads/master | 2021-01-22T18:37:51.626942 | 2017-08-19T02:04:51 | 2017-08-19T02:04:51 | 100,762,513 | 0 | 8 | null | null | null | null | UTF-8 | Python | false | false | 2,295 | py | # Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.minigame.ClerkPurchase
from PurchaseBase import *
from toontown.toonbase import ToontownTimer
COUNT_UP_RATE = 0.15
DELAY_BEFORE_COUNT_UP = 1.25
DELAY_AFTER_COUNT_UP = 1.75
COUNT_DOWN_RATE = 0.075
DELAY_AFTER_COUNT_DOWN = 0.0
DELAY_AFTER_CELEBRATE = 3.0
class ClerkPurchase(PurchaseBase):
activateMode = 'storePurchase'
def __init__(self, toon, remain, doneEvent):
PurchaseBase.__init__(self, toon, doneEvent)
self.remain = remain
def load(self):
purchaseModels = loader.loadModel('phase_4/models/gui/gag_shop_purchase_gui')
PurchaseBase.load(self, purchaseModels)
self.backToPlayground = DirectButton(parent=self.frame, relief=None, scale=1.04, pos=(0.71, 0, -0.045), image=(purchaseModels.find('**/PurchScrn_BTN_UP'), purchaseModels.find('**/PurchScrn_BTN_DN'), purchaseModels.find('**/PurchScrn_BTN_RLVR')), text=TTLocalizer.GagShopDoneShopping, text_fg=(0, 0.1, 0.7, 1), text_scale=0.05, text_pos=(0, 0.015, 0), command=self.__handleBackToPlayground)
self.timer = ToontownTimer.ToontownTimer()
self.timer.reparentTo(self.frame)
self.timer.posInTopRightCorner()
purchaseModels.removeNode()
return
def unload(self):
PurchaseBase.unload(self)
del self.backToPlayground
self.timer.destroy()
del self.timer
def __handleBackToPlayground(self):
self.toon.inventory.reparentTo(hidden)
self.toon.inventory.hide()
self.handleDone(0)
def __timerExpired(self):
self.handleDone(2)
def enterPurchase(self):
PurchaseBase.enterPurchase(self)
self.backToPlayground.reparentTo(self.toon.inventory.storePurchaseFrame)
self.pointDisplay.reparentTo(self.toon.inventory.storePurchaseFrame)
self.statusLabel.reparentTo(self.toon.inventory.storePurchaseFrame)
self.timer.countdown(self.remain, self.__timerExpired)
def exitPurchase(self):
PurchaseBase.exitPurchase(self)
self.backToPlayground.reparentTo(self.frame)
self.pointDisplay.reparentTo(self.frame)
self.statusLabel.reparentTo(self.frame)
self.ignore('purchaseStateChange') | [
"noreply@github.com"
] | DedMemez.noreply@github.com |
bf71278902d24a993bdc103a887f085dbdb8912a | 2729fff7cb053d2577985d38c8962043ee9f853d | /bokeh/models/scales.py | 0a037eca1337cf7ff71966fd9f2667aba5e93bf4 | [
"BSD-3-Clause"
] | permissive | modster/bokeh | 2c78c5051fa9cac48c8c2ae7345eafc54b426fbd | 60fce9003aaa618751c9b8a3133c95688073ea0b | refs/heads/master | 2020-03-29T01:13:35.740491 | 2018-09-18T06:08:59 | 2018-09-18T06:08:59 | 149,377,781 | 1 | 0 | BSD-3-Clause | 2018-09-19T02:02:49 | 2018-09-19T02:02:49 | null | UTF-8 | Python | false | false | 923 | py | '''
'''
from __future__ import absolute_import
from ..core.has_props import abstract
from .transforms import Transform
@abstract
class Scale(Transform):
''' Base class for ``Scale`` models that represent an invertible
computation to be carried out on the client-side.
JavaScript implementations should implement the following methods:
.. code-block: coffeescript
compute: (x) ->
# compute the transform of a single value
v_compute: (xs) ->
# compute the transform of an array of values
invert: (xprime) ->
# compute the inverse transform of a single value
v_invert: (xprimes) ->
# compute the inverse transform of an array of values
'''
pass
class LinearScale(Scale):
'''
'''
pass
class LogScale(Scale):
'''
'''
pass
class CategoricalScale(LinearScale):
'''
'''
pass
| [
"noreply@github.com"
] | modster.noreply@github.com |
25c6321f32ecd55981098fa2638aa02c4977a194 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/ae37519b7fd0b4361acf63a040329b1ef9200f17-<get_symbol>-bug.py | ecd07ea2a9e15c3c4a911abc5f699d4508e5a9c2 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,935 | py | def get_symbol(num_classes=1000, **kwargs):
data = mx.symbol.Variable(name='data')
conv1a_3_3 = ConvFactory(data=data, num_filter=32, kernel=(3, 3), stride=(2, 2))
conv2a_3_3 = ConvFactory(conv1a_3_3, 32, (3, 3))
conv2b_3_3 = ConvFactory(conv2a_3_3, 64, (3, 3), pad=(1, 1))
maxpool3a_3_3 = mx.symbol.Pooling(data=conv2b_3_3, kernel=(3, 3), stride=(2, 2), pool_type='max')
conv3b_1_1 = ConvFactory(maxpool3a_3_3, 80, (1, 1))
conv4a_3_3 = ConvFactory(conv3b_1_1, 192, (3, 3))
maxpool5a_3_3 = mx.symbol.Pooling(data=conv4a_3_3, kernel=(3, 3), stride=(2, 2), pool_type='max')
tower_conv = ConvFactory(maxpool5a_3_3, 96, (1, 1))
tower_conv1_0 = ConvFactory(maxpool5a_3_3, 48, (1, 1))
tower_conv1_1 = ConvFactory(tower_conv1_0, 64, (5, 5), pad=(2, 2))
tower_conv2_0 = ConvFactory(maxpool5a_3_3, 64, (1, 1))
tower_conv2_1 = ConvFactory(tower_conv2_0, 96, (3, 3), pad=(1, 1))
tower_conv2_2 = ConvFactory(tower_conv2_1, 96, (3, 3), pad=(1, 1))
tower_pool3_0 = mx.symbol.Pooling(data=maxpool5a_3_3, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type='avg')
tower_conv3_1 = ConvFactory(tower_pool3_0, 64, (1, 1))
tower_5b_out = mx.symbol.Concat(*[tower_conv, tower_conv1_1, tower_conv2_2, tower_conv3_1])
net = repeat(tower_5b_out, 10, block35, scale=0.17, input_num_channels=320)
tower_conv = ConvFactory(net, 384, (3, 3), stride=(2, 2))
tower_conv1_0 = ConvFactory(net, 256, (1, 1))
tower_conv1_1 = ConvFactory(tower_conv1_0, 256, (3, 3), pad=(1, 1))
tower_conv1_2 = ConvFactory(tower_conv1_1, 384, (3, 3), stride=(2, 2))
tower_pool = mx.symbol.Pooling(net, kernel=(3, 3), stride=(2, 2), pool_type='max')
net = mx.symbol.Concat(*[tower_conv, tower_conv1_2, tower_pool])
net = repeat(net, 20, block17, scale=0.1, input_num_channels=1088)
tower_conv = ConvFactory(net, 256, (1, 1))
tower_conv0_1 = ConvFactory(tower_conv, 384, (3, 3), stride=(2, 2))
tower_conv1 = ConvFactory(net, 256, (1, 1))
tower_conv1_1 = ConvFactory(tower_conv1, 288, (3, 3), stride=(2, 2))
tower_conv2 = ConvFactory(net, 256, (1, 1))
tower_conv2_1 = ConvFactory(tower_conv2, 288, (3, 3), pad=(1, 1))
tower_conv2_2 = ConvFactory(tower_conv2_1, 320, (3, 3), stride=(2, 2))
tower_pool = mx.symbol.Pooling(net, kernel=(3, 3), stride=(2, 2), pool_type='max')
net = mx.symbol.Concat(*[tower_conv0_1, tower_conv1_1, tower_conv2_2, tower_pool])
net = repeat(net, 9, block8, scale=0.2, input_num_channels=2080)
net = block8(net, with_act=False, input_num_channel=2080)
net = ConvFactory(net, 1536, (1, 1))
net = mx.symbol.Pooling(net, kernel=(1, 1), global_pool=True, stride=(2, 2), pool_type='avg')
net = mx.symbol.Flatten(net)
net = mx.symbol.Dropout(data=net, p=0.2)
net = mx.symbol.FullyConnected(data=net, num_hidden=num_classes)
softmax = mx.symbol.SoftmaxOutput(data=net, name='softmax')
return softmax | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
c966e69945ab0e2351d0e63be5dcf607c2581dbe | 3b93fc92d4d95dab66438ebf221f6cb4745aac5f | /src/content/serializers.py | 5d6310607bff4b88a555a6a08f16a50ae05b552f | [
"MIT"
] | permissive | vollov/django-restful | f20fb1236e2c4d16c62f7f7a6318e842dac0b6ce | ee796ded68470fd1609a9313fbf21e89481bccce | refs/heads/master | 2021-01-10T01:32:42.643182 | 2016-02-23T15:18:44 | 2016-02-23T15:18:44 | 46,464,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from django.contrib.auth.models import User, Group
from rest_framework import serializers
class PageSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Page
fields = ('url', 'title', 'created_at')
| [
"dike.zhang@gmail.com"
] | dike.zhang@gmail.com |
e29f67bd34420ecb4d41e6b4df9f51e438faa20f | ca17bd80ac1d02c711423ac4093330172002a513 | /goodyhandy/SubsetII.py | a0dcac809e84135cf1f348571b84dae783ec73ff | [] | no_license | Omega094/lc_practice | 64046dea8bbdaee99d767b70002a2b5b56313112 | e61776bcfd5d93c663b247d71e00f1b298683714 | refs/heads/master | 2020-03-12T13:45:13.988645 | 2018-04-23T06:28:32 | 2018-04-23T06:28:32 | 130,649,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | class Solution(object):
def subsetHelper(self, currentLst, remainLst, k,solution):
if len(currentLst) == k:
solution.append(currentLst)
return
prev = None
for i in xrange(0, len(remainLst)):
if remainLst[i] == prev: continue
prev = remainLst[i]
self.subsetHelper(currentLst + [remainLst[i]], remainLst[i+1:], k , solution)
return
def subsetsWithDup(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
nums.sort()
solution = [[]]
k = len(nums)
for i in xrange(1,k + 1):
self.subsetHelper([], nums, i, solution)
return solution
| [
"zhao_j1@denison.edu"
] | zhao_j1@denison.edu |
10460c5c619b4240ff0b99f145ead047cb018d65 | 4c601eaa346e660c296e270cc2d79aea9a3721fe | /homeassistant/components/nexia/entity.py | 33962bb11c09d49123998860af8903052ae16907 | [
"Apache-2.0"
] | permissive | basnijholt/home-assistant | f55110af9ff602274c0a929c7298ef97a0ef282f | ba55b4b8338a2dc0ba3f1d750efea49d86571291 | refs/heads/dev | 2023-01-21T11:53:52.621353 | 2020-08-08T15:03:06 | 2020-08-08T15:03:06 | 220,313,680 | 5 | 1 | Apache-2.0 | 2023-01-13T06:04:49 | 2019-11-07T19:29:54 | Python | UTF-8 | Python | false | false | 3,620 | py | """The nexia integration base entity."""
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .const import (
ATTRIBUTION,
DOMAIN,
MANUFACTURER,
SIGNAL_THERMOSTAT_UPDATE,
SIGNAL_ZONE_UPDATE,
)
class NexiaEntity(Entity):
"""Base class for nexia entities."""
def __init__(self, coordinator, name, unique_id):
"""Initialize the entity."""
super().__init__()
self._unique_id = unique_id
self._name = name
self._coordinator = coordinator
@property
def available(self):
"""Return True if entity is available."""
return self._coordinator.last_update_success
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the name."""
return self._name
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
}
@property
def should_poll(self):
"""Return False, updates are controlled via coordinator."""
return False
async def async_added_to_hass(self):
"""Subscribe to updates."""
self.async_on_remove(
self._coordinator.async_add_listener(self.async_write_ha_state)
)
class NexiaThermostatEntity(NexiaEntity):
"""Base class for nexia devices attached to a thermostat."""
def __init__(self, coordinator, thermostat, name, unique_id):
"""Initialize the entity."""
super().__init__(coordinator, name, unique_id)
self._thermostat = thermostat
@property
def device_info(self):
"""Return the device_info of the device."""
return {
"identifiers": {(DOMAIN, self._thermostat.thermostat_id)},
"name": self._thermostat.get_name(),
"model": self._thermostat.get_model(),
"sw_version": self._thermostat.get_firmware(),
"manufacturer": MANUFACTURER,
}
async def async_added_to_hass(self):
"""Listen for signals for services."""
await super().async_added_to_hass()
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{SIGNAL_THERMOSTAT_UPDATE}-{self._thermostat.thermostat_id}",
self.async_write_ha_state,
)
)
class NexiaThermostatZoneEntity(NexiaThermostatEntity):
"""Base class for nexia devices attached to a thermostat."""
def __init__(self, coordinator, zone, name, unique_id):
"""Initialize the entity."""
super().__init__(coordinator, zone.thermostat, name, unique_id)
self._zone = zone
@property
def device_info(self):
"""Return the device_info of the device."""
data = super().device_info
data.update(
{
"identifiers": {(DOMAIN, self._zone.zone_id)},
"name": self._zone.get_name(),
"via_device": (DOMAIN, self._zone.thermostat.thermostat_id),
}
)
return data
async def async_added_to_hass(self):
"""Listen for signals for services."""
await super().async_added_to_hass()
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{SIGNAL_ZONE_UPDATE}-{self._zone.zone_id}",
self.async_write_ha_state,
)
)
| [
"noreply@github.com"
] | basnijholt.noreply@github.com |
5ce9615a4c88d5518f60ae94f94389d009de2e7d | ad6f20ca36dc65e34b43c69db66f383554718fed | /matrix/MaximumSizeSquareSubMatrixWithAll1s.py | e71c6b396ccd19a920cd3ae3144e2eadfef0eb5e | [] | no_license | atulanandnitt/questionsBank | 3df734c7389959801ab6447c0959c85f1013dfb8 | 477accc02366b5c4507e14d2d54850a56947c91b | refs/heads/master | 2021-06-11T21:39:24.682159 | 2021-05-06T17:54:18 | 2021-05-06T17:54:18 | 175,861,522 | 0 | 1 | null | 2020-05-02T09:26:25 | 2019-03-15T17:10:06 | Python | UTF-8 | Python | false | false | 983 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 26 19:29:19 2018
@author: Atul Anand
"""
def MaximumSizeSquareSubMatrixWithAll1s(mat):
print(mat)
solMat=[[0 for j in range(len(mat[0]))] for i in range(len(mat))]
print(len(mat), len(mat[0]))
print(len(solMat), len(solMat[0]))
for i in range(len(mat)):
for j in range(len(mat[i])):
print(solMat, i, j)
if i ==0 or j ==0 or mat[i][j] ==0 :
solMat[i][j] = mat[i][j]
continue
elif mat[i][j] != 0:
solMat[i][j] = max(mat[i-1][j-1], mat[i-1][j], mat[i][j-1]) + 1
print(solMat)
mat=[ [0 ,1, 1,0,1],
[1,1,0,1,0],
[0,1,1,1,0],
[1,1,1,1,0],
[1,1,1,0,1],
[0,0,0,0,0]]
solMat=[[0 for j in range(len(mat))] for i in range(len(mat[0]))]
print(solMat)
MaximumSizeSquareSubMatrixWithAll1s(mat) | [
"atul.anand.nitt@gmail.com"
] | atul.anand.nitt@gmail.com |
a7cd0fc7c724c7705a289d6cdd963479b3160bdf | 358519772669c73092f625f630722c38e1d33783 | /DatabaseTopology/Force/AbstractAngle.py | c9c4fb2ea68e3a0faa2f0df8796e1238f9370815 | [] | no_license | minghao2016/mmtools | e7e61aca084498408ceae965dd6c9450ad89eafa | 3ade988afb51cd54ee5a4067d8deaad88afbb0fe | refs/heads/master | 2021-09-21T01:02:22.522187 | 2014-09-19T03:40:03 | 2014-09-19T03:40:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | class AbstractAngle(object):
def __init__(self, atom1, atom2, atom3):
self.atom1 = atom1
self.atom2 = atom2
self.atom3 = atom3
def __eq__(self, object):
if ((self.atom1 == object.atom1
and self.atom2 == object.atom2
and self.atom3 == object.atom3)
or
(self.atom1 == object.atom3
and self.atom2 == object.atom2
and self.atom3 == object.atom1)):
return True
else:
return False
def __hash__(self):
return hash(tuple([self.atom1, self.atom2, self.atom3]))
| [
"choderaj@mskcc.org"
] | choderaj@mskcc.org |
314741efab3731b5875ac8b4f6eb9a3c6dfd630c | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5765824346324992_0/Python/xldrx/b_small.py | 602f0976eae08d0b2671638f6d4a89b2f1cbb49a | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,398 | py | #! /usr/bin/env python -u
# coding=utf-8
import sys
__author__ = 'xl'
def find_n(m_time):
global M
ans = 0
for m in M:
ans += m_time / m
return ans
def find_next_time(m_time):
global M
ans = 1e6
for i, m in enumerate(M):
ans = min((m - m_time % m) % m, ans)
return ans + m_time
if __name__ == "__main__":
fp = open("B.in")
sys.stdout = open("B.out", "w")
# fp = sys.stdin
T = int(fp.readline())
for t in range(T):
global M
B, N = map(int, fp.readline().split())
M = map(int, fp.readline().split())
min_time = 0
max_time = max(M) * N
p_time = -1
time = -2
while p_time != time:
p_time = time
time = (max_time + min_time) / 2
n = find_n(time)
if n == N - 1:
break
elif n > N - 1:
max_time = time
else:
min_time = time
ans = -1
index = N - n - 1
while ans < 0:
next_t = find_next_time(time)
for i, m in enumerate(M):
if next_t % m == 0:
if index == 0:
ans = i
break
else:
index -= 1
time = next_t + 1
print "Case #%s: %s" % (t + 1, ans + 1) | [
"eewestman@gmail.com"
] | eewestman@gmail.com |
2f2653ba38a9974a5cb6175428c4f23170ab8e86 | c2ee9d6d84e2270ba4c9d6062460a2be0ff5f19c | /205. Isomorphic Strings.py | 771d7e09dcbe8419877d10b7e415c0b9bf96c449 | [] | no_license | Peiyu-Rang/LeetCode | 0dd915638e8c41c560952d86b4047c85b599d630 | f79886ed3022664c3291e4e78129bd8d855cf929 | refs/heads/master | 2021-11-27T23:48:39.946840 | 2021-11-09T12:47:48 | 2021-11-09T12:47:48 | 157,296,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 16 17:43:45 2020
@author: Caven
"""
class Solution:
def isIsomorphic(self, s: str, t: str) -> bool:
seen = {}
for ss, tt in zip(s,t):
if ss in seen:
if seen[ss] == tt:
continue
else:
return False
else:
seen[ss] = tt
seen = {}
for ss, tt in zip(t,s):
if ss in seen:
if seen[ss] == tt:
continue
else:
return False
else:
seen[ss] = tt
return True | [
"prang3@gatech.edu"
] | prang3@gatech.edu |
32d2f686c52afd000a755700d21c23ebcfedfdd7 | d8cf93900e6d86240ceb7643fd78bd2841b38152 | /test/str_mainpulation_test/test_Unicode_bytes_bytearray.py | 874ebae0cd1e269f91aee5e9a376b059d0e359db | [] | no_license | Onebigbera/Daily_Practice | 165cee0ee7883b90bcf126b23ff993fed0ceffef | 8f1018a9c1e17c958bce91cbecae88b0bb3c946b | refs/heads/master | 2020-04-09T01:20:48.857114 | 2019-01-03T03:24:59 | 2019-01-03T03:24:59 | 159,900,636 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,129 | py | # -*-coding:utf-8 -*-
# File :test_Unicode_bytes_bytearray.py
# Author:George
# Date : 2019/1/3
# motto: Someone always give up while someone always try!
# Python字符串使用Unicode编码来表示文本,大致而言,每个Unicode字符都用一个码点(code point)表示,而
# 码点是Unicode标准给每个字符指定的数字。这让你能够以任何现
# 代软件都能识别的方式表示129个文字系统中的12万个以上的字
# 符。当然,鉴于计算机键盘不可能包含几十万个键,因此有一种指
# 定Unicode字符的通用机制:使用16或32位的十六进制字面量(分
# 别加上前缀\u或\U)或者使用字符的Unicode名称(\N{name})。
# 详情参考 http://unicode-table.com
"""
教材上演示代码
"""
import math
print("\u00C6")
# print("\U0001F60A")
cat = "This is a cat:\N{cat}"
print(cat)
print("\U0001F60A")
"""
使用 ASCLL 、UTF-8、UTF-32编码将字符串转换为bytes
"""
"""
为了实现多文字符号的实现和内存的浪费,Python中使用可变长度编码来编码字符即对于不同的字符,使用不同数量的字节进行编码。这种
编码方式主要出自计算机先锋Kenneth Thompson之手。通过使用这
种编码,可节省占用的空间,就像摩尔斯码使用较少的点和短线表
示常见的字母,从而减少工作量一样 。具体地说,进行单字节编
码时,依然使用ASCII编码,以便与较旧的系统兼容;但对于不在
这个范围内的字符,使用多个字节(最多为6个)进行编码。下面
来使用ASCII、UTF-8和UTF-32编码将字符串转换为bytes。
"""
str = "Hello, world!"
print(str.encode("ASCII"))
print(str.encode("UTF-8"))
print(str.encode("UTF-32"))
# 比较相同字符串经过编码方式编码后的长度对比
str = "How long is this?"
print(len(str.encode("UTF-8"))) # 17
print(len(str.encode("UTF-32"))) # 72
"""
可不使用方法encode和decode,而直接创建bytes和str(即字符
串)对象,如下所示:
"""
# string = bytes(("Hællå, wørld!", encoding='utf-8')
# string = str(b'H\xc3\xa6ll\xc3\xa5, w\xc3\xb8rld!', encoding="utf-8")
"""
Python还提供了bytearray,它是bytes的可变版。从某种
意义上说,它就像是可修改的字符串——常规字符串是不能修改
的。然而,bytearray其实是为在幕后使用而设计的,因此作为类
字符串使用时对用户并不友好。例如,要替换其中的字符,必须将
其指定为0~255的值。因此,要插入字符,必须使用ord获取其序
数值(ordinal value)
"""
x = bytearray(b"Hello!")
x[1] = ord(b"u")
print(x)
print(abs(-43))
print(float(454.34))
"""
四舍五入
int() 获取数字的整数部分
math.floor() 获取数字的整数部分(不大于该数的整数)
math.ceil() 获取不小于该数的整数
round() 四舍五入 当小数位5时 取偶数
"""
print(int(3.1))
print(int(3.9))
print(math.floor(3.1))
print(math.floor(3.9))
print(math.ceil(3.1))
print(math.ceil(3.9))
print(round(3.1))
print(round(3.5))
print(round(3.9)) | [
"2578288992@qq.com"
] | 2578288992@qq.com |
f897cb71f4618b8576015ce323dfbf1a9d1943b8 | 14252ea933a08056363230c6df89223b996a0da2 | /app/users/models.py | 4f39bdd9a656c75d57734bd0ce1fd9a66d26467e | [
"MIT"
] | permissive | S3Infosoft/mvr-insights | eeb02aa2e6767e6a23818d4e09f7be7ce29f80cb | ac73feff03c1592d5efd8e0b82f72dd4dbd3e921 | refs/heads/master | 2020-05-29T14:08:11.070784 | 2020-04-23T19:46:57 | 2020-04-23T19:46:57 | 189,184,619 | 0 | 1 | MIT | 2020-04-23T19:46:58 | 2019-05-29T08:35:56 | CSS | UTF-8 | Python | false | false | 6,123 | py | from django.db import models
from django.urls import reverse
from django.contrib.auth import base_user, models as auth_models
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from django.core.files.base import ContentFile
from django.db.models.signals import post_save
from PIL import Image
class CustomUserManager(base_user.BaseUserManager):
"""
CustomUser manager for CustomUser for authentication using email and
password
"""
def create_user(self, email, password, **extra_fields):
"""
Create a user with given email and password
"""
if email:
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
raise ValueError(_("Email must entered to create a user"))
def create_superuser(self, email, password, **extra_fields):
"""
Create a superuser with given email, password and other credentials
"""
extra_fields.setdefault("is_active", True)
extra_fields.setdefault("is_staff", True)
extra_fields.setdefault("is_superuser", True)
if not extra_fields.get("is_staff"):
raise ValueError(_("Superuser must have is_staff=True"))
if not extra_fields.get("is_superuser"):
raise ValueError(_("Superuser must have is_superuser=True"))
return self.create_user(email, password, **extra_fields)
def save_image(instance, filename):
user_id = instance.id
extension = filename.rsplit(".", 1)[-1]
timestamp = str(now().date())
filename = f"{timestamp}.{extension}"
return "/".join(("profile", str(user_id), filename))
def save_thumb(instance, filename):
user_id = instance.id
timestamp = str(now().date())
extension = filename.rsplit(".", 1)[-1]
filename = f"{timestamp}.{extension}"
return "/".join(("profile", str(user_id), "thumb", filename))
class CustomUser(auth_models.AbstractUser):
"""
CustomUser model with email and password for authentication
"""
username = None
email = models.EmailField(_("email address"), unique=True)
image = models.ImageField(upload_to=save_image, blank=True, null=True)
image_thumb = models.ImageField(upload_to=save_thumb,
blank=True,
null=True)
objects = CustomUserManager()
USERNAME_FIELD = "email"
REQUIRED_FIELDS = []
def __str__(self):
return self.email
def __init__(self, *args, **kwargs):
super(CustomUser, self).__init__(*args, **kwargs)
# Store the current image to check for a change while updating image
self._curr_image = self.image
@staticmethod
def get_absolute_url():
return reverse("profile")
def save(self, *args, **kwargs):
created = self._state.adding # created or updated
image_updated = False
if not created:
# Store the new image
image = self.image
if image and image.name != self._curr_image.name:
image_updated = True
# Deleting the previous image and its thumnail
self._curr_image.delete(False)
self.image_thumb.delete(False)
# Assigning the image field with the new image
self.image = image
image_name = image.name.rsplit("/", 1)[-1]
# Create a new image for thumbnail
thumb_image = ContentFile(image.read())
# Save the thumbnail but do not commit to the database
self.image_thumb.save(image_name, thumb_image, False)
# Save the model
super(CustomUser, self).save(*args, **kwargs)
if image_updated:
# Get the thumbnail image from its path to resize it
thumb_image = Image.open(self.image.path)
if thumb_image.height > 140 or thumb_image.height > 140:
output_size = (140, 140)
thumb_image.thumbnail(output_size)
# Save the resized image to its path
thumb_image.save(self.image_thumb.path)
def delete(self, *args, **kwargs):
# Delete the user image or anything after object is deleted
if self.image:
self.image.delete(False)
self.image_thumb.delete(False)
super(CustomUser, self).delete(*args, **kwargs)
class GlobalInfo(models.Model):
"""Model to store extra user information accecible by everyone"""
logo = models.ImageField(upload_to="logo/", blank=True, null=True)
address = models.CharField(max_length=350, blank=True, null=True)
def __init__(self, *args, **kwargs):
super(GlobalInfo, self).__init__(*args, **kwargs)
self._current_logo = self.logo
def save(self, *args, **kwargs):
"""
- Overriding save to enforce only single instance of the model
- Delete the previous image files on update
"""
if self.__class__.objects.count():
self.pk = self.__class__.objects.first().pk
created = self._state.adding # Whether object created or updated
logo_updated = False
if not created:
logo = self.logo
if logo and self._current_logo.name != logo.name:
self._current_logo.delete(False)
self.logo = logo
logo_updated = True
super(GlobalInfo, self).save(*args, **kwargs)
if logo_updated:
logo = Image.open(self.logo.path)
if logo.width > 300 or logo.height > 300:
output_size = (300, 300)
logo.thumbnail(output_size)
logo.save(self.logo.path)
@staticmethod
def get_absolute_url():
return reverse("global_settings")
def create_global_info(sender, instance, created, *args, **kwargs):
if created:
GlobalInfo.objects.get_or_create()
post_save.connect(create_global_info, sender=CustomUser)
| [
"abhie.lp@gmail.com"
] | abhie.lp@gmail.com |
67bf7155188ac9ad6196f709cc8f1055cfa2148c | 4cbc8b81d197bc392d1b57856254300331b9738f | /python/tutorial_env/bin/virtualenv | 73d447dc889101d6aedde9c0a52c9b20a8ff657c | [
"MIT"
] | permissive | vcatafesta/chili | 87b9606f17cda645ba44cbf2bb4cc4637e18d211 | 5c734ac88454db76eb2f4e92c13364a5bbc7a93a | refs/heads/main | 2023-09-01T01:39:09.457448 | 2023-08-29T21:23:28 | 2023-08-29T21:23:28 | 171,972,556 | 2 | 2 | null | 2019-02-22T01:38:49 | 2019-02-22T01:26:46 | null | UTF-8 | Python | false | false | 267 | #!/github/chili/python/tutorial_env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from virtualenv.__main__ import run_with_catch
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run_with_catch())
| [
"vcatafesta@gmail.com"
] | vcatafesta@gmail.com | |
b35fca13f4a26f547668416fc0650b4da2010002 | 07e6fc323f657d1fbfc24f861a278ab57338b80a | /python/pySimE/space/exp/OrbitalTransferOpt/OrbOpt_Cos/OrbitalOptCos_profile.py | 9e3b7306b0af6b7d12671d76c2f1f9c87fc2bfa2 | [
"MIT"
] | permissive | ProkopHapala/SimpleSimulationEngine | 99cf2532501698ee8a03b2e40d1e4bedd9a12609 | 47543f24f106419697e82771289172d7773c7810 | refs/heads/master | 2022-09-05T01:02:42.820199 | 2022-08-28T10:22:41 | 2022-08-28T10:22:41 | 40,007,027 | 35 | 4 | null | null | null | null | UTF-8 | Python | false | false | 6,048 | py | #!/usr/bin/env python
from pylab import *
from basiset import *
from Simplex_optimization import Simplex
from Random_optimization import MCBias_Run,MCBias2_Run
nnodes = 8
nsamp = 64
Gen = [0.0]*2*nnodes
def R2omega(R):
return sqrt(1.0/R**3)
T = 2.0; R0 = 1.0; R1 = 0.2;
v0=R2omega(sqrt(1.0/R0**3)); v1=sqrt(1.0/R1**3);
ph = 0.4*T*sqrt(v0**2+v1**2)
P0=array( [ R0 , 0 ] ); V0=array( [ 1.0, v0 ] );
P1=array( [ R1 , ph ] ); V1=array( [ 0, v1 ] );
Bs = evalDiCosBasisset( nnodes, nsamp, T)
scalesR = 1.0/array(range(1,nnodes+1))**2
scalesO = 2.0/array(range(1,nnodes+1))**1.5
#scalesR = 1.0/array(range(1,nnodes+1))
#scalesO = 2.0/array(range(1,nnodes+1))
ts = arange(0,1.000001,1.0/nsamp )
timescales = b = matrix([1.0, 1.0/T, 1.0/T**2] ).transpose();
Rt0 = array(multiply( timescales, evalPoly4( ts, poly4coefs_x0x1v0v1(P0[0], P1[0], V0[0]*T, V1[0]*T) ) ) )
Ot0 = array(multiply( timescales, evalPoly4( ts, poly4coefs_x0x1v0v1(P0[1], P1[1], V0[1]*T, V1[1]*T) ) ) )
ts *= T
nEvaluations = 0
maxThrust = 2.0
def fitnesFunc( Fs ):
global nEvaluations;
nEvaluations +=1
fsum = 0
tsum = 0
#print "len(Fs) ", len(Fs[4])," len(ts) ", len(ts)
for i in range(len(Fs[4])-1):
dt=(ts[i+1]-ts[i])
df=0.5*(Fs[4][i+1]+Fs[4][i])
fsum+=df*dt
tsum+=dt
#df_over = df-maxThrust
#if(df_over>0):
# fsum+= (df_over**2) * dt # penalty for overloading engine
return -sqrt(fsum/tsum)
#return -T* sqrt((Fs[4]**2).sum()) /len(ts)
def evalFitness( Gen ):
global Os,Rs,Fs
cR = Gen[nnodes:] * scalesR
cO = Gen[:nnodes] * scalesO
Os,Rs,Fs = evalTrajectoryPolar( Rt0, Ot0, Bs, cR, cO )
#print " evalFitness shape Os,Rs,Fs", shape(Rs),shape(Rs), shape(Fs)
fitness = fitnesFunc(Fs)
return -fitness
def plotTrj( Os,Rs,Fs, i, clr="k" ):
print shape(ts), shape(Rs), shape(Rs), shape(Fs)
subplot(2,5,1, polar=True); plot( Os[0], Rs[0], '-'+clr); title(" Trajectory ");
subplot(2,5,2); plot( ts, Rs[0],'-'+clr ); plot( ts, Os[0], '--'+clr ); grid(); title(" Position ");
subplot(2,5,3); plot( ts, Rs[1],'-'+clr ); plot( ts, Os[1], '--'+clr ); grid(); title(" Velocity ");
subplot(2,5,5+i);
plot( ts, Rs[2],'r--' ); plot( ts, Os[2], 'b--' );
plot( ts, Fs[1],'r-' ); plot( ts, Fs[0], 'b-' );
plot( ts, Fs[2],'g-'); # G
plot( ts, Fs[3],'m-'); # FTR
plot( ts, sqrt(Fs[4]),'k.-' ); # FT
title(" acclereations ");
grid()
def map2D( X, U1, U2, f1, f2, n1, n2 ):
#print " X: ",X
M = zeros((2*n1+1,2*n1+1))
for i in range(-n1,n1+1):
d1 = array(U1)*(i*f1/n1)
for j in range(-n2,n2+1):
d2 = array(U2)*(j*f2/n2)
M[i+n1,j+n2] = evalFitness( array(X)+d1 +d2 )
return M
def plotMaps(irow,nrow, Gen):
for i in range(nnodes):
U1 = zeros(2*nnodes); U1[i ]=1.0
U2 = zeros(2*nnodes); U2[i+nnodes]=1.0
print " maping node",i," U1: ",U1," U2: ", U2
subplot(nrow, nnodes, nnodes*irow+i+1 )
mapa = map2D( Gen, U1, U2, 0.1, 0.1, 3, 3 )
imshow(mapa, interpolation='bicubic', cmap='jet'); colorbar( )
CS = contour(mapa, colors="g"); clabel(CS, inline=0.5, fontsize=8)
def TryNew( GenBest, fitnessBest, stepSize ):
hit = False
GenNew = GenBest[:] + (rand(nnodes*2)[:]-0.5)*stepSize
ts,Os,Rs,Fs = evalGen ( ti, GenNew )
fitnessNew = fitnesFunc(Fs)
#fitnessNew = evalFitness( GenNew )
if(fitnessNew > fitnessBest ):
hit = True
GenBest = GenNew
fitnessBest = fitnessNew
#print " Better is ",GenBest," fitness = ",fitnessBest,
#print " fitness: ",fitnessBest, " stepSize: ", stepSize
subplot(2,5,5); plot( ts, Fs[4], '-', lw=0.25 ); grid()
return GenBest, fitnessBest,hit
def Simplex_Run(Gen,steps, GenHistory):
print
print " ========= Simplex Optimization ================= "
Simp = Simplex(evalFitness, Gen, steps )
#values, err, niter = SimplexOpt.minimize()
old_low = 10000000000
lastImprovement = 0
for i in range(0, 10000):
converged, err,low,hi = Simp.simplexStep( 0.0001 )
if converged:
print " converged in ",i," steps "
break;
if(low < old_low):
lastImprovement = i
old_low = low
subplot(2,5,5); plot( ts, Fs[4], '-', lw=0.25 ); grid()
GenHistory.append(list(Simp.simplex[Simp.lowest]))
print " new_low : ", low, " iter: ", i, " err ", err
if(i-lastImprovement)>(nnodes*16):
print " Not able to improve => Exiting .... "
break;
print Simp.simplex[Simp.lowest]
return Simp.simplex[Simp.lowest]
# ================ MAIN PROGRAM BODY =========================
figure(num=None, figsize=(20, 10))
GenHistory = []
print " Initial Gen : ", Gen
evalFitness( Gen )
Gen0 = array(Gen).copy()
Opt = True
if Opt:
nEvaluations=0
Gen = MCBias2_Run( evalFitness, Gen, 0.5, 0.01, 4*4*nnodes, 2*nnodes, 2000, GenHistory, wStep=0.5, fBias = 3.0, kBias0=0.8, kBiasf = 0.97 ) # good
GenRnd = array(Gen).copy()
print "===== nEvaluations : ", nEvaluations
steps = ones(nnodes*2)*0.05
nEvaluations=0
Gen = Simplex_Run(Gen,steps, GenHistory)
GenSimp = array(Gen).copy()
print "===== nEvaluations : ", nEvaluations
'''
if len(GenHistory)>2:
GenHistory = transpose(array(GenHistory ))
subplot(2,5,10);
for i in range(nnodes):
plot( GenHistory[i ]-Gen0[i ], 'r-' );
plot( GenHistory[i+nnodes]-Gen0[i+nnodes], 'b-' );
#legend( bbox_to_anchor=(0.5, 1.00, 1., 0.000) )
if Opt:
print " ===== Random Fittness ", evalFitness( GenRnd )
plotTrj( Os,Rs,Fs, 2, "g" )
subplot(2,5,5); plot( ts, Fs[4], 'g-', lw=2 ); grid()
print " ===== Simplex Fittness ", evalFitness( GenSimp )
plotTrj( Os,Rs,Fs, 3, "k" )
subplot(2,5,5); plot( ts, Fs[4], 'k-', lw=2 ); grid()
print " ===== Initial Fittness ", evalFitness( Gen0 )
plotTrj( Os,Rs,Fs, 1, "r" )
#subplot(2,5,5); autoscale(False); plot( ts, Fs[4], 'r-', lw=2 ); grid(), title("propelant \n consumption");
'''
#savefig("plost.png", bbox_inches='tight')
'''
figure(num=None, figsize=(20, 5))
plotMaps(0,2, Gen0)
plotMaps(1,2, Gen )
savefig("valley.png", bbox_inches='tight')
'''
#show()
| [
"ProkopHapala@gmail.com"
] | ProkopHapala@gmail.com |
f85334f39cfa415b73ca89a1cfa22453ea4916e7 | 2ad9a73cb3e2da46fb15ae56a6dee11407fe8845 | /ports/kodi/addons/plugin.video.transistortv/scrapers/local_scraper.py | 06c7ae7a3925f2c85a4975534ba43cc2f04195b2 | [] | no_license | hpduong/retropie_configs | cde596b35897a3faeedefabd742fc15820d58255 | ed4e39146e5bebc0212dcef91108541a128d9325 | refs/heads/master | 2021-07-12T15:46:17.589357 | 2018-11-11T19:10:54 | 2018-11-11T19:10:54 | 157,111,040 | 1 | 2 | null | 2020-07-24T03:43:29 | 2018-11-11T18:59:52 | Python | UTF-8 | Python | false | false | 7,649 | py | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import xbmc
import kodi
import log_utils # @UnusedImport
from transistortv_lib import scraper_utils
from transistortv_lib.constants import FORCE_NO_MATCH
from transistortv_lib.constants import SORT_KEYS
from transistortv_lib.constants import VIDEO_TYPES
import scraper
logger = log_utils.Logger.get_logger()
BASE_URL = ''
class Scraper(scraper.Scraper):
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT): # @UnusedVariable
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
self.def_quality = int(kodi.get_setting('%s-def-quality' % (self.get_name())))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE, VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'Local'
def get_sources(self, video):
hosters = []
source_url = self.get_url(video)
if not source_url or source_url == FORCE_NO_MATCH: return hosters
params = scraper_utils.parse_query(source_url)
if video.video_type == VIDEO_TYPES.MOVIE:
cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": {"movieid": %s, "properties" : ["file", "playcount", "streamdetails"]}, "id": "libMovies"}'
result_key = 'moviedetails'
else:
cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": {"episodeid": %s, "properties" : ["file", "playcount", "streamdetails"]}, "id": "libTvShows"}'
result_key = 'episodedetails'
run = cmd % (params['id'])
meta = xbmc.executeJSONRPC(run)
meta = scraper_utils.parse_json(meta)
logger.log('Source Meta: %s' % (meta), log_utils.LOGDEBUG)
if result_key in meta.get('result', []):
details = meta['result'][result_key]
def_quality = [item[0] for item in sorted(SORT_KEYS['quality'].items(), key=lambda x:x[1])][self.def_quality]
host = {'multi-part': False, 'class': self, 'url': details['file'], 'host': 'XBMC Library', 'quality': def_quality, 'views': details['playcount'], 'rating': None, 'direct': True}
stream_details = details['streamdetails']
if len(stream_details['video']) > 0 and 'width' in stream_details['video'][0]:
host['quality'] = scraper_utils.width_get_quality(stream_details['video'][0]['width'])
hosters.append(host)
return hosters
def _get_episode_url(self, show_url, video):
params = scraper_utils.parse_query(show_url)
cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"tvshowid": %s, "season": %s, "filter": {"field": "%s", "operator": "is", "value": "%s"}, \
"limits": { "start" : 0, "end": 25 }, "properties" : ["title", "season", "episode", "file", "streamdetails"], "sort": { "order": "ascending", "method": "label", "ignorearticle": true }}, "id": "libTvShows"}'
base_url = 'video_type=%s&id=%s'
episodes = []
force_title = scraper_utils.force_title(video)
if not force_title:
run = cmd % (params['id'], video.season, 'episode', video.episode)
meta = xbmc.executeJSONRPC(run)
meta = scraper_utils.parse_json(meta)
logger.log('Episode Meta: %s' % (meta), log_utils.LOGDEBUG)
if 'result' in meta and 'episodes' in meta['result']:
episodes = meta['result']['episodes']
else:
logger.log('Skipping S&E matching as title search is forced on: %s' % (video.trakt_id), log_utils.LOGDEBUG)
if (force_title or kodi.get_setting('title-fallback') == 'true') and video.ep_title and not episodes:
run = cmd % (params['id'], video.season, 'title', video.ep_title)
meta = xbmc.executeJSONRPC(run)
meta = scraper_utils.parse_json(meta)
logger.log('Episode Title Meta: %s' % (meta), log_utils.LOGDEBUG)
if 'result' in meta and 'episodes' in meta['result']:
episodes = meta['result']['episodes']
for episode in episodes:
if episode['file'].endswith('.strm'):
continue
return base_url % (video.video_type, episode['episodeid'])
@classmethod
def get_settings(cls):
settings = super(cls, cls).get_settings()
name = cls.get_name()
settings.append(' <setting id="%s-def-quality" type="enum" label=" Default Quality" values="None|Low|Medium|High|HD720|HD1080" default="0" visible="eq(-3,true)"/>' % (name))
return settings
def search(self, video_type, title, year, season=''): # @UnusedVariable
filter_str = '{{"field": "title", "operator": "contains", "value": "{search_title}"}}'
if year: filter_str = '{{"and": [%s, {{"field": "year", "operator": "is", "value": "%s"}}]}}' % (filter_str, year)
if video_type == VIDEO_TYPES.MOVIE:
cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": { "filter": %s, "limits": { "start" : 0, "end": 25 }, "properties" : ["title", "year", "file", "streamdetails"], \
"sort": { "order": "ascending", "method": "label", "ignorearticle": true } }, "id": "libMovies"}'
result_key = 'movies'
id_key = 'movieid'
else:
cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": { "filter": %s, "limits": { "start" : 0, "end": 25 }, "properties" : ["title", "year"], \
"sort": { "order": "ascending", "method": "label", "ignorearticle": true } }, "id": "libTvShows"}'
result_key = 'tvshows'
id_key = 'tvshowid'
command = cmd % (filter_str.format(search_title=title))
results = self.__get_results(command, result_key, video_type, id_key)
norm_title = self.__normalize_title(title)
if not results and norm_title and norm_title != title:
command = cmd % (filter_str.format(search_title=norm_title))
results = self.__get_results(command, result_key, video_type, id_key)
return results
def __normalize_title(self, title):
norm_title = re.sub('[^A-Za-z0-9 ]', ' ', title)
return re.sub('\s+', ' ', norm_title)
def __get_results(self, cmd, result_key, video_type, id_key):
results = []
logger.log('Search Command: %s' % (cmd), log_utils.LOGDEBUG)
meta = xbmc.executeJSONRPC(cmd)
meta = scraper_utils.parse_json(meta)
logger.log('Search Meta: %s' % (meta), log_utils.LOGDEBUG)
for item in meta.get('result', {}).get(result_key, {}):
if video_type == VIDEO_TYPES.MOVIE and item['file'].endswith('.strm'):
continue
result = {'title': item['title'], 'year': item['year'], 'url': 'video_type=%s&id=%s' % (video_type, item[id_key])}
results.append(result)
return results
| [
"henryduong@gmail.com"
] | henryduong@gmail.com |
71c6c990cb067a053461d52af67c6a7f6bfa3c21 | 075c07c4e6efebbcdec670c07712281ed7ba659e | /traceback_format.py | fd82ab2d76aa08c4033e50b691553f48e7a1258d | [] | no_license | crystalDf/Automate-the-Boring-Stuff-with-Python-Chapter-10-Debugging | 86380e0e4a71656bd7638255252d470fe27b35e1 | 75dfc3bb0272fc799f6c618a7ccdef6820702bb0 | refs/heads/master | 2021-01-20T19:39:16.394050 | 2016-07-27T15:30:22 | 2016-07-27T15:30:22 | 63,609,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | import traceback
# Demonstrate logging a traceback instead of letting the program crash:
# deliberately raise an exception, then write the formatted traceback
# to errorInfo.txt for later inspection.
try:
    raise Exception('This is the error message.')
except Exception:  # narrow from bare `except:` so SystemExit/KeyboardInterrupt pass through
    # The context manager guarantees the file is closed even if write() fails.
    with open('errorInfo.txt', 'w') as error_file:
        # write() returns the number of characters written; printed as in the book example.
        print(error_file.write(traceback.format_exc()))
    print('The traceback info was written to errorInfo.txt.')
| [
"chendong333@gmail.com"
] | chendong333@gmail.com |
f562169114f72ae7fd9906281c117a34e96029bb | f7a20374403b55189cc5db6e8fa34d0ba290387c | /modules/smm_marketing/__openerp__.py | 407d9650426c5d52c098a18e603a7044ae1b3ba3 | [] | no_license | dark-ice/upink_modules | 1a7b5a165cc5e05396c62cf33c261b907c23e33c | c497bf87a39796f1df3877542359b1927bec3a76 | refs/heads/master | 2021-05-01T04:40:16.436666 | 2014-04-12T15:09:31 | 2014-04-12T15:09:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,683 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'SMM - Marketing',
'version': '1.0',
'category': 'SMM tools',
'description': """
Accounts SMM
""",
'author': 'Upsale dep IS',
'website': 'http://www.upsale.ru',
'depends': ['hr',],
'update_xml': [
'security/smm_marketing_security.xml',
'smm_socialnet_view.xml',
'smm_fotohost_view.xml',
'smm_videohost_view.xml',
'smm_email_view.xml',
'smm_blogs_view.xml',
'smm_stpres_view.xml',
'smm_forum_view.xml',
'smm_mobphone_view.xml',
],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | [
"karbanovich.andrey@gmail.com"
] | karbanovich.andrey@gmail.com |
fa237d48bc666c059cf88d567f64c6ae48cb8b0d | 271dbb5f0c23ae40f19a8df7dd3f15a44fbe5ae1 | /EdmureBlog/EdmureBlog/settings.py | 65db68c5b7535414c71de5b01c5cb3823e4aa1fa | [] | no_license | obligate/python3-king | a4d1c5c145c3b1c42efe059cf2bbd797d0b3c528 | 2b31400468c7a2621f29f24f82e682eb07c0e17d | refs/heads/master | 2020-05-02T11:45:16.218771 | 2019-03-27T08:05:39 | 2019-03-27T08:05:39 | 177,938,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,756 | py | """
Django settings for EdmureBlog project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%q_ke6ok7im7x_-=0mdz+9*!rxvraey(xje=92f$(an4s)-7ls'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', ]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'repository',
'backend',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'EdmureBlog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'EdmureBlog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'handlers': {
# 'file': {
# 'level': 'DEBUG',
# 'class': 'logging.FileHandler',
# 'filename': os.path.join(BASE_DIR,'debug.log'),
# },
# },
# 'loggers': {
# 'django': {
# 'handlers': ['file'],
# 'level': 'DEBUG',
# 'propagate': True,
# },
# },
# }
SESSION_COOKIE_AGE = 60 * 60 * 24
SESSION_SAVE_EVERY_REQUEST = True
| [
"peter@tidebuy.net"
] | peter@tidebuy.net |
eeb786c8a7d518628fe96db228bbf2089d27276b | 0a48086ea4dd24cf696aab16fc3969c5980f1442 | /gcpdjango/apps/users/utils.py | c34df67721671cfd06d7fcd7d74ca1fa7676693e | [
"MIT"
] | permissive | stanford-rc/gcp-django-stanford | 67b7b0b532b3c4b7236ec80ad66892e979b52dda | a8d72130e03f96c7d9636b951b780e478594309d | refs/heads/master | 2022-11-21T03:25:03.980764 | 2020-07-31T18:40:56 | 2020-07-31T18:40:56 | 282,962,179 | 2 | 0 | MIT | 2020-07-31T18:40:58 | 2020-07-27T17:03:03 | HTML | UTF-8 | Python | false | false | 3,245 | py | import string
import random
from gcpdjango.settings import SENDGRID_API_KEY, SENDGRID_SENDER_EMAIL
from django.contrib import messages
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import (
Mail,
Email,
To,
Content,
Attachment,
FileContent,
FileName,
FileType,
Disposition,
)
import base64
import os
def send_email(
    email_to,
    message,
    subject,
    attachment=None,
    filetype="application/pdf",
    request=None,
    filename=None,
):
    """Send an email (optionally with an attachment) via SendGrid.

    Requires SENDGRID_API_KEY and SENDGRID_SENDER_EMAIL to be defined in
    settings; otherwise a warning is surfaced on ``request`` (when provided)
    and no mail is sent.

    Parameters
    ==========
    email_to: the email address to send the message to
    message: the plain-text content for the body
    subject: the email subject
    attachment: path of a file on the server to attach (optional)
    filetype: MIME content type of the attachment (defaults to application/pdf)
    request: optional Django request used to surface warning messages
    filename: optional name for the attachment (defaults to the file basename)

    Returns
    =======
    True if the mail was handed off to SendGrid successfully, False otherwise.
    """
    if not SENDGRID_API_KEY or not SENDGRID_SENDER_EMAIL:
        if request is not None:
            messages.warning(
                request,
                "SendGrid secrets were not found in the environment. Please see https://vsoch.github.io/gcpdjango/docs/getting-started/#sendgrid-secrets",
            )
        return False

    mail = Mail(
        Email(SENDGRID_SENDER_EMAIL),
        To(email_to),
        subject,
        Content("text/plain", message),
    )

    # If the user has provided an attachment, add it to the Mail object.
    # (Previously this was assigned to the `message` string by mistake,
    # which raised AttributeError at runtime.)
    if attachment:
        mail.attachment = generate_attachment(
            filepath=attachment, filetype=filetype, filename=filename
        )

    try:
        sg = SendGridAPIClient(api_key=SENDGRID_API_KEY)
        response = sg.client.mail.send.post(request_body=mail.get())
        print(response.status_code)
        print(response.headers)
        return True
    except Exception as e:
        # Python 3 exceptions have no `.message` attribute; print the
        # exception itself to avoid a secondary AttributeError here.
        print(e)
        return False
def generate_attachment(filepath, filetype="application/pdf", filename=None):
    """Build a SendGrid attachment object from a file on disk.

    The file content is read as bytes and base64-encoded, as the SendGrid
    API expects. Returns None when the path does not exist.

    Parameters
    ==========
    filepath: the file path to attach on the server.
    filetype: MIME content type (defaults to application/pdf)
    filename: a filename for the attachment (defaults to basename provided)
    """
    if not os.path.exists(filepath):
        return

    # Read the raw bytes of the file to attach
    with open(filepath, "rb") as file_handle:
        raw_bytes = file_handle.read()

    encoded_content = base64.b64encode(raw_bytes).decode()

    attachment = Attachment()
    attachment.file_content = FileContent(encoded_content)
    attachment.file_type = FileType(filetype)
    # Fall back to the actual file's basename when no name was provided
    attachment.file_name = FileName(filename or os.path.basename(filepath))
    attachment.disposition = Disposition("attachment")
    return attachment
def generate_random_password(length=10):
    """Return a random alphanumeric password of *length* characters.

    Uses the ``secrets`` module rather than ``random`` so the result is
    suitable for security-sensitive use (``random`` is predictable).
    The character set is ASCII letters and digits.
    """
    import secrets

    password_characters = string.ascii_letters + string.digits
    return "".join(secrets.choice(password_characters) for _ in range(length))
| [
"vsochat@stanford.edu"
] | vsochat@stanford.edu |
9c10f9833c7b2a77775984733c373b5641329b23 | 23556b966ee6a3abbe42ee5b66e13af3dce0cf71 | /u24_lymphocyte/third_party/treeano/nodes/tests/composite_test.py | eccc06bc2a77e4301650a679bfcae56d78ee2209 | [
"BSD-3-Clause"
] | permissive | SBU-BMI/quip_classification | 74dbf6d41e579755a952ba475f8a89bd82ac8530 | be61e5f047093243404f6f2dc8e837e27e8e1eb3 | refs/heads/master | 2022-07-30T20:29:48.459298 | 2022-07-17T16:59:15 | 2022-07-17T16:59:15 | 162,736,219 | 5 | 14 | BSD-3-Clause | 2022-02-11T02:53:02 | 2018-12-21T16:36:39 | Python | UTF-8 | Python | false | false | 5,315 | py | import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
fX = theano.config.floatX
def test_dense_node_serialization():
    # A DenseNode must round-trip through treeano's serialization both
    # with and without the num_units hyperparameter set.
    tn.check_serialization(tn.DenseNode("a"))
    tn.check_serialization(tn.DenseNode("a", num_units=100))
def test_dense_combine_node_serialization():
    # A DenseCombineNode (here with an empty child list) must round-trip
    # through treeano's serialization with and without num_units set.
    tn.check_serialization(tn.DenseCombineNode("a", []))
    tn.check_serialization(tn.DenseCombineNode("a", [], num_units=100))
def test_dense_node():
network = tn.SequentialNode(
"seq",
[tn.InputNode("in", shape=(3, 4, 5)),
tn.DenseNode("fc1", num_units=6),
tn.DenseNode("fc2", num_units=7),
tn.DenseNode("fc3", num_units=8)]
).network()
x = np.random.randn(3, 4, 5).astype(fX)
fn = network.function(["in"], ["fc3"])
res = fn(x)[0]
nt.assert_equal(res.shape, (3, 8))
def test_dense_combine_node():
network = tn.SequentialNode(
"seq",
[tn.InputNode("in", shape=(3, 4, 5)),
tn.DenseCombineNode("fc1", [tn.IdentityNode("i1")], num_units=6),
tn.DenseCombineNode("fc2", [tn.IdentityNode("i2")], num_units=7),
tn.DenseCombineNode("fc3", [tn.IdentityNode("i3")], num_units=8)]
).network()
x = np.random.randn(3, 4, 5).astype(fX)
fn = network.function(["in"], ["fc3"])
res = fn(x)[0]
nt.assert_equal(res.shape, (3, 8))
def test_dense_node_and_dense_combine_node1():
# testing that dense node and dense combine node with identity child
# return the same thing
network1 = tn.HyperparameterNode(
"hp",
tn.SequentialNode(
"seq",
[tn.InputNode("in", shape=(3, 4, 5)),
tn.DenseNode("fc1", num_units=6),
tn.DenseNode("fc2", num_units=7),
tn.DenseNode("fc3", num_units=8)]
),
inits=[treeano.inits.ConstantInit(1)]
).network()
network2 = tn.HyperparameterNode(
"hp",
tn.SequentialNode(
"seq",
[tn.InputNode("in", shape=(3, 4, 5)),
tn.DenseCombineNode("fc1", [tn.IdentityNode("i1")], num_units=6),
tn.DenseCombineNode("fc2", [tn.IdentityNode("i2")], num_units=7),
tn.DenseCombineNode("fc3", [tn.IdentityNode("i3")], num_units=8)]
),
inits=[treeano.inits.ConstantInit(1)]
).network()
x = np.random.randn(3, 4, 5).astype(fX)
fn1 = network1.function(["in"], ["fc3"])
fn2 = network2.function(["in"], ["fc3"])
np.testing.assert_allclose(fn1(x), fn2(x))
def test_dense_node_and_dense_combine_node2():
# testing that summing the output of 2 dense nodes is the same as
# applying a dense combine node with 2 identities (+ bias)
# and the same as multiplying the output of 1 dense node by 2
network0 = tn.HyperparameterNode(
"hp",
tn.SequentialNode(
"seq",
[tn.InputNode("in", shape=(3, 4, 5)),
tn.DenseNode("dense1", num_units=6),
tn.MultiplyConstantNode("mul", value=2)]
),
inits=[treeano.inits.ConstantInit(1)]
).network()
network1 = tn.HyperparameterNode(
"hp",
tn.SequentialNode(
"seq",
[tn.InputNode("in", shape=(3, 4, 5)),
tn.ElementwiseSumNode(
"sum",
[tn.DenseNode("dense1", num_units=6),
tn.DenseNode("dense2", num_units=6)])]
),
inits=[treeano.inits.ConstantInit(1)]
).network()
network2 = tn.HyperparameterNode(
"hp",
tn.SequentialNode(
"seq",
[tn.InputNode("in", shape=(3, 4, 5)),
tn.DenseCombineNode(
"fc",
[tn.IdentityNode("i1"),
tn.IdentityNode("i2")],
num_units=6),
tn.AddBiasNode("bias")]
),
inits=[treeano.inits.ConstantInit(1)]
).network()
x = np.random.randn(3, 4, 5).astype(fX)
fn0 = network0.function(["in"], ["hp"])
fn1 = network1.function(["in"], ["hp"])
fn2 = network2.function(["in"], ["hp"])
np.testing.assert_allclose(fn0(x), fn1(x))
np.testing.assert_allclose(fn0(x), fn2(x))
def test_dense_combine_node_uses_children():
network1 = tn.HyperparameterNode(
"hp",
tn.SequentialNode(
"seq",
[tn.InputNode("in", shape=(3, 4, 5)),
tn.MultiplyConstantNode("mul", value=2),
tn.DenseCombineNode(
"fc",
[tn.IdentityNode("i1"),
tn.IdentityNode("i2")],
num_units=6)]
),
inits=[treeano.inits.ConstantInit(1)]
).network()
network2 = tn.HyperparameterNode(
"hp",
tn.SequentialNode(
"seq",
[tn.InputNode("in", shape=(3, 4, 5)),
tn.DenseCombineNode(
"fc",
[tn.MultiplyConstantNode("mul1", value=2),
tn.MultiplyConstantNode("mul2", value=2)],
num_units=6)]
),
inits=[treeano.inits.ConstantInit(1)]
).network()
x = np.random.randn(3, 4, 5).astype(fX)
fn1 = network1.function(["in"], ["hp"])
fn2 = network2.function(["in"], ["hp"])
np.testing.assert_allclose(fn1(x), fn2(x))
| [
"sabousamra@cs.stonybrook.edu"
] | sabousamra@cs.stonybrook.edu |
55ff4fe9356b729e0c86ab973ecb0755cbe6c0bf | 6418b2bca8c9d95a69109e2fe4b0a827f9a87ddd | /cloudmesh/pi/board/led.py | b6abe06d0b12d0aae28eb57e5922230ca9ef7ccd | [
"Apache-2.0",
"Python-2.0"
] | permissive | nitesh-jaswal/cloudmesh-pi-cluster | 49d31baecd08eef3e8fc355fbf72fbcd655ebd35 | 804a7f0f93fb06161bccb4c9ff0fcecc93854747 | refs/heads/master | 2022-11-26T12:52:20.313904 | 2020-08-04T21:52:11 | 2020-08-04T21:52:11 | 273,601,432 | 0 | 0 | NOASSERTION | 2020-08-04T21:52:12 | 2020-06-19T23:02:02 | Python | UTF-8 | Python | false | false | 7,945 | py | import os
import time
from pprint import pprint
from cloudmesh.common.Host import Host
from cloudmesh.common.Tabulate import Printer
from cloudmesh.common.parameter import Parameter
"""
From: https://www.raspberrypi.org/forums/viewtopic.php?t=12530
If you want the LED to go back to its default function:
echo mmc0 >/sys/class/leds/led0/trigger
As an aside, there are a couple of kernel modules you can load up (ledtrig_timer
and ledtrig_heartbeat) which will flash the LED for you.
modprobe ledtrig_heartbeat
echo heartbeat >/sys/class/leds/led0/trigger
Once you have turned off the mmc0 trigger, you can use GPIO16 to control the
LED. It's active-low, so you need to set the pin low to turn the LED on, and
high to turn it off.
"""
class LED:
    """
    Controls the Raspberry Pi on-board LEDs via /sys/class/leds.

    Implements:

      pi led (red|green) VALUE
      pi led (red|green) VALUE NAMES [--user=USER]
      pi led list NAMES [--user=USER]
      pi led blink (red|green) NAMES [--user=USER] [--rate=SECONDS]

    led0 is the green (activity) LED, led1 is the red (power) LED.
    """

    # TODO: implement: cms pi led reset # to original setting
    # TODO: implement: cms pi led list --trigger list, list the triggers

    def __init__(self):
        pass

    def Print(self, arguments, results):
        """Print generic per-host command results as a table or raw data."""
        if arguments.output == 'table':
            print(Printer.write(results,
                                order=['host', 'success', 'stdout']))
        else:
            pprint(results)

    def Print_leds(self, arguments, results):
        """Print per-host LED states (green/red) as a table or raw data."""
        if arguments.output == 'table':
            print(Printer.write(results,
                                order=['host', 'green', 'red']))
        else:
            pprint(results)

    def execute(self, arguments):
        """Dispatch the `pi led` subcommands based on parsed arguments."""
        # Map the color flags to the sysfs LED number.
        if arguments.red:
            arguments.number = 1
        elif arguments.green:
            arguments.number = 0

        if arguments.sequence:
            results = LED.sequence_remote(
                led=arguments.number,
                hosts=arguments.NAMES,
                username=arguments.user,
                rate=arguments.RATE,
                processors=3)
            self.Print_leds(arguments, results)

        elif arguments.blink:
            results = LED.blink_remote(
                led=arguments.number,
                hosts=arguments.NAMES,
                username=arguments.user,
                rate=arguments.RATE,
                processors=3)
            self.Print_leds(arguments, results)

        elif arguments.list:
            results = LED.list_remote(
                hosts=arguments.NAMES,
                username=arguments.user,
                processors=3)
            self.Print_leds(arguments, results)

        elif arguments.reset and not arguments.NAMES and arguments.led:
            LED.reset()

        elif arguments.reset and arguments.NAMES and arguments.led:
            results = LED.reset_remote(
                hosts=arguments.NAMES,
                username=arguments.user,
                processors=3)
            self.Print(arguments, results)

        # elif not arguments.NAMES and arguments.led:
        #    LED.set(led=arguments.number, value=arguments.VALUE)

        elif arguments.NAMES and arguments.led:
            results = LED.set_remote(
                led=arguments.number,
                value=arguments.VALUE,
                hosts=arguments.NAMES,
                username=arguments.user,
                processors=3)
            self.Print(arguments, results)

    @staticmethod
    def get_state(value):
        """Normalize an on/off value to an integer LED state.

        Accepts "1"/"on"/"true"/"+" -> 1, "0"/"off"/"false"/"-" -> 0,
        other strings are converted with int(); non-string values are
        returned unchanged.
        """
        state = value
        if type(value) == str:
            if value.lower() in ["1", "on", "true", "+"]:
                state = 1
            elif value.lower() in ["0", "off", "false", "-"]:
                state = 0
            else:
                state = int(value)
        return state

    @staticmethod
    def reset():
        """Restore the default mmc0 trigger for led0 on the local Pi."""
        command = f"echo mmc0 >/sys/class/leds/led0/trigger"
        os.system(command)

    @staticmethod
    def reset_remote(
            hosts=None,
            username=None,
            processors=3):
        """Restore the default mmc0 trigger for led0 on the given hosts."""
        command = f"echo mmc0 >/sys/class/leds/led0/trigger"
        result = Host.ssh(hosts=hosts,
                          command=command,
                          username=username,
                          key="~/.ssh/id_rsa.pub",
                          processors=processors,
                          executor=os.system)
        # Return the ssh results so execute() can print them
        # (previously this returned None).
        return result

    @staticmethod
    def set(led=None, value=1):
        """Set the brightness of the given LED (0 or 1) on the local Pi."""
        if led not in [1, 0]:
            raise ValueError("Led number is wrong")
        state = LED.get_state(value)
        if led == 0:
            # Switch it off first; technically we should disable the trigger
            # first, then we would not have to switch it off.
            # NOTE: the continuation strings must be f-strings too, otherwise
            # "{led}" is written literally into the sysfs path.
            command = f"echo 0 | " \
                      f"sudo tee /sys/class/leds/led{led}/brightness " \
                      f">> /dev/null"
            os.system(command)
        command = f"echo {state} | " \
                  f"sudo tee /sys/class/leds/led{led}/brightness >> /dev/null"
        os.system(command)

    @staticmethod
    def set_remote(
            led=None,
            value=1,
            hosts=None,
            username=None,
            processors=3):
        """Set the brightness of the given LED on the given remote hosts."""
        if led not in [1, 0]:
            raise ValueError("Led number is wrong")
        state = LED.get_state(value)
        command = f"echo {state} |" \
                  f" sudo tee /sys/class/leds/led{led}/brightness" \
                  f" >> /dev/null"
        print("command", command)
        result = Host.ssh(hosts=hosts,
                          command=command,
                          username=username,
                          key="~/.ssh/id_rsa.pub",
                          processors=processors,
                          executor=os.system)
        return result

    @staticmethod
    def blink_remote(
            led=None,
            hosts=None,
            username=None,
            rate=None,
            processors=3):
        """Blink the given LED three times on all hosts simultaneously."""
        if led not in [1, 0]:
            raise ValueError("Led number is wrong")
        rate = float(rate or 0.5)

        for i in range(0, 3):
            LED.set_remote(
                led=led,
                value="0",
                hosts=hosts,
                username=username,
                processors=processors)
            time.sleep(rate)
            LED.set_remote(
                led=led,
                value="1",
                hosts=hosts,
                username=username,
                processors=processors)
            time.sleep(rate)
        # NOTE(review): returns None, so execute() prints None after a blink;
        # confirm whether per-host results should be collected instead.
        return None

    @staticmethod
    def sequence_remote(
            led=None,
            hosts=None,
            username=None,
            rate=None,
            processors=3):
        """Blink the given LED on each host in turn (a running-light effect)."""
        if led not in [1, 0]:
            raise ValueError("Led number is wrong")
        rate = float(rate or 0.5)

        hosts = Parameter.expand(hosts)
        for host in hosts:
            LED.set_remote(
                led=led,
                value="0",
                hosts=host,
                username=username,
                processors=processors)
            time.sleep(rate)
            LED.set_remote(
                led=led,
                value="1",
                hosts=host,
                username=username,
                processors=processors)
            time.sleep(rate)
        return None

    @staticmethod
    def list_remote(
            hosts=None,
            username=None,
            processors=3):
        """Read the green (led0) and red (led1) brightness on the given hosts."""
        command = f"cat" \
                  " /sys/class/leds/led0/brightness" \
                  " /sys/class/leds/led1/brightness"
        results = Host.ssh(hosts=hosts,
                           command=command,
                           username=username,
                           key="~/.ssh/id_rsa.pub",
                           processors=processors,
                           executor=os.system)
        # stdout contains the two brightness values, one per line
        for result in results:
            result["green"], result["red"] = result["stdout"].split("\n", 1)
        return results
| [
"laszewski@gmail.com"
] | laszewski@gmail.com |
178ebe48b20de522d522befa1f047f91edf82428 | 4289fcc440e0cf3d2ecaca03bd2cb1a40933f8fc | /dtformats/recycler.py | 7c2f3c368d1e9f32eebd78e298c581e9be311590 | [
"Apache-2.0"
] | permissive | ydkhatri/dtformats | 4251563ad8a42dbfb8f293890ed844e29b2c856a | 692d53616f7c89e5ff4d6950778c46d3b94a0130 | refs/heads/master | 2020-04-20T23:41:11.816017 | 2019-02-03T12:40:55 | 2019-02-03T14:32:54 | 169,174,860 | 2 | 0 | Apache-2.0 | 2019-02-05T01:16:42 | 2019-02-05T01:16:41 | null | UTF-8 | Python | false | false | 4,752 | py | # -*- coding: utf-8 -*-
"""Windows Recycler INFO2 files."""
from __future__ import unicode_literals
from dtformats import data_format
from dtformats import errors
class RecyclerInfo2File(data_format.BinaryDataFile):
"""Windows Recycler INFO2 file."""
_DEFINITION_FILE = 'recycler.yaml'
_DEBUG_INFO_FILE_ENTRY = [
('original_filename', 'Original filename (ANSI)', '_FormatANSIString'),
('index', 'Index', '_FormatIntegerAsDecimal'),
('drive_number', 'Drive number', '_FormatIntegerAsDecimal'),
('deletion_time', 'Deletion time', '_FormatIntegerAsFiletime'),
('original_file_size', 'Original file size', '_FormatIntegerAsDecimal')]
_DEBUG_INFO_FILE_HEADER = [
('unknown1', 'Unknown1', '_FormatIntegerAsHexadecimal8'),
('number_of_file_entries', 'Number of file entries',
'_FormatIntegerAsDecimal'),
('unknown2', 'Unknown2', '_FormatIntegerAsHexadecimal8'),
('file_entry_size', 'File entry size', '_FormatIntegerAsDecimal'),
('unknown3', 'Unknown3', '_FormatIntegerAsHexadecimal8')]
def __init__(self, debug=False, output_writer=None):
"""Initializes a Windows Recycler INFO2 file.
Args:
debug (Optional[bool]): True if debug information should be written.
output_writer (Optional[OutputWriter]): output writer.
"""
super(RecyclerInfo2File, self).__init__(
debug=debug, output_writer=output_writer)
self._codepage = 'cp1252'
self._file_entry_data_size = 0
def _FormatANSIString(self, string):
"""Formats an ANSI string.
Args:
string (str): string.
Returns:
str: formatted ANSI string.
Raises:
ParseError: if the string could not be decoded.
"""
# The string can contain remnant data after the end-of-string character.
string = string.split(b'\x00')[0]
try:
return string.decode(self._codepage)
except UnicodeDecodeError as exception:
raise errors.ParseError(
'Unable to decode ANSI string with error: {0!s}.'.format(exception))
def _ReadFileEntry(self, file_object):
"""Reads the file entry.
Args:
file_object (file): file-like object.
Raises:
ParseError: if the file entry cannot be read.
"""
file_offset = file_object.tell()
file_entry_data = self._ReadData(
file_object, file_offset, self._file_entry_data_size, 'file entry')
data_type_map = self._GetDataTypeMap('recycler_info2_file_entry')
try:
file_entry = self._ReadStructureFromByteStream(
file_entry_data, file_offset, data_type_map, 'file entry')
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to map file entry data at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
if self._debug:
self._DebugPrintStructureObject(file_entry, self._DEBUG_INFO_FILE_ENTRY)
if self._file_entry_data_size > 280:
file_offset += 280
data_type_map = self._GetDataTypeMap(
'recycler_info2_file_entry_utf16le_string')
try:
original_filename = self._ReadStructureFromByteStream(
file_entry_data[280:], file_offset, data_type_map, 'file entry')
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to map file entry data at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
if self._debug:
self._DebugPrintValue('Original filename (Unicode)', original_filename)
if self._debug:
self._DebugPrintText('\n')
  def _ReadFileHeader(self, file_object):
    """Reads the file header.

    Args:
      file_object (file): file-like object.

    Raises:
      ParseError: if the file header cannot be read.
    """
    data_type_map = self._GetDataTypeMap('recycler_info2_file_header')

    file_header, _ = self._ReadStructureFromFileObject(
        file_object, 0, data_type_map, 'file header')

    if self._debug:
      self._DebugPrintStructureObject(file_header, self._DEBUG_INFO_FILE_HEADER)

    # Only the two known fixed entry sizes are supported: 280 bytes (ANSI
    # only) and 800 bytes (ANSI plus UTF-16-LE original filename).
    if file_header.file_entry_size not in (280, 800):
      raise errors.ParseError('Unsupported file entry size: {0:d}'.format(
          file_header.file_entry_size))

    # Remember the entry size so _ReadFileEntry knows how much to read.
    self._file_entry_data_size = file_header.file_entry_size
  def ReadFileObject(self, file_object):
    """Reads a Windows Recycler INFO2 file-like object.

    Args:
      file_object (file): file-like object.

    Raises:
      ParseError: if the file cannot be read.
    """
    self._ReadFileHeader(file_object)

    # Read fixed-size entries until the end of the file.
    # NOTE(review): self._file_size is presumably set by the base class when
    # the file is opened -- confirm before relying on it here.
    file_offset = file_object.tell()
    while file_offset < self._file_size:
      self._ReadFileEntry(file_object)
      file_offset += self._file_entry_data_size
| [
"joachim.metz@gmail.com"
] | joachim.metz@gmail.com |
8d35fadf93c1e4e0d7f0a894831ab7b3ad385070 | 2194b6c17f3153c5976d6ac4a9ab78211027adab | /otoroshi_admin_api_client/models/otoroshimodels_infra_provider_match_type.py | 94a545a8ff9ec8adb0fa47d5f3f07861625e89b3 | [] | no_license | krezreb/otoroshi-admin-api-client | 7fab5e873c9c5950d77fffce6bcf80d3fdf4c319 | 9b3156c11eac227024cfe4a26c0129618deb2c4d | refs/heads/master | 2023-05-08T08:32:00.982987 | 2021-05-27T09:55:00 | 2021-05-27T09:55:00 | 371,324,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | from enum import Enum
class OtoroshimodelsInfraProviderMatchType(str, Enum):
    """String-valued match strategies for an Otoroshi infra provider."""

    ALWAYSMATCH = "AlwaysMatch"
    NETWORKLOCATIONMATCH = "NetworkLocationMatch"
    GEOLOCATIONMATCH = "GeolocationMatch"

    def __str__(self) -> str:
        # Render as the wire value rather than the default 'Class.MEMBER'.
        return self.value
| [
"josephbeeson@gmail.com"
] | josephbeeson@gmail.com |
1a3371799d39e8df80c7bf8ff391b29ea564c666 | 482ca5a05c3e34eb0c5b9eb239b86288fa3ea5be | /lilac2/const.py | 5d623e05a3db8e66eda7abb6976aecbbc8e377b5 | [] | no_license | renyuneyun/lilac | de8462deb6275f8ea8e540ad71de10313d976250 | c224a65ac810a8aaba05c410c5b07683a5055868 | refs/heads/master | 2020-03-14T04:23:42.518149 | 2019-10-28T16:20:10 | 2019-10-28T16:21:17 | 131,441,132 | 1 | 0 | null | 2018-04-28T20:15:02 | 2018-04-28T20:15:02 | null | UTF-8 | Python | false | false | 335 | py | from pathlib import Path
import types
mydir = Path('~/.lilac').expanduser()  # per-user state directory for lilac
AUR_REPO_DIR = mydir / 'aur'  # local cache of AUR checkouts
AUR_REPO_DIR.mkdir(parents=True, exist_ok=True)  # import-time side effect: ensure it exists
# Files with special meaning inside a package directory -- presumably consumed
# by the build machinery; confirm at call sites.
SPECIAL_FILES = ('package.list', 'lilac.py', 'lilac.yaml', '.gitignore')
_G = types.SimpleNamespace()  # process-wide mutable state; fields documented below are set elsewhere
# repo: Repo
# mod: LilacMod
# pkgver: Optional[str]
# pkgrel: Optional[str]
| [
"lilydjwg@gmail.com"
] | lilydjwg@gmail.com |
6ed4c3efbfd2ae0af22e5a4be77058890cced371 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-facebody/aliyunsdkfacebody/request/v20191230/MonitorExaminationRequest.py | 91ad61a32de73924cfe85364b9c031859e147480 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 1,621 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkfacebody.endpoint import endpoint_data
class MonitorExaminationRequest(RpcRequest):
    """RPC request wrapper for the facebody 2019-12-30 MonitorExamination API.

    Auto-generated Alibaba Cloud SDK code: body parameters are exposed via
    get_/set_ accessor pairs.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'facebody', '2019-12-30', 'MonitorExamination','facebody')
        self.set_method('POST')

        # Resolve per-region endpoints when the SDK ships endpoint data.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_Type(self): # Long
        return self.get_body_params().get('Type')

    def set_Type(self, Type): # Long
        self.add_body_params('Type', Type)

    def get_ImageURL(self): # String
        return self.get_body_params().get('ImageURL')

    def set_ImageURL(self, ImageURL): # String
        self.add_body_params('ImageURL', ImageURL)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
c3ee35fe9436a493ef738377774073c953dba7cc | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /n4JA3je7FEFfZKaWp_11.py | 7b07527a21b2f44574d4b5255563e5c16f1eaaeb | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,465 | py | """
You landed your dream job. They pay in geometric progression (see resources).
In your first month of work, you will get your first paycheck `first_month`.
For every month after, your paycheck will be `multiplier` times bigger than
the previous paycheck.
Create a function that takes the `first_month`'s paycheck and the `multiplier`
and returns the number of months it took for you to save up more than one
million. The problem assumes you save 100% of every paycheck.
### Examples
million_in_month(10, 2) ➞ 17
million_in_month(100, 1.01) ➞ 464
million_in_month(50, 100) ➞ 4
# Month 1 = 50 (paycheck 50)
# Month 2 = 5050 (paycheck 5,000 + 50 already saved)
# Month 3 = 505050 (paycheck 500,000 + 5,050 already saved)
# Month 4 = 50505050 (paycheck 50,000,000 + 505,050 already saved)
### Notes
* Don't forget to return the result in the number of months.
* Return when your savings are greater than 1,000,000.
"""
def million_in_month(first_month, multiplier):
    """Return the number of months needed to save more than one million.

    The first paycheck is *first_month*; each later paycheck is *multiplier*
    times the previous one, and every paycheck is saved in full.

    The original implementation started the month counter at 2 and compounded
    the paycheck twice per loop iteration, which silently skipped the second
    month's paycheck: a first paycheck already above one million returned 2
    instead of 1, and a goal crossed in month 2 returned 3. This version
    accumulates exactly one paycheck per month.
    """
    goal = 1000000
    savings = 0
    paycheck = first_month
    months = 0
    # Spec: return once savings are strictly greater than 1,000,000.
    while savings <= goal:
        savings += paycheck
        paycheck *= multiplier
        months += 1
    return months

print(million_in_month(50, 100))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
33f6368a00941a84a7f75fc5b8debc37bf78b59e | 3d626f6034eddda6122feb81a5c98c8a3dab9d20 | /08-def-type-hints/messages/no_hints/messages.py | df037e7fa4ee7666a450ae417e60e5916bf32bae | [
"MIT"
] | permissive | eliaskousk/example-code-2e | b8d4f9ce86a55c2e7b905d2d1f94a5c867485fa2 | 28d6d033156831a77b700064997c05a40a83805f | refs/heads/master | 2022-07-13T19:12:57.425453 | 2022-04-24T20:41:30 | 2022-04-24T20:41:30 | 216,843,265 | 2 | 0 | MIT | 2019-10-22T15:09:18 | 2019-10-22T15:09:17 | null | UTF-8 | Python | false | false | 371 | py | """
# tag::SHOW_COUNT_DOCTEST[]
>>> show_count(99, 'bird')
'99 birds'
>>> show_count(1, 'bird')
'1 bird'
>>> show_count(0, 'bird')
'no birds'
# end::SHOW_COUNT_DOCTEST[]
"""
# tag::SHOW_COUNT[]
def show_count(count, word):
    """Return count and word, pluralized, with 'no' standing in for zero."""
    if count == 1:
        return f'1 {word}'
    return f'{count} {word}s' if count else f'no {word}s'
# end::SHOW_COUNT[]
| [
"luciano@ramalho.org"
] | luciano@ramalho.org |
526677bfe45d954fc32b6679c20729126f9eda0a | 14ddda0c376f984d2a3f7dcd0ca7aebb7c49648d | /bnn_mcmc_examples/examples/mlp/penguins/hmc/pilot_run.py | 97dd6cec9d34ff10a9f74c188fb2a7a85f034672 | [
"MIT"
] | permissive | papamarkou/bnn_mcmc_examples | 62dcd9cc0cf57cda39aa46c2f2f237bbcd2d35bb | 7bb4ecfb33db4c30a8e61e31f528bda0efb24e3d | refs/heads/main | 2023-07-12T20:51:28.302981 | 2021-08-22T13:06:17 | 2021-08-22T13:06:17 | 316,554,634 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | # %% Import packages
from datetime import timedelta
from timeit import default_timer as timer
from bnn_mcmc_examples.examples.mlp.penguins.constants import num_burnin_epochs, num_epochs, verbose, verbose_step
from bnn_mcmc_examples.examples.mlp.penguins.hmc.constants import sampler_output_pilot_path
from bnn_mcmc_examples.examples.mlp.penguins.hmc.sampler import sampler
# %% Run HMC sampler
start_time = timer()
# One full sampling run: burn-in epochs followed by the recorded epochs.
sampler.run(num_epochs=num_epochs, num_burnin_epochs=num_burnin_epochs, verbose=verbose, verbose_step=verbose_step)
end_time = timer()
print("Time taken: {}".format(timedelta(seconds=end_time-start_time)))
# %% Save chain array
# Persist the drawn samples and acceptance flags for later diagnostics.
sampler.get_chain().to_chainfile(keys=['sample', 'accepted'], path=sampler_output_pilot_path, mode='w')
| [
"theodore.papamarkou@gmail.com"
] | theodore.papamarkou@gmail.com |
adabad459eeb610321cf8aca55eaa46f883670a0 | 209aae9f40657d48461bed5e081c4f235f86090a | /2019/day23-1.py | 84c2010cfdb302f687417c5081575342cd025e26 | [] | no_license | scheidguy/Advent_of_Code | 6e791132157179928e1415f49467ad221ef1e258 | fbc09e4d26502b9a77e0c8d2840b11ec85a3c478 | refs/heads/main | 2023-03-05T12:34:15.343642 | 2021-02-20T00:27:58 | 2021-02-20T00:27:58 | 329,106,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,618 | py |
from copy import deepcopy
# import numpy as np
# Read the puzzle input: one comma-separated Intcode program.
f = open('day23-1_input.txt')
# f = open('day23-1_debug.txt')
text = f.readlines()
f.close()

program = [int(i) for i in text[0].strip().split(',')]
program.extend([0 for _ in range(10**4)])  # extra zeroed memory for the VM

# Fifty network computers (NICs), each with its own copy of the program.
# Each NIC's first input is its network address (0..49).
NIC = [deepcopy(program)for computer in range(50)]
inputs = [[network_address] for network_address in range(50)]
foundit = False
poss = [0 for network_address in range(50)]  # saved instruction pointer per NIC
rels = [0 for network_address in range(50)]  # saved relative base per NIC
numpackets = -1

# Round-robin: run each NIC until it either consumes a full packet or emits
# one (three outputs: address, X, Y), then move on to the next NIC.
while True:
    numpackets += 1
    for comp in range(50):
        program = NIC[comp]
        outputs = []
        pos = poss[comp]
        rel = rels[comp]
        while True:
            # Decode the instruction: last two digits are the opcode, the
            # remaining (zero-padded) digits are the parameter modes.
            command = str(program[pos])
            command = '0'*(5-len(command)) + command
            opcode = command[-2:]
            modes = command[0:-2]
            if opcode == '99':
                # print(out)
                break
            # First parameter: 0 = position, 1 = immediate, 2 = relative.
            if modes[2] == '0': first = program[program[pos+1]]
            if modes[2] == '1': first = program[pos+1]
            if modes[2] == '2': first = program[rel + program[pos+1]]
            if opcode in ['01', '02', '05', '06', '07', '08']:
                if modes[1] == '0': second = program[program[pos+2]]
                if modes[1] == '1': second = program[pos+2]
                if modes[1] == '2': second = program[rel + program[pos+2]]
            if opcode in ['01', '02', '07', '08']:
                # Third parameter is always a write target (never immediate).
                if modes[0] == '0': third = program[pos+3]
                if modes[0] == '1': third = pos+3
                if modes[0] == '2': third = rel + program[pos+3]
            if opcode == '01':  # add
                program[third] = first + second
                pos += 4
            elif opcode == '02':  # multiply
                program[third] = first * second
                pos += 4
            elif opcode == '03':  # input: -1 when the queue is empty
                if len(inputs[comp]) == 0: inp = -1
                else: inp = inputs[comp].pop(0)
                if modes[2] == '0': program[program[pos+1]] = inp
                if modes[2] == '1': program[pos+1] = inp
                if modes[2] == '2': program[rel + program[pos+1]] = inp
                pos += 2
                # Yield to the next NIC once a whole (X, Y) pair is consumed.
                if len(inputs[comp]) % 2 == 0:
                    poss[comp] = pos
                    rels[comp] = rel
                    break
            elif opcode == '04':  # output: collect address, X, Y triples
                out = first
                outputs.append(out)
                pos += 2
                if len(outputs) == 3:
                    # Part 1 answer: Y of the first packet sent to 255.
                    if outputs[0] == 255: print(outputs[2]);foundit=True;break
                    if outputs[0] < 0 or outputs[0] > 49: print(f'address: {outputs[0]}')
                    inputs[outputs[0]].append(outputs[1])
                    inputs[outputs[0]].append(outputs[2])
                    outputs = []
                    # Yield after delivering the packet.
                    poss[comp] = pos
                    rels[comp] = rel
                    break
            elif opcode == '05':  # jump-if-true
                if first == 0: pos += 3
                elif first != 0: pos = second
            elif opcode == '06':  # jump-if-false
                if first != 0: pos += 3
                elif first == 0: pos = second
            elif opcode == '07':  # less-than
                if first < second: program[third] = 1
                else: program[third] = 0
                pos += 4
            elif opcode == '08':  # equals
                if first == second: program[third] = 1
                else: program[third] = 0
                pos += 4
            elif opcode == '09':  # adjust relative base
                rel += first
                pos += 2
            else: print('ERROR');break
        if foundit: break
    if foundit: break
| [
"scheidguy@gmail.com"
] | scheidguy@gmail.com |
aa1107a2f5107739a1951e5cb4948883dd283ca3 | 1fe0b680ce53bb3bb9078356ea2b25e572d9cfdc | /venv/lib/python2.7/site-packages/ansible/module_utils/network/checkpoint/checkpoint.py | 7545ebfe34554f4f7f9eeede21eb5bc3b8718fe6 | [
"MIT"
] | permissive | otus-devops-2019-02/devopscourses_infra | 1929c4a9eace3fdb0eb118bf216f3385fc0cdb1c | e42e5deafce395af869084ede245fc6cff6d0b2c | refs/heads/master | 2020-04-29T02:41:49.985889 | 2019-05-21T06:35:19 | 2019-05-21T06:35:19 | 175,780,457 | 0 | 1 | MIT | 2019-05-21T06:35:20 | 2019-03-15T08:35:54 | HCL | UTF-8 | Python | false | false | 2,516 | py | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Ansible module arguments shared by all Check Point modules.
checkpoint_argument_spec = dict(auto_publish_session=dict(type='bool', default=True),
                                policy_package=dict(type='str', default='standard'),
                                auto_install_policy=dict(type='bool', default=True),
                                targets=dict(type='list')
                                )
def publish(connection, uid=None):
    """Publish staged changes; limited to a single session when *uid* is given."""
    payload = {'uid': uid} if uid else None
    connection.send_request('/web_api/publish', payload)
def discard(connection, uid=None):
    """Discard staged changes; limited to a single session when *uid* is given."""
    payload = {'uid': uid} if uid else None
    connection.send_request('/web_api/discard', payload)
def install_policy(connection, policy_package, targets):
    """Install *policy_package* on the given *targets* via the Web API."""
    connection.send_request('/web_api/install-policy', {
        'policy-package': policy_package,
        'targets': targets,
    })
| [
"skydevapp@gmail.com"
] | skydevapp@gmail.com |
c21fe724fc485be315124ec386b0530e099af7b8 | 5c58587ebfbf56192b3dc6ed6f43bc002c8e2cff | /core/migrations/0032_market.py | cf05224f92656563bdf5d67622b60b565cf3d04c | [] | no_license | hossamelneily/nexchange | fb9a812cfc72ac00b90cf64d6669a8129c2d2d4b | 6d69274cd3808989abe2f5276feb772d1f0fa8b4 | refs/heads/release | 2022-12-13T09:20:47.297943 | 2019-02-12T08:20:34 | 2019-02-12T08:20:34 | 210,064,740 | 1 | 2 | null | 2022-12-09T00:54:01 | 2019-09-21T23:19:34 | Python | UTF-8 | Python | false | false | 963 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-09-27 13:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.1: creates the Market model.
    # Do not hand-edit once this migration has been applied anywhere.

    dependencies = [
        ('core', '0031_merge_20170921_1946'),
    ]

    operations = [
        migrations.CreateModel(
            name='Market',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('modified_on', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=50, unique=True)),
                ('code', models.CharField(max_length=10, unique=True)),
                # NOTE(review): max_length is ignored by BooleanField -- it
                # mirrors the model definition but has no schema effect.
                ('is_main_market', models.BooleanField(default=False, max_length=10)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"noreply@github.com"
] | hossamelneily.noreply@github.com |
9344fa509479d8e7ecc91673c3b50f17ddea5d7d | df4fd380b3e1720a970573c4692eb0a32faf8f47 | /string_matching/rabin_karp.py | a335b90d14aa9c110503ed37eb35d99914e008e6 | [] | no_license | Taeheon-Lee/Algorithm | 99dd21e1e0ddba31190a16d6c9646a9f393f4c4b | 64ebacf24dfdf25194b5bce39f4ce43c4bc87141 | refs/heads/master | 2023-07-10T20:26:10.121214 | 2021-08-07T17:26:26 | 2021-08-07T17:26:26 | 383,803,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,793 | py | "Rabin-Karp 라빈-카프 알고리즘"
# 패턴의 해시값과 본문 안에 있는 하위 문자열의 해시값만을 비교하여 탐색
# 즉 문자열 패턴을 수치로 바꾸어 문자열의 비교를 수치 비교로 전환해 매칭하는 방법
# 해싱은 인덱스만 계산하면 바로 값을 참조할 수 있기 떄문에 연산 속도가 O(1)로 매우 빠름
# 해싱 값은 문자열이 달라도 같을 수 있기 때문에 해싱 값이 같을 경우, 단순 비교를 시작
# 따라서 최악의 경우, 해싱이 모두 같고 매칭이 다른 경우가 발생할 수 있어 시간 복잡도가 O(mn)
# 평균적인 시간 복잡도는 선형에 가까운 O(m+n)으로 매우 빠름
def rabin_karp(T, P):
    """Return the first index of pattern P in text T, or -1 if absent.

    Compares hash values first and falls back to a direct string comparison
    only when the hashes collide, since different strings can share a hash.
    """
    m = len(P)
    target = hash(P)
    for start in range(len(T) - m + 1):
        window = T[start:start + m]
        # Cheap hash check first; confirm with a real comparison on a match.
        if hash(window) == target and window == P:
            return start
    return -1
P = input("Input pattern: ")  # pattern to search for
T = input("Input String: ")  # text to search in
ans = rabin_karp(T, P)
print("Matching fail" if ans == -1 else ans)
"taeheon714@gmail.com"
] | taeheon714@gmail.com |
5fd492e636197ff8865032922e0ce3ac8d9b7f52 | 8d213a21ac532d6713f1239449ffc08497a77476 | /drf_api/wsgi.py | d73b8908123d55b0bf0f1c22f994774fe1a86fe5 | [] | no_license | anykate/drf_api | 3453c2a27a3d7ab8e1560848d84dd6f7b985d5ec | 7fe9c7c2950ae0a84f6a9f9d33a9f2dccd723560 | refs/heads/master | 2020-09-04T03:32:44.440439 | 2019-11-05T03:22:08 | 2019-11-05T03:22:08 | 219,648,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for drf_api project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'drf_api.settings')
# Module-level WSGI entry point used by application servers.
application = get_wsgi_application()
| [
"aryamane.aniket@gmail.com"
] | aryamane.aniket@gmail.com |
6fb44519ef839fdb96140c65a58683b9d72f9322 | 9b7291d81a416bde2ec181229601eb2e33c7b8b2 | /monophoton/configs/TagAndProbe2016/trigger.py | b9028479a55860739488b7b49c1131b654df69a0 | [] | no_license | MiT-HEP/MonoX | ab1528e72dad2590a0ae64f1a1d47195139e1749 | 224ee01107a94cedf8563c497edb2f326b99d9b1 | refs/heads/master | 2021-01-24T06:04:16.645559 | 2019-11-15T09:18:40 | 2019-11-15T09:18:40 | 41,823,403 | 1 | 9 | null | 2018-07-19T17:05:30 | 2015-09-02T19:33:33 | Python | UTF-8 | Python | false | false | 7,143 | py | import ROOT
ROOT.gSystem.Load('libPandaTreeObjects.so')
e = ROOT.panda.Event
ROOT.gROOT.ProcessLine('int val;')
def getEnum(cls, name):
    """Resolve panda::<cls>::TriggerObject::<name> through the ROOT interpreter."""
    statement = 'val = panda::{0}::TriggerObject::{1};'.format(cls, name)
    ROOT.gROOT.ProcessLine(statement)
    return ROOT.val
# (object, label) -> (dataset name or patterns, skim name, event selection
# expression, collection prefix substituted into the variable expressions in
# confs below).
measurements = {
    ('photon', 'sel'): ('sel-16b-m', 'tpegLowPt', 'probes.medium && !probes.pixelVeto && tp.mass > 60. && tp.mass < 120. && TMath::Abs(TVector2::Phi_mpi_pi(probes.phi_ - tags.phi_)) > 0.6', 'probes'),
    ('photon', 'selBCD'): (['sel-16b-m', 'sel-16c-m', 'sel-16d-m'], 'tpegLowPt', 'probes.medium && !probes.pixelVeto && tp.mass > 60. && tp.mass < 120. && TMath::Abs(TVector2::Phi_mpi_pi(probes.phi_ - tags.phi_)) > 0.6 && runNumber < 276525', 'probes'), # for photon75
    ('photon', 'dy'): (['dy-50@', 'dy-50-*'], 'tpegLowPt', 'probes.medium && !probes.pixelVeto && tp.mass > 60. && tp.mass < 120. && TMath::Abs(TVector2::Phi_mpi_pi(probes.phi_ - tags.phi_)) > 0.6', 'probes'),
    ('photon', 'elmu'): (['smu-16*-m'], 'elmu', 'photons.mediumX[][2]', 'photons'),
    ('photon', 'elmuBCD'): (['smu-16b-m', 'smu-16c-m', 'smu-16d-m'], 'elmu', 'photons.mediumX[][2]', 'photons'),
    ('photon', 'ph75'): (['sph-16b-m', 'sph-16c-m', 'sph-16d-m'], 'ph75', 'photons.medium && HLT_Photon50 && runNumber < 276525', 'photons'),
    ('photon', 'ph75h'): (['sph-16b-m', 'sph-16c-m', 'sph-16d-m'], 'ph75', 'photons.medium && HLT_Photon75 && runNumber < 276525', 'photons'),
    ('photon', 'mcph75'): (['gj04-*'], 'ph75', 'photons.medium && HLT_Photon50', 'photons'),
    ('electron', 'sel'): ('sel-16*-m', 'tp2e', 'probes.tight && tp.mass > 60. && tp.mass < 120.', 'probes'),
    ('muon', 'smu'): ('smu-16*', 'tp2m', 'probes.tight && tp.mass > 60. && tp.mass < 120.', 'probes'),
    ('vbf', 'selBCD'): (['sel-16b-m', 'sel-16c-m', 'sel-16d-m'], 'vbfe', 'electrons.triggerMatch[][%d] && dijet.size > 0 && runNumber < 276525' % getEnum('Electron', 'fEl75EBR9IsoPh'), ''),
    ('vbf', 'ph75h'): (['sph-16b-m', 'sph-16c-m', 'sph-16d-m'], 'ph75', 'photons.triggerMatch[][%d] && dijet.size > 0 && runNumber < 276525' % getEnum('Photon', 'fPh75EBR9Iso'), ''),
    ('vbf', 'dy'): (['dy-50@*', 'dy-50-*'], 'vbfe', 'electrons.triggerMatch[][%d] && dijet.size > 0' % getEnum('Electron', 'fEl75EBR9IsoPh'), ''),
    ('vbf', 'mcph75h'): (['gj04-*'], 'ph75', 'photons.triggerMatch[][%d] && dijet.size > 0' % getEnum('Photon', 'fPh75EBR9Iso'), ''),
    ('vbf', 'wlnu'): (['wlnu-*'], 'vbfe', 'electrons.triggerMatch[][%d] && dijet.size > 0' % getEnum('Electron', 'fEl75EBR9IsoPh'), '')
}
# Trigger-efficiency configurations per object:
#   object -> conf name -> (numerator expression, extra cut, title,
#                           {variable -> (axis title, expression, cut, binning)}).
# '{col}' is replaced with the collection prefix from measurements above.
confs = {
    'photon': {
        'l1eg40': ('{col}.triggerMatch[][%d]' % getEnum('Photon', 'fSEG34IorSEG40'), '', 'L1 seed', {
            'pt': ('p_{T}^{#gamma} (GeV)', '{col}.pt_', '', (50, 0., 100.)),
            'ptwide': ('p_{T}^{#gamma} (GeV)', '{col}.pt_', '', [30. + 5. * x for x in range(14)] + [100. + 10. * x for x in range(10)] + [200. + 20. * x for x in range(5)] + [300., 350., 400.]),
            'hOverE': ('H/E', '{col}.hOverE', '{col}.pt_ > 175.', (25, 0., 0.05)),
            'hcalE': ('E^{HCAL} (GeV)', '{col}.pt_ * TMath::CosH({col}.eta_) * {col}.hOverE', '{col}.pt_ > 175.', (25, 0., 5))
        }),
        'l1all': ('{col}.triggerMatch[][%d] || {col}.triggerMatch[][%d] || {col}.triggerMatch[][%d]' % (getEnum('Photon', 'fSEG34IorSEG40'), getEnum('Photon', 'fSEG40IorSJet200'), getEnum('Photon', 'fSEG34IorSEG40IorSJet200')), '', 'L1 seed', {
            'pt': ('p_{T}^{#gamma} (GeV)', '{col}.pt_', '', (50, 0., 100.)),
            'ptwide': ('p_{T}^{#gamma} (GeV)', '{col}.pt_', '', [30. + 5. * x for x in range(14)] + [100. + 10. * x for x in range(10)] + [200. + 20. * x for x in range(5)] + [300., 350., 400.]),
            'hOverE': ('H/E', '{col}.hOverE', '{col}.pt_ > 175.', (25, 0., 0.05)),
            'hcalE': ('E^{HCAL} (GeV)', '{col}.pt_ * TMath::CosH({col}.eta_) * {col}.hOverE', '{col}.pt_ > 175.', (25, 0., 5))
        }),
        'sph165abs': ('{col}.triggerMatch[][%d]' % getEnum('Photon', 'fPh165HE10'), '', 'L1&HLT', {
            'pt': ('p_{T}^{#gamma} (GeV)', '{col}.pt_', '', [30. + 5. * x for x in range(14)] + [100. + 10. * x for x in range(10)] + [200. + 20. * x for x in range(5)] + [300. + 50. * x for x in range(10)]),
            'ptzoom': ('p_{T}^{#gamma} (GeV)', '{col}.pt_', '', [30. + 5. * x for x in range(34)] + [200. + 15. * x for x in range(11)]),
            'hOverE': ('H/E', '{col}.hOverE', '{col}.pt_ > 175.', (25, 0., 0.05)),
            'hcalE': ('E^{HCAL} (GeV)', '{col}.pt_ * TMath::CosH({col}.eta_) * {col}.hOverE', '{col}.pt_ > 175.', (25, 0., 5)),
            'run': ('Run', 'runNumber', '{col}.pt_ > 175.', (26, 271050., 284050.))
        }),
        'ph75r9iso': ('{col}.triggerMatch[][%d]' % getEnum('Photon', 'fPh75EBR9Iso'), '{col}.isEB', 'Photon75Iso40R9', {
            'pt': ('p_{T}^{#gamma} (GeV)', '{col}.pt_', '{col}.r9 > 0.9', (50, 0., 100.)),
            'ptwide': ('p_{T}^{#gamma} (GeV)', '{col}.pt_', '{col}.r9 > 0.9', [30. + 10. * x for x in range(7)] + [100., 120., 140., 160., 200., 300., 400., 600.]),
            'r9': ('R^{9}', '{col}.r9', '{col}.pt_ > 80.', (30, 0.7, 1.))
        })
    },
    'electron': {
        'el27': ('{col}.triggerMatch[][%d]' % getEnum('Electron', 'fEl27Tight'), '', 'HLT', {
            'ptzoom': ('p_{T}^{e} (GeV)', '{col}.pt_', '', (50, 0., 50.)),
            'ptwide': ('p_{T}^{e} (GeV)', '{col}.pt_', '', [30. + 2. * x for x in range(85)] + [200. + 10. * x for x in range(10)]),
            'hOverE': ('H/E', '{col}.hOverE', '{col}.pt_ > 200.', (25, 0., 0.05)),
            'hcalE': ('E^{HCAL} (GeV)', '{col}.pt_ * TMath::CosH({col}.eta_) * {col}.hOverE', '{col}.pt_ > 200.', (25, 0., 5)),
            'run': ('Run', 'runNumber', '{col}.pt_ > 200.', (350, 271000., 274500.)),
            'pt': ('p_{T}^{e} (GeV)', '{col}.pt_', '', [0. + 5. * x for x in range(10)] + [50. + 10. * x for x in range(6)]),
            'eta': ('#eta^{e}', '{col}.eta_', '{col}.pt_ > 50.', (25, -2.5, 2.5))
        })
    },
    'muon': {
        'mu24ortrk24': ('{col}.triggerMatch[][%d] || {col}.triggerMatch[][%d]' % (getEnum('Muon', 'fIsoMu24'), getEnum('Muon', 'fIsoTkMu24')), '', 'HLT', {
            'ptzoom': ('p_{T}^{#mu} (GeV)', '{col}.pt_', '', (50, 0., 50.)),
            'ptwide': ('p_{T}^{#mu} (GeV)', '{col}.pt_', '', [30. + 2. * x for x in range(85)] + [200. + 10. * x for x in range(10)]),
            'run': ('Run', 'runNumber', '{col}.pt_ > 200.', (350, 271000., 274500.)),
            'pt': ('p_{T}^{#mu} (GeV)', '{col}.pt_', '', [0. + 5. * x for x in range(10)] + [50. + 10. * x for x in range(6)])
        })
    },
    'vbf': {
        'vbf': ('HLT_Photon75_R9Id90_HE10_Iso40_EBOnly_VBF', '', 'VBF filter', {
            'dEtajj': ('|#Delta#eta_{jj}|', 'Max$(TMath::Abs(dijet.dEtajj * (dijet.mjj > 800.)))', 'Sum$(dijet.mjj > 500) != 0', (50, 0., 5.)),
            'mjj': ('m_{jj} (GeV)', 'Max$(TMath::Abs(dijet.mjj * (TMath::Abs(dijet.dEtajj) > 3.2)))', 'Sum$(TMath::Abs(dijet.dEtajj) > 3.2) != 0', (50, 0., 1000.))
        })
    }
}
# TTree output for fitting
# object -> list of (variable, conf) pairs for which a fit tree is written.
fitconfs = {}
fitconfs['photon'] = []
fitconfs['electron'] = [
    ('ptzoom', 'el27')
]
fitconfs['muon'] = []
fitconfs['vbf'] = []
| [
"yiiyama@mit.edu"
] | yiiyama@mit.edu |
6627c39817bf5dff8482e5da4e684c51071c774c | 80e36c723d26fef80892a684a5987295e1dbd48c | /library/forms.py | 6e5c194fa078d5e8c738dff33f835dd95c7f4495 | [
"LicenseRef-scancode-public-domain"
] | permissive | xritzx/WordFruit | 9f4ef8b6fe4774d3ca4e9ea2ceff83ece99c6f3b | 04dd9539c5c7fb57a40ceb5d02b76f8f95c52ae6 | refs/heads/master | 2022-12-13T16:03:51.388925 | 2019-06-06T06:15:22 | 2019-06-06T06:15:22 | 168,710,999 | 4 | 3 | null | 2022-12-08T01:34:48 | 2019-02-01T14:39:55 | CSS | UTF-8 | Python | false | false | 189 | py | from django.forms import ModelForm
from .models import Book
class BookAddForm(ModelForm):
    """Form for contributing a Book.

    Moderation/ownership fields are excluded and presumably filled in by the
    view (contributor, date) or updated later (read, likes).
    """

    class Meta:
        model = Book
        exclude = ['contributor', 'date', 'read', 'likes']
| [
"ritankarpaul47@gmail.com"
] | ritankarpaul47@gmail.com |
52d0ac2b85718c3c146720e8f651230dfff0cc2c | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /NhPYFqfQcFXWvdH8t_6.py | d9f026bff80b989b0ba365c2911524b73da7508c | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,766 | py | """
A positive integer multiplied times its inverse is always equal to 1:
`17*(1/17)==1`. Modular arithmetic has a similar inverse function, although,
for modulus `m`, we are confined to integers from 0 to m-1. The modular
multiplicative inverse of 3 modulus 5 is equal to 2 because `(3*2)%5==1`.
Another example: the modular inverse of 17 modulus 1000007 is equal to 58824
because `(17*58824)%1000007==1`. The modular inverse, if it exists, must
always be in the range 0 to m-1.
Create a function that has arguments integer `n` and modulus `m`. The function
will return the modular inverse of `n` mod `m`. If the modular inverse does
not exist, return `False`.
### Examples
mod_inv(2, 3) ➞ 2
mod_inv(12, 47) ➞ 4
mod_inv(11, 33) ➞ False
mod_inv(55, 678) ➞ 37
mod_inv(81, 3455) ➞ 2346
### Notes
* Some of the test cases have rather large integers, so if you attempt to do a brute force search of the entire modular field, you may not be successful due to the 12 second time limit imposed by the server. See **Resources** for a more efficient approach.
* The modular inverse of a number `n` modulus `m` exists only if `n` and `m` are coprime (i.e. they have no common factors other than 1).
* One practical use of modular inverse is in public-key cryptography like RSA where it can be used to determine the value of the private key.
"""
def gcd_ex(a, b):
    """Extended Euclid: return (g, s, t) with g = gcd(a, b) and a*t + b*s == g."""
    old_s, s = 1, 0
    old_t, t = 0, 1
    old_r, r = b, a
    while r:
        quotient, remainder = divmod(old_r, r)
        old_r, r = r, remainder
        old_s, s = s, old_s - quotient * s
        old_t, t = t, old_t - quotient * t
    return (old_r, old_s, old_t)

def mod_inv(n, m):
    """Return the modular inverse of n mod m, or False when it does not exist."""
    g, _, y = gcd_ex(n, m)
    if g != 1:
        # n and m share a factor, so no inverse exists.
        return False
    # Normalize the Bezout coefficient into the range 0..m-1.
    return y if y >= 0 else y + m
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
b77eadfb20bc641f8047bfe61ca0392ebcd0f42d | f439d2e77582a747957df6ff6e102df91b8a16d3 | /examples/nlp/lstm_generator_textfile.py | 0c0399ed1f4628d5d8524f0a780eba7e0bb936c8 | [
"MIT"
] | permissive | NLPDev/tflearn | 6bba8a0e811d465c008511ef2946d183c996d0bf | 77436978c62124bd91ef739dc77c9ea58277c779 | refs/heads/master | 2020-04-09T13:20:36.113533 | 2018-12-07T15:46:33 | 2018-12-07T15:46:33 | 160,370,136 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,976 | py | from __future__ import absolute_import, division, print_function
import os, sys, argparse
import urllib
import tflearn
from tflearn.data_utils import *
parser = argparse.ArgumentParser(description=
    'Pass a text file to generate LSTM output')
parser.add_argument('filename')
# NOTE: nargs=1 means a user-supplied value arrives as a one-element list,
# while the unset case keeps the scalar default; downstream code handles both.
parser.add_argument('-t','--temp', help=
    'Defaults to displaying multiple temperature outputs which is suggested.' +
    ' If temp is specified, a value of 0.0 to 2.0 is recommended.' +
    ' Temperature is the novelty or' +
    ' riskiness of the generated output. A value closer to 0 will result' +
    ' in output closer to the input, so higher is riskier.',
    required=False, nargs=1, type=float)
parser.add_argument('-l','--length', help=
    'Optional length of text sequences to analyze. Defaults to 25.',
    required=False, default=25, nargs=1, type=int)
args = vars(parser.parse_args())
path = args['filename']
# With nargs=1, argparse stores supplied values as one-element lists while
# the defaults above remain plain scalars.
if args['temp'] and args['temp'][0] is not None:
    temp = args['temp'][0]
    print("Temperature set to", temp)
    if temp > 2 or temp < 0:
        print("Temperature out of suggested range. Suggested temp range is 0.0-2.0")
else:
    print("Will display multiple temperature outputs")
# Was `args['length'] is not 25`: an identity comparison against an int
# literal (SyntaxWarning in Python 3.8+) that only worked via CPython's
# small-int caching. "Flag was given" == "value is a list".
if isinstance(args['length'], list):
    maxlen = args['length'][0]
    print("Sequence max length set to ", maxlen)
else:
    maxlen = args['length']  # default 25 set in .add_argument above
model_name = path.split('.')[0]  # create model name from textfile input
if not os.path.isfile(path):
    print("Couldn't find the text file. Are you sure the path you passed is correct?")
    # Previously fell through and crashed later with a raw traceback.
    sys.exit(1)
X, Y, char_idx = \
    textfile_to_semi_redundant_sequences(path, seq_maxlen=maxlen, redun_step=3)

# Three stacked 512-unit LSTM layers with dropout, closed by a softmax over
# the character vocabulary.
g = tflearn.input_data([None, maxlen, len(char_idx)])
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512)
g = tflearn.dropout(g, 0.5)
g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                       learning_rate=0.001)

m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                              seq_maxlen=maxlen,
                              clip_gradients=5.0,
                              checkpoint_path='model_'+ model_name)

# Train one epoch at a time and sample generated text after each epoch.
for i in range(50):
    seed = random_sequence_from_textfile(path, maxlen)
    m.fit(X, Y, validation_set=0.1, batch_size=128,
          n_epoch=1, run_id=model_name)
    print("-- TESTING...")
    # args['temp'] is None when the flag was not given, a list otherwise.
    if args['temp'] is not None:
        temp = args['temp'][0]
        print("-- Test with temperature of %s --" % temp)
        print(m.generate(600, temperature=temp, seq_seed=seed))
    else:
        print("-- Test with temperature of 1.0 --")
        print(m.generate(600, temperature=1.0, seq_seed=seed))
        print("-- Test with temperature of 0.5 --")
        print(m.generate(600, temperature=0.5, seq_seed=seed))
"vasile123andronic@gmail.com"
] | vasile123andronic@gmail.com |
44e793c1248ef2e36ba33257f2adbf511b590309 | 37d8802ecca37cc003053c2175f945a501822c82 | /09-动态规划/0062-不同路径-2.py | 7c1a0f8ea366b820befe2445e2814772c7343155 | [
"Apache-2.0"
] | permissive | Sytx74/LeetCode-Solution-Python | cc0f51e31a58d605fe65b88583eedfcfd7461658 | b484ae4c4e9f9186232e31f2de11720aebb42968 | refs/heads/master | 2020-07-04T18:17:24.781640 | 2019-07-30T03:34:19 | 2019-07-30T03:34:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | class Solution:
def uniquePaths(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
dp = [1] * n
for i in range(1, m):
for j in range(1, n): # 从索引 2 开始走就行了
dp[j] = dp[j] + dp[j - 1]
return dp[-1]
if __name__ == '__main__':
s = Solution()
res = s.uniquePaths(5, 4)
print(res)
| [
"121088825@qq.com"
] | 121088825@qq.com |
431bc242a4cad6bd666fd73d507d2df2e74fb34f | dd3bbd4e7aaee7a8a5f26b927ce28ac472c855a5 | /eggs/z3c.form-2.4.3-py2.7.egg/z3c/form/adding.py | 5cee0932d4335930114a159e520d1c93762c90c9 | [] | no_license | nacho22martin/tesis | ea0a822f8bdbdef6f13f41276ecd4d6e85427ca5 | e137eb6225cc5e724bee74a892567796166134ac | refs/heads/master | 2020-12-24T13:20:58.334839 | 2013-11-09T12:42:41 | 2013-11-09T12:42:41 | 14,261,570 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | ##############################################################################
#
# Copyright (c) 2007 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Implementation of an addform for IAdding
$Id: adding.py 76841 2007-06-20 13:09:18Z srichter $
"""
__docformat__ = "reStructuredText"
from z3c.form import form
class AddForm(form.AddForm):
"""An addform for the IAdding interface."""
def add(self, object):
ob = self.context.add(object)
self._finishedAdd = True
return ob
def nextURL(self):
return self.context.nextURL()
| [
"ignacio@plone.(none)"
] | ignacio@plone.(none) |
8c9bec14ad6067364063355bda91fc6dd73ef21e | 82d6b24fb786143b07e897d4bbe70f2c1d5fc481 | /hsvTrackbarColorDetection.py | a44002da3b1ae0702e5c89290e7119015a5ecbf1 | [] | no_license | Pritam055/image_opencv | cded3eff39d4ef2006bef7b4a05a38a86700da2b | 63c19bb4b30e6db76b3e1d90697409acebb3ec3e | refs/heads/master | 2021-03-19T00:12:05.157661 | 2020-07-01T16:21:32 | 2020-07-01T16:21:32 | 247,113,074 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,544 | py | import cv2
import numpy as np
frameWidth = 640
frameHeight = 480
cap = cv2.VideoCapture(0)
cap.set(3, frameWidth)
cap.set(4, frameHeight)
frameCounter = 0
def empty(a):
pass
cv2.namedWindow("HSV")
cv2.resizeWindow("HSV", 640, 240)
cv2.createTrackbar("HUE Min", "HSV", 0, 179, empty)
cv2.createTrackbar("HUE Max", "HSV", 179, 179, empty)
cv2.createTrackbar("SAT Min", "HSV", 0, 255, empty)
cv2.createTrackbar("SAT Max", "HSV", 255, 255, empty)
cv2.createTrackbar("VALUE Min", "HSV", 0, 255, empty)
cv2.createTrackbar("VALUE Max", "HSV", 255, 255, empty)
while True:
# frameCounter += 1
# if cap.get(cv2.CAP_PROP_FRAME_COUNT) == frameCounter:
# cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
# frameCounter = 0
_, img = cap.read()
imgHsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h_min = cv2.getTrackbarPos("HUE Min", "HSV")
h_max = cv2.getTrackbarPos("HUE Max", "HSV")
s_min = cv2.getTrackbarPos("SAT Min", "HSV")
s_max = cv2.getTrackbarPos("SAT Max", "HSV")
v_min = cv2.getTrackbarPos("VALUE Min", "HSV")
v_max = cv2.getTrackbarPos("VALUE Max", "HSV")
print(h_min)
lower = np.array([h_min, s_min, v_min])
upper = np.array([h_max, s_max, v_max])
mask = cv2.inRange(imgHsv, lower, upper)
result = cv2.bitwise_and(img, img, mask=mask)
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
hStack = np.hstack([img, mask, result])
cv2.imshow('Horizontal Stacking', hStack)
if cv2.waitKey(1) and 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows() | [
"admin@gmail.com"
] | admin@gmail.com |
7b181c3b3d9e7fab86725dd12b5b8595e930051d | ecebefec65cc55b305419a689660eb8e2ea04fef | /release/virtual_player/simulation.py | 41fe28c584133cc4bcb995c6df09b94e2150affb | [] | no_license | generlist/ABRTuner | 4ab1d6d5e5201a7953d4565ca4574307a35513c3 | baea8fab155a71c185e74121a8f014e6ad889308 | refs/heads/master | 2020-05-26T01:09:29.712879 | 2018-08-03T23:40:01 | 2018-08-03T23:40:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,782 | py | # SIMULATION 1.0
import math
import sys
import os
import collections
from config import *
from helpers import *
from chunkMap import *
from performance_vector import *
from algorithms import *
from simulation_performance_vector import *
import numpy as np
from collections import deque
import time
from vplayer_state import State
import config
import warnings
warnings.filterwarnings('error')
def usage():
print >> sys.stderr, "Incorrect usage\nUsage: python " + sys.argv[0] + " <path to trace file>"
sys.exit(1)
if len(sys.argv) < 2:
usage()
trace_file = sys.argv[1]
if not os.path.isfile(trace_file):
print >> sys.stderr, "No such file: " + trace_file
sys.exit(1)
gp = getBolaGP()
bola_vp = getBolaVP(gp)
configs = []
if MPC_ABR:
configs = np.arange(0, 150, 10)
elif BOLA_ABR:
configs = np.arange(gp - 1.5, gp, 0.1)
elif HYB_ABR:
configs = np.arange(0.0, 1.0 ,0.03)
for param in configs:
s = State(config, trace_file)
# this while loop advances SIMULATION_STEP msec in each iteration,
# till the CLOCK exceeds the session_time_ms.
# SIMULATION_STEP is defined in config.py
while s.CLOCK < s.session_time_ms:
play_stalled_this_interval = 0
chunk_downloaded_this_interval = 0
blen_added_this_interval = 0
if DEBUG and not s.session_fully_downloaded:
s.PrintStats()
if s.CLOCK + s.interval > s.session_time_ms:
s.interval = s.session_time_ms - s.CLOCK
s.chunk_sched_time_delay = max(0, s.chunk_sched_time_delay - s.interval)
s.CLOCK += s.interval
if s.BLEN > s.min_playable_buff:
s.buffering = False
if s.buffering and not s.session_fully_downloaded:
play_stalled_this_interval = min(timeToDownloadSingleChunk(CHUNKSIZE, s.BR, s.BW, s.chunk_residue, s.CHUNKS_DOWNLOADED), s.interval / 1000.0)
if play_stalled_this_interval < s.interval / 1000.0: # chunk download so resume
s.buffering = False
if not s.session_fully_downloaded and s.chunk_sched_time_delay < s.interval:
s, param = chunksDownloaded(s, param, s.CLOCK - s.interval)
chunk_downloaded_this_interval = s.chunk_residue + s.numChunks
if play_stalled_this_interval == s.interval / 1000.0 and chunk_downloaded_this_interval >= 1.0:
s.buffering = False
s.chunk_residue = chunk_downloaded_this_interval - int(chunk_downloaded_this_interval)
if s.BLEN + chunk_downloaded_this_interval * s.CHUNKSIZE >= MAX_BUFFLEN: # can't download more than the MAX_BUFFLEN
chunk_downloaded_this_interval = int(MAX_BUFFLEN - s.BLEN) / CHUNKSIZE
s.chunk_residue = 0
if s.CHUNKS_DOWNLOADED + int(chunk_downloaded_this_interval) >= math.ceil((s.play_time_ms) / (CHUNKSIZE * 1000.0)):
chunk_downloaded_this_interval = math.ceil((s.play_time_ms) / (CHUNKSIZE * 1000.0)) - s.CHUNKS_DOWNLOADED
s.clock_inc = s.CLOCK - s.last_clock_val
s.last_clock_val = s.CLOCK
if s.numChunks > 0:
s.realBR = getRealBitrate(s.BR, s.CHUNKS_DOWNLOADED, CHUNKSIZE) / (CHUNKSIZE * 1000.0)
if chunk_downloaded_this_interval != 0 and s.numChunks > 0 and int(chunk_downloaded_this_interval) != 1 and s.last_chd_interval != 0 and s.last_chd_interval < chunk_downloaded_this_interval:
s.q.append((s.realBR * CHUNKSIZE * s.numChunks) / (s.clock_inc / 1000.0))
if s.CLOCK % 100 == 0:
s.player_visible_bw.append(np.mean(s.q))
s.last_chd_interval = chunk_downloaded_this_interval
s.CHUNKS_DOWNLOADED += int(chunk_downloaded_this_interval)
s.ATTEMPT_ID += int(chunk_downloaded_this_interval)
blen_added_this_interval = int(chunk_downloaded_this_interval) * CHUNKSIZE
if not s.buffering and s.BLEN - s.min_playable_buff >= 0 and s.BLEN - s.min_playable_buff + blen_added_this_interval < s.interval / 1000.0 and not s.session_fully_downloaded:
play_stalled_this_interval += (s.interval / 1000.0 - (float(s.BLEN) - s.min_playable_buff + float(blen_added_this_interval)) )
s.buffering = True
if not s.first_chunk:
s.BUFFTIME += float(play_stalled_this_interval)
s.PLAYTIME += s.interval / 1000.0 - play_stalled_this_interval
if s.first_chunk and s.CHUNKS_DOWNLOADED >= 1:
s.first_chunk = False
if s.buffering:
s.BLEN = s.min_playable_buff
elif not s.buffering and s.first_chunk and s.CHUNKS_DOWNLOADED == 0:
s.BLEN = max(0, float(s.BLEN) - s.interval / 1000.0)
else:
s.BLEN = max(0, float(s.CHUNKS_DOWNLOADED) * float(s.CHUNKSIZE) - float(s.PLAYTIME)) # else update the bufferlen to take into account the current time step
if s.CHUNKS_DOWNLOADED >= TOTAL_CHUNKS or s.CHUNKS_DOWNLOADED >= math.ceil((s.play_time_ms) / (s.CHUNKSIZE * 1000.0)):
s.session_fully_downloaded = True
break
if not s.first_chunk and not s.session_fully_downloaded and s.oldBR != s.BR:
s.numSwitches += 1
s.BW = max(interpolateBWInterval(s.CLOCK, s.used_bw_array, s.bw_array), 0.01) # interpolate bandwidth for the next heartbeat interval
s.used_bw_array.append(s.BW) # save the bandwidth used in the session
# account for the accumulated buffer
if s.BLEN > 0:
s.PLAYTIME += s.BLEN
# obtain stats to print
s.AVG_SESSION_BITRATE, s.REBUF_RATIO, s.rebuf_groundtruth = generateStats(s.AVG_SESSION_BITRATE, s.BUFFTIME, s.PLAYTIME, s.bufftimems, s.play_time_ms)
print s.trace_file + " param: "+str(param)+" minCell: "+str(s.minCellSize)+" QoE: " + str(s.maxQoE) + " avg. bitrate: " + str(s.AVG_SESSION_BITRATE) + " buf. ratio: " + str(s.REBUF_RATIO) +" playtime: " + str(s.PLAYTIME) +" buftime: " + str(s.BUFFTIME)
| [
"zahaib.akhtar@gmail.com"
] | zahaib.akhtar@gmail.com |
d82b95343bfad8ecb00fb1488a773f0a45b56461 | c9f67529e10eb85195126cfa9ada2e80a834d373 | /lib/python3.5/site-packages/torch/legacy/nn/MSECriterion.py | a93d045d898a3dc9560df362a1571c9a4ec7de11 | [
"Apache-2.0"
] | permissive | chilung/dllab-5-1-ngraph | 10d6df73ea421bfaf998e73e514972d0cbe5be13 | 2af28db42d9dc2586396b6f38d02977cac0902a6 | refs/heads/master | 2022-12-17T19:14:46.848661 | 2019-01-14T12:27:07 | 2019-01-14T12:27:07 | 165,513,937 | 0 | 1 | Apache-2.0 | 2022-12-08T04:59:31 | 2019-01-13T14:19:16 | Python | UTF-8 | Python | false | false | 1,082 | py | import torch
from .Criterion import Criterion
class MSECriterion(Criterion):
def __init__(self, sizeAverage=True):
super(MSECriterion, self).__init__()
self.sizeAverage = sizeAverage
self.output_tensor = None
def updateOutput(self, input, target):
if self.output_tensor is None:
self.output_tensor = input.new(1)
self._backend.MSECriterion_updateOutput(
self._backend.library_state,
input,
target,
self.output_tensor,
self.sizeAverage,
True, # reduce
)
self.output = self.output_tensor[0].item()
return self.output
def updateGradInput(self, input, target):
implicit_gradOutput = torch.Tensor([1]).type(input.type())
self._backend.MSECriterion_updateGradInput(
self._backend.library_state,
input,
target,
implicit_gradOutput,
self.gradInput,
self.sizeAverage,
True, # reduce
)
return self.gradInput
| [
"chilung.cs06g@nctu.edu.tw"
] | chilung.cs06g@nctu.edu.tw |
dfcf03607d0d721031a3b8bef63a7511114a9ca0 | 836f2095d5ac8a200fc4b19c2644c8f693612d23 | /src/preprocess.py | 25f880dacaaeded768f7cc037a5e26423e60fa4b | [] | no_license | peternara/VL-T5-embeding-image-to-text-mulit-modal | bf62e910dcc8d89606099a8b15065def917e9349 | 1902413ade01fb6f032c1cdbec65aaa41313277a | refs/heads/main | 2023-05-01T21:45:10.682026 | 2021-05-25T15:49:57 | 2021-05-25T15:49:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,560 | py | import torch
import numpy as np
import random
from copy import deepcopy
def corrupt_spans(text, mask_ratio=0.15, prefix=None):
"""Masked Language Modeling with corrupted span prediction
Args:
text
Returns:
source_text (masked_text)
target_text
Ex) (in vocab ids)
input
In this tutorial, we’ll explore how to preprocess your data using Transformers. The main tool for this is what we call a tokenizer.
masked_text
<extra_id_0> this tutorial, we’ll explore how to preprocess your data <extra_id_1> Transformers. The main tool for this is what <extra_id_2> call a tokenizer.
target_text
"""
tokens = text.split()
n_tokens = len(tokens)
n_mask = int(max(mask_ratio * n_tokens, 1))
mask_indices = torch.randperm(n_tokens)[:n_mask].sort().values
assert len(mask_indices) > 0, text
mask_indices = mask_indices.tolist()
span = [mask_indices[0], mask_indices[0]+1]
spans = []
for i, mask_index in enumerate(mask_indices):
# if current mask is not the last one & the next mask is right after current mask
if i < len(mask_indices) - 1 and mask_indices[i+1] == mask_index + 1:
contiguous = True
else:
contiguous = False
if contiguous:
span[1] += 1
else:
# non contiguous -> output current span
spans.append(span)
# if current mask is not the last one -> create next span
if i < len(mask_indices) - 1:
span = [mask_indices[i+1], mask_indices[i+1]+1]
masked_tokens = deepcopy(tokens)
target_tokens = []
cum_span_length = 0
for i, span in enumerate(spans):
start, end = span
masked_tokens[start-cum_span_length+i: end -
cum_span_length+i] = [f'<extra_id_{i}>']
target_tokens.append(f'<extra_id_{i}>')
target_tokens.extend(tokens[start:end])
cum_span_length += (end - start)
# target_tokens.append(f'<extra_id_{i+1}>')
# target_tokens.append(f'</s>')
masked_text = " ".join(masked_tokens)
if prefix is None:
source_text = masked_text
else:
source_text = f"{prefix} {masked_text}"
target_text = " ".join(target_tokens)
return source_text, target_text
def corrupt_bart(input_text, mask_ratio=0.30, prefix="denoise text:"):
"""BART-style Masked Language Modeling with corrupted span prediction
Args:
text
Returns:
source_text (masked_text)
target_text
Ex) (in vocab ids)
input
In this tutorial, we’ll explore how to preprocess your data using Transformers. The main tool for this is what we call a tokenizer.
masked_text
denoise text: In <mask> we’ll explore how to preprocess your data <mask> Transformers. <mask> main <mask> for this is what we <mask> a tokenizer.
target_text
same is input text
"""
tokens = input_text.split()
n_tokens = len(tokens)
n_mask = int(max(mask_ratio * n_tokens, 1))
mask_indices = torch.randperm(n_tokens)[:n_mask].sort().values
assert len(mask_indices) > 0, input_text
mask_indices = mask_indices.tolist()
span = [mask_indices[0], mask_indices[0]+1]
spans = []
for i, mask_index in enumerate(mask_indices):
# if current mask is not the last one & the next mask is right after current mask
if i < len(mask_indices) - 1 and mask_indices[i+1] == mask_index + 1:
contiguous = True
else:
contiguous = False
if contiguous:
span[1] += 1
else:
# non contiguous -> output current span
spans.append(span)
# if current mask is not the last one -> create next span
if i < len(mask_indices) - 1:
span = [mask_indices[i+1], mask_indices[i+1]+1]
masked_tokens = deepcopy(tokens)
cum_span_length = 0
for i, span in enumerate(spans):
start, end = span
masked_tokens[start-cum_span_length +
i: end-cum_span_length+i] = ['<mask>']
cum_span_length += (end - start)
masked_text = " ".join(masked_tokens)
if prefix is None:
source_text = masked_text
else:
source_text = f"{prefix} {masked_text}"
target_text = input_text
return source_text, target_text
def ground_caption(captions, n_ground=1, prefix="describe visual inputs:", sort=True):
"""
For VG
Args:
captions
n_ground
Returns:
source_text
target_text
Ex) (in vocab ids)
captions
['Yellow banana', 'red crayon', 'black cow', 'blue sky']
n_ground > 1
ground_indices
[1, 0, 2]
source_text
describe visual inputs: <vis_extra_id_1> <vis_extra_id_0> <vis_extra_id_2>
target_text
<extra_id_0> red crayon <extra_id_1> Yellow banana <extra_id_2> black cow
n_ground == 1
source_text
describe visual inputs: <vis_extra_id_1>
target_text
red crayon
"""
n_boxes = len(captions)
if sort:
ground_indices = torch.randperm(n_boxes)[:n_ground].sort().values
else:
ground_indices = torch.randperm(n_boxes)[:n_ground]
ground_indices = ground_indices.tolist()
source_text = [prefix]
target_text = []
if n_ground == 1:
idx = ground_indices[0]
source_text.append(f'<vis_extra_id_{idx}>')
target_text.append(f'{captions[idx]}')
else:
for j, idx in enumerate(ground_indices):
source_text.append(f'<vis_extra_id_{idx}>')
target_text.append(f'<extra_id_{j}>')
target_text.append(f'{captions[idx]}')
# target_text.append('</s>')
source_text = " ".join(source_text)
target_text = " ".join(target_text)
# return ground_indices, source_text, target_text
return source_text, target_text
def refer_expression(captions, n_ground=1, prefix="refer expressions:", sort=True):
"""
n_ground > 1
ground_indices
[1, 0, 2]
source_text
refer expressions: <extra_id_0> red crayon <extra_id_1> Yellow banana <extra_id_2> black cow
target_text
<vis_extra_id_1> <vis_extra_id_0> <vis_extra_id_2>
n_ground == 1
source_text
refer expressions: red crayon
target_text
<vis_extra_id_1>
"""
n_boxes = len(captions)
if sort:
ground_indices = torch.randperm(n_boxes)[:n_ground].sort().values
else:
ground_indices = torch.randperm(n_boxes)[:n_ground]
ground_indices = ground_indices.tolist()
source_text = [prefix]
target_text = []
if n_ground == 1:
idx = ground_indices[0]
source_text.append(f'{captions[idx]}')
target_text.append(f'<vis_extra_id_{idx}>')
else:
for j, idx in enumerate(ground_indices):
source_text.append(f'<extra_id_{j}>')
source_text.append(f'{captions[idx]}')
target_text.append(f'<vis_extra_id_{idx}>')
# target_text.append('</s>')
source_text = " ".join(source_text)
target_text = " ".join(target_text)
# return ground_indices, source_text, target_text
return source_text, target_text
| [
"heythisischo@gmail.com"
] | heythisischo@gmail.com |
bdc874a8f0c2d822cf43731e1ed0437f025b91f7 | 16385e10f6ad05b8147517daf2f40dbdda02617c | /site-packages/cs.metrics-15.4.0.2-py2.7.egg/cs/metrics/updates/v15_2_0_15/__init__.py | 3fe53ff954cbc858516882f09d4b8a7e3f041ebb | [] | no_license | prachipainuly-rbei/devops-poc | 308d6cab02c14ffd23a0998ff88d9ed0420f513a | 6bc932c67bc8d93b873838ae6d9fb8d33c72234d | refs/heads/master | 2020-04-18T01:26:10.152844 | 2019-02-01T12:25:19 | 2019-02-01T12:25:19 | 167,118,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,759 | py | #!/usr/bin/env powerscript
# -*- mode: python; coding: iso-8859-1 -*-
#
# Copyright (C) 1990 - 2014 CONTACT Software GmbH
# All rights reserved.
# http://www.contact.de/
#
from __future__ import unicode_literals
__revision__ = "$Id: __init__.py 154332 2017-02-21 14:43:33Z khi $"
class AddServiceUsers(object):
"""
Add the qcaggregationengine and qccomputationengine users with his standard assignments. We have
to use an update script because role assignments are usually not
updated automatically.
"""
def run(self):
from cdb.comparch import modules
from cdb.comparch import content
from cdb import sqlapi
for login, password in [('qcaggregationengine', '$pbkdf2-sha256$29000$0hqDkNJaS4nxvrd2LsVYyw$ydktQewvlXlJbIKuVfGSxbqqVBfrgKkTMMODNYeWHbc'),
('qccomputationengine', '$pbkdf2-sha256$29000$LiUk5HyPUWrtPcfYO.d8rw$s9lNyEJ8IA9sS4ANySTZiICx.w1jmI6Eyim2zl4gf9s')]:
user = sqlapi.RecordSet2("angestellter",
"personalnummer='%s'" % login)
if not user:
m = modules.Module.ByKeys('cs.metrics')
for rel, key in [('angestellter', 'personalnummer'),
('cdb_global_subj', 'subject_id')]:
content_filter = content.ModuleContentFilter([rel])
mc = modules.ModuleContent(m.module_id, m.std_conf_exp_dir, content_filter)
for mod_content in mc.getItems(rel).values():
if mod_content.getAttr(key) == login:
try:
mod_content.insertIntoDB()
user = sqlapi.RecordSet2("angestellter",
"personalnummer='%s'" % login)
except Exception:
# Already there
pass
# The component architecture does not transport the password
if user and (not user[0].password or user[0].password == password):
import cdbwrapc
import os
new_pw = cdbwrapc.get_crypted_password(login, os.urandom(32))
user[0].update(password=new_pw)
class AddServiceUserOptions(object):
"""
Add the new mandatory service options for the service
cs.metrics.qc_engine.QCAggregationEngine and cs.metrics.qc_engine.QCComputationEngine
"""
def run(self):
from cdb import sqlapi
svc_names_and_login_pairs = [("cs.metrics.qc_engine.QCAggregationEngine",
"qcaggregationengine"),
("cs.metrics.qc_engine.QCComputationEngine",
"qccomputationengine")]
for svcname, login in svc_names_and_login_pairs:
svcs = sqlapi.RecordSet2("cdbus_svcs", "svcname='{}'".format(svcname))
for svc in svcs:
cond = "svcid='%s'" % sqlapi.quote(svc.svcid)
opt_names = [svc_opt.name
for svc_opt in sqlapi.RecordSet2("cdbus_svcopts", cond)]
new_opts = {
"--user": login
}
for name, val in new_opts.items():
if name not in opt_names:
sqlapi.Record("cdbus_svcopts",
svcid=svc.svcid,
name=name,
value=val).insert()
pre = []
post = [AddServiceUsers, AddServiceUserOptions]
if __name__ == "__main__":
AddServiceUsers().run()
AddServiceUserOptions().run()
| [
"PPR4COB@rbeigcn.com"
] | PPR4COB@rbeigcn.com |
197cc624f47f8f44191b93e8603750c75b54ad13 | 6f2ae51bfa26f58cf0eccad67a563dc91f87e0ac | /oop18 (quick tips).py | 62cd87d8ff9bd42976ba810c827c9dfcd799132c | [
"MIT"
] | permissive | nicholaskarlson/Object-Oriented-Programming-in-Python | c830e9dc86df72ee6fbfd017cf05198e4664e2b6 | 9a51892a99f9920c20c3abf8342a060af94305e6 | refs/heads/master | 2022-12-03T03:27:12.282726 | 2020-08-22T12:17:27 | 2020-08-22T12:17:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 942 | py | # OOP quick tips
# Tip 1
# printing the children classes of Parent class.
# Parent classes
class Father:
def __init__(self):
value=0
def update(self):
value+=1
def renew(self):
value=0
def show(self):
print(value)
class Mother:
def __init__(self):
value=1
def update(self):
value-=1
def renew(self):
value=0
def show(self):
print(value)
# Children classes
class Child_1(Father):
def update(self):
value+=2
class Child_2(Mother):
def update(self):
value-=2
# the main function.
def interiors(*classx):
subclasses=set()
work=[*classx]
while work:
parent=work.pop()
for child in parent.__subclasses__():
if child not in subclasses:
subclasses.add(child)
work.append(child)
return subclasses
print(interiors(Father,Mother))
| [
"ahammadshawki8@gmail.com"
] | ahammadshawki8@gmail.com |
b5aecc91b76a83f848608b2e1b3114abc8dd616c | b5921afe6ea5cd8b3dcfc83147ab5893134a93d0 | /tl/utils/lockmanager.py | 1fa59de415bd6e96b46201e2b870ea969f38ebeb | [
"LicenseRef-scancode-other-permissive"
] | permissive | techdragon/tl | aaeb46e18849c04ad436e0e786401621a4be82ee | 6aba8aeafbc92cabdfd7bec11964f7c3f9cb835d | refs/heads/master | 2021-01-17T16:13:18.636457 | 2012-11-02T10:08:10 | 2012-11-02T10:08:10 | 9,296,808 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,410 | py | # tl/utils/lockmanager.py
#
#
""" manages locks """
## basic imports
import _thread
import threading
import logging
## LockManager class
class LockManager(object):
""" place to hold locks """
def __init__(self):
self.locks = {}
def allocate(self, name):
""" allocate a new lock """
self.locks[name] = _thread.allocate_lock()
logging.debug('lockmanager - allocated %s' % name)
def get(self, name):
""" get lock """
if name not in self.locks: self.allocate(name)
return self.locks[name]
def delete(self, name):
""" delete lock """
if name in self.locks: del self.locks[name]
def acquire(self, name):
""" acquire lock """
if name not in self.locks: self.allocate(name)
logging.debug('lockmanager - *acquire* %s' % name)
self.locks[name].acquire()
def release(self, name):
""" release lock """
logging.debug('lockmanager - *releasing* %s' % name)
try: self.locks[name].release()
except RuntimeError: pass
## RLockManager class
class RLockManager(LockManager):
def allocate(self, name):
""" allocate a new lock """
self.locks[name] = threading.RLock()
logging.debug('lockmanager - allocated RLock %s' % name)
## global lockmanagers
lockmanager = LockManager()
rlockmanager = RLockManager()
| [
"feedbackflow@gmail.com"
] | feedbackflow@gmail.com |
4fd7d2bf8eaab733ceafeabe6a8c3ece9b28126e | e641bd95bff4a447e25235c265a58df8e7e57c84 | /third_party/blink/renderer/modules/webcodecs/DEPS | f79eda8dda151643478fa884e467ea8d8ff57eff | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft"
] | permissive | zaourzag/chromium | e50cb6553b4f30e42f452e666885d511f53604da | 2370de33e232b282bd45faa084e5a8660cb396ed | refs/heads/master | 2023-01-02T08:48:14.707555 | 2020-11-13T13:47:30 | 2020-11-13T13:47:30 | 312,600,463 | 0 | 0 | BSD-3-Clause | 2022-12-23T17:01:30 | 2020-11-13T14:39:10 | null | UTF-8 | Python | false | false | 942 | include_rules = [
"+base/threading/thread_task_runner_handle.h",
"+components/viz/common/gpu/raster_context_provider.h",
"+components/viz/common/resources/single_release_callback.h",
"+gpu/command_buffer/client/shared_image_interface.h",
"+media/base",
"+media/filters",
"+media/formats/mp4/box_definitions.h",
"+media/media_buildflags.h",
"+media/mojo",
"+media/renderers",
"+media/video",
"+third_party/libyuv",
"+ui/gfx/color_space.h",
"+ui/gfx/geometry/rect.h",
"+ui/gfx/geometry/size.h",
"+ui/gfx/gpu_memory_buffer.h",
]
specific_include_rules = {
"video_track_reader_writer_test\.cc": [
"+base/run_loop.h",
],
"video_decoder_broker_test\.cc": [
"+base/run_loop.h",
"+base/threading/thread.h",
"+gpu/command_buffer/common/mailbox_holder.h",
],
"audio_decoder_broker_test\.cc": [
"+base/run_loop.h",
"+base/files/file_util.h",
],
}
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org | |
c0a00ebc3b8a77426bb463375c0e5a7233203829 | 4ac23b2633321df48a896180e6205dfc17ad5746 | /scratch09/ex04.py | b5ece5306f502f6fa8319f3759496fd815d500bf | [] | no_license | lee-saint/lab-python | f051a544ed97956f9725bb6f4a080bdc65c7e1ad | a425c173c379dda0de21eec538195ded17d31697 | refs/heads/master | 2020-11-30T12:24:12.614119 | 2019-12-27T07:35:50 | 2019-12-27T07:35:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py | """
pandas 패키지를 사용한 csv 파일 읽기
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
file_path = os.path.join('..', 'scratch08', 'mpg.csv')
df = pd.read_csv(file_path)
print(df.head()) # 데이터프레임의 앞의 일부분 데이터 출력
print('shape:', df.shape) # 관측값 234, 변수 11
print('dtypes:', df.dtypes)
# DataFrame.dtypes: 각 컬럼(변수)의 데이터 타입
# pandas의 데이터 타입: object(문자열), float(실수), int(정수)
print(df.describe()) # 기술 요약 통계량
displ = df['displ']
print(displ)
cty = df['cty']
plt.scatter(displ, cty)
plt.show()
# DataFrame에서 행(row)을 선택할 때: df.iloc[행 번호(인덱스)], df.loc[행 레이블]
print(df.iloc[0])
print(df.iloc[0:3]) # row index 0 이상 3 미만인 행 선택
# 데이터프레임에서 여러 개의 컬럼(변수)들을 선택
cols = ['displ', 'cty', 'hwy'] # []: 리스트
print(df[cols]) # []: 인덱스 연산자
# 데이터프레임에서 여러 개의 행(관측값)과 컬럼(변수)들을 선택
# df.loc[row_labels, col_labels]: 행과 열의 레이블(이름)
# df.iloc[row_indices, col_indices]: 행과 열의 인덱스(숫자)
print(df.loc[0:3, cols])
print(df.iloc[0:3, 0:3])
| [
"plutorian131@gmail.com"
] | plutorian131@gmail.com |
9e8e3a770c962073cf83f0c6dfa97a5803dfcdfe | 90f2cbe1c940a20dcc893837b6033a51d3233931 | /Learn_Flas/flask_study/lab-4-movie/app/home/forms.py | e82f1fe123a80255955704bfb4bcbf1116930796 | [] | no_license | MaxNcu/Learn_Python | 71501f38f6442f3ff2a1de1ff685b8975e50af20 | 5a1c6edf353ed7447b2ffd4126ad7668d8c5a407 | refs/heads/master | 2022-01-15T18:56:04.814476 | 2019-07-20T03:02:02 | 2019-07-20T03:02:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | # -*- coding: utf-8 -*-
# @Time : 2018/8/15 0015 13:48
# @Author : Langzi
# @Blog : www.langzi.fun
# @File : forms.py
# @Software: PyCharm
import sys
reload(sys)
sys.setdefaultencoding('utf-8') | [
"982722261@qq.com"
] | 982722261@qq.com |
5c55b42e4c4e1fb43abd86684c0665b0d4446b63 | d3d53fd1fb10e3895495066c3cc7b5529dfb2e27 | /main/migrations/0007_partenaire.py | 4e88e863572660060118dbc8e3e939a601dac5f3 | [] | no_license | miyou995/msenergy | c517b05cb3b28e9bbe3a5e668990ea96951f3fb7 | 8f635d1e19f8d91bffe7490cc88e25aa9b65e410 | refs/heads/master | 2023-01-13T06:50:10.379344 | 2020-10-20T10:41:05 | 2020-10-20T10:41:05 | 299,890,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | # Generated by Django 3.0.7 on 2020-07-27 09:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0006_contactform_date_added'),
]
operations = [
migrations.CreateModel(
name='Partenaire',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('logo', models.ImageField(upload_to='part/')),
('url_marque', models.URLField(blank=True)),
],
),
]
| [
"inter.taki@gmail.com"
] | inter.taki@gmail.com |
7ea82c6443323fd95423f77eaa3d686341b30664 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /KQe5w8AdSLbweW8ck_5.py | e974521972362f31814cf4c82cbb398519e4a242 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | """
Create a function that returns the characters from a list or string `r` on odd
or even positions, depending on the specifier `s`. The specifier will be
**"odd"** for items on _odd positions_ (1, 3, 5, ...) and **"even"** for items
on _even positions_ (2, 4, 6, ...).
### Examples
char_at_pos([2, 4, 6, 8, 10], "even") ➞ [4, 8]
# 4 & 8 occupy the 2nd & 4th positions
char_at_pos("EDABIT", "odd") ➞ "EAI"
# "E", "A" and "I" occupy the 1st, 3rd and 5th positions
char_at_pos(["A", "R", "B", "I", "T", "R", "A", "R", "I", "L", "Y"], "odd") ➞ ["A", "B", "T", "A", "I", "Y"]
### Notes
* Lists are zero-indexed, so, index+1 = position or position-1 = index.
* There will not be an empty string or an empty list.
* ( **Optional** ) Try solving this challenge in a single-line lambda function.
* A more advanced version of this challenge can be [found here](https://edabit.com/challenge/72KukSssxk2eHrWqx).
"""
char_at_pos=lambda r,s:(r[::2],r[1::2])['e'in s]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
51d3c030d8d2022184af7c47d538156b2dbd5e13 | 738ac84e16f206e417399e96702b04433b2f286f | /setup.py | 1713fc06cbf62f9b132418e455eae59f547c13d0 | [] | no_license | belonesox/pg2bcolz | d732f7575c061f82b0097487bce29ec0ec8d89ae | 56964fca6256a9afa3051782fadea8708a56cce4 | refs/heads/master | 2020-07-05T05:17:33.306526 | 2019-08-15T12:11:03 | 2019-08-15T12:11:03 | 202,534,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of pg2bcolz.
# https://github.com/belonesox/pg2bcolz
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2018, Stas Fomin <stas-fomin@yandex.ru>
from setuptools import setup, find_packages
from pg2bcolz import __version__
tests_require = [
'mock',
'nose',
'coverage',
'yanc',
'preggy',
'tox',
'ipdb',
'coveralls',
'sphinx',
]
setup(
name='pg2bcolz',
version=__version__,
description='Fast and optimized loading of large bcolz from postgres DB',
long_description='''
Fast and optimized loading of large bcolz tables from postgres DB
''',
keywords='Bcolz Postgres',
author='Stas Fomin',
author_email='stas-fomin@yandex.ru',
url='https://github.com/belonesox/pg2bcolz',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: Unix',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Operating System :: OS Independent',
],
packages=find_packages(),
include_package_data=False,
install_requires=[
"psycopg2",
# "pandas",
"numpy",
"bcolz",
# add your dependencies here
# remember to use 'package-name>=x.y.z,<x.y+1.0' notation (this way you get bugfixes)
],
extras_require={
'tests': tests_require,
},
entry_points={
'console_scripts': [
# add cli scripts here in this form:
# 'bcolz=bcolz.cli:main',
],
},
)
| [
"stas-fomin@yandex.ru"
] | stas-fomin@yandex.ru |
40c82ae4f2841d3d7fe415a716b29527eac584bb | 2b0eab74af8d23244ff11699830f9bb10fbd717a | /accounts/migrations/0015_auto_20190527_1010.py | 7e0ed91e427e416ba95a9cf189e3a066bef8f203 | [] | no_license | alexandrenorman/mixeur | c7e25cd20b03c78b361cb40e3e359a6dc5d9b06b | 95d21cd6036a99c5f399b700a5426e9e2e17e878 | refs/heads/main | 2023-03-13T23:50:11.800627 | 2021-03-07T15:49:15 | 2021-03-07T15:49:15 | 345,384,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | # Generated by Django 2.2 on 2019-05-27 08:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0014_user_phone_cache'),
]
operations = [
migrations.RenameField(
model_name='rgpdconsent',
old_name='allow_to_share_my_information_with_ADEME',
new_name='allow_to_share_my_information_with_ademe',
),
]
| [
"norman@xael.org"
] | norman@xael.org |
427d19c541374cbceef49ce9c683561c00b10ed6 | d05a59feee839a4af352b7ed2fd6cf10a288a3cb | /xlsxwriter/test/worksheet/test_write_filter.py | cf9f4989a1a59d294da00df1dafaf3b81e3ff4fb | [
"BSD-2-Clause-Views"
] | permissive | elessarelfstone/XlsxWriter | 0d958afd593643f990373bd4d8a32bafc0966534 | bb7b7881c7a93c89d6eaac25f12dda08d58d3046 | refs/heads/master | 2020-09-24T06:17:20.840848 | 2019-11-24T23:43:01 | 2019-11-24T23:43:01 | 225,685,272 | 1 | 0 | NOASSERTION | 2019-12-03T18:09:06 | 2019-12-03T18:09:05 | null | UTF-8 | Python | false | false | 743 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...worksheet import Worksheet
class TestWriteFilter(unittest.TestCase):
"""
Test the Worksheet _write_filter() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_filter(self):
"""Test the _write_filter() method"""
self.worksheet._write_filter('East')
exp = """<filter val="East"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
2591a5d573367c1b0e87394c9bd88b7c4525efbb | 292cec77b5003a2f80360d0aee77556d12d990f7 | /src/bentoml_cli/worker/http_dev_api_server.py | 669023f72a5ef4abf765eec7ab835a72487e137f | [
"Apache-2.0"
] | permissive | yubozhao/BentoML | 194a6ec804cc1c6dbe7930c49948b6707cbc3c5f | d4bb5cbb90f9a8ad162a417103433b9c33b39c84 | refs/heads/master | 2022-12-17T00:18:55.555897 | 2022-12-06T00:11:39 | 2022-12-06T00:11:39 | 178,978,385 | 3 | 0 | Apache-2.0 | 2020-12-01T18:17:15 | 2019-04-02T01:53:53 | Python | UTF-8 | Python | false | false | 3,932 | py | from __future__ import annotations
import socket
import click
@click.command()
@click.argument("bento_identifier", type=click.STRING, required=False, default=".")
@click.option("--fd", type=click.INT, required=True)
@click.option("--working-dir", required=False, type=click.Path(), default=None)
@click.option("--backlog", type=click.INT, default=2048)
@click.option(
"--prometheus-dir",
type=click.Path(exists=True),
help="Required by prometheus to pass the metrics in multi-process mode",
)
@click.option(
"--ssl-certfile",
type=str,
default=None,
help="SSL certificate file",
)
@click.option(
"--ssl-keyfile",
type=str,
default=None,
help="SSL key file",
)
@click.option(
"--ssl-keyfile-password",
type=str,
default=None,
help="SSL keyfile password",
)
@click.option(
"--ssl-version",
type=int,
default=None,
help="SSL version to use (see stdlib 'ssl' module)",
)
@click.option(
"--ssl-cert-reqs",
type=int,
default=None,
help="Whether client certificate is required (see stdlib 'ssl' module)",
)
@click.option(
"--ssl-ca-certs",
type=str,
default=None,
help="CA certificates file",
)
@click.option(
"--ssl-ciphers",
type=str,
default=None,
help="Ciphers to use (see stdlib 'ssl' module)",
)
def main(
bento_identifier: str,
fd: int,
working_dir: str | None,
backlog: int,
prometheus_dir: str | None,
ssl_certfile: str | None,
ssl_keyfile: str | None,
ssl_keyfile_password: str | None,
ssl_version: int | None,
ssl_cert_reqs: int | None,
ssl_ca_certs: str | None,
ssl_ciphers: str | None,
):
"""
Start a development server for the BentoML service.
"""
import psutil
import uvicorn
from bentoml import load
from bentoml._internal.log import configure_server_logging
from bentoml._internal.context import component_context
from bentoml._internal.configuration.containers import BentoMLContainer
component_context.component_type = "dev_api_server"
configure_server_logging()
if prometheus_dir is not None:
BentoMLContainer.prometheus_multiproc_dir.set(prometheus_dir)
svc = load(bento_identifier, working_dir=working_dir, standalone_load=True)
# setup context
component_context.component_name = svc.name
if svc.tag is None:
component_context.bento_name = svc.name
component_context.bento_version = "not available"
else:
component_context.bento_name = svc.tag.name
component_context.bento_version = svc.tag.version or "not available"
sock = socket.socket(fileno=fd)
uvicorn_options = {
"backlog": backlog,
"log_config": None,
"workers": 1,
"lifespan": "on",
}
if ssl_certfile:
import ssl
uvicorn_options["ssl_certfile"] = ssl_certfile
if ssl_keyfile:
uvicorn_options["ssl_keyfile"] = ssl_keyfile
if ssl_keyfile_password:
uvicorn_options["ssl_keyfile_password"] = ssl_keyfile_password
if ssl_ca_certs:
uvicorn_options["ssl_ca_certs"] = ssl_ca_certs
if not ssl_version:
ssl_version = ssl.PROTOCOL_TLS_SERVER
uvicorn_options["ssl_version"] = ssl_version
if not ssl_cert_reqs:
ssl_cert_reqs = ssl.CERT_NONE
uvicorn_options["ssl_cert_reqs"] = ssl_cert_reqs
if not ssl_ciphers:
ssl_ciphers = "TLSv1"
uvicorn_options["ssl_ciphers"] = ssl_ciphers
if psutil.WINDOWS:
uvicorn_options["loop"] = "asyncio"
import asyncio
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) # type: ignore
config = uvicorn.Config(svc.asgi_app, **uvicorn_options)
uvicorn.Server(config).run(sockets=[sock])
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter
| [
"noreply@github.com"
] | yubozhao.noreply@github.com |
d7ad5a84b638fc0540e7f580dc4f50df4fde635c | 23611933f0faba84fc82a1bc0a85d97cf45aba99 | /google-cloud-sdk/lib/surface/iam/service_accounts/sign_blob.py | e33878bf514e44c4212e0e1d81cd6185617a051e | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | KaranToor/MA450 | 1f112d1caccebdc04702a77d5a6cee867c15f75c | c98b58aeb0994e011df960163541e9379ae7ea06 | refs/heads/master | 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 | Apache-2.0 | 2020-12-24T00:38:09 | 2017-01-18T00:05:44 | Python | UTF-8 | Python | false | false | 2,830 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for signing blobs for service accounts."""
import textwrap
from apitools.base.py import exceptions
from googlecloudsdk.command_lib.iam import base_classes
from googlecloudsdk.command_lib.iam import iam_util
from googlecloudsdk.core import log
class SignBlob(base_classes.BaseIamCommand):
"""Sign a blob with a managed service account key.
This command signs a file containing arbitrary binary data (a blob) using a
system-managed service account key.
"""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': textwrap.dedent("""\
To sign a blob file with a system-managed service account key,
run:
$ {command} --iam-account my-account@somedomain.com input.bin output.bin
"""),
'SEE ALSO': textwrap.dedent("""\
For more information on how this command ties into the wider cloud
infrastructure, please see
[](https://cloud.google.com/appengine/docs/java/appidentity/)
"""),
}
@staticmethod
def Args(parser):
parser.add_argument('--iam-account',
required=True,
help='The service account to sign as.')
parser.add_argument('input',
metavar='INPUT-FILE',
help='A path to the blob file to be signed.')
parser.add_argument('output',
metavar='OUTPUT-FILE',
help='A path the resulting signed blob will be '
'written to.')
def Run(self, args):
try:
response = self.iam_client.projects_serviceAccounts.SignBlob(
self.messages.IamProjectsServiceAccountsSignBlobRequest(
name=iam_util.EmailToAccountResourceName(args.iam_account),
signBlobRequest=self.messages.SignBlobRequest(
bytesToSign=self.ReadFile(args.input))))
self.WriteFile(args.output, response.signature)
log.status.Print(
'signed blob [{0}] as [{1}] for [{2}] using key [{3}]'.format(
args.input, args.output, args.iam_account, response.keyId))
except exceptions.HttpError as error:
raise iam_util.ConvertToServiceAccountException(error, args.account)
| [
"toork@uw.edu"
] | toork@uw.edu |
66cc401c0e1112684bdbcf769d7b8f85b3ad00b6 | 916480ae24345193efa95df013f637e0a115653b | /web/transiq/driver/migrations/0022_driver_pan.py | ee0c60713f9da7066133b323920f9e578f38b391 | [
"Apache-2.0"
] | permissive | manibhushan05/tms | 50e289c670e1615a067c61a051c498cdc54958df | 763fafb271ce07d13ac8ce575f2fee653cf39343 | refs/heads/master | 2022-12-11T07:59:30.297259 | 2021-09-08T03:24:59 | 2021-09-08T03:24:59 | 210,017,184 | 0 | 0 | Apache-2.0 | 2022-12-08T02:35:01 | 2019-09-21T16:23:57 | Python | UTF-8 | Python | false | false | 403 | py | # Generated by Django 2.0.5 on 2018-08-28 16:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('driver', '0021_auto_20180702_1554'),
]
operations = [
migrations.AddField(
model_name='driver',
name='pan',
field=models.CharField(blank=True, max_length=11, null=True),
),
]
| [
"mani@myhost.local"
] | mani@myhost.local |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.