blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ac0e03c9aabb196bcd25e697acd16276ed0dfb48
|
4944541b0cd0fa48a01581ffce5e7ce16f5cf8d7
|
/src/Backend/MbkExam/SimpleExam/views.py
|
041952f4adbc8875326e27a1df3d2cbda002813f
|
[] |
no_license
|
aballah-chamakh/the_exam
|
49a5b5c9d28c61b2283f2d42d2b2fb771dd48bf4
|
dbbbdc7a955ca61572f26430a7788407eaf0c632
|
refs/heads/main
| 2023-03-28T13:19:18.148630
| 2021-04-03T22:12:51
| 2021-04-03T22:12:51
| 354,404,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
from django.shortcuts import render
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework import viewsets,status,generics
from .models import SimpleExam
from .serializers import SimpleExamSerializer
class SimpleExamViewSet(viewsets.ModelViewSet):
    """Full CRUD endpoint set (list/retrieve/create/update/destroy) for SimpleExam."""

    queryset = SimpleExam.objects.all()
    serializer_class = SimpleExamSerializer
|
[
"chamakhabdallah8@gmail.com"
] |
chamakhabdallah8@gmail.com
|
0284d92e23809cd7d04fd9c59c3266ec025e9d92
|
3dc9f2aaea2620d6c382211369548d9b52f4039a
|
/FaceQuality/qualityface/config.py
|
f0753116b5bb74da50ad2f23270be1c25b0be2cd
|
[
"MIT"
] |
permissive
|
banana1024/FaceProjects
|
6fd4961d73356139c7ebba85428a9c40b02335a8
|
87ae30191c01eadc3cfa59b751db91f1aa76bc5d
|
refs/heads/master
| 2022-09-18T15:40:19.346332
| 2020-05-29T08:15:17
| 2020-05-29T08:15:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
import torch
import torchvision.transforms as T
class Config:
    """Static configuration for face-quality inference; class attributes act
    as a simple namespace shared through the `config` instance below."""

    # Input preprocessing: PIL image -> tensor in [0, 1], then rescaled to
    # [-1, 1] per channel (mean 0.5, std 0.5 on all three channels).
    test_transform = T.Compose([
        T.ToTensor(),
        T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])

    # Training settings.
    checkpoints = "checkpoints"   # directory holding model snapshots
    restore_model = "last.pth"    # checkpoint file name to resume from
    # Prefer the GPU when one is visible, otherwise fall back to CPU.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'


config = Config()
|
[
"forrest_zhu@foxmail.com"
] |
forrest_zhu@foxmail.com
|
1cc98c58b0790dbe9fcc7b0cafa6d97fb43db76b
|
c7a39ecf433a6b954096aed2ef8ef2b3366fab35
|
/quora_project/questions/api/views.py
|
686c01b36dd75ef3c615aec616367168208b3896
|
[
"MIT"
] |
permissive
|
vojta-janousek/Questions-and-Answers-Website
|
78f28a8f8ad6e66f7f6aca6db733e41eef3c1485
|
02cfb6274a73f64382e57f3718beb5ee89c2d1ac
|
refs/heads/master
| 2020-11-24T16:09:10.519718
| 2020-04-01T20:58:17
| 2020-04-01T20:58:17
| 228,234,952
| 0
| 0
|
MIT
| 2020-06-07T17:19:59
| 2019-12-15T18:58:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,803
|
py
|
from rest_framework import viewsets, generics, status
from rest_framework.exceptions import ValidationError
from rest_framework.generics import get_object_or_404
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework.response import Response
from questions.api.permissions import IsAuthorOrReadOnly
from questions.api.serializers import QuestionSerializer, AnswerSerializer
from questions.models import Question, Answer
class QuestionViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for questions, addressed by slug instead of pk."""

    queryset = Question.objects.all()
    serializer_class = QuestionSerializer
    lookup_field = 'slug'
    permission_classes = [IsAuthenticated, IsAuthorOrReadOnly]

    def perform_create(self, serializer):
        # Stamp the authenticated user as the question's author on creation.
        serializer.save(author=self.request.user)
class AnswerCreateAPIView(generics.CreateAPIView):
    """Create an answer for the question identified by the `slug` URL kwarg."""

    queryset = Answer.objects.all()
    serializer_class = AnswerSerializer
    permission_classes = [IsAuthenticated]

    def perform_create(self, serializer):
        author = self.request.user
        question = get_object_or_404(Question, slug=self.kwargs.get('slug'))
        # Enforce at most one answer per user per question.
        if question.answers.filter(author=author).exists():
            raise ValidationError('You have already answered this Question')
        serializer.save(author=author, question=question)
class QuestionAnswerListAPIView(generics.ListAPIView):
    """List every answer to a given question, newest first."""

    serializer_class = AnswerSerializer
    permission_classes = [IsAuthenticated]

    def get_queryset(self):
        slug = self.kwargs.get('slug')
        answers = Answer.objects.filter(question__slug=slug)
        return answers.order_by('-created_at')
class AnswerRUDAPIView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete one answer; only its author may modify it."""

    queryset = Answer.objects.all()
    serializer_class = AnswerSerializer
    permission_classes = [IsAuthenticated, IsAuthorOrReadOnly]
class AnswerLikeAPIView(APIView):
    """Toggle the requesting user's vote ("like") on an answer.

    POST adds the user to the answer's voters; DELETE removes them.
    Both respond with the refreshed serialized answer and HTTP 200.
    """

    serializer_class = AnswerSerializer
    permission_classes = [IsAuthenticated]

    def _update_voters(self, request, pk, add):
        # Shared implementation for post/delete (the two bodies were
        # previously duplicated): mutate voters, then serialize with the
        # request in context so user-aware fields render correctly.
        answer = get_object_or_404(Answer, pk=pk)
        if add:
            answer.voters.add(request.user)
        else:
            answer.voters.remove(request.user)
        answer.save()
        serializer = self.serializer_class(answer, context={'request': request})
        return Response(serializer.data, status=status.HTTP_200_OK)

    def delete(self, request, pk):
        """Remove the current user's vote from the answer."""
        return self._update_voters(request, pk, add=False)

    def post(self, request, pk):
        """Add the current user's vote to the answer."""
        return self._update_voters(request, pk, add=True)
|
[
"janousek315@gmail.com"
] |
janousek315@gmail.com
|
682ad7458ee8c3a3b669d39d903e656bb7072887
|
651a296c8f45b5799781fd78a6b5329effe702a0
|
/monomial/mono_upto_enum.py
|
3a359439d6875442b34669e9b22902025af36554
|
[] |
no_license
|
pdhhiep/Computation_using_Python
|
095d14370fe1a01a192d7e44fcc81a52655f652b
|
407ed29fddc267950e9860b8bbd1e038f0387c97
|
refs/heads/master
| 2021-05-29T12:35:12.630232
| 2015-06-27T01:05:17
| 2015-06-27T01:05:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,963
|
py
|
#!/usr/bin/env python
def mono_upto_enum ( m, n ):
    """Count monomials in M variables of total degree at most N.

    The count is the binomial coefficient C(N + M, N).  For M = 2 the
    values for N = 0..5 are 1, 3, 6, 10, 15, 21; e.g. VALUE(2,3) = 10
    counts 1, x, y, x^2, xy, y^2, x^3, x^2y, xy^2, y^3.

    Licensing: this code is distributed under the GNU LGPL license.
    Modified: 23 October 2014
    Author: John Burkardt

    Input, integer M, the spatial dimension.
    Input, integer N, the maximum degree.
    Output, integer VALUE, the number of monomials in M variables,
    of total degree N or less.
    """
    from i4_choose import i4_choose
    return i4_choose ( n + m, n )
def mono_upto_enum_test ( ):
    ## MONO_UPTO_ENUM_TEST tests MONO_UPTO_ENUM.
    #
    # Prints a table of mono_upto_enum(m, n) for m = 1..8 and n = 0..8.
    #
    # Licensing: this code is distributed under the GNU LGPL license.
    # Modified: 23 October 2014
    # Author: John Burkardt
    #
    # NOTE: Python 2 print-statement syntax; the trailing commas suppress
    # newlines so each table row stays on a single line.
    print ''
    print 'MONO_UPTO_ENUM_TEST'
    print ' MONO_UPTO_ENUM can enumerate the number of monomials'
    print ' in M variables, of total degree between 0 and N.'
    print '';
    print ' N:',
    for n in range ( 0, 9 ):
        print ' %4d' % ( n ),
    print ''
    print ' M +---------------------------------------------------------------'
    for m in range ( 1, 9 ):
        # One table row per dimension M, one column per max degree N.
        print ' %2d |' % ( m ),
        for n in range ( 0, 9 ):
            v = mono_upto_enum ( m, n )
            print ' %5d' % ( v ),
        print ''
    print ''
    print 'MONO_UPTO_ENUM_TEST'
    print ' Normal end of execution.'
    return
# Run the self-test, bracketed by timestamps, when executed as a script.
if ( __name__ == '__main__' ):
    from timestamp import timestamp
    timestamp ( )
    mono_upto_enum_test ( )
    timestamp ( )
|
[
"siplukabir@gmail.com"
] |
siplukabir@gmail.com
|
9957e7a14d7db921fad9a4cdccf9aeaacb5ce2e9
|
559f3dec0964d2e0f86c6c871371fe779cf3726c
|
/Matting/tools/predict.py
|
83cff09e4a9dc949138a30d8d2bcd28e38cacff4
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleSeg
|
319ab26665ea492527a1949671650135123ffc39
|
2c8c35a8949fef74599f5ec557d340a14415f20d
|
refs/heads/release/2.8
| 2023-08-31T09:08:06.724717
| 2023-08-18T01:59:56
| 2023-08-18T01:59:56
| 204,380,779
| 8,531
| 1,866
|
Apache-2.0
| 2023-09-12T02:30:42
| 2019-08-26T02:32:22
|
Python
|
UTF-8
|
Python
| false
| false
| 3,468
|
py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import paddle
import paddleseg
from paddleseg.cvlibs import manager
from paddleseg.utils import get_sys_env, logger
LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(LOCAL_PATH, '..'))
manager.BACKBONES._components_dict.clear()
manager.TRANSFORMS._components_dict.clear()
import ppmatting
from ppmatting.core import predict
from ppmatting.utils import get_image_list, Config, MatBuilder
def parse_args():
    """Build the CLI parser for matting prediction and parse sys.argv."""
    arg_parser = argparse.ArgumentParser(description='Model training')

    arg_parser.add_argument(
        '--config', dest='cfg', type=str, default=None,
        help='The config file.')
    arg_parser.add_argument(
        '--model_path', dest='model_path', type=str, default=None,
        help='The path of model for prediction')
    arg_parser.add_argument(
        '--image_path', dest='image_path', type=str, default=None,
        help='The path of image, it can be a file or a directory including images')
    arg_parser.add_argument(
        '--trimap_path', dest='trimap_path', type=str, default=None,
        help='The path of trimap, it can be a file or a directory including images. '
        'The image should be the same as image when it is a directory.')
    arg_parser.add_argument(
        '--save_dir', dest='save_dir', type=str, default='./output/results',
        help='The directory for saving the model snapshot')
    # NOTE(review): type=eval executes whatever string the user passes before
    # `choices` filters the result — worth replacing with a safe bool parser.
    arg_parser.add_argument(
        '--fg_estimate', type=eval, choices=[True, False], default=True,
        help='Whether to estimate foreground when predicting.')
    arg_parser.add_argument(
        '--device', dest='device', type=str, default='gpu',
        help='Set the device type, which may be GPU, CPU or XPU.')

    return arg_parser.parse_args()
def main(args):
    """Run matting prediction over the requested images.

    Requires --config; builds the model and validation transforms from the
    config file, then delegates the actual work to ppmatting.core.predict.
    """
    assert args.cfg is not None, \
        'No configuration file specified, please set --config'

    cfg = Config(args.cfg)
    builder = MatBuilder(cfg)

    # Log environment and configuration, then pick the execution device.
    paddleseg.utils.show_env_info()
    paddleseg.utils.show_cfg_info(cfg)
    paddleseg.utils.set_device(args.device)

    # Collect inputs before instantiating the model.
    image_list, image_dir = get_image_list(args.image_path)
    trimap_list = (None if args.trimap_path is None
                   else get_image_list(args.trimap_path)[0])
    logger.info('Number of predict images = {}'.format(len(image_list)))

    model = builder.model
    transforms = ppmatting.transforms.Compose(builder.val_transforms)

    predict(
        model,
        model_path=args.model_path,
        transforms=transforms,
        image_list=image_list,
        image_dir=image_dir,
        trimap_list=trimap_list,
        save_dir=args.save_dir,
        fg_estimate=args.fg_estimate)
# Script entry point: parse CLI arguments and run prediction.
if __name__ == '__main__':
    args = parse_args()
    main(args)
|
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
fa28d630bdb2be7b15926769daaf0cf6361c335e
|
8bf8ab29cb25de00c6a799d1f58610528b810592
|
/모의 SW 역량테스트/5648. [모의 SW 역량테스트] 원자 소멸 시뮬레이션/참고.py
|
29928d51765559d7b11a787d3a4eb2fcd27baddb
|
[] |
no_license
|
mgh3326/sw_expert_academy_algorithm
|
fa93fb68862cabeba8f9f5fff00a87f26a014afc
|
97cbd2a1845e42f142d189e9121c3cd5822fc8d8
|
refs/heads/master
| 2020-07-03T21:40:29.948233
| 2019-11-23T07:26:15
| 2019-11-23T07:26:15
| 202,058,567
| 0
| 0
| null | 2019-11-30T06:11:34
| 2019-08-13T03:40:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,743
|
py
|
# SW Expert Academy 5648: atom extinction simulation.
# Each atom is [x, y, direction, energy]; directions are the values 0..3.
T = int(input())
for tc in range(1, T + 1):
    N = int(input())
    atoms = [list(map(int, input().split())) for _ in range(N)]
    # Double all coordinates so collision points that would fall on a
    # half-step land on integers and collision times compare exactly.
    for atom in atoms:
        atom[0] *= 2
        atom[1] *= 2
    energy = 0
    # Each candidate is [time, i, j]: atoms i and j could meet at `time`.
    # The [0, 0, 0] sentinel keeps the pairwise scan below from running
    # off the front of the sorted list.
    candidates = [[0, 0, 0]]
    for i in range(N - 1):
        # NOTE(review): range(i, N) also pairs i with itself (dx == dy == 0,
        # harmless since no branch matches) — presumably meant range(i + 1, N).
        for j in range(i, N):
            dx = atoms[i][0] - atoms[j][0]
            dy = atoms[i][1] - atoms[j][1]
            v1 = atoms[i][2]  # direction of atom i
            v2 = atoms[j][2]  # direction of atom j
            # From the branch logic: 0/1 are opposite senses along y and
            # 2/3 opposite senses along x; only head-on meetings collide.
            if dy == 0:
                # Same row: meet only if approaching each other along x.
                if v1 == 2 and v2 == 3 and dx > 0:
                    candidates.append([dx // 2, i, j])
                elif v1 == 3 and v2 == 2 and dx < 0:
                    candidates.append([-dx // 2, i, j])
            elif dx == 0:
                # Same column: meet only if approaching each other along y.
                if v1 == 0 and v2 == 1 and dy < 0:
                    candidates.append([-dy // 2, i, j])
                elif v1 == 1 and v2 == 0 and dy > 0:
                    candidates.append([dy // 2, i, j])
            elif dx == dy:
                # On the main diagonal: perpendicular paths cross once.
                if dx < 0 and v1 == 3 and v2 == 1:
                    candidates.append([-dx, i, j])
                elif dx < 0 and v1 == 0 and v2 == 2:
                    candidates.append([-dx, i, j])
                elif dx > 0 and v1 == 1 and v2 == 3:
                    candidates.append([dx, i, j])
                elif dx > 0 and v1 == 2 and v2 == 0:
                    candidates.append([dx, i, j])
            elif dx == -dy:
                # The anti-diagonal, mirrored direction pairs.
                if dx < 0 and v1 == 3 and v2 == 0:
                    candidates.append([-dx, i, j])
                elif dx < 0 and v1 == 1 and v2 == 2:
                    candidates.append([-dx, i, j])
                elif dx > 0 and v1 == 0 and v2 == 3:
                    candidates.append([dx, i, j])
                elif dx > 0 and v1 == 2 and v2 == 1:
                    candidates.append([dx, i, j])
    visited = [0] * N  # 1 once an atom has been destroyed
    candidates.sort()  # process potential collisions in time order
    collision = []     # atoms tentatively colliding at the current time
    for i in range(len(candidates) - 1):
        if candidates[i][0] != candidates[i + 1][0]:
            # Time advanced: commit all collisions gathered at the previous
            # time — destroy those atoms and collect their energy.
            while collision:
                temp = collision.pop()
                if not visited[temp]:
                    visited[temp] = 1
                    energy += atoms[temp][3]
            if not visited[candidates[i + 1][1]] and not visited[candidates[i + 1][2]]:
                collision.append(candidates[i + 1][1])
                collision.append(candidates[i + 1][2])
        else:
            # Same collision time: queue the pair only if both still exist.
            if not visited[candidates[i + 1][1]] and not visited[candidates[i + 1][2]]:
                collision.append(candidates[i + 1][1])
                collision.append(candidates[i + 1][2])
    # Flush collisions queued for the final time value.
    while collision:
        temp = collision.pop()
        if not visited[temp]:
            visited[temp] = 1
            energy += atoms[temp][3]
    print('#{} {}'.format(tc, energy))
|
[
"mgh3326@naver.com"
] |
mgh3326@naver.com
|
b28477b4e29bb41a9e9b42c78ded882797d1b48b
|
7ccdcd8e7885603141c55f4c065373417f4aa118
|
/list_ele.py
|
db3665532a1696e4d0b1c27fe0e4ee5bad465db8
|
[] |
no_license
|
Icode4passion/practicepythonprogams
|
f8330c499c660d4e46ab54d7ed44d62625f250db
|
8936166d6419df5deef290a5723a30661ea064a2
|
refs/heads/master
| 2020-03-19T10:02:13.992465
| 2018-06-06T14:26:27
| 2018-06-06T14:26:27
| 136,337,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
# Write a Python program to display the first and last colors from the following list.
color_list = ['red', 'green', 'white', 'black']

# The original loop printed every color; the exercise asks only for the
# first and last entries, reached directly with indexes 0 and -1.
print(color_list[0], color_list[-1])
|
[
"yogeerama@gmail.com"
] |
yogeerama@gmail.com
|
d95ae65b44a40731a4a2e3009dc55d90af9ea531
|
58ffe83eb9828668c13242c6f98238f08655f561
|
/app/notebooks/problang/custom_mlp.py
|
4ac8b927457fd104f247d84542cc19d8682ce335
|
[
"Apache-2.0"
] |
permissive
|
DanFu09/esper
|
f9dcc47cd5677dee8dffb1e066d69332471a0d6c
|
ccc5547de3637728b8aaab059b6781baebc269ec
|
refs/heads/master
| 2020-04-04T21:31:43.549572
| 2020-01-16T01:14:13
| 2020-01-16T01:14:13
| 156,289,533
| 4
| 0
|
Apache-2.0
| 2018-12-14T03:01:02
| 2018-11-05T22:05:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,720
|
py
|
from inspect import isclass
import torch
import torch.nn as nn
from pyro.distributions.util import broadcast_shape
class Exp(nn.Module):
    """
    a custom module wrapping element-wise exponentiation so it can be
    placed inside an nn.Sequential pipeline
    """
    def __init__(self):
        super(Exp, self).__init__()

    def forward(self, val):
        # element-wise e**val
        return val.exp()
class ConcatModule(nn.Module):
    """
    a custom module that concatenates its input tensors along the last
    dimension, optionally broadcasting their leading (batch) shapes first
    """
    def __init__(self, allow_broadcast=False):
        self.allow_broadcast = allow_broadcast
        super(ConcatModule, self).__init__()

    def forward(self, *input_args):
        # unwrap a single positional argument, whatever its type
        if len(input_args) == 1:
            input_args = input_args[0]
        # a lone tensor needs no concatenation — pass it straight through
        if torch.is_tensor(input_args):
            return input_args
        if self.allow_broadcast:
            # expand every tensor to the common leading shape while keeping
            # each one's own trailing (feature) dimension
            shape = broadcast_shape(*[s.shape[:-1] for s in input_args]) + (-1,)
            input_args = [s.expand(shape) for s in input_args]
        return torch.cat(input_args, dim=-1)
class ListOutModule(nn.ModuleList):
    """
    a custom module that applies every contained module to the same
    arguments and returns the list of their outputs
    """
    def __init__(self, modules):
        super(ListOutModule, self).__init__(modules)

    def forward(self, *args, **kwargs):
        # fan the identical inputs out across all submodules
        outputs = []
        for submodule in self:
            outputs.append(submodule.forward(*args, **kwargs))
        return outputs
def call_nn_op(op):
    """
    a helper function that adds appropriate parameters when calling
    an nn module representing an operation like Softmax

    :param op: the nn.Module operation to instantiate
    :return: instantiation of the op module with appropriate parameters
    """
    # softmax-style ops need an explicit dim; everything else takes no args
    needs_dim = op in (nn.Softmax, nn.LogSoftmax)
    return op(dim=1) if needs_dim else op()
class MLP(nn.Module):
    """
    A configurable multi-layer perceptron assembled into one nn.Sequential.

    mlp_sizes: [input, hidden..., output]; input and output may each be an
    int or a list/tuple of ints (multiple inputs are concatenated via
    ConcatModule, multiple outputs yield a list via ListOutModule).
    activation: module class applied after every hidden linear layer.
    output_activation: optional activation for the output layer(s) — a class,
    an instance, or a list/tuple of them (one per output).
    post_layer_fct / post_act_fct: hooks called after each hidden linear /
    activation; a returned module is appended (e.g. batch norm).
    allow_broadcast: forwarded to ConcatModule to broadcast multiple inputs.
    use_cuda: wrap hidden linear layers in nn.DataParallel.
    """
    def __init__(self, mlp_sizes, activation=nn.ReLU, output_activation=None,
                 post_layer_fct=lambda layer_ix, total_layers, layer: None,
                 post_act_fct=lambda layer_ix, total_layers, layer: None,
                 allow_broadcast=False, use_cuda=False):
        # init the module object
        super(MLP, self).__init__()
        assert len(mlp_sizes) >= 2, "Must have input and output layer sizes defined"
        # get our inputs, outputs, and hidden
        input_size, hidden_sizes, output_size = mlp_sizes[0], mlp_sizes[1:-1], mlp_sizes[-1]
        # assume int or list
        assert isinstance(input_size, (int, list, tuple)), "input_size must be int, list, tuple"
        # everything in MLP will be concatted if it's multiple arguments
        last_layer_size = input_size if type(input_size) == int else sum(input_size)
        # everything sent in will be concatted together by default
        all_modules = [ConcatModule(allow_broadcast)]
        # loop over the hidden layer sizes, building linear + activation pairs
        for layer_ix, layer_size in enumerate(hidden_sizes):
            assert type(layer_size) == int, "Hidden layer sizes must be ints"
            # get our nn layer module (in this case nn.Linear by default)
            cur_linear_layer = nn.Linear(last_layer_size, layer_size)
            # for numerical stability -- initialize the layer properly
            cur_linear_layer.weight.data.normal_(0, 0.001)
            cur_linear_layer.bias.data.normal_(0, 0.001)
            # use GPUs to share data during training (if available)
            if use_cuda:
                cur_linear_layer = nn.DataParallel(cur_linear_layer)
            # add our linear layer
            all_modules.append(cur_linear_layer)
            # handle post_linear hook
            post_linear = post_layer_fct(layer_ix + 1, len(hidden_sizes), all_modules[-1])
            # if we send something back, add it to sequential
            # here we could return a batch norm for example
            if post_linear is not None:
                all_modules.append(post_linear)
            # handle activation (assumed no params -- deal with that later)
            all_modules.append(activation())
            # now handle the after-activation hook
            post_activation = post_act_fct(layer_ix + 1, len(hidden_sizes), all_modules[-1])
            # handle post_activation if not null
            # could add batch norm for example
            if post_activation is not None:
                all_modules.append(post_activation)
            # save the layer size we just created
            last_layer_size = layer_size
        # now we have all of our hidden layers
        # we handle outputs
        assert isinstance(output_size, (int, list, tuple)), "output_size must be int, list, tuple"
        if type(output_size) == int:
            # single output head: one linear (+ optional activation)
            all_modules.append(nn.Linear(last_layer_size, output_size))
            if output_activation is not None:
                all_modules.append(call_nn_op(output_activation)
                                   if isclass(output_activation) else output_activation)
        else:
            # we're going to have a bunch of separate layers we can spit out (a tuple of outputs)
            out_layers = []
            # multiple outputs? handle separately
            for out_ix, out_size in enumerate(output_size):
                # for a single output object, we create a linear layer and some weights
                split_layer = []
                # we have an activation function
                split_layer.append(nn.Linear(last_layer_size, out_size))
                # then we get our output activation (either we repeat all or we index into a same sized array)
                act_out_fct = output_activation if not isinstance(output_activation, (list, tuple)) \
                    else output_activation[out_ix]
                if(act_out_fct):
                    # we check if it's a class. if so, instantiate the object
                    # otherwise, use the object directly (e.g. pre-instaniated)
                    split_layer.append(call_nn_op(act_out_fct)
                                       if isclass(act_out_fct) else act_out_fct)
                # our outputs is just a sequential of the two
                out_layers.append(nn.Sequential(*split_layer))
            all_modules.append(ListOutModule(out_layers))
        # now we have all of our modules, we're ready to build our sequential!
        # process mlps in order, pretty standard here
        self.sequential_mlp = nn.Sequential(*all_modules)

    # pass through our sequential for the output!
    def forward(self, *args, **kwargs):
        """Apply the assembled sequential network to the inputs."""
        return self.sequential_mlp.forward(*args, **kwargs)
|
[
"wcrichto@cs.stanford.edu"
] |
wcrichto@cs.stanford.edu
|
61fc96cbdbdbae2bdd17f620cf967d0f0f88ef4d
|
b2545b8c1a7ed01216ae090a634ddf19099f175a
|
/python/coding-challenges/cc-005-create-phonebook/phone_book_class.py
|
ffe561d02e48a6472d28362db9e9b284221663c4
|
[] |
no_license
|
kopuskopecik/my-aws-devops-projects
|
50f222986150ccd20d724ccaec9ec637aaf08a3c
|
4a25f8f72c262c933ada26c0ac0476f4ef68fbcf
|
refs/heads/master
| 2023-01-10T00:59:39.718578
| 2020-11-03T14:58:48
| 2020-11-03T14:58:48
| 290,531,908
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,056
|
py
|
class PhoneBook:
    """Interactive console phonebook backed by a class-level dict.

    All instances share `phone_book`; instantiating the class immediately
    enters the menu loop via start().
    """

    phone_book = {}  # class attribute — shared by every instance
    liste = []   # unused scratch list ("liste" is Turkish for "list")
    demet = ()   # unused scratch tuple ("demet" is Turkish for "tuple")
    print("Sınıf niteliği")  # runs once, when the class body is executed

    def __init__(self):
        # Demo instance attributes; only the start() call matters functionally.
        print("init çalıştı")
        self.elma = ""
        self.liste2 = []
        self.demet2 = ()
        self.start()  # immediately enter the interactive menu loop

    def start(self):
        """Show the menu and dispatch; recurses until option 4 is chosen."""
        #self.book = {}
        print("""
        Welcome to the phonebook application
        1. Find phone number
        2. Insert a phone number
        3. Delete a person from the phonebook
        4. Terminate
        """)
        entered_number = input("Select operation on Phonebook App (1/2/3) :")
        if entered_number == "1":
            self.find_number()
            self.start()
        elif entered_number == "2":
            self.insert_number()
            self.start()
        elif entered_number == "3":
            self.delete_number()
            self.start()
        elif entered_number == "4":
            print("Exiting Phonebook")
        else:
            print("Please enter a valid number")
            self.start()

    def insert_number(self):
        """Prompt for a name and a digits-only number; store the entry."""
        name = input("Insert name of the person : ")
        number = input("Insert phone number of the person: ")
        # Require a non-empty name and an all-digit number string.
        if name and number.isdigit():
            self.phone_book[name] = number
            # phone_book = { "ali": 123456}
            print(self.phone_book)
        else:
            print("Invalid input format, cancelling operation ...")
            print(self.phone_book)

    def find_number(self):
        """Look up a name; print its number or a not-found message."""
        name = input("Find the phone number of : ")
        if name:
            value = self.phone_book.get(name, f"Couldn't find phone number of {name}")
            print(value)

    def delete_number(self):
        """Remove a name from the phonebook, reporting whether it existed."""
        name = input("Whom to delete from phonebook : ")
        if name:
            # pop with "" default: an empty string is falsy -> "not in" branch.
            value = self.phone_book.pop(name, "")
            if value:
                print(f"{name} is deleted from the phonebook")
            else:
                print(f"{name} is not in the phonebook")


#ahmet = PhoneBook()
|
[
"kopuskopecik@gmail.com"
] |
kopuskopecik@gmail.com
|
c436b52eec4081f8bf526d9d4795f98aa8cf4ae2
|
132826d3f9d0a68d70ec9ba411846bbf3695140d
|
/scripts/sklearn_classifier.py
|
53ff3015511ebcee5e6fdc9dbd968284bcb8293c
|
[
"MIT"
] |
permissive
|
deep-spin/spec
|
c7f9a4eae08ec7d6a422b7d9f21e52980c836312
|
23db7a559e09ff7f63ede06b04cad226432b90db
|
refs/heads/master
| 2023-03-03T22:53:49.647064
| 2021-02-18T05:01:35
| 2021-02-18T05:01:35
| 312,660,674
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,704
|
py
|
import argparse
import numpy as np
from sklearn.feature_extraction.text import (TfidfVectorizer, CountVectorizer,
HashingVectorizer)
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.metrics import accuracy_score, matthews_corrcoef, \
precision_recall_fscore_support
from spec.dataset.corpora import available_corpora
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="sklearn classifier")
    parser.add_argument("--corpus",
                        type=str,
                        choices=list(available_corpora.keys()),
                        default='sst',
                        help="corpus type",
                        required=True)
    parser.add_argument("--train-path",
                        type=str,
                        default=None,
                        help="path to the train corpus",
                        required=True)
    parser.add_argument("--test-path",
                        type=str,
                        default=None,
                        help="path to the test corpus",
                        required=True)
    parser.add_argument("--feature",
                        type=str,
                        default="bow",
                        choices=['bow', 'tfidf', 'hash'],
                        help="features format")
    args = parser.parse_args()

    seed = 42
    np.random.seed(seed)  # was a literal 42; use the named constant

    print('Reading train data...')
    corpus_cls = available_corpora[args.corpus]
    fields_tuples = corpus_cls.create_fields_tuples()
    fields_dict = dict(fields_tuples)
    corpus = corpus_cls(fields_tuples, lazy=True)
    examples = corpus.read(args.train_path)
    x_train, y_train = [], []
    for ex in examples:
        y_train.extend(ex.target)
        text = ' '.join(ex.words)
        # SNLI examples pair a premise with a hypothesis; use both.
        if args.corpus == 'snli':
            text = text + ' ' + ' '.join(ex.words_hyp)
        x_train.append(text)
    corpus.close()
    y_train = np.array(y_train)

    print('Vectorizing train data...')
    if args.feature == 'bow':
        vectorizer = CountVectorizer(lowercase=False)
        features_train = vectorizer.fit_transform(x_train)
    elif args.feature == 'tfidf':
        # BUG FIX: this branch previously re-tested `args.feature == 'bow'`,
        # so TfidfVectorizer was unreachable and `--feature tfidf` silently
        # fell through to the hashing vectorizer below.
        vectorizer = TfidfVectorizer(lowercase=False)
        features_train = vectorizer.fit_transform(x_train)
    else:
        vectorizer = HashingVectorizer(lowercase=False, n_features=2000)
        features_train = vectorizer.fit_transform(x_train)

    print('Training...')
    # (A LogisticRegression setup was tried here previously; SGD with hinge
    # loss — effectively a linear SVM — was kept instead.)
    classifier_linear = SGDClassifier(
        max_iter=50,
        alpha=0.00001,  # 0.0001
        eta0=0.001,  # not used for learning_rate=`optimal`
        learning_rate='constant',
        loss='hinge',
        penalty='l2',
        shuffle=True,
        random_state=seed,
        n_jobs=8,
        verbose=1
    )
    classifier_linear.fit(features_train, y_train)

    print('Reading test data...')
    corpus = corpus_cls(fields_tuples, lazy=True)
    examples = corpus.read(args.test_path)
    x_test, y_test = [], []
    for ex in examples:
        y_test.extend(ex.target)
        text = ' '.join(ex.words)
        if args.corpus == 'snli':
            text = text + ' ' + ' '.join(ex.words_hyp)
        x_test.append(text)
    corpus.close()
    y_test = np.array(y_test)

    print('Vectorizing test data...')
    # Reuse the vectorizer fitted on train data: transform only, no refit.
    features_test = vectorizer.transform(x_test)

    print('Predicting...')
    y_train_pred = classifier_linear.predict(features_train)
    y_test_pred = classifier_linear.predict(features_test)

    print('Train')
    print('-----')
    acc = accuracy_score(y_train, y_train_pred)
    mcc = matthews_corrcoef(y_train, y_train_pred)
    prec, rec, f1, _ = precision_recall_fscore_support(y_train, y_train_pred,
                                                       average='macro')
    print('Acc: {:.4f}'.format(acc))
    print('Prec: {:.4f}'.format(prec))
    print('Rec: {:.4f}'.format(rec))
    print('F1: {:.4f}'.format(f1))
    print('MCC: {:.4f}'.format(mcc))

    print('Test')
    print('-----')
    acc = accuracy_score(y_test, y_test_pred)
    mcc = matthews_corrcoef(y_test, y_test_pred)
    prec, rec, f1, _ = precision_recall_fscore_support(y_test, y_test_pred,
                                                       average='macro')
    print('Acc: {:.4f}'.format(acc))
    print('Prec: {:.4f}'.format(prec))
    print('Rec: {:.4f}'.format(rec))
    print('F1: {:.4f}'.format(f1))
    print('MCC: {:.4f}'.format(mcc))
|
[
"marcosvtreviso@gmail.com"
] |
marcosvtreviso@gmail.com
|
9db96abccd5b62b41daa94a3a729d2e868c9c8b7
|
3efca607aefbd6cf558517bae689ccdacb7b383e
|
/contrib/devtools/symbol-check.py
|
5fdf31de16afe2e2d14e6880f9d5cb158835dd4b
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
MicroBitcoinOrg/MicroBitcoin
|
f761b2ff04bdcb650d7c0ddbef431ef95cd69541
|
0119e8eff44ec4d94313eaa30022a97692b71143
|
refs/heads/snapshot
| 2022-12-27T10:04:21.040945
| 2021-02-09T05:51:45
| 2021-02-09T05:51:45
| 132,959,214
| 21
| 33
|
MIT
| 2020-06-12T04:38:45
| 2018-05-10T22:07:51
|
C++
|
UTF-8
|
Python
| false
| false
| 6,136
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the (Linux) executables produced by gitian only contain
allowed gcc, glibc and libstdc++ version symbols. This makes sure they are
still compatible with the minimum supported Linux distribution versions.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py
'''
import subprocess
import re
import sys
import os
# Debian 6.0.9 (Squeeze) has:
#
# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.11.3 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libc6)
# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)
#
# Ubuntu 10.04.4 (Lucid Lynx) has:
#
# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid§ion=all)
# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid§ion=all)
# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid§ion=all&arch=any&keywords=libstdc%2B%2B&searchon=names)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.4.0: GCC_4.4.0
# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3
# (glibc) GLIBC_2_11
#
MAX_VERSIONS = {
'GCC': (4,4,0),
'CXXABI': (1,3,3),
'GLIBCXX': (3,4,13),
'GLIBC': (2,11)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
'_edata', '_end', '_init', '__bss_start', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr'
}
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
# Allowed NEEDED libraries
ALLOWED_LIBRARIES = {
# microd and micro-qt
'libgcc_s.so.1', # GCC base support
'libc.so.6', # C library
'libpthread.so.0', # threading
'libanl.so.1', # DNS resolve
'libm.so.6', # math library
'librt.so.1', # real-time (clock)
'ld-linux-x86-64.so.2', # 64-bit dynamic linker
'ld-linux.so.2', # 32-bit dynamic linker
# micro-qt only
'libX11-xcb.so.1', # part of X11
'libX11.so.6', # part of X11
'libxcb.so.1', # part of X11
'libfontconfig.so.1', # font support
'libfreetype.so.6', # font parsing
'libdl.so.2' # programming interface to dynamic linker
}
class CPPFilt(object):
    '''
    Demangle C++ symbol names.

    Keeps one long-lived pipe to the 'c++filt' command and feeds it a
    single mangled name per line.
    '''
    def __init__(self):
        self.proc = subprocess.Popen(
            CPPFILT_CMD,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            universal_newlines=True)

    def __call__(self, mangled):
        # One request/response round trip per symbol.
        stdin, stdout = self.proc.stdin, self.proc.stdout
        stdin.write(mangled + '\n')
        stdin.flush()
        return stdout.readline().rstrip()

    def close(self):
        # Close both pipe ends before reaping the child process.
        for stream in (self.proc.stdin, self.proc.stdout):
            stream.close()
        self.proc.wait()
def read_symbols(executable, imports=True):
    '''
    Parse an ELF executable and return a list of (symbol,version) tuples
    for dynamic, imported symbols.

    With imports=False, returns the exported dynamic symbols instead.
    Raises IOError if readelf exits with a nonzero status.
    '''
    # -W disables readelf's column truncation so field 7 (the symbol
    # name, possibly as NAME@VERSION) is never cut short.
    p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
    syms = []
    for line in stdout.splitlines():
        line = line.split()
        # Symbol-table rows start with an index token like "12:".
        if len(line)>7 and re.match('[0-9]+:$', line[0]):
            (sym, _, version) = line[7].partition('@')
            # Field 6 (Ndx) is UND for symbols imported from elsewhere.
            is_import = line[6] == 'UND'
            # NAME@@VERSION marks the default version; drop the extra '@'
            # left over after partitioning on the first one.
            if version.startswith('@'):
                version = version[1:]
            if is_import == imports:
                syms.append((sym, version))
    return syms
def check_version(max_versions, version):
    """Return True iff *version* ('LIB_x.y' or bare 'LIB') is at or below the
    maximum allowed version recorded in *max_versions* for that library."""
    lib, sep, number = version.rpartition('_')
    if not sep:
        # No underscore: the whole token is the library name, at version 0.
        lib, number = version, '0'
    parsed = tuple(int(piece) for piece in number.split('.'))
    if lib not in max_versions:
        return False
    # Tuple comparison gives the natural lexicographic version ordering.
    return parsed <= max_versions[lib]
def read_libraries(filename):
    '''
    Return the DT_NEEDED shared-library names of an ELF file.

    Runs `readelf -d` and collects every "(NEEDED) Shared library: [...]"
    entry from the dynamic section.

    Raises:
        IOError: if readelf exits non-zero.
        ValueError: if a (NEEDED) line cannot be parsed.
    '''
    p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    libraries = []
    for line in stdout.splitlines():
        tokens = line.split()
        if len(tokens)>2 and tokens[1] == '(NEEDED)':
            # Raw string: '\[' is an invalid escape sequence in a plain
            # string literal and warns (eventually errors) on modern Python.
            match = re.match(r'^Shared library: \[(.*)\]$', ' '.join(tokens[2:]))
            if match:
                libraries.append(match.group(1))
            else:
                raise ValueError('Unparseable (NEEDED) specification')
    return libraries
if __name__ == '__main__':
    # Lint every binary given on the command line against the symbol-version
    # and library allowlists; exit non-zero if any check fails.
    # NOTE: MAX_VERSIONS is defined earlier in this file (outside this view).
    cppfilt = CPPFilt()
    retval = 0
    for filename in sys.argv[1:]:
        # Check imported symbols
        for sym,version in read_symbols(filename, True):
            if version and not check_version(MAX_VERSIONS, version):
                print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym), version))
                retval = 1
        # Check exported symbols
        for sym,version in read_symbols(filename, False):
            if sym in IGNORE_EXPORTS:
                continue
            print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym)))
            retval = 1
        # Check dependency libraries
        for library_name in read_libraries(filename):
            if library_name not in ALLOWED_LIBRARIES:
                print('%s: NEEDED library %s is not allowed' % (filename, library_name))
                retval = 1
    sys.exit(retval)
|
[
"iamstenman@protonmail.com"
] |
iamstenman@protonmail.com
|
947a32e74b228a2939732969ff1b57e7dc5e68cc
|
364edc98a05ddecf5ad7b7614d2a35a95d19705b
|
/os_bitcoin/Bithumb_20170412_RESTFulAPI-python3/xcoin_api_client.py
|
bccc8fe928a3987f5073f8daa14df026d9ad127e
|
[] |
no_license
|
as950118/outsource
|
f7f10b5ba62487da8ccddd894aaedc8af48e9d50
|
05a9f654aa222f4da4ce9c4902dde094c9d158d0
|
refs/heads/master
| 2022-12-21T00:18:45.405708
| 2020-02-03T15:53:16
| 2020-02-03T15:53:16
| 193,331,277
| 0
| 0
| null | 2022-12-06T22:38:00
| 2019-06-23T09:50:33
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,400
|
py
|
#
# XCoin API-call related functions
#
# @author btckorea
# @date 2017-04-12
#
# Compatible with python3 version.
import sys
import time
import math
import base64
import hmac, hashlib
import urllib.parse
import pycurl
import json
import certifi
class XCoinAPI:
    """Minimal Bithumb REST client: builds an HMAC-SHA512 signature over
    endpoint + urlencoded params + nonce and POSTs the request with pycurl.
    """
    # Class-level defaults, overwritten per instance in __init__.
    # NOTE(review): hard-coded credentials should not live in source control;
    # rotate these keys and load them from the environment instead.
    api_url = "https://api.bithumb.com";
    api_key = "b023e33a524e648f08431825769db6d9";
    api_secret = "88ea704e6800aa9e77e2edb8385a32a7";
    def __init__(self, api_key, api_secret):
        self.api_key = api_key;
        self.api_secret = api_secret;
    def body_callback(self, buf):
        # pycurl WRITEFUNCTION target: stashes the raw response body.
        self.contents = buf;
    def microtime(self, get_as_float = False):
        # Either epoch seconds as a float, or a "fraction integer" string
        # (math.modf splits time.time() into fractional and whole parts).
        if get_as_float:
            return time.time()
        else:
            return '%f %d' % math.modf(time.time())
    def usecTime(self) :
        # Millisecond-resolution nonce: whole seconds followed by the first
        # three fractional digits (mt_array[0] looks like "0.123456").
        mt = self.microtime(False)
        mt_array = mt.split(" ")[:2];
        return mt_array[1] + mt_array[0][2:5];
    def xcoinApiCall(self, endpoint, rgParams):
        """POST *rgParams* to *endpoint* with Api-Key/Api-Sign/Api-Nonce
        headers and return the decoded JSON response body."""
        # 1. Api-Sign and Api-Nonce information generation.
        # 2. Request related information from the Bithumb API server.
        #
        # - nonce: it is an arbitrary number that may only be used once.
        # - api_sign: API signature information created in various combinations values.
        endpoint_item_array = {
            "endpoint" : endpoint
        };
        uri_array = dict(endpoint_item_array, **rgParams); # Concatenate the two arrays.
        str_data = urllib.parse.urlencode(uri_array);
        nonce = self.usecTime();
        # Signing payload: endpoint, params and nonce joined by NUL bytes.
        data = endpoint + chr(0) + str_data + chr(0) + nonce;
        utf8_data = data.encode('utf-8');
        key = self.api_secret;
        utf8_key = key.encode('utf-8');
        # HMAC-SHA512, hex-encoded, then base64-encoded (Bithumb's scheme).
        h = hmac.new(bytes(utf8_key), utf8_data, hashlib.sha512);
        hex_output = h.hexdigest();
        utf8_hex_output = hex_output.encode('utf-8');
        api_sign = base64.b64encode(utf8_hex_output);
        utf8_api_sign = api_sign.decode('utf-8');
        curl_handle = pycurl.Curl();
        curl_handle.setopt(pycurl.POST, 1);
        #curl_handle.setopt(pycurl.VERBOSE, 1); # vervose mode :: 1 => True, 0 => False
        curl_handle.setopt(pycurl.POSTFIELDS, str_data);
        url = self.api_url + endpoint;
        curl_handle.setopt(curl_handle.URL, url);
        curl_handle.setopt(curl_handle.HTTPHEADER, ['Api-Key: ' + self.api_key, 'Api-Sign: ' + utf8_api_sign, 'Api-Nonce: ' + nonce]);
        # Response body lands in self.contents via body_callback.
        curl_handle.setopt(curl_handle.WRITEFUNCTION, self.body_callback);
        curl_handle.perform();
        #response_code = curl_handle.getinfo(pycurl.RESPONSE_CODE); # Get http response status code.
        curl_handle.close();
        return (json.loads(self.contents));
|
[
"na_qa@icloud.com"
] |
na_qa@icloud.com
|
9bace68bb00dea446e490b5150b8ea921d0e8499
|
94a4388cee6dfeaa1674fba20423e8a3f8f6dd42
|
/backend/users/migrations/0003_user_group.py
|
33f315c63bb0addf839fdc3d8a692489f8be205a
|
[] |
no_license
|
crowdbotics-apps/game-mdoe-18873
|
0dc5c1e1827f382d5a84847697d0b1b05191066d
|
8bcbe6c9b116fa1294b8104018c9cd36b1536c13
|
refs/heads/master
| 2022-11-15T17:29:36.126851
| 2020-07-13T22:23:28
| 2020-07-13T22:23:28
| 279,423,144
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
# Generated by Django 2.2.14 on 2020-07-13 22:23
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.14: adds the many-to-many `group`
    # relation from User to course.Group (reverse accessor `user_group`).

    dependencies = [
        ('course', '0001_initial'),
        ('users', '0002_auto_20200713_2221'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='group',
            field=models.ManyToManyField(blank=True, related_name='user_group', to='course.Group'),
        ),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
3381f548bbfad75fbeb08e50c0769df83dd078b6
|
a38b27d0eac787ca453c0ef06f1a819305b2f648
|
/varappx/main/view_tools/main_views_tools.py
|
6368bb5d40f33d9ebd93088403443fe118f06682
|
[
"MIT"
] |
permissive
|
444thLiao/VarappX-flask
|
9417b84167c31276c9342b23ab56cbcc4b71fa1f
|
9a59f4eb5897a1ecf90978e9f357954cdd7d410a
|
refs/heads/master
| 2021-01-19T05:00:05.575501
| 2017-05-16T07:04:59
| 2017-05-16T07:04:59
| 87,406,853
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,465
|
py
|
# import logging
# import re
# from varapp.samples.samples_factory import *
#
#
# def samples_selection_from_request(request, db, from_ped=True):
# """Parse a GET request to make the samples groups and return a SamplesSelection.
# :param from_ped: read groups info based on 'phenotype' attribute in the Samples table.
# """
# groups = {}
# sample_requests = request.GET.getlist('samples',[])
# samples = samples_selection_factory(db).sort('sample_id') # a SamplesCollection
# if not sample_requests:
# if from_ped:
# groups = fetch_ped_info_groups(samples)
# else:
# groups = {}
# return SamplesSelection(samples, groups, db=db)
# elif all(x == '' for x in sample_requests):
# return SamplesSelection(samples, {}, db=db)
# else:
# for sr in sample_requests:
# m = re.match(r"(\S+?)=(\S+)", sr)
# if not m:
# raise ValueError("Wrong samples request (expected '<group>=<samples list>', got '{}').".format(sr))
# gname,snames = m.groups()
# snames = snames.split(',')
# group = samples.get_list(snames)
# if len(group) != len(snames):
# raise ValueError("Unknown samples: {}".format(
# set(snames) - set([s.name for s in group])))
# groups[gname] = [s.name for s in group]
# return SamplesSelection(samples, groups, db=db)
|
[
"l0404th@gmail.com"
] |
l0404th@gmail.com
|
b675b567f1ca8b19f71f13a481b2c1f48fed4a7a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02991/s309121406.py
|
e6af09084c5db2471cde62c6c8b5d88d68d8a2e5
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
import sys
input = sys.stdin.readline
# Shortest path from S to T using an exact multiple of 3 edges
# (layered-graph trick: each vertex gets three copies, one per
# path-length-mod-3 value).
N,M = map(int,input().split())
UV = [tuple(map(int,input().split())) for i in range(M)]
S,T = map(int,input().split())
S,T = S-1,T-1
es = [[] for _ in range(N*3)]
for u,v in UV:
    u,v = u-1,v-1
    # Every original edge advances the mod-3 layer: 0 -> 1 -> 2 -> 0.
    es[u].append(v+N)
    es[u+N].append(v+N+N)
    es[u+N+N].append(v)
from collections import deque
q = deque([S])
INF = float('inf')
dist = [INF] * (N*3)
dist[S] = 0
# Plain BFS: all layered edges have unit weight.
while q:
    v = q.popleft()
    for to in es[v]:
        if dist[to] <= dist[v]+1: continue
        dist[to] = dist[v]+1
        q.append(to)
        if to==T:
            # T is a layer-0 index, so dist[to] is divisible by 3 by
            # construction; each group of 3 layered edges is one answer step.
            print(dist[to] // 3)
            exit()
print(-1)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
deb97f79158dad95e29030e083d12abf5eb6d1d4
|
1eaf69357dfca66e4dc6651da2b93db8665164f9
|
/2022/06/test_code.py
|
11832d889e5e26ee9deaddc92f34934c444b4cd3
|
[
"MIT"
] |
permissive
|
Akumatic/Advent-of-Code
|
deb89b9a5302999ffb344766bb3f1b0dd5272445
|
5377d8d653092246a7a35c7fa2a3e22cc74ebb0b
|
refs/heads/master
| 2022-12-21T20:32:05.978675
| 2022-12-16T14:41:23
| 2022-12-16T14:41:23
| 221,700,755
| 24
| 13
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
# SPDX-License-Identifier: MIT
# Copyright (c) 2022 Akumatic
from code import part1, part2
def test():
    """Check part1/part2 against the Advent of Code day 6 worked examples."""
    cases = {
        "mjqjpqmgbljsphdztnvjfqwrcgsmlb": (7, 19),
        "bvwbjplbgvbhsrlpgdmjqwftvncz": (5, 23),
        "nppdvjthqldpwncqszvftbrmjlhg": (6, 23),
        "nznrnfrfntjfmvfwmzdfjlvtqnbhcprsg": (10, 29),
        "zcfzfwzzqfrljwzlrfnpqdbhtmscgvjw": (11, 26),
    }
    for stream, (expected_p1, _) in cases.items():
        assert part1(stream) == expected_p1
    print("Passed Part 1")
    for stream, (_, expected_p2) in cases.items():
        assert part2(stream) == expected_p2
    print("Passed Part 2")
if __name__ == "__main__":
    test()
|
[
"ugp@hotmail.de"
] |
ugp@hotmail.de
|
0ee4b1f629f10089a33bd6119d6964d0b041ed3d
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-alidns/aliyunsdkalidns/request/v20150109/DescribeDohSubDomainStatisticsSummaryRequest.py
|
f21d8c0c0b01b28d6156cd3ef3b28b308f708744
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821
| 2023-08-04T06:00:29
| 2023-08-04T06:00:29
| 39,558,861
| 1,080
| 721
|
NOASSERTION
| 2023-09-14T08:51:06
| 2015-07-23T09:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,588
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkalidns.endpoint import endpoint_data
class DescribeDohSubDomainStatisticsSummaryRequest(RpcRequest):
    """Auto-generated request wrapper for the Alidns (2015-01-09) API
    DescribeDohSubDomainStatisticsSummary.  Each getter/setter pair below
    maps one query parameter of the RPC call.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Alidns', '2015-01-09', 'DescribeDohSubDomainStatisticsSummary','alidns')
        self.set_method('POST')
        # Endpoint tables are attached only when the base class declares
        # the corresponding attributes.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_DomainName(self): # String
        return self.get_query_params().get('DomainName')

    def set_DomainName(self, DomainName):  # String
        self.add_query_param('DomainName', DomainName)
    def get_StartDate(self): # String
        return self.get_query_params().get('StartDate')

    def set_StartDate(self, StartDate):  # String
        self.add_query_param('StartDate', StartDate)
    def get_PageNumber(self): # Integer
        return self.get_query_params().get('PageNumber')

    def set_PageNumber(self, PageNumber):  # Integer
        self.add_query_param('PageNumber', PageNumber)
    def get_EndDate(self): # String
        return self.get_query_params().get('EndDate')

    def set_EndDate(self, EndDate):  # String
        self.add_query_param('EndDate', EndDate)
    def get_PageSize(self): # Integer
        return self.get_query_params().get('PageSize')

    def set_PageSize(self, PageSize):  # Integer
        self.add_query_param('PageSize', PageSize)
    def get_SubDomain(self): # String
        return self.get_query_params().get('SubDomain')

    def set_SubDomain(self, SubDomain):  # String
        self.add_query_param('SubDomain', SubDomain)
    def get_Lang(self): # String
        return self.get_query_params().get('Lang')

    def set_Lang(self, Lang):  # String
        self.add_query_param('Lang', Lang)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
8384c58d74daedb0e579241902fe89914974b152
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_shibboleths.py
|
479b492df20a757071eeea8dcfd92915bc99a468
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
from xai.brain.wordbase.nouns._shibboleth import _SHIBBOLETH
# Class header: plural noun entry derived from the singular form.
class _SHIBBOLETHS(_SHIBBOLETH, ):
    """Wordbase entry for the plural noun "shibboleths"."""
    def __init__(self):
        _SHIBBOLETH.__init__(self)
        self.name = "SHIBBOLETHS"
        self.specie = 'nouns'
        self.basic = "shibboleth"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
a0a636dd1aaf47c3b8b20d61ef35980706c7cc74
|
39e03684081b27311385a0ab31afcc2e09883e5c
|
/tools/train_saliency.py
|
99a78842aca4431f7cdfd83729401f8bcf65db99
|
[
"MIT",
"Python-2.0"
] |
permissive
|
witnessai/MMSceneGraph
|
8d0b2011a946ddcced95fbe15445b7f4da818509
|
bc5e0f3385205404c712ae9f702a61a3191da0a1
|
refs/heads/master
| 2023-08-12T06:54:00.551237
| 2021-10-12T03:04:21
| 2021-10-12T03:04:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,333
|
py
|
from __future__ import division
import argparse
import copy
import os
import os.path as osp
import time
import mmcv
import torch
from mmcv import Config
from mmcv.runner import init_dist
from mmdet import __version__
from mmdet.apis import set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_saliency_detector
from mmdet.utils import collect_env, get_root_logger
import yaml
def parse_args():
    """Parse command-line options for saliency-detector training and
    propagate --local_rank into the LOCAL_RANK environment variable."""
    ap = argparse.ArgumentParser(description='Train a detector')
    ap.add_argument('config', help='train config file path')
    ap.add_argument('--work_dir', help='the dir to save logs and models')
    ap.add_argument(
        '--resume_from', help='the checkpoint file to resume from')
    # this is not allowed!
    ap.add_argument(
        '--validate',
        action='store_true',
        help='whether to evaluate the checkpoint during training')
    gpu_group = ap.add_mutually_exclusive_group()
    gpu_group.add_argument(
        '--gpus',
        type=int,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    gpu_group.add_argument(
        '--gpu_ids',
        type=int,
        nargs='+',
        help='ids of gpus to use '
        '(only applicable to non-distributed training)')
    ap.add_argument('--seed', type=int, default=None, help='random seed')
    ap.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    ap.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    ap.add_argument('--local_rank', type=int, default=0)
    ap.add_argument(
        '--autoscale-lr',
        action='store_true',
        help='automatically scale lr with the number of gpus')
    parsed = ap.parse_args()
    # Expose the rank to init_dist consumers unless the launcher already set it.
    os.environ.setdefault('LOCAL_RANK', str(parsed.local_rank))
    return parsed
def main():
    """Training entry point: load config, set up logging/seeds, build the
    saliency model and datasets, then hand off to train_detector."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args (CLI overrides config file)
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        # Default to a single GPU when neither --gpus nor --gpu_ids is given.
        cfg.gpu_ids = list(range(1)) if args.gpus is None else list(range(args.gpus))
    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        # For OD: bs=16 -- lr=0.001
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) * cfg.data.imgs_per_gpu / 16
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([('{}: {}'.format(k, v))
                          for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    # log some basic info
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('Config:\n{}'.format(cfg.text))
    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}, deterministic: {}'.format(
            args.seed, args.deterministic))
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    model = build_saliency_detector(cfg.model)
    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        # A two-stage workflow also validates; reuse the training pipeline
        # on the validation split.
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__,
            config=cfg.text)
    mmcv.dump(yaml.safe_load(cfg.dump()), osp.join(cfg.work_dir, 'cfg.yaml'))
    # you can reuse the detector training script.
    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=args.validate,
        timestamp=timestamp,
        meta=meta)
if __name__ == '__main__':
    main()
|
[
"23736866+Kenneth-Wong@users.noreply.github.com"
] |
23736866+Kenneth-Wong@users.noreply.github.com
|
eb6e327944cbc63a28f6744e79a24841cf289c7b
|
a7ded5d3d19a98e61a44189cffe3703f7938e0db
|
/xero_python/payrolluk/models/leave_type_object.py
|
a3951dbe79d8171aed1d4532a1909c00f97b9355
|
[
"MIT"
] |
permissive
|
liseekeralbert/xero-python
|
dfd1076344f763d74f81f701e32600cf88bcc7b2
|
d27ab1894ecd84d2a9af0ca91583593756b21ab3
|
refs/heads/master
| 2022-12-16T07:41:14.331308
| 2020-09-18T17:12:35
| 2020-09-18T17:12:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,074
|
py
|
# coding: utf-8
"""
Xero Payroll UK
This is the Xero Payroll API for orgs in the UK region. # noqa: E501
OpenAPI spec version: 2.3.0
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class LeaveTypeObject(BaseModel):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    Response envelope holding a single LeaveType plus pagination and
    problem (error) details from the Xero Payroll UK API.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        "pagination": "Pagination",
        "problem": "Problem",
        "leave_type": "LeaveType",
    }

    attribute_map = {
        "pagination": "pagination",
        "problem": "problem",
        "leave_type": "leaveType",
    }

    def __init__(self, pagination=None, problem=None, leave_type=None):  # noqa: E501
        """LeaveTypeObject - a model defined in OpenAPI"""  # noqa: E501
        # All fields are optional; only non-None values are assigned so the
        # property setters run for provided values.
        self._pagination = None
        self._problem = None
        self._leave_type = None
        self.discriminator = None

        if pagination is not None:
            self.pagination = pagination
        if problem is not None:
            self.problem = problem
        if leave_type is not None:
            self.leave_type = leave_type

    @property
    def pagination(self):
        """Gets the pagination of this LeaveTypeObject.  # noqa: E501

        :return: The pagination of this LeaveTypeObject.  # noqa: E501
        :rtype: Pagination
        """
        return self._pagination

    @pagination.setter
    def pagination(self, pagination):
        """Sets the pagination of this LeaveTypeObject.

        :param pagination: The pagination of this LeaveTypeObject.  # noqa: E501
        :type: Pagination
        """
        self._pagination = pagination

    @property
    def problem(self):
        """Gets the problem of this LeaveTypeObject.  # noqa: E501

        :return: The problem of this LeaveTypeObject.  # noqa: E501
        :rtype: Problem
        """
        return self._problem

    @problem.setter
    def problem(self, problem):
        """Sets the problem of this LeaveTypeObject.

        :param problem: The problem of this LeaveTypeObject.  # noqa: E501
        :type: Problem
        """
        self._problem = problem

    @property
    def leave_type(self):
        """Gets the leave_type of this LeaveTypeObject.  # noqa: E501

        :return: The leave_type of this LeaveTypeObject.  # noqa: E501
        :rtype: LeaveType
        """
        return self._leave_type

    @leave_type.setter
    def leave_type(self, leave_type):
        """Sets the leave_type of this LeaveTypeObject.

        :param leave_type: The leave_type of this LeaveTypeObject.  # noqa: E501
        :type: LeaveType
        """
        self._leave_type = leave_type
|
[
"sid.maestre@gmail.com"
] |
sid.maestre@gmail.com
|
beb0e5f57a3463005136baba1b8c7c3c9e533db5
|
e9f598d4e2e53bdffd95e2b91726eed156a9bd25
|
/we_chat_tou.py
|
50911f89aa338c6ccce63b1f6e0df506ceb609c7
|
[] |
no_license
|
z991/play_pillow
|
e2d1b17ccdc7e3b5aacb0d1dc7b04805699942f8
|
e01f99be129a5790c78700cb09b1fa85825f832a
|
refs/heads/master
| 2020-11-27T12:20:51.237125
| 2019-12-21T14:19:48
| 2019-12-21T14:19:48
| 229,437,749
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,926
|
py
|
import os
import math
from wxpy import Bot
from PIL import Image
# Create the folder that caches downloaded avatars.
def creat_filepath():
    """Create (if needed) and return the avatar cache directory.

    Returns:
        str: ``<cwd>/wechat/`` with a trailing slash, the path format the
        other helpers in this script expect.
    """
    avatar_dir = os.getcwd() + "/wechat/"
    # exist_ok avoids the check-then-create race of os.path.exists + os.mkdir
    # and makes repeated runs idempotent.
    os.makedirs(avatar_dir, exist_ok=True)
    return avatar_dir
# Save each WeChat friend's avatar into avatar_dir as 0.jpg, 1.jpg, ...
def save_avatar(avatar_dir):
    # Initialise the wxpy bot; this pops up a QR code for WeChat web login.
    bot = Bot()
    friends = bot.friends(update=True)
    num = 0
    for friend in friends:
        # Sequential file names keep the download order stable.
        friend.get_avatar(avatar_dir + '/' + str(num) + ".jpg")
        num = num + 1
# Stitch all cached avatars into one square mosaic image.
def joint_avatar(path):
    # Canvas size (square), in pixels.
    image_size = 2560
    # Number of avatar files in the folder.
    length = len(os.listdir(path))
    """
    Compute how many avatars go in each row and column.
    """
    # ceil(sqrt(n)) tiles per axis so every avatar fits.
    x_lines = math.ceil(math.sqrt(length))
    y_lines = math.ceil(math.sqrt(length))
    # Per-tile size; note this uses floor(sqrt(n)), so the grid can be
    # slightly larger than image_size when n is not a perfect square.
    each_size = math.ceil(image_size / math.floor(math.sqrt(length)))
    image = Image.new('RGB', (each_size * x_lines, each_size * y_lines))
    x = 0  # current column index
    y = 0  # current row index
    for (root, dirs, files) in os.walk(path):
        for pic_name in files:
            # Skip avatars that Pillow cannot decode.
            try:
                with Image.open(path + pic_name) as img:
                    """
                    resize(self, size, resample=NEAREST, box=None):
                    returns a resized copy of this image.
                    """
                    img = img.resize((each_size, each_size))
                    """
                    paste(self, im, box=None, mask=None):
                    pastes another image into this image at the given
                    top-left (2-tuple) or bounding-box (4-tuple) position.
                    """
                    image.paste(img, (x * each_size, y * each_size))
                    # Advance left-to-right, wrapping to the next row.
                    x += 1
                    if x == x_lines:
                        x = 0
                        y += 1
            except IOError:
                print("头像读取失败")
    img = image.save(os.getcwd() + "/wechat.png")
    print('微信好友头像拼接完成!')
if __name__ == '__main__':
    # Pipeline: create cache dir -> download avatars -> build the mosaic.
    avatar_dir = creat_filepath()
    save_avatar(avatar_dir)
    joint_avatar(avatar_dir)
    # for (root, dirs, files) in os.walk(avatar_dir):
    #     print(root, dirs, files)
|
[
"1049939190@qq.com"
] |
1049939190@qq.com
|
7b881d3fd3b5a0506e690102189326ec26164212
|
46bd3e3ba590785cbffed5f044e69f1f9bafbce5
|
/env/lib/python3.8/site-packages/pip/_internal/operations/freeze.py
|
4173161009fc9748ecc149d7cd925afb5657aad9
|
[] |
no_license
|
adamkluk/casper-getstarted
|
a6a6263f1547354de0e49ba2f1d57049a5fdec2b
|
01e846621b33f54ed3ec9b369e9de3872a97780d
|
refs/heads/master
| 2023-08-13T11:04:05.778228
| 2021-09-19T22:56:59
| 2021-09-19T22:56:59
| 408,036,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:4f22ef5d3e19aa9222f31f17fd34ec80127be881b9e0289d265c4620705b9813
size 10556
|
[
"a.klukowski@live.com"
] |
a.klukowski@live.com
|
1fe809787c4d22f427ef3c91f31a6d7b5004f8a3
|
3ca30ff28e4233ff815ebc2525ba6409dbf7ade6
|
/changelogs/custom/pypi/django.py
|
a0b30a1bc3ab3f9b485cb6575052bc4a75a497b5
|
[
"MIT"
] |
permissive
|
hackebrot/changelogs
|
e29b7ce1b6b799fc1a5f1871e29d7d7ac787ad48
|
c5bf363a5b7efd2640ba404b217a37661ef220c1
|
refs/heads/master
| 2021-01-20T02:07:27.028697
| 2017-04-25T15:05:19
| 2017-04-25T15:05:19
| 89,375,742
| 1
| 0
| null | 2017-04-25T15:18:14
| 2017-04-25T15:18:14
| null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
def get_head(line, releases, **kwargs):
    """Return the release whose notes header appears in *line*, else False."""
    header = "Django {} release notes"
    for candidate in releases:
        if header.format(candidate) in line:
            return candidate
    return False
def get_urls(releases, **kwargs):
    """Build raw-GitHub URLs for each release's notes file.

    Returns a (urls, []) pair; the second element is always empty here.
    """
    template = "https://raw.githubusercontent.com/django/django/master/docs/releases/{v}.txt"
    return [template.format(v=release) for release in releases], []
|
[
"ja.geb@me.com"
] |
ja.geb@me.com
|
fc7a978cdca9b14608013239456e0f1fea702e7b
|
f57529f95a0fd10676f46063fdcd273fb5a81427
|
/boj/03001-04000/3181.py
|
0cf1f7d1f5d5af6ca60a5dd002df336fe82466ba
|
[] |
no_license
|
hoyasmh/PS
|
a9b83b0044e483586590c9b7c6bf8a77236b67e7
|
6bbaa0ce77b2726f6af782af049d73720820f761
|
refs/heads/master
| 2023-04-23T10:43:27.349785
| 2021-05-17T13:43:53
| 2021-05-17T13:43:53
| 311,239,034
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
# BOJ 3181: print the acronym of a title — the uppercased first letter of
# every word, skipping the listed Croatian conjunctions/particles, except
# that the first word always contributes.
l=['i','pa','te','ni','niti','a','ali','nego','no','ili']
s=input().split()
for i in range(len(s)):
    if i==0 or s[i] not in l:
        print(s[i][0].upper(),end='')
|
[
"hoyasmh@gmail.com"
] |
hoyasmh@gmail.com
|
b6e50f0069a0ad20140650a86266d623d882464d
|
167eb71c690e43e06b943a04a031f9e662ac7521
|
/acq4/devices/Scanner/scan_program/step.py
|
78e088f79c49f0e555d3506b82810b3513d8e648
|
[
"MIT"
] |
permissive
|
histed/acq4
|
8e0a5dedc74c2ea063477e4b0027fbade3a72e61
|
ea0242d49245b81ab218d8d3e0187138b136ded5
|
refs/heads/develop
| 2021-01-19T23:46:54.999081
| 2017-03-24T22:48:52
| 2017-03-24T22:48:52
| 89,023,143
| 0
| 6
| null | 2017-04-21T20:58:33
| 2017-04-21T20:58:32
| null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
import numpy as np
import acq4.pyqtgraph as pg
from .component import ScanProgramComponent
#class StepScanComponent(ScanProgramComponent):
#"""
#Steps the laser once to a specific position.
#"""
#name = 'step'
#def generateVoltageArray(self, arr, startInd, stopInd):
#pos = cmd['pos']
#if pos == None:
#pos = self.dev.getOffVoltage()
#else:
#pos = self.mapToScanner(pos[0], pos[1])
#lastPos = pos
#arr[0, startInd] = pos[0]
#arr[1, startInd] = pos[1]
#return startInd
|
[
"luke.campagnola@gmail.com"
] |
luke.campagnola@gmail.com
|
eda7057bb4a064142794dc4168db8c4c3d7075ef
|
18825807a4cf373f00419e46ac70566d17115e9e
|
/problems/algo/convert_a_number_to_hexadecimal.py
|
b8d4588cc5bd2a0833c3299792178830a61a9c56
|
[] |
no_license
|
StefanRankovic/leetcode
|
51154d7297b4674c62e481c6c13016097207b4d0
|
bbed81b50acaef025186648c61110dbf65e5f6cb
|
refs/heads/master
| 2023-02-20T06:16:02.913457
| 2021-01-24T09:42:50
| 2021-01-24T09:42:50
| 266,200,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
class Solution:
    def toHex(self, num: int) -> str:
        """Convert a 32-bit signed integer to its lowercase hex string
        (two's-complement representation for negatives)."""
        if num == 0:
            return '0'
        if num < 0:
            # Wrap negatives into the unsigned 32-bit range.
            num += 1 << 32
        digits = '0123456789abcdef'
        out = ''
        while num:
            out = digits[num & 0xF] + out
            num >>= 4
        return out
|
[
"stefan.rankovic.89@gmail.com"
] |
stefan.rankovic.89@gmail.com
|
599d2ccb94c5166eb0e397169f084f8ac3d1816c
|
0f234d1029e89309994331a68a999e2359bb08b0
|
/tslearn/neural_network/__init__.py
|
1eed30502447a5b9d10b18cf9d50d8c3bec683cf
|
[
"BSD-2-Clause"
] |
permissive
|
tslearn-team/tslearn
|
8282698361bfb42183466eaaa4c6da1d107e9513
|
e9b3ecca5f56bc8ffab5a0106e2d41f17ae89109
|
refs/heads/main
| 2023-09-01T02:03:19.814166
| 2023-08-21T13:22:42
| 2023-08-21T13:22:42
| 90,264,407
| 1,687
| 198
|
BSD-2-Clause
| 2023-09-13T20:39:47
| 2017-05-04T13:08:13
|
Python
|
UTF-8
|
Python
| false
| false
| 415
|
py
|
"""
The :mod:`tslearn.neural_network` module contains multi-layer perceptron
models for time series classification and regression.
These are straight-forward adaptations of scikit-learn models.
"""
from .neural_network import TimeSeriesMLPClassifier, TimeSeriesMLPRegressor
__author__ = 'Romain Tavenard romain.tavenard[at]univ-rennes2.fr'
__all__ = [
"TimeSeriesMLPClassifier", "TimeSeriesMLPRegressor"
]
|
[
"romain.tavenard@univ-rennes2.fr"
] |
romain.tavenard@univ-rennes2.fr
|
e3b7eb1b3827edcba78060e3051438f29d691b58
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02404/s960425293.py
|
0de2e292bdd90651c2ea9948ed854c3590feb3e8
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
# Read H W pairs until "0 0"; for each, print a W-wide, H-tall rectangle
# whose border is '#' and interior is '.', followed by a blank line.
while True:
    H, W = map(int, input().split())
    if H == 0 and W == 0:
        break
    print('#' * W)
    for _ in range(H - 2):
        print('#' + '.' * (W - 2) + '#')
    print('#' * W)
    print('')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
849c162af41131a106cdda454b6af428f8cac483
|
11aac6edab131293027add959b697127bf3042a4
|
/isToeplitzMatrix.py
|
39560dbb5a486627bf54e486fb6cac79f90fe046
|
[] |
no_license
|
jdanray/leetcode
|
a76b3436002b31865967b757b73c85992636383b
|
fd736af3e79899b86dac89d4d925d5bd985944ad
|
refs/heads/master
| 2023-08-15T01:20:05.110565
| 2023-08-14T00:25:58
| 2023-08-14T00:25:58
| 148,686,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
# https://leetcode.com/problems/toeplitz-matrix/description/
class Solution:
    def isToeplitzMatrix(self, matrix):
        """True iff every element equals its lower-right neighbour, i.e.
        every top-left-to-bottom-right diagonal holds a single value."""
        return all(
            row[j] == below[j + 1]
            for row, below in zip(matrix, matrix[1:])
            for j in range(len(row) - 1)
        )
|
[
"jdanray@users.noreply.github.com"
] |
jdanray@users.noreply.github.com
|
da4f9f021fd019d5ef18dbd2e821d201de06d002
|
544cfadc742536618168fc80a5bd81a35a5f2c99
|
/tools/test/connectivity/acts/framework/acts/controllers/chameleon_controller.py
|
b9965cf69fdfc8d09502e15d02627a0fdb1751c4
|
[] |
no_license
|
ZYHGOD-1/Aosp11
|
0400619993b559bf4380db2da0addfa9cccd698d
|
78a61ca023cbf1a0cecfef8b97df2b274ac3a988
|
refs/heads/main
| 2023-04-21T20:13:54.629813
| 2021-05-22T05:28:21
| 2021-05-22T05:28:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,175
|
py
|
#!/usr/bin/env python3
#
# Copyright 2017 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import xmlrpc.client
from subprocess import call
from acts import signals
# Names under which Mobly/ACTS registers and references this controller.
MOBLY_CONTROLLER_CONFIG_NAME = "ChameleonDevice"
ACTS_CONTROLLER_REFERENCE_NAME = "chameleon_devices"
# Error messages raised by create() on malformed configuration.
CHAMELEON_DEVICE_EMPTY_CONFIG_MSG = "Configuration is empty, abort!"
CHAMELEON_DEVICE_NOT_LIST_CONFIG_MSG = "Configuration should be a list, abort!"
# Human-readable labels for the Chameleon audio-bus endpoints.
audio_bus_endpoints = {
    'CROS_HEADPHONE': 'Cros device headphone',
    'CROS_EXTERNAL_MICROPHONE': 'Cros device external microphone',
    'PERIPHERAL_MICROPHONE': 'Peripheral microphone',
    'PERIPHERAL_SPEAKER': 'Peripheral speaker',
    'FPGA_LINEOUT': 'Chameleon FPGA line-out',
    'FPGA_LINEIN': 'Chameleon FPGA line-in',
    'BLUETOOTH_OUTPUT': 'Bluetooth module output',
    'BLUETOOTH_INPUT': 'Bluetooth module input'
}
class ChameleonDeviceError(signals.ControllerError):
    """Raised for Chameleon controller configuration or usage errors."""
    pass
def create(configs):
    """Create ChameleonDevice controllers from a test-bed config entry.

    Args:
        configs: A non-empty list; when its first element is a string the
            whole list is treated as Chameleon IP addresses.

    Returns:
        A list of ChameleonDevice objects (implicitly None for config
        lists that are not string based, matching prior behavior).

    Raises:
        ChameleonDeviceError: If configs is empty or not a list.
    """
    if not configs:
        raise ChameleonDeviceError(CHAMELEON_DEVICE_EMPTY_CONFIG_MSG)
    if not isinstance(configs, list):
        raise ChameleonDeviceError(CHAMELEON_DEVICE_NOT_LIST_CONFIG_MSG)
    if isinstance(configs[0], str):
        # A list of plain strings is interpreted as IP addresses.
        return get_instances(configs)
def destroy(chameleons):
    """Tear down ChameleonDevice objects (no per-device cleanup required)."""
    for device in chameleons:
        del device
def get_info(chameleons):
    """Get information on a list of ChameleonDevice objects.

    Args:
        chameleons: A list of ChameleonDevice objects.

    Returns:
        A list of dicts, one per device, with "address" and "port" keys.
    """
    return [{"address": device.address, "port": device.port}
            for device in chameleons]
def get_instances(ips):
    """Create ChameleonDevice instances from a list of IPs.

    Args:
        ips: A list of Chameleon IP addresses.

    Returns:
        A list of ChameleonDevice objects, one per IP.
    """
    return list(map(ChameleonDevice, ips))
class ChameleonDevice:
    """Class representing a Chameleon device.

    Each object of this class represents one Chameleon device in ACTS.

    Attributes:
        address: The full address to contact the Chameleon device at.
        client: The ServerProxy of the XMLRPC client.
        log: A logger object.
        port: The TCP port number of the Chameleon device.
    """

    def __init__(self, ip="", port=9992):
        self.ip = ip
        self.log = logging.getLogger()
        self.port = port
        self.address = "http://{}:{}".format(ip, self.port)
        try:
            self.client = xmlrpc.client.ServerProxy(
                self.address, allow_none=True, verbose=False)
        except ConnectionRefusedError as err:
            # NOTE(review): if ServerProxy construction fails, self.client is
            # never bound and the Reset() call below raises AttributeError
            # instead of failing cleanly -- confirm and fix upstream.
            self.log.exception(
                "Failed to connect to Chameleon Device at: {}".format(
                    self.address))
        # Reset the device to a known state on connection.
        self.client.Reset()

    def pull_file(self, chameleon_location, destination):
        """Pulls a file from the Chameleon device. Usually the raw audio file.

        Args:
            chameleon_location: The path to the file on the Chameleon device.
            destination: The destination to where to pull it locally.
        """
        # TODO: (tturney) implement
        self.log.error("Definition not yet implemented")

    def start_capturing_audio(self, port_id, has_file=True):
        """Starts capturing audio.

        Args:
            port_id: The ID of the audio input port.
            has_file: True for saving audio data to file. False otherwise.
        """
        self.client.StartCapturingAudio(port_id, has_file)

    def stop_capturing_audio(self, port_id):
        """Stops capturing audio.

        Args:
            port_id: The ID of the audio input port.

        Returns:
            List contain the location of the recorded audio and a dictionary
            of values relating to the raw audio including: file_type, channel,
            sample_format, and rate.
        """
        return self.client.StopCapturingAudio(port_id)

    def audio_board_connect(self, bus_number, endpoint):
        """Connects an endpoint to an audio bus.

        Args:
            bus_number: 1 or 2 for audio bus 1 or bus 2.
            endpoint: An endpoint defined in audio_bus_endpoints.
        """
        self.client.AudioBoardConnect(bus_number, endpoint)

    def audio_board_disconnect(self, bus_number, endpoint):
        """Disconnects an endpoint from an audio bus.

        Args:
            bus_number: 1 or 2 for audio bus 1 or bus 2.
            endpoint: An endpoint defined in audio_bus_endpoints.
        """
        self.client.AudioBoardDisconnect(bus_number, endpoint)

    def audio_board_disable_bluetooth(self):
        """Disables Bluetooth module on audio board."""
        self.client.AudioBoardDisableBluetooth()

    def audio_board_clear_routes(self, bus_number):
        """Clears routes on an audio bus.

        Args:
            bus_number: 1 or 2 for audio bus 1 or bus 2.
        """
        self.client.AudioBoardClearRoutes(bus_number)

    def scp(self, source, destination):
        """Copies files from the Chameleon device to the host machine.

        Args:
            source: The file path on the Chameleon board.
            destination: The file path on the host machine.
        """
        cmd = "scp root@{}:/{} {}".format(self.ip, source, destination)
        try:
            call(cmd.split(" "))
        except FileNotFoundError as err:
            # NOTE(review): call() raises FileNotFoundError when the local
            # `scp` binary is missing, not when `source` is absent on the
            # remote device -- the log message below is misleading.
            self.log.exception("File not found {}".format(source))
|
[
"rick_tan@qq.com"
] |
rick_tan@qq.com
|
20b035cb4df2c7e31ca09b0df3a8484d28292617
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/x12/5020/180005020.py
|
3ebd718b508dadfdbe85804be5bfc4ff1cde6abc
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 2,675
|
py
|
from bots.botsconfig import *
from records005020 import recorddefs
# Bots envelope settings for this X12 transaction set.
syntax = {
    'version': '00403',  # version of ISA to send
    'functionalgroup': 'AN',
}
# X12 transaction-set structure for the bots EDI translator: each node is a
# segment with min/max repeat counts; LEVEL nests child segments and loops.
structure = [
    {ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
        {ID: 'BGN', MIN: 1, MAX: 1},
        {ID: 'RDR', MIN: 0, MAX: 1},
        {ID: 'PRF', MIN: 0, MAX: 1},
        {ID: 'DTM', MIN: 0, MAX: 10},
        {ID: 'N9', MIN: 0, MAX: 10},
        {ID: 'PER', MIN: 0, MAX: 2},
        {ID: 'SAC', MIN: 0, MAX: 10},
        {ID: 'G38', MIN: 0, MAX: 1},
        {ID: 'PKG', MIN: 0, MAX: 5},
        {ID: 'TD1', MIN: 0, MAX: 10},
        {ID: 'TD5', MIN: 0, MAX: 10},
        {ID: 'NTE', MIN: 0, MAX: 5},
        # Party identification loop.
        {ID: 'N1', MIN: 0, MAX: 200, LEVEL: [
            {ID: 'N2', MIN: 0, MAX: 2},
            {ID: 'N3', MIN: 0, MAX: 2},
            {ID: 'N4', MIN: 0, MAX: 1},
            {ID: 'PER', MIN: 0, MAX: 5},
        ]},
        {ID: 'LM', MIN: 0, MAX: 10, LEVEL: [
            {ID: 'LQ', MIN: 1, MAX: 100},
        ]},
        # Line-item loop.
        {ID: 'BLI', MIN: 0, MAX: 99999, LEVEL: [
            {ID: 'N9', MIN: 0, MAX: 20},
            {ID: 'PID', MIN: 0, MAX: 5},
            {ID: 'RDR', MIN: 0, MAX: 1},
            {ID: 'SAC', MIN: 0, MAX: 10},
            {ID: 'AMT', MIN: 0, MAX: 99999},
            {ID: 'MEA', MIN: 0, MAX: 99999},
            {ID: 'CRC', MIN: 0, MAX: 99999},
            {ID: 'NTE', MIN: 0, MAX: 99999},
            {ID: 'PRF', MIN: 0, MAX: 1},
            {ID: 'DTM', MIN: 0, MAX: 15},
            {ID: 'DD', MIN: 0, MAX: 100},
            {ID: 'GF', MIN: 0, MAX: 1},
            {ID: 'TD5', MIN: 0, MAX: 5},
            {ID: 'SDQ', MIN: 0, MAX: 100},
            {ID: 'LM', MIN: 0, MAX: 10, LEVEL: [
                {ID: 'LQ', MIN: 1, MAX: 100},
            ]},
            {ID: 'N1', MIN: 0, MAX: 200, LEVEL: [
                {ID: 'N2', MIN: 0, MAX: 2},
                {ID: 'N3', MIN: 0, MAX: 2},
                {ID: 'N4', MIN: 0, MAX: 1},
                {ID: 'PER', MIN: 0, MAX: 5},
            ]},
            {ID: 'QTY', MIN: 0, MAX: 99999, LEVEL: [
                {ID: 'AMT', MIN: 0, MAX: 5},
                {ID: 'DTM', MIN: 0, MAX: 10},
                {ID: 'N1', MIN: 0, MAX: 1},
                {ID: 'LM', MIN: 0, MAX: 10, LEVEL: [
                    {ID: 'LQ', MIN: 1, MAX: 100},
                ]},
                {ID: 'LX', MIN: 0, MAX: 99999, LEVEL: [
                    {ID: 'N9', MIN: 0, MAX: 99999},
                    {ID: 'DTM', MIN: 0, MAX: 10},
                    {ID: 'N1', MIN: 0, MAX: 1},
                    {ID: 'LM', MIN: 0, MAX: 10, LEVEL: [
                        {ID: 'LQ', MIN: 1, MAX: 100},
                    ]},
                ]},
            ]},
            {ID: 'FA1', MIN: 0, MAX: 99999, LEVEL: [
                {ID: 'FA2', MIN: 1, MAX: 99999},
            ]},
        ]},
        {ID: 'SE', MIN: 1, MAX: 1},
    ]}
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
a452d8c8be8214679e4821b0ad93f0e586261b5e
|
c9fe9f52d70ad5308d19664e82081233f1bc6d9a
|
/app/views.py
|
4f04a92d6305eafbcd88315cf2b9d14c4a415af4
|
[] |
no_license
|
arifbd2221/ResumeParser
|
9f48f97528588cde6fa7b5507d8ac3364a6c016b
|
4508465e21e9a362018c84ac0370dcd35df98a7f
|
refs/heads/master
| 2022-12-10T21:06:50.429742
| 2020-03-18T18:21:07
| 2020-03-18T18:21:07
| 248,309,886
| 0
| 0
| null | 2022-12-08T03:50:02
| 2020-03-18T18:19:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,578
|
py
|
from django.shortcuts import render
from .models import Resume, Candidate
from django.core.files.storage import default_storage
import os
from pyresparser import ResumeParser
def home(request):
    """Render the home page listing all candidates, most experienced first.

    Args:
        request: The incoming HttpRequest.

    Returns:
        An HttpResponse rendering app/home.html with 'candidates' in context.
    """
    # The original built an unused `top_candidates` dict and sorted a
    # materialized list in place; sorted() does the same in one step.
    candidates = sorted(Candidate.objects.all(),
                        key=lambda c: c.experience, reverse=True)
    return render(request, "app/home.html", {'candidates': candidates})
def handleResume(request):
    """Accept a resume upload, parse it, and persist a Candidate record.

    GET renders the upload form; POST stores the uploaded file via the
    Resume model, runs pyresparser over the stored file, and saves the
    extracted fields as a Candidate.
    """
    if request.method == 'POST':
        BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        print('post')
        resume = request.FILES.get('resume', None)
        print(resume)
        if resume:
            saving = Resume(resume=resume)
            saving.save()
            # Rebuild the absolute path of the stored file.
            # NOTE(review): assumes the FileField name has exactly one '/'
            # separator ("<subdir>/<filename>") -- confirm the upload_to
            # setting on Resume.resume.
            media_path = os.path.join(BASE_DIR, 'resumes')
            lpart = str(saving.resume).split('/')
            full_path = os.path.join(media_path, lpart[1])
            data = ResumeParser(str(full_path)).get_extracted_data()
            # NOTE(review): float(data.get('total_experience')) raises
            # TypeError if the parser returns None for that key -- verify.
            candidate = Candidate(name=data.get('name'), email=data.get('email'),
                                  phone=data.get('mobile_number'), experience=float(data.get('total_experience')),
                                  total_skills=len(data.get('skills')), designation=data.get('designation'),
                                  company="N/A" if data.get('company_names') is None else data.get('company_names'))
            candidate.save()
            return render(request, "app/home.html", {})
    return render(request, "app/cvform.html", {})
|
[
"mohidulhoque216@gmail.com"
] |
mohidulhoque216@gmail.com
|
3fc99cb24ddecebaf07b6bdc249560f5cc586b4c
|
b9e99a828952ffeab9767e625c0061cb3ea5b670
|
/Python编程从入门到实践/learning_log/learning_log_2.1_让用户能够输入数据/learning_log/urls.py
|
1eb20c51f860bd491ba4e3b501449aa4cf335e2c
|
[] |
no_license
|
ZGA101421/Python3_Project
|
95d95e23858ef92f6825f018605089c105303ad3
|
fa30f876fd13890743bc81d1521534c340575132
|
refs/heads/master
| 2022-04-03T07:03:46.369710
| 2019-12-30T15:22:21
| 2019-12-30T15:22:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,514
|
py
|
"""learning_log URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    path('admin/', admin.site.urls),  # All URLs that can be requested from the admin site.
    path('', include('learning_logs.urls', namespace='learning_logs')),
    # The namespace argument lets us distinguish learning_logs URLs from
    # other URLs in the project.
]
'''
Django版本更新,书上的代码需做相应修改
书中源代码:
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'', include('learning_logs.urls', namespace='learning_logs')),
]
应改为:
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('learning_logs.urls', namespace='learning_logs')),
]
'''
|
[
"986740304@qq.com"
] |
986740304@qq.com
|
7b5282cb4880ed7331be6c50b7b9bde16fe209cb
|
b9b06d86d43e738b62ab9289fc13aae4c2b2670b
|
/nsd1807/devops/day04/smail2.py
|
51a4e856461309c72f18ac3c1d64e75aafe8f38f
|
[] |
no_license
|
MrZhangzhg/nsd_2018
|
31a7a8d54e2cb3ff4f4eb5c736fbd76601718356
|
458a1fef40c5e15ba7689fcb3a00baf893ac0218
|
refs/heads/master
| 2020-04-08T19:08:48.237646
| 2019-09-08T04:31:07
| 2019-09-08T04:31:07
| 159,642,127
| 5
| 7
| null | 2019-01-04T05:33:40
| 2018-11-29T09:37:27
|
Python
|
UTF-8
|
Python
| false
| false
| 904
|
py
|
from email.mime.text import MIMEText
from email.header import Header
from smtplib import SMTP
import getpass
def send_mail(text, subject, sender, receivers, server, user, passwd, port=25):
    """Send a plain-text UTF-8 email through an SMTP server.

    Args:
        text: Message body.
        subject: Subject line.
        sender: From address (also used as the envelope sender).
        receivers: List of recipient addresses; all are listed in To and
            receive the message.
        server: SMTP server hostname.
        user: Login user name.
        passwd: Login password.
        port: SMTP port, default 25.
    """
    message = MIMEText(text, 'plain', 'utf8')
    message['From'] = Header(sender, 'utf8')
    # Fix: list every recipient in the To header, not only the first one
    # (all of them already received the message via the envelope).
    message['To'] = Header(', '.join(receivers), 'utf8')
    message['Subject'] = Header(subject, 'utf8')
    smtp = SMTP()
    try:
        smtp.connect(server, port)
        # smtp.starttls()  # Uncomment when the server requires TLS.
        smtp.login(user, passwd)
        smtp.sendmail(sender, receivers, message.as_bytes())
        smtp.quit()
    finally:
        # Fix: always release the socket, even if connect/login/send fails
        # (close() is safe to call whether or not quit() succeeded).
        smtp.close()
if __name__ == '__main__':
    # Ad-hoc manual test: prompt for the account password, then send a short
    # message through the tedu.cn mail server using the sender as login user.
    text = 'python邮件测试\r\n'
    subject = 'smtp test'
    sender = 'zhangzhigang79@126.com'
    passwd = getpass.getpass()
    server = 'mail.tedu.cn'
    receivers = ['zhangzhigang79@126.com', 'zhangzhigang79@qq.com']
    send_mail(text, subject, sender, receivers, server, sender, passwd)
|
[
"zhangzg@tedu.cn"
] |
zhangzg@tedu.cn
|
261e0eb698524a65c64f509f16fc005825678a85
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5709773144064000_1/Python/DayBit/probB.py
|
89a1ba67f2f5762b9bdf723758fef3336e9985fe
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
'''
Created on 12/04/2014

@author: david
'''
# Google Code Jam 2014 "Cookie Clicker Alpha": for each case (c, f, x) decide
# how many farms to buy (cost c, each adds f cookies/sec to the base rate of
# 2.0) so that reaching x cookies takes the least total time.
fIn = open("B-large.in")
T = int(fIn.readline())
P = []
for i in range(T):
    c, f, x = [float(x) for x in fIn.readline().strip().split()]
    P.append((c, f, x))
fRes = open("res.txt", "w")
case = 0
for c, f, x in P:
    case += 1
    cps = 2.0  # current cookies per second
    timetobuy = c / cps  # time to afford the next farm at the current rate
    bestTime = x / cps  # best total finish time found so far
    acc = 0  # time already spent accumulating for purchased farms
    while True:
        cps += f
        acc += timetobuy
        # Stop as soon as buying one more farm no longer improves the time.
        if bestTime < acc + x / cps:
            print("Case #{0}: {1:0.7f}".format(case, bestTime))
            fRes.write("Case #{0}: {1:0.7f}\n".format(case, bestTime))
            break
        timetobuy = c / cps
        bestTime = acc + x / cps
fRes.close()
# NOTE(review): fIn is never closed.
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
27f71acc6181f6b40de7037f107d718970c210e8
|
24532cc3eb0e489415a08457b454c454abf66525
|
/object-maker/copy-dataset-files.py
|
295761788c3e67586d04717102ac11cacb0d8a08
|
[] |
no_license
|
glygener/glygen-backend-integration
|
7a4c8e45dd9af6b0424946fcc7e11e9aef39d9a6
|
526775496f860680df2dbfdfc42b3ba35c69cfea
|
refs/heads/master
| 2022-09-22T03:56:35.116497
| 2022-09-09T16:56:59
| 2022-09-09T16:56:59
| 151,144,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,122
|
py
|
#!/usr/bin/python
import os,sys
import string
import csv
import json
import glob
import requests
import subprocess
import pymongo
from optparse import OptionParser
import libgly
from Bio import SeqIO
__version__="1.0"
__status__ = "Dev"
def get_master_file_list():
    """Build the expected dataset file names from the master-list JSON.

    Reads generated/misc/dataset-masterlist.json under the module-global
    wrk_dir and expands each dataset entry into one file name per listed
    species (or a single species-less name), skipping alignment datasets
    and species on the entry's excludelist.

    Returns:
        A list of file names like "<species>_<molecule>_<name>.<format>"
        or "<molecule>_<name>.<format>".
    """
    file_name_list = []
    ds_obj_list = json.loads(open(wrk_dir + "/generated/misc/dataset-masterlist.json", "r").read())
    for obj in ds_obj_list:
        ds_name = obj["name"]
        ds_format = obj["format"]
        mol = obj["categories"]["molecule"]
        # Alignment datasets are handled elsewhere; skip them here.
        if ds_name in ["homolog_alignments", "isoform_alignments"]:
            continue
        if obj["categories"]["species"] == []:
            file_name_list.append("%s_%s.%s" % (mol, ds_name, ds_format))
        else:
            sp_list_one = sorted(obj["categories"]["species"])
            for species in sp_list_one:
                if species not in obj["integration_status"]["excludelist"]:
                    file_name_list.append("%s_%s_%s.%s" % (species, mol, ds_name, ds_format))
    return file_name_list
def main():
    """Copy every expected dataset file from unreviewed/ into reviewed/.

    If any expected file is missing, print the missing paths and do
    nothing; otherwise wipe reviewed/, copy all files in, and open up
    permissions.
    """
    global wrk_dir
    global field_dict
    global io_dict

    generated_dir = "/data/projects/glygen/generated/"
    wrk_dir = "/home/rykahsay/glygen-backend-integration/object-maker"
    reviewed_dir = wrk_dir + "/reviewed/"
    unreviewed_dir = wrk_dir + "/unreviewed/"

    file_list = get_master_file_list()
    path_list = []
    missing_files = []
    for out_file_name in file_list:
        path = unreviewed_dir + out_file_name
        if os.path.isfile(path) == False:
            missing_files.append(path)
        else:
            path_list.append(path)

    if missing_files != []:
        # Abort the copy entirely when anything is missing.
        for path in missing_files:
            print(path, "is missing")
    else:
        # getstatusoutput runs through a shell, so the "/*" glob expands.
        cmd = "rm -f " + reviewed_dir + "/*"
        x, y = subprocess.getstatusoutput(cmd)
        for path in path_list:
            cmd = "cp " + path + " " + reviewed_dir
            x, y = subprocess.getstatusoutput(cmd)
        cmd = "chmod -R 755 " + reviewed_dir
        x, y = subprocess.getstatusoutput(cmd)


if __name__ == '__main__':
    main()
|
[
"rykahsay@gwu.edu"
] |
rykahsay@gwu.edu
|
ec95968a7b2df86a07137f7e6e672b71302ae50a
|
7834e55df20cd3b0fb629a137dd2671cf53f484f
|
/tests/test_encoding.py
|
8fb4b5e646be000a21d9ac4265648f9e54c2c5af
|
[
"MIT"
] |
permissive
|
mapbox/mapbox-sdk-py
|
ca23c0f5cbbadd654b53683ff3f8918f504e0ff6
|
0329ccb17e7d3f4123da1534417bd21aa31bc2eb
|
refs/heads/master
| 2023-06-05T11:46:38.434644
| 2022-08-01T22:31:37
| 2022-08-01T22:31:37
| 39,404,445
| 335
| 150
|
MIT
| 2020-02-04T16:25:14
| 2015-07-20T19:34:47
|
Python
|
UTF-8
|
Python
| false
| false
| 4,221
|
py
|
import pytest
import copy
import json
from mapbox.encoding import (read_points,
encode_waypoints,
encode_polyline,
encode_coordinates_json)
# GeoJSON fixtures: the same two lon/lat points expressed three ways --
# as two Point features, one MultiPoint feature, and one LineString feature.
gj_point_features = [{
    "type": "Feature",
    "properties": {},
    "geometry": {
        "type": "Point",
        "coordinates": [
            -87.33787536621092,
            36.539156961321574]}}, {
    "type": "Feature",
    "properties": {},
    "geometry": {
        "type": "Point",
        "coordinates": [
            -88.2476806640625,
            36.92217534275667]}}]

gj_multipoint_features = [{
    "type": "Feature",
    "properties": {},
    "geometry": {
        "type": "MultiPoint",
        "coordinates": [
            [-87.33787536621092,
             36.539156961321574],
            [-88.2476806640625,
             36.92217534275667]]}}]

gj_line_features = [{
    "type": "Feature",
    "properties": {},
    "geometry": {
        "type": "LineString",
        "coordinates": [
            [-87.33787536621092,
             36.539156961321574],
            [-88.2476806640625,
             36.92217534275667]]}}]
class GeoThing(object):
    """Minimal object exposing the __geo_interface__ protocol for tests."""

    __geo_interface__ = None

    def __init__(self, thing):
        self.__geo_interface__ = thing
def test_read_geojson_features():
    """read_points yields lon/lat tuples from Point, MultiPoint and LineString."""
    expected = [(-87.33787536621092, 36.539156961321574),
                (-88.2476806640625, 36.92217534275667)]
    assert expected == list(read_points(gj_point_features))
    assert expected == list(read_points(gj_multipoint_features))
    assert expected == list(read_points(gj_line_features))
def test_geo_interface():
    """read_points accepts objects exposing __geo_interface__ (features or bare geometries)."""
    expected = [(-87.33787536621092, 36.539156961321574),
                (-88.2476806640625, 36.92217534275667)]
    features = [GeoThing(gj_point_features[0]),
                GeoThing(gj_point_features[1])]
    assert expected == list(read_points(features))
    geoms = [GeoThing(gj_point_features[0]['geometry']),
             GeoThing(gj_point_features[1]['geometry'])]
    assert expected == list(read_points(geoms))
def test_encode_waypoints():
    """Waypoints encode as "lon,lat" pairs joined with semicolons."""
    expected = "-87.337875,36.539157;-88.247681,36.922175"
    assert expected == encode_waypoints(gj_point_features)
    assert expected == encode_waypoints(gj_multipoint_features)
    assert expected == encode_waypoints(gj_line_features)
def test_encode_limits():
    """min_limit/max_limit bounds raise ValueError when violated."""
    expected = "-87.337875,36.539157;-88.247681,36.922175"
    assert expected == encode_waypoints(gj_point_features)
    with pytest.raises(ValueError) as exc:
        encode_waypoints(gj_point_features, min_limit=3)
    assert 'at least' in str(exc.value)
    with pytest.raises(ValueError) as exc:
        encode_waypoints(gj_point_features, max_limit=1)
    assert 'at most' in str(exc.value)
def test_unsupported_geometry():
    """Unknown geometry types make read_points raise ValueError."""
    unsupported = copy.deepcopy(gj_point_features)
    unsupported[0]['geometry']['type'] = "MultiPolygonnnnnn"
    with pytest.raises(ValueError) as exc:
        list(read_points(unsupported))
    assert 'Unsupported geometry' in str(exc.value)
def test_unknown_object():
    """Inputs that are not GeoJSON-like at all raise ValueError."""
    unknown = ["foo", "bar"]
    with pytest.raises(ValueError) as exc:
        list(read_points(unknown))
    assert 'Unknown object' in str(exc.value)
def test_encode_polyline():
    """All three fixture forms encode to the same polyline string."""
    expected = "wp_~EvdatO{xiAfupD"
    assert expected == encode_polyline(gj_point_features)
    assert expected == encode_polyline(gj_multipoint_features)
    assert expected == encode_polyline(gj_line_features)
def test_encode_coordinates_json():
    """JSON encoding yields a {"coordinates": [[lon, lat], ...]} document."""
    expected = {
        'coordinates': [
            [-87.33787536621092, 36.539156961321574],
            [-88.2476806640625, 36.92217534275667]]}
    assert expected == json.loads(encode_coordinates_json(gj_point_features))
    assert expected == json.loads(encode_coordinates_json(gj_multipoint_features))
    assert expected == json.loads(encode_coordinates_json(gj_line_features))
def test_encode_waypoints_rounding():
    """Integer coordinates are rendered in float form ("1.0,0.0")."""
    expected = "1.0,0.0"
    int_coord_features = [{
        "type": "Feature",
        "geometry": {
            "type": "Point",
            "coordinates": [1, 0]
        },
        "properties": {}}]
    assert expected == encode_waypoints(int_coord_features)
|
[
"perrygeo@gmail.com"
] |
perrygeo@gmail.com
|
94a7c9bb205a5af9de7c09c8beb9796010b2cc71
|
b059c2cf1e19932abb179ca3de74ced2759f6754
|
/S20/day04/03作业.py
|
89601d4d3d9c179bb55fbfb29fee50ae4e4ba7d1
|
[] |
no_license
|
Lwk1071373366/zdh
|
a16e9cad478a64c36227419d324454dfb9c43fd9
|
d41032b0edd7d96e147573a26d0e70f3d209dd84
|
refs/heads/master
| 2020-06-18T02:11:22.740239
| 2019-07-10T08:55:14
| 2019-07-10T08:55:14
| 196,130,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,864
|
py
|
# li = ["alex", "WuSir", "ritian", "barry", "wenzhou"].
# a.计算列表的⻓度并输出
# b. 列表中追加元素"seven",并输出添加后的列表
# c. 请在列表的第1个位置插⼊元素"Tony",并输出添加后的列表
# d. 请修改列表第2个位置的元素为"Kelly",并输出修改后的列表
# e. 请将列表l2=[1,"a",3,4,"heart"]的每⼀个元素添加到列表li中,⼀⾏代码实现,不
# 允许循环添加。
# f. 请将字符串s = "qwert"的每⼀个元素添加到列表li中,⼀⾏代码实现,不允许循
# 环添加。
# g. 请删除列表中的元素"ritian",并输出添加后的列表
# h. 请删除列表中的第2个元素,并输出删除的元素和删除元素后的列表
# i. 请删除列表中的第2⾄4个元素,并输出删除元素后的列表
# j. 请将列表所有得元素反转,并输出反转后的列表
# k. 请计算出"alex"元素在列表li中出现的次数,并输出该次数
# li = ["alex", "WuSir", "ritian", "barry", "wenzhou"]
# print(len(li))
# li.append('seven')
# print(li)
# li.insert(0,'Tony')
# print(li)
# li.insert(1,'Kelly')
# print(li)
# l2=[1,"a",3,4,"heart"]
# li.extend(l2)
# print(li)
# s = "qwert"
# li.extend(s)
# print(li)
# del li[2]
# print(li)
# li.pop(1)
# print(li.pop(1))
# print(li)
# del li[1:4]
# print(li)
# li.reverse()
# print(li)
# print(li.count('alex'))
# 写代码,有如下列表,利⽤切⽚实现每⼀个功能
# li = [1, 3, 2, "a", 4, "b", 5,"c"]
# a. 通过对li列表的切⽚形成新的列表l1,l1 = [1,3,2]
# b. 通过对li列表的切⽚形成新的列表l2,l2 = ["a",4,"b"]
# c. 通过对li列表的切⽚形成新的列表l3,l3 = ["1,2,4,5]
# d. 通过对li列表的切⽚形成新的列表l4,l4 = [3,"a","b"]
# e. 通过对li列表的切⽚形成新的列表l5,l5 = ["c"]
# f. 通过对li列表的切⽚形成新的列表l6,l6 = ["b","a",3]
# li = [1, 3, 2, "a", 4, "b", 5,"c"]
# # del li[3::1]
# # print(li)
# # del li[1::2]
# # print(li)
# # del li[:6:2]
# # print(li)
# # del li[:7:1]
# # print(li)
# print(li[-3::-2])
# lis = [2, 3, "k", ["qwe", 20, ["k1", ["tt", 3, "1"]], 89], "ab", "adv"]
# a. 将列表lis中的"tt"变成⼤写(⽤两种⽅式)。
# b. 将列表中的数字3变成字符串"100"(⽤两种⽅式)。
# c. 将列表中的字符串"1"变成数字101(⽤两种⽅式)。
# lis = [2, 3, "k", ["qwe", 20, ["k1", ["tt", 3, "1"]], 89], "ab", "adv"]
# lis[3][2][1][0]=lis[3][2][1][0].upper()
# print(lis)
# lis[1]=100
# lis[3][2][1][1]=100 还有一种
# print(lis)
# lis[3][2][1][2]=101
# print(lis)
# li = ["alex", "wusir", "taibai"]
# 利⽤下划线将列表的每⼀个元素拼接成字符串"alex_wusir_taibai"
# li = ["alex", "wusir", "taibai"]
# l1='_'.join(li)
# print(l1)
#
# 利⽤for循环和range打印出下⾯列表的索引。
# li = ["alex", "WuSir", "ritian", "barry", "wenzhou"]
# for i in range(len(li)):
# print(i)
# for i in range(len(li)):
# print(i)
# for i in range(len(li)):
# print(i)
# 利⽤for循环和range找出100以内所有的偶数并将这些偶数插⼊到⼀个新列表中
# list=[]
# for i in range(100):
# if i % 2 == 0:
# list.append(i)
# print(list)
# for i in range (2,100,2):
# list.append(i)
# print(list)
# 利⽤for循环和range从100~1,倒序打印
# for i in range(100,1,-1):
# print(i)
#-----------------------------------------------------------------------
# 利⽤for循环和range从100~10,倒序将所有的偶数添加到⼀个新列表中,然后对列
# # 表的元素进⾏筛选,将能被4整除的数留下来。
# list = [] #先定义一个空列表 用for循环 遍历 100~10的偶数
# # 既然是偶数 可以用加步长的方式解决这个问题
# #再用i 取4的倍数 将满足条件的 增加到列表中
# # list1=[]
# for i in range(100,10,-2):
# if i % 4 == 0:
# list.append(i)
# print(list)
# # --------------------------------------------------------------
# 利⽤for循环和range,将1-30的数字⼀次添加到⼀个列表中,并循环这个列表,将
# 能被3整除的数改成*
# list = []
# list1 = []
# for i in range(1,31,1):
# list.append(i)
# if i % 3 != 0:
# list1.append(i)
# else:i = '*'
# list1.append(i)
# print(list1)
#------------------------------------------------------------
# li=[]
# index=0 先定义一个空列表 先定义一个空列表,及index
# 在30以内遍历,遍历到的数据
# 添加到空列表中, 若 遍历到
# 的数字取3等于0.则视为3的倍
# 数,将index替换为星号,并
# 每次自加一。
# for i in range(1,31,1):
# li.append(i)
# for i in li:
# if i % 3==0:
# li[index]='*'
# index=index+1
# print(li)
# ----------------------------------------------------------
# lst = []
# for x in range(1,31):
# lst.append(x)
#
# index = 0
# while index < len(lst): # while循环做法
# if lst[index] % 3 == 0:
# lst[index] = '*'
# index += 1
#
# print(lst)
# -----------------------------------------------------------
# 查找列表li中的元素,移除每个元素的空格,并找出以"A"或者"a"开头,并以"c"结尾
# 的所有元素,并添加到⼀个新列表中,最后循环打印这个新列表。
# li = ["TaiBai ", "alexC", "AbC ", "egon", " riTiAn", "WuSir", " aqc"] 先用 for...in 取出元素
# li = ["TaiBai ", "alexC", "AbC ", "egon", " riTiAn", "WuSir", " aqc",]
# # lst = []
# # for x in li:
# # x = x.strip()
# # if (x.startswith('A') or x.startswith('a')) and x.endswith('c'):
# # lst.append(x)
# # for x in lst:
# # print(x,end=' ')
# #
# # lst=[]
# # for i in li:
# # i=i.strip()
# # if (i.startswith('A')or i.startswith('a')) and i.endswith('c'):
# # lst.append(i)
# # for i in lst:
# # print(i)
#
# lst = []
# for i in li:
# i=i.strip()
# if (i.startswith('A') or i.startswith('a') ) and i.endswith('c'):
# lst.append(i)
# if i in lst:
# print(i)
#先定义个空列表 给变量 lst ; 用for循环,若 i 在 列表li里:题中要求元素去空格,所以 将去掉空格的i 重新赋值给 i
#此时 得到的 i 是去掉空格的;用if 判断 若 以A或a 并以c开头的元素 添加的一个新列表中 ;若 遍历到的 i 在这个列表中,输出即可。
# list = [] 创建一个新列表
# for i in li : 取出的元素赋值一个变量‘J’并去空格
# j = i.strip() 判断条件:因为是以C为结尾的所有元素 所有是True
# if j.endswith('c')and j.startswith('A')or j.startswith('a') : 所以‘c’ 在前 ....
# # print(j)
# list.append(j)
# print(list)
# li = ["TaiBai ", "alexC", "AbC ", "egon", " riTiAn", "WuSir", " aqc"]
# list=[]
# for i in li:
# j = i.strip()
# if j.endswith('c')and j.startswith('A')or j.startswith('a'):
# list.append(j)
# print(list)
# list=[]
# for i in li:
# j = i.strip()
# if j.endswith('c')and j.startswith('A')or j.startswith('a'):
# list.append(j)
# print(j)
# 开发敏感词语过滤程序,提示⽤户输⼊评论内容,如果⽤户输⼊的内容中包含特殊的
# 字符:
# 敏感词列表 li = ["苍⽼师", "东京热", "武藤兰", "波多野结⾐"]
# 则将⽤户输⼊的内容中的敏感词汇替换成等⻓度的*(苍⽼师就替换***),并添加到⼀
# 个列表中;如果⽤户输⼊的内容没有敏感词汇,则直接添加到上述的列表中。
# li = ["苍老师", "东京热", "武藤兰", "波多野结⾐"]
# comment_list=[]
# comment=input('请输入你的评论:')
# for name in li:
# if name in comment:
# comment=comment.replace(name,len(name)*'*')
# comment_list.append(comment)
# print(comment_list)
# li = ["苍老师", "东京热", "武藤兰", "波多野结⾐"]
# comment_list=[]
# comment=input('请输入你的评论:')
# for name in li:
# if name in comment:
# comment=comment.replace(name,len(name)*'*')
# comment_list.append(comment)
# print(comment_list)
#
#
# li= ["苍老师", "东京热", "武藤兰", "波多野结衣"]
# l1=[]
# comment = input('请输入评论:')
# for i in li:
# if i in comment:
# comment=comment.replace(i,len(i))
# l1.append(comment)
# print(li)
# 利⽤下划线将列表的每⼀个元素拼接成字符串"alex_wusir_taibai"
# li = ["alex", "wusir", "taibai"]
# l1='_'.join(li)
# print(l1)
#
|
[
"1071373366@qq.com"
] |
1071373366@qq.com
|
9d75f4f664eb0f368c443272ef6b096804e26e20
|
18aee5d93a63eab684fe69e3aa0abd1372dd5d08
|
/test/legacy_test/test_numel_op.py
|
5c8c477877c3261829732e495a4f3679b18d2316
|
[
"Apache-2.0"
] |
permissive
|
Shixiaowei02/Paddle
|
8d049f4f29e281de2fb1ffcd143997c88078eadb
|
3d4d995f26c48f7792b325806ec3d110fc59f6fc
|
refs/heads/develop
| 2023-06-26T06:25:48.074273
| 2023-06-14T06:40:21
| 2023-06-14T06:40:21
| 174,320,213
| 2
| 1
|
Apache-2.0
| 2022-12-28T05:14:30
| 2019-03-07T10:09:34
|
C++
|
UTF-8
|
Python
| false
| false
| 4,760
|
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16
import paddle
from paddle import fluid
from paddle.fluid import core
class TestNumelOp(OpTest):
    """OpTest for paddle's "size" op: Out is the element count of Input."""

    def setUp(self):
        self.op_type = "size"
        self.python_api = paddle.numel
        self.init()
        x = np.random.random(self.shape).astype(self.dtype)
        self.inputs = {
            'Input': x,
        }
        self.outputs = {'Out': np.array(np.size(x))}

    def test_check_output(self):
        self.check_output()

    def init(self):
        # Subclasses override this hook to vary shape/dtype.
        self.shape = (6, 56, 8, 55)
        self.dtype = np.float64
class TestNumelOp1(TestNumelOp):
    # Case: 2-D float64 input.
    def init(self):
        self.shape = (11, 66)
        self.dtype = np.float64
class TestNumelOp2(TestNumelOp):
    # Case: zero-size (empty) float64 input.
    def init(self):
        self.shape = (0,)
        self.dtype = np.float64
class TestNumelOpFP16(TestNumelOp):
    # Case: 4-D float16 input.
    def init(self):
        self.dtype = np.float16
        self.shape = (6, 56, 8, 55)
class TestNumelOp1FP16(TestNumelOp):
    # Case: 2-D float16 input.
    def init(self):
        self.dtype = np.float16
        self.shape = (11, 66)
class TestNumelOp2FP16(TestNumelOp):
    # Case: zero-size (empty) float16 input.
    def init(self):
        self.dtype = np.float16
        self.shape = (0,)
@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA and do not support bfloat16",
)
class TestNumelOpBF16(OpTest):
    """bfloat16 variant: float32 data converted to uint16 (bf16) storage."""

    def setUp(self):
        self.op_type = "size"
        self.python_api = paddle.numel
        self.dtype = np.uint16
        self.init()
        x = np.random.random(self.shape).astype(np.float32)
        self.inputs = {'Input': convert_float_to_uint16(x)}
        self.outputs = {'Out': np.array(np.size(x))}

    def test_check_output(self):
        # bf16 requires a CUDA place (the skipIf above guards availability).
        place = paddle.CUDAPlace(0)
        self.check_output_with_place(place)

    def init(self):
        self.shape = (6, 56, 8, 55)
class TestNumelOp1BF16(TestNumelOpBF16):
    # Case: 2-D bfloat16 input.
    def init(self):
        self.shape = (11, 66)
class TestNumelAPI(unittest.TestCase):
    """API-level tests for paddle.numel: static graph, dygraph, and errors."""

    def test_numel_static(self):
        # Static graph: numel of int32 tensors matches np.size, dtype int64.
        main_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(main_program, startup_program):
            shape1 = [2, 1, 4, 5]
            shape2 = [1, 4, 5]
            x_1 = paddle.static.data(shape=shape1, dtype='int32', name='x_1')
            x_2 = paddle.static.data(shape=shape2, dtype='int32', name='x_2')
            input_1 = np.random.random(shape1).astype("int32")
            input_2 = np.random.random(shape2).astype("int32")
            out_1 = paddle.numel(x_1)
            out_2 = paddle.numel(x_2)
            exe = paddle.static.Executor(place=paddle.CPUPlace())
            res_1, res_2 = exe.run(
                feed={
                    "x_1": input_1,
                    "x_2": input_2,
                },
                fetch_list=[out_1, out_2],
            )
            assert np.array_equal(
                res_1, np.array(np.size(input_1)).astype("int64")
            )
            assert np.array_equal(
                res_2, np.array(np.size(input_2)).astype("int64")
            )

    def test_numel_imperative(self):
        # Dygraph: the returned tensor's single value equals np.size.
        paddle.disable_static(paddle.CPUPlace())
        input_1 = np.random.random([2, 1, 4, 5]).astype("int32")
        input_2 = np.random.random([1, 4, 5]).astype("int32")
        x_1 = paddle.to_tensor(input_1)
        x_2 = paddle.to_tensor(input_2)
        out_1 = paddle.numel(x_1)
        out_2 = paddle.numel(x_2)
        assert np.array_equal(out_1.numpy().item(0), np.size(input_1))
        assert np.array_equal(out_2.numpy().item(0), np.size(input_2))
        paddle.enable_static()

    def test_error(self):
        # Passing a raw ndarray (not a Tensor/Variable) must raise TypeError.
        main_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(main_program, startup_program):

            def test_x_type():
                shape = [1, 4, 5]
                input_1 = np.random.random(shape).astype("int32")
                out_1 = paddle.numel(input_1)

            self.assertRaises(TypeError, test_x_type)
if __name__ == '__main__':
    # Run the "size" op test suite when executed directly.
    unittest.main()
|
[
"noreply@github.com"
] |
Shixiaowei02.noreply@github.com
|
96c96b1cbf1f744df9693030c13335e0783a3353
|
1af6958461af6257264ace2a6d13385b47104606
|
/pyscf/semiempirical/umindo3_grad.py
|
f9223d6afa0b11f546adc5c837174e722f4b0638
|
[
"Apache-2.0"
] |
permissive
|
tmash/pyscf
|
ac9a86c078170044b52be71e5d00fa5f680f55af
|
89c101c1c963e8247808635c61cd165bffab42d6
|
refs/heads/master
| 2020-12-04T04:41:23.456744
| 2020-01-02T18:05:16
| 2020-01-02T18:05:16
| 231,615,690
| 1
| 0
|
Apache-2.0
| 2020-01-03T15:33:33
| 2020-01-03T15:33:32
| null |
UTF-8
|
Python
| false
| false
| 2,099
|
py
|
#!/usr/bin/env python
import copy
import numpy
from pyscf import lib
from pyscf import gto
from pyscf import scf
from pyscf.grad import uhf as uhf_grad
from pyscf.data.elements import _symbol
from pyscf.semiempirical import mopac_param
from pyscf.semiempirical import mindo3
from pyscf.semiempirical import rmindo3_grad
class Gradients(uhf_grad.Gradients):
    """Nuclear gradients for unrestricted MINDO/3, built on uhf_grad.

    Delegates the semi-empirical pieces (hcore derivatives, J/K, nuclear
    repulsion gradient) to the restricted-MINDO/3 helpers in rmindo3_grad.
    """

    # The core Hamiltonian derivative comes from hcore_generator instead of
    # a get_hcore method.
    get_hcore = None
    hcore_generator = rmindo3_grad.hcore_generator

    def get_ovlp(self, mol=None):
        # Overlap-derivative contribution is zero in this model: return an
        # all-zero (3, nao, nao) array.
        nao = self.base._mindo_mol.nao
        return numpy.zeros((3, nao, nao))

    def get_jk(self, mol=None, dm=None, hermi=0):
        # J/K derivative integrals evaluated on the MINDO/3 molecule.
        if dm is None: dm = self.base.make_rdm1()
        vj, vk = rmindo3_grad.get_jk(self.base._mindo_mol, dm)
        return vj, vk

    def grad_nuc(self, mol=None, atmlst=None):
        # Note: the mol argument is ignored; the MINDO/3 molecule is used.
        mol = self.base._mindo_mol
        return rmindo3_grad.grad_nuc(mol, atmlst)

    def grad_elec(self, mo_energy=None, mo_coeff=None, mo_occ=None, atmlst=None):
        # Temporarily swap self.mol for the MINDO/3 molecule so the generic
        # UHF gradient routine operates on the semi-empirical basis.
        with lib.temporary_env(self, mol=self.base._mindo_mol):
            return uhf_grad.grad_elec(self, mo_energy, mo_coeff, mo_occ,
                                      atmlst)

Grad = Gradients
if __name__ == '__main__':
    # Finite-difference sanity check of the analytic gradient for triplet
    # (spin=2) water.
    from pyscf.data.nist import HARTREE2EV
    mol = gto.Mole()
    mol.atom = [
        ['O' , (0. , 0. , 0.)],
        [1 , (0. , -0.757 , 0.587)],
        [1 , (0. , 0.757 , 0.587)] ]
    mol.spin = 2
    mol.verbose = 0
    mol.build()
    mfs = mindo3.UMINDO3(mol).set(conv_tol=1e-8).as_scanner()
    mfs(mol)
    # Deviation from a reference total energy (given in eV).
    print(mfs.e_tot - -336.25080977434175/HARTREE2EV)
    # Two geometries displacing O along z by +/-0.0001 for the difference.
    mol1 = mol.copy()
    mol1.set_geom_([['O' , (0. , 0. , 0.0001)],
                    [1 , (0. , -0.757 , 0.587)],
                    [1 , (0. , 0.757 , 0.587)]])
    mol2 = mol.copy()
    mindo_mol1 = mindo3._make_mindo_mol(mol1)  # NOTE(review): unused
    mol2.set_geom_([['O' , (0. , 0. ,-0.0001)],
                    [1 , (0. , -0.757 , 0.587)],
                    [1 , (0. , 0.757 , 0.587)]])
    mindo_mol2 = mindo3._make_mindo_mol(mol2)  # NOTE(review): unused
    g1 = mfs.nuc_grad_method().kernel()
    e1 = mfs(mol1)
    e2 = mfs(mol2)
    # Central difference (e1 - e2) / (2 * 0.0001) vs the analytic z-component
    # of the gradient on atom 0; should print a value near zero.
    print(abs((e1-e2)/0.0002 - g1[0,2]))
|
[
"osirpt.sun@gmail.com"
] |
osirpt.sun@gmail.com
|
510ae73b72ae1dfb9680491782b111449bfd44ff
|
d7b9b490c954c7a9160b69f8ce2c907ef4681ecb
|
/sponsors/migrations/0006_auto_20201016_1517.py
|
ff9d137547f788af7cd2e185b1028bb98733f640
|
[
"Apache-2.0"
] |
permissive
|
python/pythondotorg
|
00db93a4b1789a4d438806d106d9cee3349ad78c
|
c4ee749942227ca75c8e670546afe67232d647b2
|
refs/heads/main
| 2023-08-28T20:04:24.735314
| 2023-08-03T19:12:29
| 2023-08-03T19:12:29
| 6,127,047
| 1,131
| 646
|
Apache-2.0
| 2023-08-24T15:57:04
| 2012-10-08T16:00:15
|
Python
|
UTF-8
|
Python
| false
| false
| 2,459
|
py
|
# Generated by Django 2.0.13 on 2020-10-16 15:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("sponsors", "0005_auto_20201015_0908"),
]
operations = [
migrations.RenameModel(
old_name="SponsorshipLevel",
new_name="SponsorshipPackage",
),
migrations.RemoveField(
model_name="sponsorshipbenefit",
name="levels",
),
migrations.RemoveField(
model_name="sponsorshipbenefit",
name="minimum_level",
),
migrations.AddField(
model_name="sponsorshipbenefit",
name="new",
field=models.BooleanField(
default=False,
help_text='If selected, display a "New This Year" badge along side the benefit.',
verbose_name="New Benefit",
),
),
migrations.AddField(
model_name="sponsorshipbenefit",
name="package_only",
field=models.BooleanField(
default=False,
help_text="If a benefit is only available via a sponsorship package, select this option.",
verbose_name="Package Only Benefit",
),
),
migrations.AddField(
model_name="sponsorshipbenefit",
name="packages",
field=models.ManyToManyField(
help_text="What sponsorship packages this benefit is included in.",
related_name="benefits",
to="sponsors.SponsorshipPackage",
verbose_name="Sponsorship Packages",
),
),
migrations.AddField(
model_name="sponsorshipbenefit",
name="soft_capacity",
field=models.BooleanField(
default=False,
help_text="If a benefit's capacity is flexible, select this option.",
verbose_name="Soft Capacity",
),
),
migrations.AlterField(
model_name="sponsorshipbenefit",
name="internal_value",
field=models.PositiveIntegerField(
blank=True,
help_text="Value used internally to calculate sponsorship value when applicants construct their own sponsorship packages.",
null=True,
verbose_name="Internal Value",
),
),
]
|
[
"noreply@github.com"
] |
python.noreply@github.com
|
0510146ef6fac9025dff91e4eeac1220c8281527
|
bca9c2fa3c4c3d06dd612280ce39090a9dfab9bd
|
/neekanee/job_scrapers/plugins/com/link/scotiabank.py
|
cb1391598e9dac9c05f05ebfd3b07cae11ae06a6
|
[] |
no_license
|
thayton/neekanee
|
0890dd5e5cf5bf855d4867ae02de6554291dc349
|
f2b2a13e584469d982f7cc20b49a9b19fed8942d
|
refs/heads/master
| 2021-03-27T11:10:07.633264
| 2018-07-13T14:19:30
| 2018-07-13T14:19:30
| 11,584,212
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,977
|
py
|
import re, urlparse, mechanize
from neekanee.jobscrapers.jobscraper import JobScraper
from neekanee.htmlparse.soupify import soupify, get_all_text
from neekanee_solr.models import *
COMPANY = {
'name': 'Scotiabank',
'hq': 'Toronto, Canada',
'home_page_url': 'http://www.scotiabank.com',
'jobs_page_url': 'http://jobs.scotiabank.com/careers/',
'empcnt': [10001]
}
class ScotiabankJobScraper(JobScraper):
    """Job scraper for Scotiabank's careers site (Python 2 / mechanize)."""

    def __init__(self):
        super(ScotiabankJobScraper, self).__init__(COMPANY)

    def scrape_job_links(self, url):
        """Walk the paginated listing at *url* and return Job objects.

        Descriptions are NOT filled in here; see scrape_jobs(). Rows whose
        location cannot be parsed are skipped.
        """
        jobs = []
        self.br.open(url)
        while True:
            s = soupify(self.br.response().read())
            x = {'class': 'jobTitle'}
            for td in s.findAll('td', attrs=x):
                tr = td.findParent('tr')
                l = tr.find('td', attrs={'class': 'location'})
                l = self.parse_location(l.text)
                if not l:
                    continue
                job = Job(company=self.company)
                job.title = td.text
                job.url = urlparse.urljoin(self.br.geturl(), td.a['href'])
                job.location = l
                jobs.append(job)
            try:
                self.br.follow_link(self.br.find_link(text='Next page'))
            except mechanize.LinkNotFoundError:
                break
        return jobs

    def scrape_jobs(self):
        """Scrape listings, prune stale entries, and save descriptions for new jobs."""
        job_list = self.scrape_job_links(self.company.jobs_page_url)
        self.prune_unlisted_jobs(job_list)
        new_jobs = self.new_job_listings(job_list)
        for job in new_jobs:
            self.br.open(job.url)
            s = soupify(self.br.response().read())
            x = {'class': 'job-details'}
            # Bug fix: the attrs filter `x` was built but never passed to
            # find(), so the description was taken from the page's FIRST
            # <div> instead of the job-details container.
            d = s.find('div', attrs=x)
            job.desc = get_all_text(d)
            job.save()
def get_scraper():
    """Factory entry point used by the scraping framework."""
    return ScotiabankJobScraper()


if __name__ == '__main__':
    scraper = get_scraper()
    scraper.scrape_jobs()
|
[
"thayton@neekanee.com"
] |
thayton@neekanee.com
|
21f276b882cd9006b94371de31160164730f6994
|
99f851bc034bdedd61ff673b4ca1d294e9451d04
|
/iprPy/records/LAMMPS-potential.py
|
bbade6130a0a6bfafae083165ae1d8893d518f71
|
[] |
no_license
|
njisrawi/iprPy
|
c583ba92b2537ce449c3fb6a832a06036dc1918f
|
5ce6c14b1cc889069495a2f29db19d5d78e29ede
|
refs/heads/master
| 2021-01-20T09:00:24.709510
| 2017-01-25T20:28:54
| 2017-01-25T20:28:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,032
|
py
|
from DataModelDict import DataModelDict as DM
import atomman as am
import atomman.unitconvert as uc
import numpy as np
def schema():
    """Return the absolute path of the XSD schema for LAMMPS-potential records.

    The schema file is expected to sit in the same directory as this module.
    """
    # Bug fix: the module's top-level imports never bring in `os`, so this
    # function raised NameError when called. Import locally to avoid touching
    # the file header.
    import os
    module_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(module_dir, 'record-LAMMPS-potential.xsd')
def todict(record):
    """Flatten a LAMMPS-potential data-model record into a plain dict.

    Parameters
    ----------
    record
        Record content accepted by DataModelDict (XML/JSON text or mapping).

    Returns
    -------
    dict
        Scalar metadata ('pot_key', 'pot_id', 'units', 'atom_style',
        'pair_style') plus parallel per-atom lists ('elements', 'masses',
        'symbols', 'charge'); missing per-atom fields are filled with np.nan.
    """
    model = DM(record)
    pot = model['LAMMPS-potential']
    params = {}
    params['pot_key'] = pot['potential']['key']
    params['pot_id'] = pot['potential']['id']
    params['units'] = pot['units']
    params['atom_style'] = pot['atom_style']
    params['pair_style'] = pot['pair_style']['type']
    params['elements'] = []
    params['masses'] = []
    params['symbols'] = []
    params['charge'] = []
    # iteraslist yields each 'atom' entry whether the record has one or many
    for atom in pot.iteraslist('atom'):
        params['elements'].append(atom.get('element', np.nan))
        params['masses'].append(atom.get('mass', np.nan))
        params['symbols'].append(atom.get('symbol', np.nan))
        params['charge'].append(atom.get('charge', np.nan))
    return params
|
[
"lucas.hale@nist.gov"
] |
lucas.hale@nist.gov
|
eab0f37861eb12d0b3543cafdd7136150516b581
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2285/60598/260635.py
|
6ae600bba2a2f7ddba8115e36439a621e275c9fc
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
# For each test case, print every maximal strictly-increasing run of prices
# as "(start end)" index pairs, or a no-profit message when none exist.
num_cases = int(input())
for _ in range(num_cases):
    n = int(input())
    prices = list(map(int, input().split(" ")))
    intervals = []
    i = 0
    while i < n - 1:
        run_start = i
        # advance to the end of the strictly increasing run
        while i < n - 1 and prices[i] < prices[i + 1]:
            i += 1
        if run_start != i:
            intervals.append("(" + str(run_start) + " " + str(i) + ")")
        i += 1
    if intervals:
        print(" ".join(intervals))
    else:
        print("没有利润")
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
d3f26c05d3402fa44b20bfa369d5f437432ac93a
|
ce4a7ef82cf2146647714c7887c581bc0971f83e
|
/account/migrations/0001_create_sites.py
|
ceafd6dc51e23aae38c15ae11ea569fd78f6ee07
|
[] |
no_license
|
fbenke/BeamRemit
|
d15d8467c17ca15a1afc10c6bc23d756e3b13f75
|
2b894f56e3b1711334115085b6cd9379bd5bf1aa
|
refs/heads/master
| 2021-01-10T12:12:16.289891
| 2014-12-05T11:36:45
| 2014-12-05T11:36:45
| 52,040,642
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,098
|
py
|
from south.v2 import DataMigration
from django.conf import settings
class Migration(DataMigration):
    """South data migration: replace all Site rows with the single Beam site.

    Forward installs Site id=0 whose domain comes from the environment-specific
    settings mapping; backward restores Django's stock example.com site.
    """

    def forwards(self, orm):
        # Wipe existing sites so the fixed-id insert below cannot clash.
        orm['sites.site'].objects.all().delete()
        site = orm['sites.site'].objects.create(
            id=0,
            domain=settings.ENV_SITE_MAPPING[settings.ENV][settings.SITE_USER],
            name='Beam'
        )
        site.save()

    def backwards(self, orm):
        # Revert to the default Django site definition.
        orm['sites.site'].objects.all().delete()
        site = orm['sites.site'].objects.create(
            id=0,
            domain='example.com',
            name='example.com'
        )
        site.save()

    # Frozen ORM description of the sites.Site model used by South.
    models = {
        u'sites.site': {
            'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }
    complete_apps = ['sites']
    symmetrical = True
|
[
"vagrant@precise64.(none)"
] |
vagrant@precise64.(none)
|
8d48d5283041505a2efe6dd502d7cefd20c39f93
|
6baf192a289f602407044e3b2100aeffc60e3897
|
/microblog.py
|
3d8c0872b222e8d7023fe352aaa3889b9d5f61fd
|
[] |
no_license
|
HaoREN211/hao_read
|
798adcb0c6bdd2372b050112e76b858e3a212276
|
ed126ffb424f4e128be02cbc06807f1e5c863a69
|
refs/heads/master
| 2023-05-12T18:20:20.315328
| 2020-02-03T14:23:43
| 2020-02-03T14:23:43
| 236,145,154
| 0
| 0
| null | 2023-05-01T21:20:55
| 2020-01-25T08:38:31
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
# 作者:hao.ren3
# 时间:2019/11/5 14:34
# IDE:PyCharm
from flask import send_from_directory
from app import create_app, db
from app.models.User import User
from app.models.Post import Post
from os.path import join
app = create_app()
# Serve the site icon (original comment, translated: add an icon for the website).
def favicon():
    """Return favicon.ico from the application's static directory."""
    static_dir = join(app.root_path, 'static')
    return send_from_directory(static_dir, 'favicon.ico',
                               mimetype='image/vnd.microsoft.icon')


app.add_url_rule('/favicon.ico', view_func=favicon)
@app.shell_context_processor
def make_shell_context():
    """Expose the db handle and core models in `flask shell` sessions."""
    return dict(db=db, User=User, Post=Post)
@app.template_filter('md')
def markdown_html(txt):
    """Jinja filter `md`: render Markdown source *txt* to an HTML string."""
    from markdown import markdown
    # Same extension set as before: tables, fenced code, TOC, wiki links, etc.
    enabled_extensions = [
        'markdown.extensions.extra',
        'markdown.extensions.fenced_code',
        'markdown.extensions.admonition',
        'markdown.extensions.codehilite',
        'markdown.extensions.meta',
        'markdown.extensions.nl2br',
        'markdown.extensions.sane_lists',
        'markdown.extensions.smarty',
        'markdown.extensions.toc',
        'markdown.extensions.wikilinks',
        'markdown.extensions.tables',
    ]
    return markdown(txt, extensions=enabled_extensions)
if __name__ == '__main__':
app.run(host="0.0.0.0", port=3000)
|
[
"renhaojules@163.com"
] |
renhaojules@163.com
|
4f36cb2a52bc750cf728f622b4d832bbc4cfdf9b
|
70cc96b55c202245691463ee59e42e9801cde858
|
/python/rtypes/types/subset.py
|
5638f2520dcf59172857df76e81b5dcdca8f347a
|
[] |
no_license
|
rezafuru/spacetime
|
ad1da33fbcf9c358cf1b379507f0178155354f92
|
3b4b58775d41c75f103278c5e1553e5b36542d72
|
refs/heads/master
| 2023-03-08T09:01:48.286203
| 2019-10-14T18:07:15
| 2019-10-14T18:07:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 710
|
py
|
from rtypes.attributes import PredicateFunction
from rtypes.metadata import SubsetMetadata
from rtypes.utils.enums import Rtype
def set_metadata(cls, parent):
    """Attach subset metadata to *cls*, marking it a subset type of *parent*.

    Copies the parent's table reference, locates the class's predicate
    function (if any), and installs a SubsetMetadata object as __r_meta__.

    Raises:
        TypeError: if *cls* already carries __r_meta__ (double registration).
    """
    cls.__r_table__ = parent.__r_table__
    pred_func = None
    for attr in dir(cls):
        candidate = getattr(cls, attr)
        if isinstance(candidate, PredicateFunction):
            pred_func = candidate
    meta = SubsetMetadata(Rtype.SUBSET, cls, parent, pred_func)
    if hasattr(cls, "__r_meta__"):
        # Bug fix: the original constructed this exception but never raised
        # it, silently skipping re-registration instead of failing loudly.
        raise TypeError("How am I here?")
    cls.__r_meta__ = meta
class subset(object):
    """Class decorator declaring the decorated class a subset of *parent_cls*.

    Usage::

        @subset(Parent)
        class Child: ...
    """

    def __init__(self, parent_cls):
        # Remember the parent until the decorated class is supplied.
        self.parent = parent_cls

    def __call__(self, cls):
        set_metadata(cls, self.parent)
        return cls
|
[
"ra.rohan@gmail.com"
] |
ra.rohan@gmail.com
|
6e8292163311f9d2d6a1c5cb60d88ddcffd2cf58
|
015383d460fa4321391d964c4f65c4d0c044dcc1
|
/.venv/lib/python3.7/site-packages/faker/providers/person/dk_DK/__init__.py
|
2b739525bda88b354c41c2ee0642b1e1f9a3a170
|
[
"Unlicense"
] |
permissive
|
kobbyrythm/temperature_stories_django
|
8f400c8d3c8190b0e83f7bcfece930d696c4afe9
|
552d39f1f6f3fc1f0a2f7308a7da61bf1b9b3de3
|
refs/heads/main
| 2023-07-03T21:28:46.020709
| 2021-07-20T09:44:29
| 2021-07-20T09:44:29
| 468,728,039
| 3
| 0
|
Unlicense
| 2022-03-11T11:41:47
| 2022-03-11T11:41:46
| null |
UTF-8
|
Python
| false
| false
| 7,565
|
py
|
from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats = (
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}-{{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}-{{last_name}}',
'{{prefix_male}} {{first_name_male}} {{last_name}}',
'{{prefix_female}} {{first_name_female}} {{last_name}}',
'{{prefix_male}} {{first_name_male}} {{last_name}}',
'{{prefix_female}} {{first_name_female}} {{last_name}}',
)
first_names_male = (
'Adam', 'Albert', 'Aksel', 'Alex', 'Alexander', 'Alf', 'Allan',
'Alvin', 'Anders', 'André', 'Andreas', 'Anton', 'Arne', 'Asger',
'ugust', 'Benjamin', 'Benny', 'Bent', 'Bertil', 'Bertram', 'Birger',
'Bjarne', 'Bo', 'Bob', 'Bobby', 'Boe', 'Boris', 'Borris',
'Brian', 'Bruno', 'Bøje', 'Børge', 'Carl', 'Carlo', 'Carsten',
'Casper', 'Christian', 'Christoffer', 'Christopher', 'Claus', 'Clavs', 'Curt',
'Dan', 'Daniel', 'Danny', 'David', 'Dennis', 'Ebbe', 'Einar',
'Einer', 'Elias', 'Emil', 'Eric', 'Erik', 'Erling', 'Ernst',
'Esben', 'Finn', 'Flemming', 'Frank', 'Frans', 'Freddy', 'Frede',
'Frederik', 'Frode', 'Georg', 'George', 'Gert', 'Gorm', 'Gunnar',
'Gunner', 'Gustav', 'Hans', 'Helge', 'Henrik', 'Henry', 'Herbert',
'Herman', 'Hjalte', 'Holger', 'Hugo', 'Ib', 'Ivan', 'Iver',
'Jack', 'Jacob', 'Jakob', 'James', 'Jan', 'Jano', 'Jarl',
'Jean', 'Jens', 'Jeppe', 'Jesper', 'Jim', 'Jimmy', 'Joachim',
'Joakim', 'Johan', 'Johannes', 'John', 'Johnnie', 'Johnny', 'Jon',
'Jonas', 'Jonathan', 'Julius', 'Jørgen', 'Karl', 'Karlo', 'Karsten',
'Kaspar', 'Kasper', 'Keld', 'Ken', 'Kenn', 'Kenneth', 'Kenny',
'Kent', 'Kim', 'Kjeld', 'Klaus', 'Klavs', 'Kristian', 'Kurt',
'Kåre', 'Lars', 'Lasse', 'Laurits', 'Laus', 'Laust', 'Leif',
'Lennarth', 'Lucas', 'Ludvig', 'Mads', 'Magnus', 'Malthe', 'Marcus',
'Marius', 'Mark', 'Martin', 'Mathias', 'Matthias', 'Michael', 'Mik',
'Mikael', 'Mike', 'Mikkel', 'Mogens', 'Morten', 'Nick', 'Nicklas',
'Nicolai', 'Nicolaj', 'Niels', 'Nikolai', 'Nikolaj', 'Nils', 'Noah',
'Ole', 'Olfert', 'Oliver', 'Oscar', 'Oskar', 'Osvald', 'Otto',
'Ove', 'Palle', 'Patrick', 'Paw', 'Peder', 'Per', 'Pete',
'Peter', 'Paul', 'Philip', 'Poul', 'Preben', 'Ragnar', 'Ragner',
'Rasmus', 'René', 'Richard', 'Richardt', 'Robert', 'Robin', 'Rolf',
'Ron', 'Ronni', 'Ronnie', 'Ronny', 'Ruben', 'Rune', 'Sam',
'Sebastian', 'Silas', 'Simon', 'Simon', 'Sonny', 'Steen', 'Stefan',
'Sten', 'Stephan', 'Steve', 'Steven', 'Stig', 'Svenning', 'Søren',
'Tage', 'Tejs', 'Thomas', 'Tim', 'Timmy', 'Tobias', 'Tom',
'Tommy', 'Tonny', 'Torben', 'Troels', 'Uffe', 'Ulf', 'Ulrik',
'Vagn', 'Valdemar', 'Verner', 'Victor', 'Villads', 'Werner', 'William',
'Yan', 'Yannick', 'Yngve', 'Zacharias', 'Ziggy', 'Øivind', 'Øjvind',
'Ørni', 'Øvli', 'Øystein', 'Øyvind', 'Åbjørn', 'Aage', 'Åge',
)
first_names_female = (
'Abelone', 'Agnes', 'Agnete', 'Alberte', 'Alma', 'Amalie', 'Amanda',
'Andrea', 'Ane', 'Anette', 'Anna', 'Anne', 'Annemette', 'Annette',
'Asta', 'Astrid', 'Benedicte', 'Benedikte', 'Bente', 'Benthe', 'Berit',
'Berta', 'Beth', 'Bettina', 'Birgit', 'Birgitte', 'Birte', 'Birthe',
'Bitten', 'Bodil', 'Britt', 'Britta', 'Camilla', 'Carina', 'Carla',
'Caroline', 'Cathrine', 'Catrine', 'Cecilie', 'Charlotte', 'Christina', 'Christine',
'Cirkeline', 'Clara', 'Connie', 'Conny', 'Dagmar', 'Dagny', 'Daniella',
'Dina', 'Ditte', 'Doris', 'Dorte', 'Dorthe', 'Edith', 'Elin',
'Elisabeth', 'Ella', 'Ellen', 'Elna', 'Else', 'Elsebeth', 'Emilie',
'Emily', 'Emma', 'Erna', 'Esmarelda', 'Ester', 'Filippa', 'Frederikke',
'Freja', 'Frida', 'Gerda', 'Gertrud', 'Gitte', 'Grete', 'Grethe',
'Gundhild', 'Gunhild', 'Gurli', 'Gyda', 'Hannah', 'Hanne', 'Heidi',
'Helen', 'Helle', 'Henriette', 'Herdis', 'Iben', 'Ida', 'Inga',
'Inge', 'Ingelise', 'Inger', 'Ingrid', 'Irma', 'Isabella', 'Jacobine',
'Jacqueline', 'Janne', 'Janni', 'Jannie', 'Jasmin', 'Jean', 'Jenny',
'Joan', 'Johanne', 'Jonna', 'Josefine', 'Josephine', 'Julie', 'Justina',
'Jytte', 'Karen', 'Karin', 'Karina', 'Karla', 'Karoline', 'Katcha',
'Katja', 'Katrine', 'Kirsten', 'Kirstin', 'Kirstine', 'Klara', 'Kristina',
'Kristine', 'Laura', 'Lea', 'Lena', 'Lene', 'Leonora', 'Line',
'Liva', 'Lona', 'Lone', 'Lotte', 'Louise', 'Lærke', 'Maiken',
'Maja', 'Majken', 'Malene', 'Malou', 'Maren', 'Margit', 'Margrethe',
'Maria', 'Marianne', 'Marie', 'Marlene', 'Mathilde', 'Maya', 'Merete',
'Merethe', 'Mette', 'Mia', 'Michala', 'Michelle', 'Mie', 'Mille',
'Mimi', 'Minna', 'Nadia', 'Naja', 'Nana', 'Nanna', 'Nanni',
'Natasha', 'Natasja', 'Nete', 'Nicoline', 'Nina', 'Nora', 'Oda',
'Odeline', 'Odette', 'Ofelia', 'Olga', 'Olivia', 'Patricia', 'Paula',
'Paulina', 'Pernille', 'Pia', 'Ragna', 'Ragnhild', 'Randi', 'Rebecca',
'Regitse', 'Regitze', 'Rikke', 'Rita', 'Ritt', 'Ronja', 'Rosa',
'Ruth', 'Sabine', 'Sandra', 'Sanne', 'Sara', 'Sarah', 'Selma',
'Signe', 'Sigrid', 'Silje', 'Sille', 'Simone', 'Sine', 'Sofia',
'Sofie', 'Solveig', 'Solvej', 'Sonja', 'Sophie', 'Stina', 'Stine',
'Susanne', 'Sussanne', 'Sussie', 'Sys', 'Sørine', 'Søs', 'Tammy',
'Tanja', 'Thea', 'Tilde', 'Tina', 'Tine', 'Tove', 'Trine',
'Ulla', 'Ulrike', 'Ursula', 'Vera', 'Victoria', 'Viola', 'Vivian',
'Weena', 'Winni', 'Winnie', 'Xenia', 'Yasmin', 'Yda', 'Yrsa',
'Yvonne', 'Zahra', 'Zara', 'Zehnia', 'Zelma', 'Zenia', 'Åse',
)
first_names = first_names_male + first_names_female
last_names = (
'Jensen', 'Nielsen', 'Hansen', 'Pedersen', 'Andersen', 'Christensen', 'Larsen',
'Sørensen', 'Rasmussen', 'Petersen', 'Jørgensen', 'Madsen', 'Kristensen', 'Olsen',
'Christiansen', 'Thomsen', 'Poulsen', 'Johansen', 'Knudsen', 'Mortensen', 'Møller',
'Jacobsen', 'Jakobsen', 'Olesen', 'Frederiksen', 'Mikkelsen', 'Henriksen', 'Laursen',
'Lund', 'Schmidt', 'Eriksen', 'Holm', 'Kristiansen', 'Clausen', 'Simonsen',
'Svendsen', 'Andreasen', 'Iversen', 'Jeppesen', 'Mogensen', 'Jespersen', 'Nissen',
'Lauridsen', 'Frandsen', 'Østergaard', 'Jepsen', 'Kjær', 'Carlsen', 'Vestergaard',
'Jessen', 'Nørgaard', 'Dahl', 'Christoffersen', 'Skov', 'Søndergaard', 'Bertelsen',
'Bruun', 'Lassen', 'Bach', 'Gregersen', 'Friis', 'Johnsen', 'Steffensen',
'Kjeldsen', 'Bech', 'Krogh', 'Lauritsen', 'Danielsen', 'Mathiesen', 'Andresen',
'Brandt', 'Winther', 'Toft', 'Ravn', 'Mathiasen', 'Dam', 'Holst',
'Nilsson', 'Lind', 'Berg', 'Schou', 'Overgaard', 'Kristoffersen', 'Schultz',
'Klausen', 'Karlsen', 'Paulsen', 'Hermansen', 'Thorsen', 'Koch', 'Thygesen',
)
prefixes_male = (
'Hr', 'Dr.', 'Prof.', 'Univ.Prof.',
)
prefixes_female = (
'Fru', 'Dr.', 'Prof.', 'Univ.Prof.',
)
|
[
"b.scharlau@abdn.ac.uk"
] |
b.scharlau@abdn.ac.uk
|
f8482f84e76853cc7b2a70e7460d1e2cd3e290db
|
6ac77834909c485686638d27c0bf41e6d1765cf7
|
/src/database/module_user.py
|
b042699456da0a2849bcae55811c5f87ed51da84
|
[] |
no_license
|
YangXinNewlife/gears
|
4144e451861efb0f3ae1d738eb5fcd6cec46a833
|
486b1ce5a7b8d8682bb1394be8f5dd6ae0fca837
|
refs/heads/master
| 2021-01-20T01:41:30.074696
| 2017-05-26T08:17:45
| 2017-05-26T08:17:45
| 89,316,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,005
|
py
|
#-*- coding:utf-8 -*-
__author__ = 'yx'
from module_base import *
import sys
reload(sys)
class ModuleUser(ModuleBase):
    """Data-access helper for the ``ehc.t_user`` Postgres table (Python 2)."""

    def __init__(self, table="t_user"):
        self.schema = "ehc"
        self.table = table
        # Fully quoted "schema"."table" identifier for use in SQL statements.
        self.table_name = "\"%s\".\"%s\"" % (self.schema, self.table)
    # def __init__(self, table_name="\"ehc\".\"t_user\""):
    #     self.table = table_name
    # def get(self, user_id=None, user_name=None):
    #     client = PostgresClient()
    #     row = client.fetch_data(self.table, "WHERE \"autoKey\" = %s" % user_id)
    #     client.close()
    #     return row if not row else row[0]
    #
    # def get_by_partner_id(self, partner_userid):
    #     #sql = "SELECT * FROM %s WHERE autoKey = %s" % (self.table, env_id)
    #     client = PostgresClient()
    #     row = client.fetch_data(self.table, "WHERE partner_user_id = '%s'" % partner_userid)
    #     client.close()
    #     return row
    # def add(self, name, partnerRawdata, partner_user_id, email="", phone=""):
    #     sql = "INSERT INTO %s (name, email, phone, partnerRawdata, partner_user_id) VALUES ('%s', '%s', '%s', '%s', '%s') returning *;" \
    #         % (self.table, name, email, phone, partnerRawdata, partner_user_id)
    #     print sql
    #     client = PostgresClient()
    #     ret = client.insert_sql(sql)
    #     client.close()
    #     return ret

    def update_access_info(self, access, user_id):
        """Overwrite the access_info column for the row keyed by autoKey.

        WARNING(review): SQL is built by string interpolation, so `access`
        and `user_id` are injectable if they ever carry user input -- prefer
        parameterized queries if PostgresClient supports them.
        """
        # NOTE(review): uses self.table (unqualified), not self.table_name --
        # confirm schema qualification is not needed here.
        sql = "UPDATE %s SET access_info = '%s' WHERE \"autoKey\" = %s" % (self.table, access, user_id)
        client = PostgresClient()
        client.execute_sql(sql)
        client.close()

    def update_status(self, status, user_id):
        """Overwrite the status column for the row keyed by autoKey.

        WARNING(review): same string-interpolated SQL injection caveat as
        update_access_info.
        """
        sql = "UPDATE %s SET status = '%s' WHERE \"autoKey\" = %s" % (self.table, status, user_id)
        client = PostgresClient()
        client.execute_sql(sql)
        client.close()

    def get_all(self):
        """Fetch every row of the user table."""
        client = PostgresClient()
        rows = client.fetch_data(self.table)
        client.close()
        return rows
|
[
"yangxin@zetyun.com"
] |
yangxin@zetyun.com
|
62a3e940766f49a1361fa806c29ca65246258810
|
a6da9040a6dad7db109cc163b76acd3e6c8be56f
|
/hafta01/ders06.py
|
8dfdb173da15c242146905782db437de1c0a0502
|
[] |
no_license
|
sinanurun/Python_8181
|
f04677ada7f1f6daadddaaf49211b20e8197ad7f
|
710b2bc4573d22988376fd3680c5be0dc011f5bc
|
refs/heads/master
| 2020-04-27T13:14:14.544839
| 2019-04-15T04:50:33
| 2019-04-15T04:50:33
| 174,362,253
| 21
| 10
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
# Greets women working in IT on March 8 (International Women's Day);
# everyone else gets a generic greeting. Prompts/messages are in Turkish.
# (An earlier commented-out name-based example was removed.)
gender = input("cinsiyet")
profession = input("mesleğiniz")
is_woman_in_it = gender == "kadın" and profession == "bilisim"
if is_woman_in_it:
    print("8 Mart dünya kadınlar gününüz kutlu olsun")
else:
    print("her gününüz de kutlu olsun")
|
[
"sinanurun24@gmail.com"
] |
sinanurun24@gmail.com
|
19842f088f7b8feaf1c34853366f4f1851fb997b
|
9303910239ca531d512460553e291960f3b0bd1c
|
/setup.py
|
866ecd51fea15b427bb5e38717130887e781a264
|
[
"BSD-3-Clause"
] |
permissive
|
basnijholt/pyfeast
|
2c81e67b221a2450f514b7da853342d91eae54bd
|
b6d8832b3a101900ed8b50127c1884ef74b34750
|
refs/heads/master
| 2020-03-18T08:06:20.356311
| 2018-05-23T18:08:54
| 2018-05-23T18:33:48
| 134,490,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,208
|
py
|
#!/usr/bin/env python3
import configparser
import sys
import os.path
import numpy
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
from create_cython_files import create_feast_pxd, create_feast_pyx
def guess_libraries():
    """Return the configuration for FEAST if it is available in a known way.
    This is known to work with the FEAST binaries in the conda-forge channel."""
    import ctypes.util
    # Runtime libraries always required alongside whichever BLAS is found.
    common_libs = ['mkl_rt', 'gfortran', 'iomp5']
    for lib in ['blas', 'openblas']:
        if ctypes.util.find_library(lib):
            return common_libs + [lib]
    # for/else: runs only when the loop completes without returning,
    # i.e. neither BLAS variant could be located on this system.
    else:
        print('Cannot find MKL or openBLAS!')
        sys.exit(1)
def guess_libraries_dirs():
    """Best guess for the library search path: ``<exec_prefix>/lib``."""
    prefix = sys.exec_prefix
    return [os.path.join(prefix, 'lib')]
def guess_include_dirs():
    """Best guess for the header search path: ``<exec_prefix>/include``."""
    prefix = sys.exec_prefix
    return [os.path.join(prefix, 'include')]
def guess(key):
    """Dispatch to the best-guess helper for one build-config key.

    Args:
        key: one of 'library_dirs', 'include_dirs', 'libraries'.

    Returns:
        list: the guessed value for that key.

    Raises:
        ValueError: for an unrecognized key.
    """
    if key == 'library_dirs':
        return guess_libraries_dirs()
    if key == 'include_dirs':
        return guess_include_dirs()
    if key == 'libraries':
        return guess_libraries()
    # Robustness fix: the original silently returned None for an unknown
    # key, which would only surface later as a confusing TypeError.
    raise ValueError('unknown configuration key: {!r}'.format(key))
def get_config(config_file='build.conf'):
    """Load the FEAST build configuration, filling gaps with best guesses.

    Reads the ``[feast]`` section of *config_file*; whichever of the keys
    'include_dirs', 'library_dirs', 'libraries' are missing (or if the file
    itself is absent / lacks the section) are supplied by guess().  NumPy's
    include directory is always appended.

    Returns:
        dict mapping each key to a list of strings.
    """
    # Read build configuration file.
    configs = configparser.ConfigParser()
    try:
        with open(config_file) as f:
            configs.read_file(f)
        config = dict(configs['feast'])
    except IOError:
        # No config file present: fall back to guessing everything.
        print('User-configured build config.')
        config = {}
    except KeyError:
        # File exists but has no [feast] section.
        print('User-configured build config, '
              'but no `feast` section.')
        config = {}
    keys = ['include_dirs', 'library_dirs', 'libraries']
    for k in keys:
        if k in config:
            # Config values are whitespace-separated lists.
            config[k] = config[k].split()
        else:
            print('Auto configuring `{}` (best guess)'.format(k))
            config[k] = guess(k)
    config['include_dirs'].append(numpy.get_include())
    return config
if __name__ == '__main__':
ext_params = get_config()
create_feast_pxd()
create_feast_pyx()
ext_modules=[
Extension("feast",
sources=["feast.pyx"],
**ext_params,
)
]
setup(
name="pyfeast",
ext_modules=cythonize(ext_modules),
)
|
[
"basnijholt@gmail.com"
] |
basnijholt@gmail.com
|
710bafb6e75878a0ed6c139caf4c0f43bac256d9
|
66f4c011237e9fcad12d5f5508589b01a66a2a91
|
/neural_sp/models/modules/attention.py
|
23eead75b60255abae0090078e2902d3e3121e7e
|
[
"Apache-2.0"
] |
permissive
|
nikhil-garg/neural_sp
|
5004624ed1c23ff1ce5fefba1538e25eabab1e8c
|
0df9107bf2515f6fba6d2a5910c6878daa06193f
|
refs/heads/master
| 2023-01-22T14:44:17.338339
| 2020-10-31T13:49:50
| 2020-10-31T13:49:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,022
|
py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Single-head attention layer."""
import numpy as np
import torch
import torch.nn as nn
class AttentionMechanism(nn.Module):
    """Single-head attention layer.

    Args:
        kdim (int): dimension of key
        qdim (int): dimension of query
        atype (str): type of attention mechanisms
        adim: (int) dimension of attention space
        sharpening_factor (float): sharpening factor in the softmax layer
            for attention weights
        sigmoid_smoothing (bool): replace the softmax layer for attention weights
            with the sigmoid function
        conv_out_channels (int): number of channels of conv outputs.
            This is used for location-based attention.
        conv_kernel_size (int): size of kernel.
            This must be the odd number.
        dropout (float): dropout probability for attention weights
        lookahead (int): lookahead frames for triggered attention

    """

    def __init__(self, kdim, qdim, adim, atype,
                 sharpening_factor=1, sigmoid_smoothing=False,
                 conv_out_channels=10, conv_kernel_size=201, dropout=0.,
                 lookahead=2):

        super().__init__()

        assert conv_kernel_size % 2 == 1, "Kernel size should be odd for 'same' conv."
        self.atype = atype
        self.adim = adim
        self.sharpening_factor = sharpening_factor
        self.sigmoid_smoothing = sigmoid_smoothing
        self.n_heads = 1
        self.lookahead = lookahead
        self.reset()

        # attention dropout applied after the softmax layer
        self.dropout = nn.Dropout(p=dropout)

        if atype == 'no':
            raise NotImplementedError
            # NOTE: sequence-to-sequence without attention (use the last state as a context vector)
        elif atype in ['add', 'triggered_attention']:
            self.w_key = nn.Linear(kdim, adim)
            self.w_query = nn.Linear(qdim, adim, bias=False)
            self.v = nn.Linear(adim, 1, bias=False)
        elif atype == 'location':
            self.w_key = nn.Linear(kdim, adim)
            self.w_query = nn.Linear(qdim, adim, bias=False)
            self.w_conv = nn.Linear(conv_out_channels, adim, bias=False)
            self.conv = nn.Conv2d(in_channels=1,
                                  out_channels=conv_out_channels,
                                  kernel_size=(1, conv_kernel_size),
                                  stride=1,
                                  padding=(0, (conv_kernel_size - 1) // 2),
                                  bias=False)
            self.v = nn.Linear(adim, 1, bias=False)
        elif atype == 'dot':
            self.w_key = nn.Linear(kdim, adim, bias=False)
            self.w_query = nn.Linear(qdim, adim, bias=False)
        elif atype == 'luong_dot':
            assert kdim == qdim
            # NOTE: no additional parameters
        elif atype == 'luong_general':
            self.w_key = nn.Linear(kdim, qdim, bias=False)
        elif atype == 'luong_concat':
            self.w = nn.Linear(kdim + qdim, adim, bias=False)
            self.v = nn.Linear(adim, 1, bias=False)
        else:
            raise ValueError(atype)

    def reset(self):
        """Clear the cached (projected) key and mask; call between utterances."""
        self.key = None
        self.mask = None

    def forward(self, key, value, query, mask=None, aw_prev=None,
                cache=False, mode='', trigger_points=None):
        """Forward pass.

        Args:
            key (FloatTensor): `[B, klen, kdim]`
            value (FloatTensor): `[B, klen, vdim]`
            query (FloatTensor): `[B, 1, qdim]`
            mask (ByteTensor): `[B, qlen, klen]`
            aw_prev (FloatTensor): `[B, 1 (H), 1 (qlen), klen]`
            cache (bool): cache key and mask
            mode: dummy interface for MoChA/MMA
            trigger_points (IntTensor): `[B]`
        Returns:
            cv (FloatTensor): `[B, 1, vdim]`
            aw (FloatTensor): `[B, 1 (H), 1 (qlen), klen]`
            beta: dummy interface for MoChA/MMA
            p_choose_i: dummy interface for MoChA/MMA

        """
        bs, klen = key.size()[:2]
        qlen = query.size(1)

        if aw_prev is None:
            aw_prev = key.new_zeros(bs, 1, klen)
        else:
            aw_prev = aw_prev.squeeze(1)  # remove head dimension

        # Pre-computation of encoder-side features for computing scores
        if self.key is None or not cache:
            # Bug fix: 'triggered_attention' was misspelled 'trigerred_attention'
            # here, so the key projection w_key was silently skipped for that
            # attention type (causing a shape mismatch in the additive score).
            if self.atype in ['add', 'triggered_attention',
                              'location', 'dot', 'luong_general']:
                self.key = self.w_key(key)
            else:
                self.key = key
            self.mask = mask
            if mask is not None:
                assert self.mask.size() == (bs, 1, klen), (self.mask.size(), (bs, 1, klen))

        # for batch beam search decoding
        if self.key.size(0) != query.size(0):
            self.key = self.key[0: 1, :, :].repeat([query.size(0), 1, 1])

        if self.atype == 'no':
            raise NotImplementedError
        elif self.atype in ['add', 'triggered_attention']:
            tmp = self.key.unsqueeze(1) + self.w_query(query).unsqueeze(2)
            e = self.v(torch.tanh(tmp)).squeeze(3)
        elif self.atype == 'location':
            conv_feat = self.conv(aw_prev.unsqueeze(1)).squeeze(2)  # `[B, ch, klen]`
            conv_feat = conv_feat.transpose(2, 1).contiguous().unsqueeze(1)  # `[B, 1, klen, ch]`
            tmp = self.key.unsqueeze(1) + self.w_query(query).unsqueeze(2)
            e = self.v(torch.tanh(tmp + self.w_conv(conv_feat))).squeeze(3)
        elif self.atype == 'dot':
            e = torch.bmm(self.w_query(query), self.key.transpose(2, 1))
        elif self.atype in ['luong_dot', 'luong_general']:
            e = torch.bmm(query, self.key.transpose(2, 1))
        elif self.atype == 'luong_concat':
            query = query.repeat([1, klen, 1])
            e = self.v(torch.tanh(self.w(torch.cat([self.key, query], dim=-1)))).transpose(2, 1)
        assert e.size() == (bs, qlen, klen), (e.size(), (bs, qlen, klen))

        # Most negative representable value for e's dtype, used as the mask fill.
        NEG_INF = float(np.finfo(torch.tensor(0, dtype=e.dtype).numpy().dtype).min)

        # Mask the right part from the trigger point
        if self.atype == 'triggered_attention':
            assert trigger_points is not None
            for b in range(bs):
                e[b, :, trigger_points[b] + self.lookahead + 1:] = NEG_INF

        # Compute attention weights, context vector
        if self.mask is not None:
            e = e.masked_fill_(self.mask == 0, NEG_INF)
        if self.sigmoid_smoothing:
            aw = torch.sigmoid(e) / torch.sigmoid(e).sum(-1).unsqueeze(-1)
        else:
            aw = torch.softmax(e * self.sharpening_factor, dim=-1)
        aw = self.dropout(aw)
        cv = torch.bmm(aw, value)

        return cv, aw.unsqueeze(1), None, None
|
[
"hiro.mhbc@gmail.com"
] |
hiro.mhbc@gmail.com
|
bc728071d8ce05a274be6f3ab7e50341062153cc
|
a127d0feb3bcf4f2581f385bb24f2b789c771c9c
|
/2syo/17.py
|
81b2851a04ffe18296b8930111e36f4c0080e7e3
|
[] |
no_license
|
NgoVanDau/nlp100knock
|
01383e4cc5a1470508744668103b9ea1a238b892
|
3ef63c0d2dfb55c0e6a31aced645f284325a98a5
|
refs/heads/master
| 2023-03-22T13:19:23.932429
| 2018-08-05T05:27:11
| 2018-08-05T05:27:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
"""Print per-column value counts for hightemp.txt.

(Original note, translated: the index ends up included in the output, which
deviates slightly from the intent, but we move on for now.)
"""
import pandas as pd

COLUMN_NAMES = ['prefecture', 'city', 'degree', 'date']
table = pd.read_table('input/hightemp.txt', header=None)
table.columns = COLUMN_NAMES
for column in COLUMN_NAMES:
    print(table[column].value_counts())
exit()
|
[
"kota.k.1132.pda@gmail.com"
] |
kota.k.1132.pda@gmail.com
|
5555d92655b3c29e89c2358b93be0e313d1b0343
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03103/s900243702.py
|
79c2efdabaa3a89657cec5473f6e626b9bfafc0b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
# Read n (price, stock) offers, sort by price, and buy m units greedily
# from the cheapest offers; print the total cost.
n, m = map(int, input().split(" "))
offers = []
for _ in range(n):
    price, stock = map(int, input().split(" "))
    offers.append((price, stock))
offers.sort()
total = 0
bought = 0
done = False
for idx in range(m):
    price, stock = offers[idx]
    for _ in range(stock):
        total += price
        bought += 1
        if bought == m:
            done = True
            break
    if done:
        break
print(total)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9d762fd0e7c422218e547c5cb69f8b1a0f8bdd98
|
ca1151bb86a2445d74b24e6ec27c353a36cc511e
|
/setup.py
|
d932b3a8d81634b19ad4eeaced1b1a66de5c8646
|
[
"MIT"
] |
permissive
|
williamfzc/stagesep2
|
6739c8f32a5fb81f4907e92aa953b2b2d437cbdd
|
20c00187c86e8b807bbb3373f58918a575c2ccc9
|
refs/heads/master
| 2020-04-06T22:06:21.683906
| 2019-08-17T10:07:31
| 2019-08-17T10:07:31
| 157,824,967
| 22
| 3
|
MIT
| 2019-03-13T07:55:29
| 2018-11-16T06:49:13
|
Python
|
UTF-8
|
Python
| false
| false
| 865
|
py
|
from setuptools import setup, find_packages
# Packaging manifest for stagesep2, a video-to-stage-data analysis toolkit.
setup(
    name='stagesep2',
    version='0.2.6',
    description='Analyse, and convert video into useful data.',
    author='williamfzc',
    author_email='fengzc@vip.qq.com',
    url='https://github.com/williamfzc/stagesep2',
    packages=find_packages(),
    license='MIT',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    python_requires=">=3.6",
    # Runtime dependencies; pyecharts is pinned to the 0.5.x API line.
    install_requires=[
        'opencv-python',
        'structlog',
        'numpy',
        'jieba',
        'scikit-image',
        'pyecharts==0.5.11',
        'pyecharts_snapshot',
        'findit',
        'tesserocr',
        'Pillow',
    ]
)
|
[
"178894043@qq.com"
] |
178894043@qq.com
|
31d37ea5367ad2ba461b4028cc146828551bad82
|
131ccf66fb787e9b1f0773a25fa518d1f2a3c5d0
|
/gui_programming/menu_demo.py
|
4621009871473d50b9017a2adab00a3842a40797
|
[] |
no_license
|
jocogum10/learning-python-programming
|
a0ba62abde49fd79762bcb7ba4a94bf8126afa77
|
035858bd332e3970d95db8bce7b1175e450802db
|
refs/heads/master
| 2020-07-07T17:08:00.743196
| 2019-12-13T05:32:47
| 2019-12-13T05:32:47
| 203,416,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,770
|
py
|
#!/usr/local/bin/python
"""
Tk8.0 style main window menus
menu/tool bars packed before middle, fill=X (pack first=clip last);
adds photo menu entries; see also: add_checkbutton, add_radiobutton
"""
from tkinter import *
from tkinter.messagebox import *
class NewMenuDemo(Frame):
    # Demo frame: a menubar (File/Edit/Image) plus a bottom toolbar.
    def __init__(self, parent=None):
        Frame.__init__(self, parent)
        self.pack(expand=YES, fill=BOTH)
        self.createWidgets()
        self.master.title("Toolbars and Menus")
        self.master.iconname("tkpython")
    def createWidgets(self):
        # Menus and toolbar first, then a central placeholder label.
        self.makeMenuBar()
        self.makeToolBar()
        L = Label(self, text="Menu and Toolbar Demo")
        L.config(relief=SUNKEN, width=40, height=10, bg="white")
        L.pack(expand=YES, fill=BOTH)
    def makeToolBar(self):
        # Bottom toolbar with Quit (right) and Hello (left) buttons.
        toolbar = Frame(self, cursor='hand2', relief=SUNKEN, bd=2)
        toolbar.pack(side=BOTTOM, fill=X)
        Button(toolbar, text='Quit', command=self.quit).pack(side=RIGHT)
        Button(toolbar, text='Hello', command=self.greeting).pack(side=LEFT)
    def makeMenuBar(self):
        self.menubar = Menu(self.master)
        self.master.config(menu=self.menubar)  # master = top-level window
        self.fileMenu()
        self.editMenu()
        self.imageMenu()
    def fileMenu(self):
        pulldown = Menu(self.menubar)
        pulldown.add_command(label="Open...", command=self.notdone)
        pulldown.add_command(label="Quit...", command=self.quit)
        self.menubar.add_cascade(label='File', underline=0, menu=pulldown)
    def editMenu(self):
        pulldown = Menu(self.menubar)
        pulldown.add_command(label='Paste', command=self.notdone)
        pulldown.add_command(label='Spam', command=self.greeting)
        pulldown.add_separator()
        pulldown.add_command(label='Delete', command=self.greeting)
        # Grey out 'Delete'; index 4 presumably accounts for the default
        # tearoff entry at index 0 -- verify against Tk menu indexing.
        pulldown.entryconfig(4, state="disable")
        self.menubar.add_cascade(label='Edit', underline=0, menu=pulldown)
    def imageMenu(self):
        photoFiles = ('1.png', '2.png', '3.png')
        pulldown = Menu(self.menubar)
        self.photoObjs = []
        for file in photoFiles:
            img = PhotoImage(file='./images/' + file)
            pulldown.add_command(image=img, command=self.notdone)
            self.photoObjs.append(img)  # keep a reference so Tk doesn't GC it
        self.menubar.add_cascade(label='Image', underline=0, menu=pulldown)
    def greeting(self):
        showinfo('greeting', 'Greetings')
    def notdone(self):
        showerror('Not implemented', 'Not yet available')
    def quit(self):
        # Ask for confirmation before tearing down the main loop.
        if askyesno('Verify quit', 'Are you sure you want to quit?'):
            Frame.quit(self)
if __name__ == '__main__':
    NewMenuDemo().mainloop()
|
[
"jocogum10@gmail.com"
] |
jocogum10@gmail.com
|
4ba22a2cd9579de26bad0952e3b979925435e5ce
|
1763b41a702b8e8b15e3767676fb201de927cca6
|
/Yelp_CF.py
|
09913bdba4902eb3c7adf26662e19e3115f1eda4
|
[] |
no_license
|
chixujohnny/Yelp_project
|
fe81701d642729cf850c1a4adf5734cc052561d3
|
844da018885b107246c39c1942cec0575e051b59
|
refs/heads/master
| 2021-01-12T06:23:22.318169
| 2017-05-11T10:42:10
| 2017-05-11T10:42:10
| 77,352,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
# coding: utf-8
#############################################
# Collaborative-filtering based recommender  #
#############################################
import json
def CF_Data_Preprocess(review_path):
    """Scan a Yelp review JSON-lines file for per-user rating data.

    NOTE(review): this function parses each review into locals but never
    stores them into User_Rate_Dict nor returns anything -- it appears to
    be an unfinished work in progress.
    """
    # Intended shape: {'user_id': ['business_id', 'stars', 'date']}
    User_Rate_Dict = {}
    lines = open(review_path)  # NOTE(review): file handle is never closed
    for line in lines:
        line_json = json.loads(line)
        uid = line_json['user_id']
        bid = line_json['business_id']
        stars = line_json['stars']
        date = line_json['date']
|
[
"1390463349@qq.com"
] |
1390463349@qq.com
|
420b7dde5c7f879259a4d295a2879d5337315c01
|
38433574de70ccc0472daeabb614b491ac8526c0
|
/sqlalchemy-stubs/dialects/firebird/fdb.pyi
|
d4d08d972401a15e71addb27f7142773657389cf
|
[
"MIT"
] |
permissive
|
Parnassius/sqlalchemy2-stubs
|
28fd7611a50b60415062fdb6d367da14c9a69462
|
68f8417888456588714fcced1c6799f3eb00ff2d
|
refs/heads/main
| 2023-09-01T18:03:58.568139
| 2021-10-14T17:54:15
| 2021-10-14T17:54:15
| 363,415,451
| 0
| 0
|
MIT
| 2021-10-14T20:29:03
| 2021-05-01T13:22:32
|
Python
|
UTF-8
|
Python
| false
| false
| 402
|
pyi
|
from typing import Any
from .kinterbasdb import FBDialect_kinterbasdb as FBDialect_kinterbasdb
from ... import util as util
class FBDialect_fdb(FBDialect_kinterbasdb):
    # Type stub for SQLAlchemy's Firebird "fdb" DBAPI dialect.
    def __init__(
        self, enable_rowcount: bool = ..., retaining: bool = ..., **kwargs: Any
    ) -> None: ...
    @classmethod
    def dbapi(cls): ...
    def create_connect_args(self, url: Any): ...
# Module-level alias SQLAlchemy uses to locate the dialect class.
dialect = FBDialect_fdb
|
[
"mike_mp@zzzcomputing.com"
] |
mike_mp@zzzcomputing.com
|
e5f65f77df2260dd3efa5fb59971d518f10a7410
|
481641e0179b5d416a7c48481455874767ae2575
|
/Course Schedule.py
|
c2898ffbfd7fa05912babe048fa6ac2bbf524b85
|
[] |
no_license
|
nixonpj/leetcode
|
776dad03a9de43a8c046b1ea1bbb3dd5e9f256ca
|
de756337b11e578e25f6d0cc0c70a22ae0b8fdc5
|
refs/heads/main
| 2023-05-01T11:38:44.530120
| 2021-05-17T15:13:16
| 2021-05-17T15:13:16
| 304,484,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,949
|
py
|
"""
There are a total of numCourses courses you have to take, labeled from
0 to numCourses - 1. You are given an array prerequisites where
prerequisites[i] = [ai, bi] indicates that you must take course bi first
if you want to take course ai.
For example, the pair [0, 1], indicates that to take course 0 you
have to first take course 1.
Return true if you can finish all courses. Otherwise, return false.
"""
from typing import List
class Solution:
    """Course-schedule feasibility: cycle detection on the prerequisite graph.

    Fix vs. original: removed leftover debug prints (they polluted stdout on
    every call), the unused `can_take` local, and commented-out dead code.
    The public interface and return values are unchanged.
    """
    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
        """Return True iff all courses can be finished (no cyclic prereqs)."""
        # Adjacency: course -> list of its prerequisites.
        self.prereqs = {i: [] for i in range(numCourses)}
        for course, prereq in prerequisites:
            self.prereqs[course].append(prereq)
        if not prerequisites:
            return True
        # Three-color DFS state: Unvisited / Visiting (on stack) / Visited.
        self.taken = {i: "Unvisited" for i in range(numCourses)}
        for course in range(numCourses):
            if self.taken[course] == "Unvisited":
                if not self.can_take_course(course):
                    return False
                self.taken[course] = "Visited"
        return True

    def can_take_course(self, course):
        """DFS from `course`; return False iff a back edge (cycle) is found."""
        self.taken[course] = "Visiting"
        for prereq in self.prereqs[course]:
            if self.taken[prereq] == "Unvisited":
                if not self.can_take_course(prereq):
                    return False
            elif self.taken[prereq] == "Visiting":
                # `prereq` is on the current DFS stack: cycle detected.
                return False
        self.taken[course] = "Visited"
        return True
# Ad-hoc manual checks; earlier cases kept commented out.
s = Solution()
# print(s.canFinish(numCourses=2, prerequisites=[[1,0], [0,1]]))
# print(s.canFinish(numCourses=5, prerequisites=[[1,4],[2,4],[3,1],[3,2]]))
print(s.canFinish(numCourses=5, prerequisites=[[0,1],[1,2],[0,3],[4,0], [3,1], [4,1], [2,4]]))
|
[
"nixon.pj93@gmail.com"
] |
nixon.pj93@gmail.com
|
0b53643e6c106365bda081bdb298e397550c27bd
|
c40d1eb90464fa61c7c290ccd4f4a6416d7ed2ff
|
/1094. Car Pooling.py
|
3cdb8a616a16184f9f22f3d0baaab05d4e69970d
|
[] |
no_license
|
mh-rahman/Programming-Practice
|
2bebdd5c68490882efefa9e262d2a90bb0da51fa
|
e4ceb275a6c9a56999289751f13e74548d9cd185
|
refs/heads/master
| 2021-07-26T12:48:43.158183
| 2020-09-19T22:34:25
| 2020-09-19T22:34:25
| 219,614,371
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 725
|
py
|
class Solution:
    def carPooling(self, trips: List[List[int]], capacity: int) -> bool:
        """Return True iff every trip can be served without the passenger
        count ever exceeding `capacity`."""
        # Visit pickups in order of location (ties broken by drop-off).
        trips.sort(key=lambda trip: (trip[1], trip[2]))
        onboard = 0
        drops = []  # min-heap of (drop_location, passenger_count)
        for count, start, end in trips:
            # Let everyone out whose drop-off is at or before this pickup.
            while drops and drops[0][0] <= start:
                onboard -= heapq.heappop(drops)[1]
            if onboard + count > capacity:
                return False
            heapq.heappush(drops, (end, count))
            onboard += count
        return True
|
[
"rahmanhaseeb32@gmail.com"
] |
rahmanhaseeb32@gmail.com
|
397ef26f523edeb63c9bc75e90425a4be6ca01c7
|
149e9e52304a970ffb256f290fce5f614c9e20c4
|
/Python Programming language/DataCampPractice/Intermediate Python for DS/DictionariesAndPandas/DeP2.py
|
2c6e6d638adfb8dd2b65d73c8fa29af218c8603c
|
[] |
no_license
|
Pasquale-Silv/Improving_Python
|
7451e0c423d73a91fa572d44d3e4133b0b4f5c98
|
96b605879810a9ab6c6459913bd366b936e603e4
|
refs/heads/master
| 2023-06-03T15:00:21.554783
| 2021-06-22T15:26:28
| 2021-06-22T15:26:28
| 351,806,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
# Definition of dictionary mapping country -> capital city
europe = {'spain':'madrid', 'france':'paris', 'germany':'berlin', 'norway':'oslo' }
# Add italy to europe
europe["italy"] = "rome"
# Print out italy's capital, then a membership test on the key
print(europe["italy"])
print("italy" in europe)
# Add poland to europe
europe["poland"] = "warsaw"
# Print the full, updated europe dict
print(europe)
|
[
"55320885+Pasquale-Silv@users.noreply.github.com"
] |
55320885+Pasquale-Silv@users.noreply.github.com
|
864cfe6ca7cb9e8a0d840fa7b0fd3f5bb4df3542
|
c7d4b0a7278df76c65108a637be0ee144f3b0edd
|
/graphite_api/finders/__init__.py
|
4b4f76a120412c53cbd1f6dfeaef5925ff3e0f34
|
[
"Apache-2.0"
] |
permissive
|
dkulikovsky/graphite-api
|
e21f0441319e9a58a6f06bfee8369e8d0f13cab8
|
f42d14eb2bd8112f12212318f8ca5b6f859b12f0
|
refs/heads/master
| 2021-01-23T00:25:39.428369
| 2015-07-08T23:27:09
| 2015-07-08T23:27:09
| 38,269,511
| 2
| 1
| null | 2015-06-29T20:26:27
| 2015-06-29T20:26:27
| null |
UTF-8
|
Python
| false
| false
| 1,546
|
py
|
import fnmatch
import os.path
def get_real_metric_path(absolute_path, metric_path):
    """Resolve a symlinked filesystem path to the metric path of its target.

    Non-symlink paths come back unchanged; symlinks are resolved so cache
    queries use the real location.
    """
    if not os.path.islink(absolute_path):
        return metric_path
    real_fs_path = os.path.realpath(absolute_path)
    relative_fs_path = metric_path.replace('.', os.sep)
    base_fs_path = absolute_path[:-len(relative_fs_path)]
    return fs_to_metric(real_fs_path[len(base_fs_path):])
def fs_to_metric(path):
    """Convert a relative filesystem path to a dotted metric path,
    stripping the filename's extension."""
    head, tail = os.path.split(path)
    stem = tail.split('.')[0]
    return os.path.join(head, stem).replace(os.sep, '.')
def _deduplicate(entries):
yielded = set()
for entry in entries:
if entry not in yielded:
yielded.add(entry)
yield entry
def match_entries(entries, pattern):
    """Drop-in replacement for fnmatch.filter supporting one brace group
    of pattern variants (ie. {foo,bar}baz = foobaz or barbaz)."""
    open_idx, close_idx = pattern.find('{'), pattern.find('}')
    if open_idx == -1 or close_idx <= open_idx:
        # No variant group: plain fnmatch, sorted output.
        found = fnmatch.filter(entries, pattern)
        found.sort()
        return found
    prefix, suffix = pattern[:open_idx], pattern[close_idx + 1:]
    found = []
    for option in pattern[open_idx + 1:close_idx].split(','):
        found.extend(fnmatch.filter(entries, prefix + option + suffix))
    # Keep first-match order while removing duplicates.
    return list(_deduplicate(found))
|
[
"brutasse@gmail.com"
] |
brutasse@gmail.com
|
ea400d3b5f0ad457cae4af10cb257e224a88cc0a
|
9d41f4df737dc2e6fd3fcf4c6f50028fd483cdd0
|
/python_Django/fc_community/board/migrations/0002_auto_20200430_1106.py
|
66d148799c461106ca4773eddece35b4ca8a41f6
|
[] |
no_license
|
Ha-Young/byte_degree_python
|
33a730f4c1f4a99fea03fb923ad73edee2dd1d48
|
7fcbfed832dec3d7cb8503b86d9457e1f2ae0ccf
|
refs/heads/master
| 2022-11-16T16:54:52.978443
| 2020-07-04T14:32:16
| 2020-07-04T14:32:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
# Generated by Django 3.0.5 on 2020-04-30 11:06
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration -- do not edit by hand.
    dependencies = [
        ('board', '0001_initial'),
    ]
    operations = [
        # Re-declare Board.registered_dttm with a Korean verbose_name
        # ("registration date"); auto_now_add stamps row-creation time.
        migrations.AlterField(
            model_name='board',
            name='registered_dttm',
            field=models.DateTimeField(auto_now_add=True, verbose_name='등록일자'),
        ),
    ]
|
[
"hayeong28@naver.com"
] |
hayeong28@naver.com
|
2fa05b1d216249b5cab111a5a004c50d74328970
|
cc43149992c5f79718279ee47e5db4617b1b42e9
|
/pytorch_toolkit/nncf/examples/common/models/classification/squeezenet.py
|
8bac5c962959fb4ff1fec14ff7328bba41855a48
|
[
"Apache-2.0"
] |
permissive
|
tongni1975/openvino_training_extensions
|
174be009bb2fedf6bc774426f340960a90635600
|
3ff9796a2fc413564726916d5c11b42738bb40ef
|
refs/heads/develop
| 2020-06-07T19:06:46.120350
| 2020-04-17T11:27:08
| 2020-04-17T11:27:08
| 193,077,317
| 0
| 0
|
Apache-2.0
| 2020-04-17T11:47:00
| 2019-06-21T10:12:20
|
Python
|
UTF-8
|
Python
| false
| false
| 5,531
|
py
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# SqueezeNet implementation from:
# torchvision/models/squeezenet.py
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.utils.model_zoo as model_zoo
# Download locations for the pretrained ImageNet weight checkpoints.
model_urls = {
    'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
    'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
}
class Fire(nn.Module):
    """SqueezeNet Fire module.

    A 1x1 "squeeze" convolution reduces channels, then two parallel
    "expand" convolutions (1x1 and 3x3, same-padded) run on the squeezed
    map and are concatenated along the channel dimension.
    """
    def __init__(self, inplanes, squeeze_planes,
                 expand1x1_planes, expand3x3_planes):
        super(Fire, self).__init__()
        self.inplanes = inplanes
        # Channel-reducing squeeze layer.
        self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
        self.squeeze_activation = nn.ReLU(inplace=True)
        # Parallel expand branches; outputs are concatenated in forward().
        self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,
                                   kernel_size=1)
        self.expand1x1_activation = nn.ReLU(inplace=True)
        self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,
                                   kernel_size=3, padding=1)
        self.expand3x3_activation = nn.ReLU(inplace=True)
    def forward(self, x):
        """Squeeze, then concatenate both expand branches channel-wise."""
        x = self.squeeze_activation(self.squeeze(x))
        return torch.cat([
            self.expand1x1_activation(self.expand1x1(x)),
            self.expand3x3_activation(self.expand3x3(x))
        ], 1)
class SqueezeNet(nn.Module):
    """SqueezeNet image classifier (architecture versions 1.0 and 1.1).

    `features` stacks convolutions, max-pools and Fire modules; `classifier`
    maps the 512-channel feature map to `num_classes` scores via dropout,
    a 1x1 convolution and global average pooling.
    """
    def __init__(self, version=1.0, num_classes=1000):
        super(SqueezeNet, self).__init__()
        if version not in [1.0, 1.1]:
            raise ValueError("Unsupported SqueezeNet version {version}:"
                             "1.0 or 1.1 expected".format(version=version))
        self.num_classes = num_classes
        if version == 1.0:
            # v1.0: 7x7/stride-2 stem with 96 channels.
            self.features = nn.Sequential(
                nn.Conv2d(3, 96, kernel_size=7, stride=2),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),
                Fire(96, 16, 64, 64),
                Fire(128, 16, 64, 64),
                Fire(128, 32, 128, 128),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),
                Fire(256, 32, 128, 128),
                Fire(256, 48, 192, 192),
                Fire(384, 48, 192, 192),
                Fire(384, 64, 256, 256),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),
                Fire(512, 64, 256, 256),
            )
        else:
            # v1.1: smaller 3x3/64-channel stem and earlier pooling.
            self.features = nn.Sequential(
                nn.Conv2d(3, 64, kernel_size=3, stride=2),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),
                Fire(64, 16, 64, 64),
                Fire(128, 16, 64, 64),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),
                Fire(128, 32, 128, 128),
                Fire(256, 32, 128, 128),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),
                Fire(256, 48, 192, 192),
                Fire(384, 48, 192, 192),
                Fire(384, 64, 256, 256),
                Fire(512, 64, 256, 256),
            )
        # Final convolution is initialized differently from the rest.
        final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d((1, 1))
        )
        # Weight init: small normal for the classifier conv, Kaiming-uniform
        # for every other convolution; zero biases throughout.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m is final_conv:
                    init.normal_(m.weight, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform_(m.weight)
                if m.bias is not None:
                    init.constant_(m.bias, 0)
    def forward(self, x):
        x = self.features(x)
        x = self.classifier(x)
        # Flatten the pooled 1x1 map to (batch, num_classes).
        return x.view(x.size(0), self.num_classes)
def squeezenet1_0_custom(pretrained=False, **kwargs):
    r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
    accuracy with 50x fewer parameters and <0.5MB model size"
    <https://arxiv.org/abs/1602.07360>`_ paper.
    Args:
        pretrained (bool): If True, returns a model pretrained on ImageNet
    """
    model = SqueezeNet(version=1.0, **kwargs)
    if not pretrained:
        return model
    state_dict = model_zoo.load_url(model_urls['squeezenet1_0'])
    model.load_state_dict(state_dict)
    return model
def squeezenet1_1_custom(pretrained=False, **kwargs):
    r"""SqueezeNet 1.1 model from the `official SqueezeNet repo
    <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
    SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
    than SqueezeNet 1.0, without sacrificing accuracy.
    Args:
        pretrained (bool): If True, returns a model pretrained on ImageNet
    """
    model = SqueezeNet(version=1.1, **kwargs)
    if not pretrained:
        return model
    state_dict = model_zoo.load_url(model_urls['squeezenet1_1'])
    model.load_state_dict(state_dict)
    return model
|
[
"48012821+AlexanderDokuchaev@users.noreply.github.com"
] |
48012821+AlexanderDokuchaev@users.noreply.github.com
|
e295f4546f92d704e963f88e2f69e970dcdbf3df
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_gainsay.py
|
0939ed1baa146c43e3236b450df317cc19978b75
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 325
|
py
|
# class header (auto-generated lexicon entry)
class _GAINSAY():
    """Lexicon entry for the verb 'gainsay'."""
    def __init__(self,):
        # Word identity and dictionary definition(s).
        self.name = "GAINSAY"
        self.definitions = [u'to refuse to accept something as the truth: ']
        # Taxonomy links; note the 'childen' spelling is part of the
        # generated data model and must not be "fixed" in isolation.
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        # Part-of-speech bucket this entry belongs to.
        self.specie = 'verbs'
    def run(self, obj1 = [], obj2 = []):
        # Return the (currently empty) JSON payload; obj1/obj2 are unused.
        # NOTE(review): mutable default arguments -- harmless while unused.
        return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
217e3f356c285d0b8b0da2c8c12ff234b8ca8f10
|
1064db5dfd154c4bc600e0e03841b0f73f0eefbc
|
/home/api/v1/urls.py
|
68d0f35d9043e634ceb843ff272945032f9cd6ae
|
[] |
no_license
|
crowdbotics-apps/web-29-dev-5196
|
3303921a0e5c8794e8e67f55c9841f3ec7610c16
|
7beda8f7d57ce9b9858a46f7e3940d6eed4b5725
|
refs/heads/master
| 2023-05-26T23:00:23.271209
| 2020-05-29T12:47:07
| 2020-05-29T12:47:07
| 267,768,914
| 0
| 0
| null | 2021-06-13T04:08:30
| 2020-05-29T04:59:18
|
Python
|
UTF-8
|
Python
| false
| false
| 614
|
py
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import CustomTextViewSet, GgfhgfhViewSet, HomePageViewSet
# NOTE(review): the import below re-imports HomePageViewSet and
# CustomTextViewSet, shadowing the relative imports above -- confirm which
# module's viewsets are actually intended.
from home.api.v1.viewsets import (
    SignupViewSet,
    LoginViewSet,
    HomePageViewSet,
    CustomTextViewSet,
)
# DefaultRouter auto-generates list/detail URL patterns for each viewset.
router = DefaultRouter()
router.register("signup", SignupViewSet, basename="signup")
router.register("login", LoginViewSet, basename="login")
router.register("customtext", CustomTextViewSet)
router.register("homepage", HomePageViewSet)
router.register("ggfhgfh", GgfhgfhViewSet)
urlpatterns = [
    path("", include(router.urls)),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
98661a7646afcda1edc0dedb83f90e8b72a50dc0
|
650b516b1214c4d44fd6f04941e87e28e9049cde
|
/addons/plugin.video.fanfilm/resources/lib/libraries/cleandate.py
|
85151cc3c8bb936f60ac45d4dfad364b8b419655
|
[] |
no_license
|
MultiWu/build
|
b85cc45a33b871f4ade58de8457fcd094761f385
|
f50a64f674b6499668e0a5758fe0879b016f5c38
|
refs/heads/master
| 2022-10-31T20:35:53.382826
| 2019-12-20T22:50:16
| 2019-12-20T22:50:16
| 228,462,984
| 0
| 3
| null | 2022-10-07T08:47:18
| 2019-12-16T19:46:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,884
|
py
|
# -*- coding: utf-8 -*-
'''
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import datetime
import time
def iso_2_utc(iso_ts):
    """Convert an ISO-8601 timestamp string to seconds since the Unix epoch.

    Accepts 'YYYY-MM-DDTHH:MM:SS', optionally with fractional seconds and
    either a trailing 'Z' or a '+HH:MM'/'-HH:MM' UTC offset.  Returns 0 for
    a falsy/None input.

    Fixes vs. original: (1) a trailing 'Z' is now stripped before strptime
    (it previously remained and made strptime raise); (2) the offset sign is
    only searched for AFTER the 'T' separator, so the date's own '-'
    characters are no longer mistaken for a timezone delimiter.
    """
    if not iso_ts:
        return 0
    ts = iso_ts
    tz = None
    sign = '+'
    if ts.endswith('Z'):
        # Plain UTC marker: drop it so strptime can parse the remainder.
        ts = ts[:-1]
    else:
        t_pos = ts.find('T')
        delim = max(ts.rfind('+'), ts.rfind('-'))
        if t_pos > -1 and delim > t_pos:
            sign = ts[delim]
            tz = ts[delim + 1:]
            ts = ts[:delim]
    # Drop fractional seconds, if present.
    if ts.find('.') > -1:
        ts = ts[:ts.find('.')]
    try:
        d = datetime.datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S')
    except TypeError:
        # Workaround for strptime failures in some embedded interpreters.
        d = datetime.datetime(*(time.strptime(ts, '%Y-%m-%dT%H:%M:%S')[0:6]))
    dif = datetime.timedelta()
    if tz:
        # Assumes a colon-separated 'HH:MM' offset; '+HHMM' style offsets
        # are not handled here.
        hours, minutes = tz.split(':')
        hours = int(hours)
        minutes = int(minutes)
        if sign == '-':
            hours = -hours
            minutes = -minutes
        dif = datetime.timedelta(minutes=minutes, hours=hours)
    utc_dt = d - dif
    epoch = datetime.datetime.utcfromtimestamp(0)
    delta = utc_dt - epoch
    try:
        seconds = delta.total_seconds()  # available on 2.7+
    except AttributeError:
        seconds = delta.seconds + delta.days * 24 * 3600  # close enough
    return seconds
|
[
"oliwierminota@gmail.com"
] |
oliwierminota@gmail.com
|
8decbdca81e3e4b91373b7e04f95168420879c90
|
98d9305b1717642bcfb842eecd84d63b6eeaf759
|
/Funtions/Favorite Book.py
|
e2892efd0cc0ff10b3dc5face5cc9e96c6f9880e
|
[] |
no_license
|
er-aditi/Learning-Python
|
5ceb020f4df8db9e34df78edfaecca3e1854c8a9
|
297eda435ee2e1cee643f94ea4c5de6a82e3c8a7
|
refs/heads/master
| 2020-03-24T17:22:22.129081
| 2019-06-19T05:47:26
| 2019-06-19T05:47:26
| 142,856,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
def favorite_book(title):
    """Print a line naming a favorite book whose name ends with *title*."""
    message = "One of my favorite books is Alice in " + title
    print(message)
# Invoke with a positional argument.
favorite_book("Wonderland")
|
[
"aditijainnoida85@gmail.com"
] |
aditijainnoida85@gmail.com
|
416be490276271568c133407975891c44a56e873
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/countingE_20200723125242.py
|
ee2eed06a056b5d20bc8221422b9c32e3af0b259
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
def counting(arr):
    """Counting sort: print and return *arr* sorted ascending (stable).

    Assumes non-negative integers.  Runs in O(n + k) where k = max(arr).

    Fixes vs. original: (1) the output list was sized max(arr)+1 instead of
    len(arr), producing spurious zeros whenever max(arr)+1 != len(arr);
    (2) max() crashed on an empty input; (3) the sorted list is now returned
    in addition to being printed.
    """
    if not arr:
        print('out', [])
        return []
    m = max(arr) + 1
    # counts[v] will first hold occurrences of v, then the running prefix sum.
    counts = [0] * m
    outputs = [0] * len(arr)
    for value in arr:
        counts[value] += 1
    total = 0
    for i in range(m):
        total += counts[i]
        counts[i] = total
    # Place elements from the end to keep the sort stable.
    for k in range(len(arr) - 1, -1, -1):
        position = counts[arr[k]] - 1
        outputs[position] = arr[k]
        counts[arr[k]] -= 1
    print('out', outputs)
    return outputs
def swap(A,B):
    # NOTE(review): unfinished work-in-progress (this file is an editor
    # .history autosave).  The dangling `sum_` expression below raises
    # NameError when the function is called.
    n = len(A)
    sum_a = sum(A)
    sum_
# 22
# 24
swap([1,4,1,2,7,5,4],[2,4,5,6,2,2,3])
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
04640dde560910e0832261ea2972e720f222af3c
|
c0b6f77fce4a35001ac75d9375eac682780c72cd
|
/experiments/heli/plotting/plot_trajectories.py
|
5e11cf21d79a94fe0198595dcd8167631e0f9ca0
|
[
"MIT"
] |
permissive
|
sisl/CEEM
|
aef7854211887939f582fef9ce4fa9ac23a30567
|
6154587fe3cdb92e8b7f70eedb1262caa1553cc8
|
refs/heads/master
| 2023-07-19T17:56:31.222617
| 2021-03-16T15:59:43
| 2021-03-16T15:59:43
| 266,781,728
| 6
| 1
|
MIT
| 2023-07-06T21:36:26
| 2020-05-25T13:07:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,885
|
py
|
import matplotlib.pyplot as plt
import torch
import numpy as np
from ceem.data_utils import *
from ceem.smoother import EKF
import pandas as pd
import click
import matplotlib
# Render math labels with LaTeX (requires a TeX install with siunitx).
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['text.latex.preamble'] = [r"\usepackage{amsmath}", r"\usepackage{siunitx}"]
# Per-channel y-axis labels: three linear then three angular accelerations.
ttl = [
    '$a_x$ \n $(\si{\meter\per\second\squared})$', '$a_y$ \n $(\si{\meter\per\second\squared})$', '$a_z$ \n $(\si{\meter\per\second\squared})$',
    '$\dot{\omega}_x$ \n $(\si{\meter\per\second\squared})$', '$\dot{\omega}_y$ \n $(\si{\meter\per\second\squared})$',
    '$\dot{\omega}_z$ \n $(\si{\meter\per\second\squared})$'
]
# Figure sizes in inches, selectable via --figsize.
figsizes = {'large': (10, 4), 'small': (6.4, 4.8)}
@click.command()
@click.option('-b', '--trajectory', type=int, default=9)
@click.option('--datadir', type=click.Path(), default='./datasets/split_normalized')
@click.option('--modelfile', type=click.Path(), default='./experiments/heli/trajectories')
@click.option('-m', '--moments', is_flag=True)
@click.option('-s', '--savename', type=str, default=None)
@click.option('--figsize', type=str, default='large')
def main(trajectory, datadir, modelfile, moments, savename, figsize):
    """Plot measured vs. predicted helicopter signals for one test trajectory."""
    # load test data and un-normalize with the stored dataset statistics
    test_u, test_y, demos = load_helidata(datadir, 'test', return_files=True)
    y_mean, y_std, u_mean, u_std = load_statistics(datadir)
    test_u = test_u * u_std + u_mean
    test_y = test_y * y_std + y_mean
    dt = 0.01
    T = torch.arange(test_y.shape[1], dtype=torch.float32) * dt
    # load predictions from the different identification methods
    naivepred = torch.load(f'{modelfile}/naivepred')  # NOTE(review): loaded but unused below
    h25pred = torch.load(f'{modelfile}/h25pred')
    sidpred = torch.load(f'{modelfile}/sidpred')
    nlpred = torch.load(f'{modelfile}/nlpred')
    # create plot: three stacked axes, one per channel
    f, ax = plt.subplots(3, 1, figsize=figsizes[figsize])
    b = trajectory
    i = 0
    lines = []
    # channel offset 3 selects the angular channels when --moments is given
    c = 3 if moments else 0
    for j in range(3):
        lines.append(ax[i].plot(T, test_y[b, :, j + c], alpha=0.8)[0])
        lines.append(ax[i].plot(T[25:], h25pred[b, 1:, j + c], '--', alpha=0.8)[0])
        lines.append(ax[i].plot(T[25:], nlpred[b, 25:, j + c], '--', alpha=0.8)[0])
        lines.append(ax[i].plot(T[25:], sidpred[b, 25:, j + c], '--', alpha=0.8)[0])
        ax[i].set_ylabel(ttl[j + c], rotation=0, ha='center', fontweight='bold', labelpad=20)
        ax[i].grid(True)
        i += 1
    ax[i - 1].set_xlabel('time (s)', fontweight='bold', labelpad=-5)
    lgd = plt.figlegend(handles=lines[:4], labels=['dataset', 'H25', 'NL (ours)', 'SID'],
                        loc='upper center', shadow=True, ncol=4)
    f.subplots_adjust(bottom=0.1)
    plt.tight_layout(rect=[0, 0., 1., .935])
    # show interactively unless a save name was supplied
    if savename is None:
        plt.show()
    else:
        plt.savefig(f'./experiments/heli/plotting/{savename}.pdf', bbox_extra_artists=(lgd,),
                    bbox_inches='tight', dpi=400)
if __name__ == "__main__":
    main()
|
[
"kunal.menda@gmail.com"
] |
kunal.menda@gmail.com
|
1096ceab5c58b730c11c204555f2b606334dfd5b
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/hzQ6dEJ2GfscAZzND_23.py
|
6d6b618bba5d6f707d882f094ff6b102bf50e42f
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
"""
Create a function that takes a number as its parameter and returns another
function.The returned function must take a list of numbers as its parameter,
and return a list of the numbers divided by the number that was passed into
the first function.
### Examples
first = factory(15)
// returns a function first.
lst = [30, 45, 60]
// 30 / 15 = 2, 45 / 15 = 3, 60 / 15 = 4
first(lst) ➞ [2, 3, 4]
second = factory(2)
// returns a function second.
lst = [2, 4, 6]
// 2 / 2 = 1, 4 / 2 = 2, 6 / 2 = 3
second(lst) ➞ [1, 2, 3]
### Notes
Rounding not required.
"""
def factory(n):
def newFunc(l):
return [x/n for x in l]
return newFunc
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
7791443a22dfd1c3fc5ec4d1ed8346c1da591f42
|
d6183f3762b0ecc4b580642fac8db9707a94679a
|
/cluster/server/app/conftest.py
|
e7f1ec17c423166f5cbe35ce1f1346e698b6f69c
|
[] |
no_license
|
luke-zhu/blueno
|
4c5fd8b66df5c75e2d28f0cc9e32b45c75386beb
|
09fbb603468a4de8567e0fe4debd575da81672b2
|
refs/heads/master
| 2022-12-02T10:16:23.693589
| 2019-05-31T21:16:12
| 2019-05-31T22:20:12
| 184,935,571
| 2
| 0
| null | 2022-11-22T03:33:43
| 2019-05-04T19:20:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,789
|
py
|
import datetime
import random
import string
from typing import Tuple
import psycopg2
import pytest
import testing.postgresql
from werkzeug import security
from app import env, db
@pytest.fixture(scope='session')
def test_user() -> Tuple[str, str]:
    """Session-scoped fixture yielding (email, password) of a temporary user.

    Inserts the user into Postgres (starting an ephemeral testing server when
    POSTGRES_CONFIG is unset) and removes the user's test data on teardown.
    """
    created_at = datetime.datetime.now(datetime.timezone.utc)
    # NOTE(review): .utcnow() here returns the *current* naive UTC time, not
    # `created_at` itself -- presumably only used to make the email unique.
    test_email = f"test-{created_at.utcnow()}"
    test_password = ''.join(random.choice(string.ascii_letters)
                            for _ in range(24))
    pwd_hash = security.generate_password_hash(test_password)
    # Initialize a testing database if env vars not defined
    if not env.POSTGRES_CONFIG:
        postgresql = testing.postgresql.Postgresql()
        env.POSTGRES_CONFIG = postgresql.dsn()
        db.init_db()
    conn = psycopg2.connect(**env.POSTGRES_CONFIG)
    with conn.cursor() as cur:
        # Parameterized insert; ON CONFLICT guards against a rare collision.
        cur.execute(
            """
            INSERT INTO users (email, pwhash, created_at)
            VALUES (%s, %s, %s)
            ON CONFLICT DO NOTHING;
            """,
            (test_email, pwd_hash, created_at)
        )
        conn.commit()
    yield test_email, test_password
    # Clean up the database: samples, then datasets, then the user row.
    with conn.cursor() as cur:
        cur.execute(
            """
            DELETE FROM samples
            WHERE dataset_id IN (
            SELECT datasets.id
            FROM datasets
            WHERE datasets.name ILIKE %s);
            """,
            ('test%',)
        )
        cur.execute(
            """
            DELETE FROM datasets
            WHERE datasets.name ILIKE %s;
            """,
            ('test%',)
        )
        cur.execute(
            """
            DELETE FROM users
            WHERE email = %s;
            """,
            (test_email,)
        )
        conn.commit()
|
[
"luke_zhu@brown.edu"
] |
luke_zhu@brown.edu
|
68516eb8465dd0d6a43d17922aeacc2e62549fc3
|
b3ab2979dd8638b244abdb2dcf8da26d45d7b730
|
/test/test_update_request_permission_set_request_model.py
|
1cd01805888beee5d37b87cb5335ebce540773ae
|
[] |
no_license
|
CU-CommunityApps/ct-cloudcheckr-cmx-client
|
4b3d9b82c5dfdaf24f8f443526868e971d8d1b15
|
18ac9fd4d6c4ae799c0d21745eaecd783da68c0c
|
refs/heads/main
| 2023-03-03T19:53:57.685925
| 2021-02-09T13:05:07
| 2021-02-09T13:05:07
| 329,308,757
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,111
|
py
|
# coding: utf-8
"""
CloudCheckr API
CloudCheckr API # noqa: E501
OpenAPI spec version: v1
Contact: support@cloudcheckr.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudcheckr_cmx_client
from cloudcheckr_cmx_client.models.update_request_permission_set_request_model import UpdateRequestPermissionSetRequestModel # noqa: E501
from cloudcheckr_cmx_client.rest import ApiException
class TestUpdateRequestPermissionSetRequestModel(unittest.TestCase):
    """UpdateRequestPermissionSetRequestModel unit test stubs (auto-generated)."""
    def setUp(self):
        # No fixtures required for the generated stub.
        pass
    def tearDown(self):
        pass
    def testUpdateRequestPermissionSetRequestModel(self):
        """Test UpdateRequestPermissionSetRequestModel"""
        # FIXME: construct object with mandatory attributes with example values
        # model = cloudcheckr_cmx_client.models.update_request_permission_set_request_model.UpdateRequestPermissionSetRequestModel()  # noqa: E501
        pass
if __name__ == '__main__':
    unittest.main()
|
[
"pea1@cornell.edu"
] |
pea1@cornell.edu
|
7c0c761e0eb874e87225b67b92c6c871ca3ea0aa
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/pa3/sample/str_cat-207.py
|
53a36c0c4f2221975566cc4362fa0f79bd194121
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
a:str = "Hello"
b:str = "World"
c:str = "ChocoPy"
def cat2(a:str, b:str) -> str:
    # Concatenate two strings left to right.
    res:str = ""
    res = a + b
    return res
def cat3(a:str, b:str, c:str) -> str:
    """Concatenate *a*, *b* and *c* in order."""
    partial = a + b
    return partial + c
# Exercise the helpers with the sample strings.
print(cat2(a, b))
print(cat2("", c))
print(cat3(a, " ", c))
print(len(a))
print(len(cat2(a,a)))
# NOTE(review): `$Exp` is a template hole substituted by the benchmark
# harness — this line is not valid Python until it is filled in.
print(len(cat2($Exp,"")))
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
44ec356d33a6eeb229f8a08e5c38a02ca2b32098
|
75a179e8ddba54442697de87a3846f1711a30bae
|
/custompermission/api/views.py
|
a22c47ef90caba6c8771d00567decb8b3d0a670d
|
[] |
no_license
|
amanlalwani007/drftutorial
|
2b5a5338b3146b1feb88c4d815fbf996dd49cb9d
|
4f5c651f4dee98a359b7a6e34d0ae9a8f8630e68
|
refs/heads/master
| 2023-07-09T01:28:04.921042
| 2021-08-21T10:59:06
| 2021-08-21T10:59:06
| 392,457,445
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 573
|
py
|
from .models import Student
from .serializers import StudentSerializer
from rest_framework import viewsets
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated, AllowAny, IsAdminUser, IsAuthenticatedOrReadOnly, \
DjangoModelPermissions
from .custompermissions import Mypermission
class StudentModelViewSet(viewsets.ModelViewSet):
    """Full CRUD API endpoint for Student records.

    Uses Django session authentication; access is gated by the project's
    custom ``Mypermission`` class.
    """
    # Unfiltered base queryset; DRF applies pagination/filtering per settings.
    queryset = Student.objects.all()
    serializer_class = StudentSerializer
    # NOTE(review): session auth only — confirm non-browser clients are not
    # expected to hit this endpoint.
    authentication_classes = [SessionAuthentication]
    permission_classes = [Mypermission]
|
[
"amanlalwani0807@gmail.com"
] |
amanlalwani0807@gmail.com
|
567854f29d38416103ec3318189d55778dbeb556
|
41ede4fd3bfba1bff0166bca7aee80dcf21434c6
|
/suvari/gtk2chain/gtk2deps/libXinerama/actions.py
|
c2c1791cced00ef75b6b069b187e330f85971b39
|
[] |
no_license
|
pisilinux/playground
|
a7db4b42559a21cc72fd4c8649e0231ab6a3eb3c
|
e4e12fff8a847ba210befc8db7e2af8556c3adf7
|
refs/heads/master
| 2022-08-12T23:03:27.609506
| 2022-08-11T18:28:19
| 2022-08-11T18:28:19
| 8,429,459
| 16
| 22
| null | 2022-08-11T18:28:20
| 2013-02-26T09:37:11
|
Python
|
UTF-8
|
Python
| false
| false
| 501
|
py
|
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import get
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
def setup():
    """Regenerate the autotools build system, then configure a shared-only build."""
    autotools.autoreconf("-vif")
    autotools.configure("--disable-static")
def build():
    """Compile the package with make."""
    autotools.make()
def install():
    """Install into the staging directory and ship the standard docs."""
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    pisitools.dodoc("ChangeLog", "COPYING", "README")
|
[
"suvarice@gmail.com"
] |
suvarice@gmail.com
|
b5862c54903294ec07c26c54d450861018205faf
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_blitzes.py
|
605e73b62180965fa4fde07a03eb8b38914f2909
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
from xai.brain.wordbase.nouns._blitz import _BLITZ
#calss header
class _BLITZES(_BLITZ, ):
    """Noun entry for "blitzes": the plural form of "blitz"."""

    def __init__(self):
        # Reuse the singular entry's initialisation, then overwrite the
        # word-specific metadata for the plural form.
        super().__init__()
        self.name = "BLITZES"
        self.specie = 'nouns'
        self.basic = "blitz"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
7bbe58a925f858deabcb88cfe329c58ff4da3f6d
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/N/narphorium/canadian_charities.py
|
b127dd024b64b13a4a719a865d73f8ac0e00bf51
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,780
|
py
|
# Scrapes the Canada Revenue Agency charity search (25 results per page) into
# the scraperwiki datastore, resuming from the persisted "current_page" var.
# NOTE: Python 2 code (dict.has_key, BeautifulSoup 3).
import scraperwiki
from string import Template
import re
from math import ceil
from BeautifulSoup import BeautifulSoup
start_page = scraperwiki.sqlite.get_var("current_page", 1)
page = start_page
num_pages = 1  # refined once the first page reveals the total match count
max_pages = 500
for p in range(1, max_pages):
    page = start_page + p
    # Wrap around once past the (discovered) last page.
    if page > num_pages:
        page -= num_pages
    scraperwiki.sqlite.save_var("current_page", page)
    page_url = Template("http://www.cra-arc.gc.ca/ebci/haip/srch/basicsearchresult-eng.action?s=+&k=&b=true&f=25&p=$page").substitute(page=page)
    html = scraperwiki.scrape(page_url)
    soup = BeautifulSoup(html)
    for result in soup.find('div', {'class':'center'}).findAll('div', {'class':'alignLeft'}, recursive=False):
        record = {}
        for entry in result.findAll('div'):
            # Flatten the <div> markup, then split on <b>Label:</b> pairs.
            entry_content = str(entry)
            entry_content = entry_content.replace('<div>','')
            entry_content = entry_content.replace('</div>','')
            entry_content = entry_content.replace(' ',' ')  # presumably NBSP -> space; TODO confirm first arg is U+00A0
            for sub_entry in entry_content.split('<b>'):
                parts = sub_entry.split(':</b>')
                if len(parts) > 1:
                    key = parts[0].strip()
                    value = parts[1].strip()
                    # Unwrap anchor tags around either key or value.
                    m = re.search('<a[^>]+>([^<]+)<\/a>', key)
                    if m:
                        key = m.group(1).strip()
                    m = re.search('<a[^>]+>([^<]+)<\/a>', value)
                    if m:
                        value = m.group(1).strip()
                    if key == "Charity Name":
                        # "Name / REGNUMBER" — split into Name and ID columns.
                        m = re.search('(.+)\s+\/\s+([A-Z,\d]+)', value)
                        if m:
                            name = m.group(1).strip()
                            id = m.group(2).strip()
                            record['ID'] = id
                            record['Name'] = name
                    else:
                        key = key.replace('/',' ')
                        # NOTE(review): str.replace is literal — '\s+' never
                        # matches; re.sub was probably intended here.
                        key = key.replace('\s+','_')
                        record[key] = value
        # Python 2 only: has_key() was removed in Python 3.
        if record.has_key('ID'):
            #print record
            # save records to the datastore
            scraperwiki.sqlite.save(["ID"], record)
    m = re.search('<b>([\d,]+) matches found\.<\/b>', html)
    if m:
        num_results = int(m.group(1).replace(',',''))
        num_pages = ceil(num_results / 25.0)
# NOTE(review): everything from here down is a verbatim duplicate of the
# script above — scraperwiki vault files often concatenate revisions.
# Confirm which copy is canonical before deleting either.
import scraperwiki
from string import Template
import re
from math import ceil
from BeautifulSoup import BeautifulSoup
start_page = scraperwiki.sqlite.get_var("current_page", 1)
page = start_page
num_pages = 1
max_pages = 500
for p in range(1, max_pages):
page = start_page + p
if page > num_pages:
page -= num_pages
scraperwiki.sqlite.save_var("current_page", page)
page_url = Template("http://www.cra-arc.gc.ca/ebci/haip/srch/basicsearchresult-eng.action?s=+&k=&b=true&f=25&p=$page").substitute(page=page)
html = scraperwiki.scrape(page_url)
soup = BeautifulSoup(html)
for result in soup.find('div', {'class':'center'}).findAll('div', {'class':'alignLeft'}, recursive=False):
record = {}
for entry in result.findAll('div'):
entry_content = str(entry)
entry_content = entry_content.replace('<div>','')
entry_content = entry_content.replace('</div>','')
entry_content = entry_content.replace(' ',' ')
for sub_entry in entry_content.split('<b>'):
parts = sub_entry.split(':</b>')
if len(parts) > 1:
key = parts[0].strip()
value = parts[1].strip()
m = re.search('<a[^>]+>([^<]+)<\/a>', key)
if m:
key = m.group(1).strip()
m = re.search('<a[^>]+>([^<]+)<\/a>', value)
if m:
value = m.group(1).strip()
if key == "Charity Name":
m = re.search('(.+)\s+\/\s+([A-Z,\d]+)', value)
if m:
name = m.group(1).strip()
id = m.group(2).strip()
record['ID'] = id
record['Name'] = name
else:
key = key.replace('/',' ')
key = key.replace('\s+','_')
record[key] = value
if record.has_key('ID'):
#print record
# save records to the datastore
scraperwiki.sqlite.save(["ID"], record)
m = re.search('<b>([\d,]+) matches found\.<\/b>', html)
if m:
num_results = int(m.group(1).replace(',',''))
num_pages = ceil(num_results / 25.0)
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
eb8e4a117c13531cce84262fa88ef50c79dff1be
|
ee3d8e233370d5a890ba61b00b768f743c979e67
|
/baekjoon/1904.py
|
3e6a0c99876563e3fe760c5e26fcfe350281dd62
|
[
"MIT"
] |
permissive
|
alinghi/PracticeAlgorithm
|
50384fd12a29964e5aa704784a8867046693eff2
|
dea49e17337d9d7711e694059e27ceefb4b9d5d5
|
refs/heads/master
| 2023-01-23T06:03:40.314459
| 2020-12-09T14:20:09
| 2020-12-09T14:20:09
| 285,253,698
| 0
| 0
| null | 2020-08-13T05:25:44
| 2020-08-05T10:19:42
|
Python
|
UTF-8
|
Python
| false
| false
| 129
|
py
|
# BOJ 1904: count the length-N binary strings that can be built from the
# tiles "00" and "1" — a Fibonacci-style recurrence, reported modulo 15746.
n = int(input())
prev, curr = 0, 1
for _ in range(n):
    prev, curr = curr % 15746, (prev + curr) % 15746
print(curr % 15746)
|
[
"alinghi@kaist.ac.kr"
] |
alinghi@kaist.ac.kr
|
322d29fe0931fd88622b497e37f99ea0d0d93cf8
|
13ce3959fca0e51d5d17b4bf5b99d55b4a3d8ee0
|
/setup.py
|
e84b1d4bdd1371f6a1b0b7bdfbced243bda9212f
|
[
"MIT"
] |
permissive
|
guadagn0/flavio
|
2b4d4bb450dfcbd21a92f55c5dfdbf6488cbf331
|
5a9b64e38b828fcd7907fd6fe7eb79b1dcefd4d3
|
refs/heads/master
| 2020-04-05T12:29:25.207810
| 2019-03-12T22:32:29
| 2019-03-12T22:32:29
| 156,872,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,528
|
py
|
from setuptools import setup, find_packages

# Execute flavio/_version.py so __version__ is defined here without importing
# the (not-yet-installed) package itself.
with open('flavio/_version.py', encoding='utf-8') as f:
    exec(f.read())

with open('README.md', encoding='utf-8') as f:
    LONG_DESCRIPTION = f.read()

setup(name='flavio',
      version=__version__,
      author='David M. Straub',
      author_email='david.straub@tum.de',
      url='https://flav-io.github.io',
      description='A Python package for flavour physics phenomenology in the Standard Model and beyond',
      long_description=LONG_DESCRIPTION,
      long_description_content_type='text/markdown',
      license='MIT',
      packages=find_packages(),
      package_data={
          'flavio': ['data/*.yml',
                     'data/test/*',
                     'physics/data/arXiv-0810-4077v3/*',
                     'physics/data/arXiv-1503-05534v1/*',
                     'physics/data/arXiv-1503-05534v2/*',
                     'physics/data/arXiv-1501-00367v2/*',
                     # fix: this glob was accidentally listed twice
                     'physics/data/arXiv-1602-01399v1/*',
                     'physics/data/arXiv-1811-00983v1/*',
                     'physics/data/pdg/*',
                     'physics/data/qcdf_interpolate/*',
                     'physics/data/wcsm/*',
                     ]
      },
      install_requires=['numpy', 'scipy', 'setuptools>=3.3', 'pyyaml',
                        'ckmutil', 'wilson>=1.6', ],
      extras_require={
          'testing': ['nose'],
          'plotting': ['matplotlib>=1.4'],
          'sampling': ['pypmc>=1.1', 'emcee', 'iminuit', ],
      },
      )
|
[
"david.straub@tum.de"
] |
david.straub@tum.de
|
413bc5ab7c9a5eeea98c1310fcf21c955a3b899d
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/qwDPeZeufrHo2ejAY_5.py
|
dd2776cc8868b371d82d76aca6a7ce2578b301f3
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
"""
Given a _string_ containing an _algebraic equation_ , calculate and **return
the value of x**. You'll only be given equations for simple **addition** and
**subtraction**.
### Examples
eval_algebra("2 + x = 19") ➞ 17
eval_algebra("4 - x = 1") ➞ 3
eval_algebra("23 + 1 = x") ➞ 24
### Notes
* There are spaces between every number and symbol in the string.
* x may be a negative number.
"""
def eval_algebra(eq):
    """Solve a simple addition/subtraction equation for x and return it.

    *eq* looks like "2 + x = 19" (spaces between every token, per the spec).
    Parses the tokens directly instead of calling eval() on the input string,
    which was both fragile and unsafe for untrusted input.
    """
    def _side(expr):
        # Return (constant_total, x_coefficient) for one side of the '='.
        total, coeff, sign = 0, 0, 1
        for tok in expr.split():
            if tok == '+':
                sign = 1
            elif tok == '-':
                sign = -1
            elif tok == 'x':
                coeff += sign
            else:
                total += sign * int(tok)
        return total, coeff

    left, _, right = eq.partition('=')
    l_total, l_coeff = _side(left)
    r_total, r_coeff = _side(right)
    # l_total + l_coeff*x == r_total + r_coeff*x  =>  solve for x.
    # x appears exactly once, so (l_coeff - r_coeff) is +/-1 and the
    # floor division is exact.
    return (r_total - l_total) // (l_coeff - r_coeff)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
ca6a14711f5fbc121792986501439f1f313ec357
|
a4186aadd9de84de34cde0d9bf443f802117260d
|
/scrapenews/spiders/news24.py
|
20561989efbb09f476923702ccff8a4ccb499508
|
[
"MIT"
] |
permissive
|
public-people/scrape-news
|
bf1bd414d0c94d591aed0c515befaa7a76069093
|
b057f100e4db567dbadba26e18728d4ff5cd5fb3
|
refs/heads/master
| 2023-05-29T13:41:14.181360
| 2020-04-18T20:52:02
| 2020-04-18T20:52:02
| 123,028,777
| 10
| 20
|
MIT
| 2023-05-22T22:28:35
| 2018-02-26T20:50:29
|
Python
|
UTF-8
|
Python
| false
| false
| 2,256
|
py
|
# -*- coding: utf-8 -*-
from .sitemap import SitemapSpider
from scrapenews.items import ScrapenewsItem
from datetime import datetime
import pytz
SAST = pytz.timezone('Africa/Johannesburg')
class News24Spider(SitemapSpider):
    """Scrapes news24.com articles discovered via the site's robots.txt sitemaps."""
    name = 'news24'
    allowed_domains = ['www.news24.com']
    sitemap_urls = ['https://www.news24.com/robots.txt']
    # Sitemap entries whose URL matches one of these substrings are routed
    # to parse().
    sitemap_rules = [
        ('www.news24.com/SouthAfrica/News', 'parse'),
        ('www.news24.com/Columnists', 'parse'),
        ('www.news24.com/Green/News', 'parse'),
        ('www.news24.com/Obituaries', 'parse'),
        ('www.news24.com/PressReleases', 'parse'),
    ]
    publication_name = 'News24'
    def parse(self, response):
        """Yield one ScrapenewsItem per article page; non-/News/ URLs and
        pages without an article body produce nothing."""
        if '/News/' not in response.url:
            self.logger.info("Ignoring %s", response.url)
            return
        title = response.xpath('//div[contains(@class, "article_details")]/h1/text()').extract_first()
        self.logger.info('%s %s', response.url, title)
        article_body = response.xpath('//article[@id="article-body"]')
        if article_body:
            body_html = article_body.extract_first()
            byline = response.xpath('//div[contains(@class, "ByLineWidth")]/p/text()').extract_first()
            publication_date_str = response.xpath('//span[@id="spnDate"]/text()').extract_first()
            accreditation = response.xpath('//div[contains(@class, "ByLineWidth")]/div[contains(@class, "accreditation")]/a/@href').extract_first()
            # The page timestamp has no zone info; it is treated as South
            # African time (SAST) before serialising.
            publication_date = datetime.strptime(publication_date_str, '%Y-%m-%d %H:%M')
            publication_date = SAST.localize(publication_date)
            item = ScrapenewsItem()
            item['body_html'] = body_html
            item['title'] = title
            item['byline'] = byline
            item['published_at'] = publication_date.isoformat()
            item['retrieved_at'] = datetime.utcnow().isoformat()
            item['url'] = response.url
            item['file_name'] = response.url.split('/')[-1]
            item['spider_name'] = self.name
            item['publication_name'] = self.publication_name
            # [1:] drops the href's leading character — presumably '/';
            # TODO confirm against a live accreditation link.
            if accreditation:
                item['publication_name'] += " with " + accreditation[1:]
            yield item
        # NOTE(review): logs an empty message — looks like leftover debug output.
        self.logger.info("")
|
[
"jbothma@gmail.com"
] |
jbothma@gmail.com
|
050c6d64a7e34ab1e92d1ca8a82c006208da4b2e
|
32e0dcfe03fc8a54fd218bfb5fe8741a5ea8fc39
|
/UserCode/jzhang/sbc_run6_mergeall.py
|
013b5ffbce8389c2ccc782914a093247c85589c1
|
[
"MIT"
] |
permissive
|
RunzZhang/SBCcode
|
e480ab85d165b42de060b1778a2e2af38b2f511e
|
e75b8e751cec5fb2c28950edef0c82f005caedcb
|
refs/heads/master
| 2021-09-08T03:41:56.222249
| 2019-06-17T19:52:32
| 2019-06-17T19:52:32
| 192,990,370
| 0
| 0
|
MIT
| 2019-06-20T21:36:26
| 2019-06-20T21:36:26
| null |
UTF-8
|
Python
| false
| false
| 7,163
|
py
|
import numpy as np
import SBCcode as sbc
import os
import re
from SBCcode.DataHandling.WriteBinary import WriteBinaryNtupleFile as wb
# import ipdb
# Analysis-output categories to merge; one merged .bin is written per entry.
modules = [
    'AcousticAnalysis_',
    'DytranAnalysis_',
    'EventAnalysis_',
    'HistoryAnalysis_',
    'ImageAnalysis_',
    'TimingAnalysis_',
    'PMTfastDAQalignment_']
# modules = ['PMTpulseAnalysis_']
# modules = ['ImageAnalysis_']
# modules = ['AcousticAnalysis_']
# modules = ['TimingAnalysis_']
# recondir = '/bluearc/storage/recon/devel/SBC-17/output'
# Source (per-run) and destination (merged) reconstruction output trees.
recondir = '/pnfs/coupp/persistent/grid_output/SBC-17/output'
merge_dir = '/bluearc/storage/recon/devel/SBC-17/output'
runlist = os.listdir(recondir)
runlist = filter(lambda fn: (not re.search('^\d+_\d+$', fn) is None)
and os.path.isdir(os.path.join(recondir, fn))
and (len(os.listdir(os.path.join(recondir, fn))) > 0),
runlist)
# runlist = ['20170706_6']
# runlist = ['20170621_7','20170625_2']
print(runlist)
# one_piezo_list = [
# '20170619_3',
# '20170621_0',
# '20170621_2',
# '20170621_3',
# '20170621_4',
# '20170621_5',
# '20170621_6',
# '20170621_7',
# '20170621_8',
# '20170622_0',
# '20170622_1',
# '20170622_2',
# '20170622_3',
# '20170622_5',
# '20170622_6',
# '20170622_7',
# '20170622_8',
# '20170622_9',
# '20170623_0',
# '20170623_1',
# '20170623_2']
# merge out by category to save memory
for module in modules:
# bad_list = [
# '20170624_2',
# '20170624_4',
# '20170625_0',
# '20170625_1',
# '20170625_2',
# '20170704_3',
# '20170704_4',
# '20170705_0',
# '20170705_1',
# '20170705_2',
# '20170706_5',
# '20170713_3',
# '20170713_4',
# '20170713_5',
# '20170714_0',
# '20170714_1',
# '20170714_2',
# '20170715_0',
# '20170715_1',
# '20170715_2',
# '20170715_4',
# '20170716_0',
# '20170716_1',
# '20170716_2',
# '20170716_3',
# '20170716_5',
# '20170716_6',
# '20170716_7',
# '20170717_0']
# if key == 'AcousticAnalysis_':
# bad_list += [
# '20170621_1', '20170622_4', '20170624_3', '20170711_13', '20170706_6', '20170708_2', '20170719_11']
# bad_list = []
# if key == 'ImageAnalysis_':
# bad_list = ['20170626_9', '20170703_3', '20170707_4']
# elif key == 'DytranAnalysis_':
# bad_list = [
# '20170622_9',
# '20170624_4',
# '20170625_0',
# '20170625_1',
# '20170704_3',
# '20170704_4',
# '20170705_0',
# '20170705_1',
# '20170705_2',
# '20170706_5']
# elif key == 'EventAnalysis_':
# bad_list = ['20170621_1' '20170622_4' '20170624_3']
# elif key == 'PMTfastDAQalignment_':
# bad_list = ['20170621_1' '20170622_4' '20170624_3']
    # Per-module list of runs to exclude from the merge (currently none;
    # historical exclusions are preserved in the comments above).
    bad_list = []
    print("Loading " + module)
    merge_out = []
    shapes0 = []
    for runname in runlist:
        if runname in set(bad_list):
            print(runname + ' is in bad_list')
            continue
        # Run names look like "YYYYMMDD_n"; build a sortable serial number.
        runid_str = runname.split('_')
        runid = np.int32(runid_str)
        # NOTE(review): runid is int32 and runid[0]*1000 (~2.0e10) exceeds the
        # int32 range — confirm numpy promotes here, otherwise runsn wraps.
        runsn = runid[0] * 1000 + runid[1]
        if (runsn >= 20170619003) and (runsn < 20170901000):
            fpath = os.path.join(recondir, runname, module + runname + '.bin')
            if os.path.exists(fpath):
                if os.stat(fpath).st_size > 0:
                    data = sbc.read_bin(fpath)
# # check array sizes
# shapes = [data[x].shape for x in data.keys()]
# if len(shapes0) < 1:
# shapes0 = shapes
# print(runname + "\t" + str(shapes))
# Pad 0's to fields without Piezo2
if module == 'AcousticAnalysis_' and len(data['piezo_list'].shape) == 1:
size = [data['piezo_list'].shape[0], 2]
tmp = data['piezo_list']
data['piezo_list'] = np.zeros(size, dtype=np.int32)
data['piezo_list'][:, 0] = tmp
tmp = data['bubble_t0']
data['bubble_t0'] = np.zeros(size, dtype=np.float64)
data['bubble_t0'][:, 0] = tmp
tmp = data['peak_t0']
data['peak_t0'] = np.zeros(size, dtype=np.float64)
data['peak_t0'][:, 0] = tmp
size = list(data['piezoE'].shape)
size[1] += 1
tmp = data['piezoE']
data['piezoE'] = np.zeros(size, dtype=np.float64)
# ipdb.set_trace()
data['piezoE'][:, 0, :, :] = tmp[:, 0, :, :]
if module == 'TimingAnalysis_' and len(data['PMTmatch_t0'].shape) == 1:
var_names = ['CAMstate', 'PMTmatch_area', 'PMTmatch_area_nobs', 'PMTmatch_baseline', 'PMTmatch_baserms', 'PMTmatch_coinc', 'PMTmatch_ix', 'PMTmatch_lag', 'PMTmatch_max', 'PMTmatch_min', 'PMTmatch_pulse_area', 'PMTmatch_pulse_height', 'PMTmatch_pulse_t10', 'PMTmatch_pulse_t90', 'PMTmatch_pulse_tend', 'PMTmatch_pulse_tpeak', 'PMTmatch_pulse_tstart', 'PMTmatch_t0', 'nPMThits_fastdaq', 'nVetohits_fastdaq', 't_nearestPMThit', 't_nearestVetohit']
for var_name in var_names:
if len(data[var_name].shape) == 1:
data[var_name] = np.stack((data[var_name],
np.zeros(data[var_name].shape, data[var_name].dtype)), axis=1)
elif len(data[var_name].shape) > 1:
data[var_name] = np.concatenate((data[var_name],
np.zeros(data[var_name].shape, data[var_name].dtype)),
axis=1)
if module == 'TimingAnalysis_': # fix int32/int64 problem
var_name = 'PMTmatch_ix'
data[var_name] = np.int64(data[var_name])
shapes = [(x, data[x].dtype, data[x].shape) for x in data.keys()]
if len(shapes0) < 1:
shapes0 = shapes
print(runname + "\t" + str(shapes))
# ipdb.set_trace()
merge_out.append(data)
else:
print("zero size file: " + fpath)
else:
print("nonexis file: " + fpath)
    merge_name = 'all'
    # rowdef: rows-per-event value handed to the binary writer; 7 and 8 appear
    # to be format-specific for the PMT and HumanGetBub outputs — TODO confirm.
    rowdef = 1
    if module in set(['PMTpulseAnalysis_', 'PMTpheAnalysis_']):
        rowdef = 7
    if module in set(['HumanGetBub_']):
        rowdef = 8
    print("Writing " + module)
wb(os.path.join(merge_dir, module + merge_name + '.bin'), merge_out,
rowdef=rowdef, initialkeys=['runid', 'ev'], drop_first_dim=True)
|
[
"j.gresl12@gmail.com"
] |
j.gresl12@gmail.com
|
730ad7c52a87d68667cc6d4ef736c84998330595
|
e17ab8f50b8a1f13b52aa770269eb469c87161b8
|
/apps/contact/forms.py
|
61cca7b816d64ca24e3016b278acd325dd385140
|
[] |
no_license
|
masterfung/bond
|
6baa84a4322801aeb3f466d8f83e7e18c1d91731
|
bc283ec1bd7a52f77b6fc788d5e818bd7233fc1d
|
refs/heads/master
| 2022-12-09T17:45:47.243471
| 2015-03-08T19:29:21
| 2015-03-08T19:29:21
| 22,904,060
| 0
| 0
| null | 2022-12-07T23:22:59
| 2014-08-13T05:42:10
|
Python
|
UTF-8
|
Python
| false
| false
| 598
|
py
|
__author__ = '@masterfung'
from captcha.fields import ReCaptchaField # Only import different from yesterday
import floppyforms as forms
class ContactForm(forms.Form):
    """Site contact form: Bootstrap-styled fields plus a reCAPTCHA check."""

    name = forms.CharField(required=True)
    email = forms.EmailField(required=True)
    subject = forms.CharField(required=True)
    message = forms.CharField(widget=forms.Textarea)
    captcha = ReCaptchaField()

    def __init__(self, *args, **kwargs):
        super(ContactForm, self).__init__(*args, **kwargs)
        # Tag every widget with Bootstrap's form-control class.
        for bound_field in self.fields.values():
            bound_field.widget.attrs['class'] = 'form-control'
|
[
"hungmasterj@gmail.com"
] |
hungmasterj@gmail.com
|
2b80e991998881a815ef8d991d0d1747dd9a3be1
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/apimanagement/azure-mgmt-apimanagement/generated_samples/api_management_list_diagnostics.py
|
9c7e8d5fde159226518502a2d34c5ee855e982f4
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,587
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.apimanagement import ApiManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-apimanagement
# USAGE
python api_management_list_diagnostics.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Print every diagnostic configured on the sample APIM service.

    Credentials are resolved from AZURE_CLIENT_ID / AZURE_TENANT_ID /
    AZURE_CLIENT_SECRET by DefaultAzureCredential; 'subid', 'rg1' and
    'apimService1' are sample placeholders to substitute.
    """
    client = ApiManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )
    # The SDK returns a pageable; iterating it drives the HTTP requests.
    response = client.diagnostic.list_by_service(
        resource_group_name="rg1",
        service_name="apimService1",
    )
    for item in response:
        print(item)
# x-ms-original-file: specification/apimanagement/resource-manager/Microsoft.ApiManagement/stable/2022-08-01/examples/ApiManagementListDiagnostics.json
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
63cdacc5b1a7bf38a597105b8ec6af5aa9c3117b
|
bc441bb06b8948288f110af63feda4e798f30225
|
/resource_package_tools_sdk/model/ops_automation/job_details_pb2.pyi
|
3a118c77a835875581e4960eaa7aa1f7fb4251e6
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,971
|
pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from resource_package_tools_sdk.model.ops_automation.bind_resource_pb2 import (
BindResource as resource_package_tools_sdk___model___ops_automation___bind_resource_pb2___BindResource,
)
from resource_package_tools_sdk.model.ops_automation.mail_info_pb2 import (
MailInfo as resource_package_tools_sdk___model___ops_automation___mail_info_pb2___MailInfo,
)
from typing import (
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class JobDetails(google___protobuf___message___Message):
    """Typed stub for the OpsAutomation ``JobDetails`` protobuf message.

    Generated by generate_proto_mypy_stubs.py — do not edit by hand;
    regenerate from the .proto definition instead.
    """
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    class Scheduler(google___protobuf___message___Message):
        """Nested message describing the job's scheduler binding/active state."""
        DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
        isBound = ... # type: builtin___bool
        isActive = ... # type: builtin___bool
def __init__(self,
*,
isBound : typing___Optional[builtin___bool] = None,
isActive : typing___Optional[builtin___bool] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> JobDetails.Scheduler: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> JobDetails.Scheduler: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"isActive",b"isActive",u"isBound",b"isBound"]) -> None: ...
version = ... # type: builtin___int
createTime = ... # type: typing___Text
updateTime = ... # type: typing___Text
creator = ... # type: typing___Text
org = ... # type: builtin___int
name = ... # type: typing___Text
category = ... # type: typing___Text
menuId = ... # type: typing___Text
desc = ... # type: typing___Text
allowModify = ... # type: builtin___bool
id = ... # type: typing___Text
@property
def scheduler(self) -> JobDetails.Scheduler: ...
@property
def bindResource(self) -> resource_package_tools_sdk___model___ops_automation___bind_resource_pb2___BindResource: ...
@property
def mail(self) -> resource_package_tools_sdk___model___ops_automation___mail_info_pb2___MailInfo: ...
def __init__(self,
*,
version : typing___Optional[builtin___int] = None,
createTime : typing___Optional[typing___Text] = None,
updateTime : typing___Optional[typing___Text] = None,
creator : typing___Optional[typing___Text] = None,
org : typing___Optional[builtin___int] = None,
scheduler : typing___Optional[JobDetails.Scheduler] = None,
name : typing___Optional[typing___Text] = None,
category : typing___Optional[typing___Text] = None,
menuId : typing___Optional[typing___Text] = None,
bindResource : typing___Optional[resource_package_tools_sdk___model___ops_automation___bind_resource_pb2___BindResource] = None,
desc : typing___Optional[typing___Text] = None,
allowModify : typing___Optional[builtin___bool] = None,
mail : typing___Optional[resource_package_tools_sdk___model___ops_automation___mail_info_pb2___MailInfo] = None,
id : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> JobDetails: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> JobDetails: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"bindResource",b"bindResource",u"mail",b"mail",u"scheduler",b"scheduler"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"allowModify",b"allowModify",u"bindResource",b"bindResource",u"category",b"category",u"createTime",b"createTime",u"creator",b"creator",u"desc",b"desc",u"id",b"id",u"mail",b"mail",u"menuId",b"menuId",u"name",b"name",u"org",b"org",u"scheduler",b"scheduler",u"updateTime",b"updateTime",u"version",b"version"]) -> None: ...
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
a04fcffa8e327209c8e04feb7c9e4aec49f1d73b
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/fRB5QRYn5WC8jMGTe_10.py
|
ad1e01971694e69b65bc1bfc1f4ba58bef2fe277
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 721
|
py
|
import datetime as dt
def time_difference(city_a, timestamp, city_b):
    """Convert *timestamp* (local time in *city_a*, "%B %d, %Y %H:%M")
    into the corresponding local time in *city_b*.

    Returns "YYYY-M-D HH:MM" (date parts unpadded, time zero-padded).
    """
    # Fixed UTC offsets for the supported cities.
    utc_offset = {
        "Los Angeles": dt.timedelta(hours=-8),
        "New York": dt.timedelta(hours=-5),
        "Caracas": dt.timedelta(hours=-4, minutes=-30),
        "Buenos Aires": dt.timedelta(hours=-3),
        "London": dt.timedelta(),
        "Rome": dt.timedelta(hours=1),
        "Moscow": dt.timedelta(hours=3),
        "Tehran": dt.timedelta(hours=3, minutes=30),
        "New Delhi": dt.timedelta(hours=5, minutes=30),
        "Beijing": dt.timedelta(hours=8),
        "Canberra": dt.timedelta(hours=10),
    }
    local_a = dt.datetime.strptime(timestamp, "%B %d, %Y %H:%M")
    # Normalise to UTC, then shift into the destination zone.
    local_b = local_a - utc_offset[city_a] + utc_offset[city_b]
    return "{}-{}-{} {:02d}:{:02d}".format(
        local_b.year, local_b.month, local_b.day, local_b.hour, local_b.minute)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
7d7d9e3f4ee38535b9909b2b3168a029ffb5622e
|
e63c1e59b2d1bfb5c03d7bf9178cf3b8302ce551
|
/uri/uri_python/matematica/p1198.py
|
26ea0741bc55d2e35e7fe6e038e30ff4b41afddf
|
[] |
no_license
|
GabrielEstevam/icpc_contest_training
|
b8d97184ace8a0e13e1c0bf442baa36c853a6837
|
012796c2ceb901cf7aa25d44a93614696a7d9c58
|
refs/heads/master
| 2020-04-24T06:15:16.826669
| 2019-10-08T23:13:15
| 2019-10-08T23:13:15
| 171,758,893
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
import math

# URI 1198: read pairs "A B" until EOF and print the absolute difference.
while True:
    try:
        entrada = input().split(" ")
        A = int(entrada[0])
        B = int(entrada[1])
        # Pure-integer abs() instead of int(math.fabs(...)): fabs round-trips
        # through float and would lose precision for very large inputs.
        print(abs(A - B))
    except EOFError:
        break
|
[
"gabrielestevam@hotmail.com"
] |
gabrielestevam@hotmail.com
|
60925646feb8473a3fff7eec5ed67860e4efff65
|
aea8fea216234fd48269e4a1830b345c52d85de2
|
/fhir/resources/STU3/tests/test_episodeofcare.py
|
2a6f7c5f3886e9275ff0a301dc3bf923e2cac14a
|
[
"BSD-3-Clause"
] |
permissive
|
mmabey/fhir.resources
|
67fce95c6b35bfdc3cbbc8036e02c962a6a7340c
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
refs/heads/master
| 2023-04-12T15:50:30.104992
| 2020-04-11T17:21:36
| 2020-04-11T17:21:36
| 269,712,884
| 0
| 0
|
NOASSERTION
| 2020-06-05T17:03:04
| 2020-06-05T17:03:04
| null |
UTF-8
|
Python
| false
| false
| 4,568
|
py
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/EpisodeOfCare
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
import io
import json
import os
import unittest
import pytest
from .. import episodeofcare
from ..fhirdate import FHIRDate
from .fixtures import force_bytes
@pytest.mark.usefixtures("base_settings")
class EpisodeOfCareTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get("FHIR_UNITTEST_DATADIR") or ""
with io.open(os.path.join(datadir, filename), "r", encoding="utf-8") as handle:
js = json.load(handle)
self.assertEqual("EpisodeOfCare", js["resourceType"])
return episodeofcare.EpisodeOfCare(js)
    def testEpisodeOfCare1(self):
        """Round-trip test: parse the example fixture, re-serialise it, and
        verify a second parse exposes the same observable values."""
        inst = self.instantiate_from("episodeofcare-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a EpisodeOfCare instance")
        self.implEpisodeOfCare1(inst)
        # Serialise and re-parse, then re-run the same field assertions.
        js = inst.as_json()
        self.assertEqual("EpisodeOfCare", js["resourceType"])
        inst2 = episodeofcare.EpisodeOfCare(js)
        self.implEpisodeOfCare1(inst2)
def implEpisodeOfCare1(self, inst):
self.assertEqual(inst.diagnosis[0].rank, 1)
self.assertEqual(
force_bytes(inst.diagnosis[0].role.coding[0].code), force_bytes("CC")
)
self.assertEqual(
force_bytes(inst.diagnosis[0].role.coding[0].display),
force_bytes("Chief complaint"),
)
self.assertEqual(
force_bytes(inst.diagnosis[0].role.coding[0].system),
force_bytes("http://hl7.org/fhir/diagnosis-role"),
)
self.assertEqual(force_bytes(inst.id), force_bytes("example"))
self.assertEqual(
force_bytes(inst.identifier[0].system),
force_bytes("http://example.org/sampleepisodeofcare-identifier"),
)
self.assertEqual(force_bytes(inst.identifier[0].value), force_bytes("123"))
self.assertEqual(inst.period.start.date, FHIRDate("2014-09-01").date)
self.assertEqual(inst.period.start.as_json(), "2014-09-01")
self.assertEqual(force_bytes(inst.status), force_bytes("active"))
self.assertEqual(
inst.statusHistory[0].period.end.date, FHIRDate("2014-09-14").date
)
self.assertEqual(inst.statusHistory[0].period.end.as_json(), "2014-09-14")
self.assertEqual(
inst.statusHistory[0].period.start.date, FHIRDate("2014-09-01").date
)
self.assertEqual(inst.statusHistory[0].period.start.as_json(), "2014-09-01")
self.assertEqual(
force_bytes(inst.statusHistory[0].status), force_bytes("planned")
)
self.assertEqual(
inst.statusHistory[1].period.end.date, FHIRDate("2014-09-21").date
)
self.assertEqual(inst.statusHistory[1].period.end.as_json(), "2014-09-21")
self.assertEqual(
inst.statusHistory[1].period.start.date, FHIRDate("2014-09-15").date
)
self.assertEqual(inst.statusHistory[1].period.start.as_json(), "2014-09-15")
self.assertEqual(
force_bytes(inst.statusHistory[1].status), force_bytes("active")
)
self.assertEqual(
inst.statusHistory[2].period.end.date, FHIRDate("2014-09-24").date
)
self.assertEqual(inst.statusHistory[2].period.end.as_json(), "2014-09-24")
self.assertEqual(
inst.statusHistory[2].period.start.date, FHIRDate("2014-09-22").date
)
self.assertEqual(inst.statusHistory[2].period.start.as_json(), "2014-09-22")
self.assertEqual(
force_bytes(inst.statusHistory[2].status), force_bytes("onhold")
)
self.assertEqual(
inst.statusHistory[3].period.start.date, FHIRDate("2014-09-25").date
)
self.assertEqual(inst.statusHistory[3].period.start.as_json(), "2014-09-25")
self.assertEqual(
force_bytes(inst.statusHistory[3].status), force_bytes("active")
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(force_bytes(inst.type[0].coding[0].code), force_bytes("hacc"))
self.assertEqual(
force_bytes(inst.type[0].coding[0].display),
force_bytes("Home and Community Care"),
)
self.assertEqual(
force_bytes(inst.type[0].coding[0].system),
force_bytes("http://hl7.org/fhir/episodeofcare-type"),
)
|
[
"connect2nazrul@gmail.com"
] |
connect2nazrul@gmail.com
|
f383d554c135fc392f72f27540010b2c2a96e753
|
743da4642ac376e5c4e1a3b63c079533a5e56587
|
/build/lib.win-amd64-3.6/fairseq/modules/adaptive_softmax.py
|
1c60d09568cbafd7a449a66bea1936644528f85f
|
[
"MIT"
] |
permissive
|
tmtmaj/Exploiting-PrLM-for-NLG-tasks
|
cdae1b6e451b594b11d8ecef3c1cd4e12fe51c9b
|
e8752593d3ee881cf9c0fb5ed26d26fcb02e6dd5
|
refs/heads/main
| 2023-06-16T08:26:32.560746
| 2021-07-14T17:50:19
| 2021-07-14T17:50:19
| 371,899,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,028
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import operator
import functools
import torch
import torch.nn.functional as F
from fairseq.modules.quant_noise import quant_noise
from torch import nn
class TiedLinear(nn.Module):
def __init__(self, weight, transpose):
super().__init__()
self.weight = weight
self.transpose = transpose
def forward(self, input):
return F.linear(input, self.weight.t() if self.transpose else self.weight)
class TiedHeadModule(nn.Module):
def __init__(self, weights, input_dim, num_classes, q_noise, qn_block_size):
super().__init__()
tied_emb, _ = weights
self.num_words, emb_dim = tied_emb.size()
self.word_proj = quant_noise(TiedLinear(tied_emb, transpose=False), q_noise, qn_block_size)
if input_dim != emb_dim:
self.word_proj = nn.Sequential(
quant_noise(nn.Linear(input_dim, emb_dim, bias=False), q_noise, qn_block_size),
self.word_proj,
)
self.class_proj = quant_noise(nn.Linear(input_dim, num_classes, bias=False), q_noise, qn_block_size)
self.out_dim = self.num_words + num_classes
self.register_buffer('_float_tensor', torch.FloatTensor(1))
def forward(self, input):
inp_sz = functools.reduce(operator.mul, input.shape[:-1], 1)
out = self._float_tensor.new(inp_sz, self.out_dim)
out[:, :self.num_words] = self.word_proj(input.view(inp_sz, -1))
out[:, self.num_words:] = self.class_proj(input.view(inp_sz, -1))
return out
class AdaptiveSoftmax(nn.Module):
"""
This is an implementation of the efficient softmax approximation for
graphical processing units (GPU), described in the paper "Efficient softmax
approximation for GPUs" (http://arxiv.org/abs/1609.04309).
"""
def __init__(self, vocab_size, input_dim, cutoff, dropout, factor=4., adaptive_inputs=None, tie_proj=False, q_noise=0, qn_block_size=8):
super().__init__()
if vocab_size > cutoff[-1]:
cutoff = cutoff + [vocab_size]
else:
assert vocab_size == cutoff[
-1], 'cannot specify cutoff larger than vocab size'
output_dim = cutoff[0] + len(cutoff) - 1
self.vocab_size = vocab_size
self.cutoff = cutoff
self.dropout = dropout
self.input_dim = input_dim
self.factor = factor
self.q_noise = q_noise
self.qn_block_size = qn_block_size
self.lsm = nn.LogSoftmax(dim=1)
if adaptive_inputs is not None:
self.head = TiedHeadModule(adaptive_inputs.weights_for_band(0), input_dim, len(cutoff) - 1, self.q_noise, self.qn_block_size)
else:
self.head = quant_noise(nn.Linear(input_dim, output_dim, bias=False), self.q_noise, self.qn_block_size)
self._make_tail(adaptive_inputs, tie_proj)
def init_weights(m):
if hasattr(m, 'weight') and not isinstance(m, TiedLinear) and not isinstance(m, TiedHeadModule):
nn.init.xavier_uniform_(m.weight)
self.apply(init_weights)
self.register_buffer('version', torch.LongTensor([1]))
def _make_tail(self, adaptive_inputs=None, tie_proj=False):
self.tail = nn.ModuleList()
for i in range(len(self.cutoff) - 1):
dim = int(self.input_dim // self.factor ** (i + 1))
tied_emb, tied_proj = adaptive_inputs.weights_for_band(i + 1) \
if adaptive_inputs is not None else (None, None)
if tied_proj is not None:
if tie_proj:
proj = quant_noise(TiedLinear(tied_proj, transpose=True), self.q_noise, self.qn_block_size)
else:
proj = quant_noise(nn.Linear(tied_proj.size(0), tied_proj.size(1), bias=False), self.q_noise, self.qn_block_size)
else:
proj = quant_noise(nn.Linear(self.input_dim, dim, bias=False), self.q_noise, self.qn_block_size)
if tied_emb is None:
out_proj = nn.Linear(dim, self.cutoff[i + 1] - self.cutoff[i], bias=False)
else:
out_proj = TiedLinear(tied_emb, transpose=False)
m = nn.Sequential(
proj,
nn.Dropout(self.dropout),
quant_noise(out_proj, self.q_noise, self.qn_block_size),
)
self.tail.append(m)
def upgrade_state_dict_named(self, state_dict, name):
version_name = name + '.version'
if version_name not in state_dict:
raise Exception('This version of the model is no longer supported')
def adapt_target(self, target):
"""
In order to be efficient, the AdaptiveSoftMax does not compute the
scores for all the word of the vocabulary for all the examples. It is
thus necessary to call the method adapt_target of the AdaptiveSoftMax
layer inside each forward pass.
"""
target = target.view(-1)
new_target = [target.clone()]
target_idxs = []
for i in range(len(self.cutoff) - 1):
mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))
new_target[0][mask] = self.cutoff[0] + i
if mask.any():
target_idxs.append(mask.nonzero().squeeze(1))
new_target.append(target[mask].add(-self.cutoff[i]))
else:
target_idxs.append(None)
new_target.append(None)
return new_target, target_idxs
def forward(self, input, target):
"""
Args:
input: (b x t x d)
target: (b x t)
Returns:
2 lists: output for each cutoff section and new targets by cut off
"""
input = input.contiguous().view(-1, input.size(-1))
input = F.dropout(input, p=self.dropout, training=self.training)
new_target, target_idxs = self.adapt_target(target)
output = [self.head(input)]
for i in range(len(target_idxs)):
if target_idxs[i] is not None:
output.append(self.tail[i](input.index_select(0, target_idxs[i])))
else:
output.append(None)
return output, new_target
def get_log_prob(self, input, target):
"""
Computes the log probabilities for all the words of the vocabulary,
given a 2D tensor of hidden vectors.
"""
bsz, length, dim = input.size()
input = input.contiguous().view(-1, dim)
if target is not None:
_, target_idxs = self.adapt_target(target)
else:
target_idxs = None
head_y = self.head(input)
log_probs = head_y.new_zeros(input.size(0), self.vocab_size)
head_sz = self.cutoff[0] + len(self.tail)
log_probs[:, :head_sz] = self.lsm(head_y)
tail_priors = log_probs[:, self.cutoff[0]: head_sz].clone()
for i in range(len(self.tail)):
start = self.cutoff[i]
end = self.cutoff[i + 1]
if target_idxs is None:
tail_out = log_probs[:, start:end]
tail_out.copy_(self.tail[i](input))
log_probs[:, start:end] = self.lsm(tail_out).add_(tail_priors[:, i, None])
elif target_idxs[i] is not None:
idxs = target_idxs[i]
tail_out = log_probs[idxs, start:end]
tail_out.copy_(self.tail[i](input[idxs]))
log_probs[idxs, start:end] = self.lsm(tail_out).add_(tail_priors[idxs, i, None])
log_probs = log_probs.view(bsz, length, -1)
return log_probs
|
[
"qkrwjdgur09@naver.com"
] |
qkrwjdgur09@naver.com
|
d17cf53c623fa6d7bd0d5d74da87667c85fca93f
|
f730a1fc0fe7021d68cec973125d605c10ac7a64
|
/code/camera.py
|
b9bfb55be94621c6619c04db7f15b9de8a045fcd
|
[] |
no_license
|
wwxFromTju/TJU_AR_alpha0.1
|
47a248b6861dfcdc47a9eefd86250d616a4d71f8
|
e435424943846a7812e22afb7ca66a5065d70aec
|
refs/heads/master
| 2021-04-12T11:29:44.434154
| 2016-07-27T10:20:59
| 2016-07-27T10:20:59
| 64,299,053
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 633
|
py
|
#!/usr/bin/env python
# encoding=utf-8
from scipy import linalg
class Camera(object):
"""
相机的类
"""
def __init__(self, P):
"""
初始化相机类
"""
self.P = P
# 标定矩阵
self.K = None
# 旋转矩阵
self.R = None
# 平移矩阵
self.t = None
# 相机中心
self.c = None
def project(self, X):
"""
:param X: (4, n) 的投影点, 并且对坐标归一化
:return:
"""
x = linalg.dot(self.P, X)
for i in range(3):
x[i] /= x[2]
return x
|
[
"wxwang@tju.edu.cn"
] |
wxwang@tju.edu.cn
|
9eaa19c9d5828a8c9d3014e6f598ade1b040dc26
|
8be39cae865fa2163c131a34051c4867ad0350a0
|
/examples/quickhowto2/app/views.py
|
965acaaf1bbf79ecf7beb5b956b8ac0d380fcf32
|
[
"BSD-3-Clause"
] |
permissive
|
ben-github/Flask-AppBuilder
|
fd13f694457ef4fbc8c73f8b0b90083dc5b978bc
|
e52947f3e4494a84017bf101b19823df91a41448
|
refs/heads/master
| 2021-01-17T17:52:19.125926
| 2015-01-09T18:13:30
| 2015-01-09T18:13:30
| 25,661,891
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,050
|
py
|
import calendar
from flask import redirect
from flask_appbuilder import ModelView, GroupByChartView, aggregate_count, action
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.models.generic.interface import GenericInterface
from flask_appbuilder.widgets import FormVerticalWidget, FormInlineWidget, FormHorizontalWidget, ShowBlockWidget
from flask_appbuilder.widgets import ListThumbnail
from flask.ext.appbuilder.models.generic import PSSession
from flask_appbuilder.models.generic import PSModel
from flask_appbuilder.models.sqla.filters import FilterStartsWith, FilterEqualFunction as FA
from app import db, appbuilder
from .models import ContactGroup, Gender, Contact, FloatModel, Product, ProductManufacturer, ProductModel
def fill_gender():
try:
db.session.add(Gender(name='Male'))
db.session.add(Gender(name='Female'))
db.session.commit()
except:
db.session.rollback()
sess = PSSession()
class PSView(ModelView):
datamodel = GenericInterface(PSModel, sess)
base_permissions = ['can_list', 'can_show']
list_columns = ['UID', 'C', 'CMD', 'TIME']
search_columns = ['UID', 'C', 'CMD']
class ProductManufacturerView(ModelView):
datamodel = SQLAInterface(ProductManufacturer)
class ProductModelView(ModelView):
datamodel = SQLAInterface(ProductModel)
class ProductView(ModelView):
datamodel = SQLAInterface(Product)
list_columns = ['name','product_manufacturer', 'product_model']
add_columns = ['name','product_manufacturer', 'product_model']
edit_columns = ['name','product_manufacturer', 'product_model']
add_widget = FormVerticalWidget
class ContactModelView2(ModelView):
datamodel = SQLAInterface(Contact)
list_columns = ['name', 'personal_celphone', 'birthday', 'contact_group.name']
add_form_query_rel_fields = {'contact_group':[['name',FilterStartsWith,'p']],
'gender':[['name',FilterStartsWith,'F']]}
class ContactModelView(ModelView):
datamodel = SQLAInterface(Contact)
add_widget = FormVerticalWidget
show_widget = ShowBlockWidget
list_columns = ['name', 'personal_celphone', 'birthday', 'contact_group.name']
list_template = 'list_contacts.html'
list_widget = ListThumbnail
show_template = 'show_contacts.html'
extra_args = {'extra_arg_obj1': 'Extra argument 1 injected'}
base_order = ('name', 'asc')
show_fieldsets = [
('Summary', {'fields': ['name', 'gender', 'contact_group']}),
(
'Personal Info',
{'fields': ['address', 'birthday', 'personal_phone', 'personal_celphone'], 'expanded': False}),
]
add_fieldsets = [
('Summary', {'fields': ['name', 'gender', 'contact_group']}),
(
'Personal Info',
{'fields': ['address', 'birthday', 'personal_phone', 'personal_celphone'], 'expanded': False}),
]
edit_fieldsets = [
('Summary', {'fields': ['name', 'gender', 'contact_group']}),
(
'Personal Info',
{'fields': ['address', 'birthday', 'personal_phone', 'personal_celphone'], 'expanded': False}),
]
@action("muldelete", "Delete", "Delete all Really?", "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class GroupModelView(ModelView):
datamodel = SQLAInterface(ContactGroup)
related_views = [ContactModelView]
show_template = 'appbuilder/general/model/show_cascade.html'
list_columns = ['name', 'extra_col']
class FloatModelView(ModelView):
datamodel = SQLAInterface(FloatModel)
class ContactChartView(GroupByChartView):
datamodel = SQLAInterface(Contact)
chart_title = 'Grouped contacts'
label_columns = ContactModelView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group': 'contact_group.name',
'series': [(aggregate_count, 'contact_group')]
},
{
'group': 'gender',
'series': [(aggregate_count, 'gender')]
}
]
def pretty_month_year(value):
return calendar.month_name[value.month] + ' ' + str(value.year)
def pretty_year(value):
return str(value.year)
class ContactTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Contact)
chart_title = 'Grouped Birth contacts'
chart_type = 'AreaChart'
label_columns = ContactModelView.label_columns
definitions = [
{
'group': 'month_year',
'formatter': pretty_month_year,
'series': [(aggregate_count, 'contact_group')]
},
{
'group': 'year',
'formatter': pretty_year,
'series': [(aggregate_count, 'contact_group')]
}
]
db.create_all()
fill_gender()
appbuilder.add_view(PSView, "List PS", icon="fa-folder-open-o", category="Contacts", category_icon='fa-envelope')
appbuilder.add_view(GroupModelView, "List Groups", icon="fa-folder-open-o", category="Contacts",
category_icon='fa-envelope')
appbuilder.add_view(ContactModelView, "List Contacts", icon="fa-envelope", category="Contacts")
appbuilder.add_view(ContactModelView2, "List Contacts 2", icon="fa-envelope", category="Contacts")
appbuilder.add_view(FloatModelView, "List Float Model", icon="fa-envelope", category="Contacts")
appbuilder.add_separator("Contacts")
appbuilder.add_view(ContactChartView, "Contacts Chart", icon="fa-dashboard", category="Contacts")
appbuilder.add_view(ContactTimeChartView, "Contacts Birth Chart", icon="fa-dashboard", category="Contacts")
appbuilder.add_view(ProductManufacturerView, "List Manufacturer", icon="fa-folder-open-o", category="Products",
category_icon='fa-envelope')
appbuilder.add_view(ProductModelView, "List Models", icon="fa-envelope", category="Products")
appbuilder.add_view(ProductView, "List Products", icon="fa-envelope", category="Products")
appbuilder.security_cleanup()
|
[
"danielvazgaspar@gmail.com"
] |
danielvazgaspar@gmail.com
|
6e820d1d5f5954963c01bd964aa9c66f883d00d7
|
61dcd9b485bc5e6d07c4adf14f138eabaa9a23b5
|
/evennumberedexercise/Exercise6_24.py
|
2b58b016281f39f12c87f0eed9c9473c43981ad8
|
[] |
no_license
|
bong1915016/Introduction-to-Programming-Using-Python
|
d442d2252d13b731f6cd9c6356032e8b90aba9a1
|
f23e19963183aba83d96d9d8a9af5690771b62c2
|
refs/heads/master
| 2020-09-25T03:09:34.384693
| 2019-11-28T17:33:28
| 2019-11-28T17:33:28
| 225,904,132
| 1
| 0
| null | 2019-12-04T15:56:55
| 2019-12-04T15:56:54
| null |
UTF-8
|
Python
| false
| false
| 946
|
py
|
def main():
count = 1
i = 2
while count <= 100:
# Display each number in five positions
if isPrime(i) and isPalindrome(i):
print(i, end = " ")
if count % 10 == 0:
print()
count += 1 # Increase count
i += 1
def isPrime(number):
divisor = 2
while divisor <= number / 2:
if number % divisor == 0:
# If true, number is not prime
return False # number is not a prime
divisor += 1
return True # number is prime
# Return the reversal of an integer, i.e. reverse(456) returns 654
def isPalindrome(number):
return number == reverse(number)
# Return the reversal of an integer, i.e. reverse(456) returns 654
def reverse(number):
result = 0
while number != 0:
remainder = number % 10
result = result * 10 + remainder
number = number // 10
return result
main()
|
[
"38396747+timmy61109@users.noreply.github.com"
] |
38396747+timmy61109@users.noreply.github.com
|
25bc3be33edf11b325941a166313b77fcd34b28a
|
cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc
|
/Python Books/Machine-Learning-Python/9781118961742_all code files/06/simpleBagging.py
|
72c83810c832b413e58d7f6b9fbb92e5a85022e9
|
[] |
no_license
|
theGreenJedi/Path
|
df24fca355590efef0c6cb5c52e7216c6b5d2464
|
b5ed2805dbb046480929e49e550bfd8af5bb4d6f
|
refs/heads/master
| 2023-07-27T14:23:37.694546
| 2021-07-16T01:38:55
| 2021-07-16T01:38:55
| 87,686,563
| 8
| 2
| null | 2023-07-11T22:49:03
| 2017-04-09T05:57:30
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,973
|
py
|
__author__ = 'mike-bowles'
import numpy
import matplotlib.pyplot as plot
from sklearn import tree
from sklearn.tree import DecisionTreeRegressor
from math import floor
import random
#Build a simple data set with y = x + random
nPoints = 1000
#x values for plotting
xPlot = [(float(i)/float(nPoints) - 0.5) for i in range(nPoints + 1)]
#x needs to be list of lists.
x = [[s] for s in xPlot]
#y (labels) has random noise added to x-value
#set seed
random.seed(1)
y = [s + numpy.random.normal(scale=0.1) for s in xPlot]
#take fixed test set 30% of sample
nSample = int(nPoints * 0.30)
idxTest = random.sample(range(nPoints), nSample)
idxTest.sort()
idxTrain = [idx for idx in range(nPoints) if not(idx in idxTest)]
#Define test and training attribute and label sets
xTrain = [x[r] for r in idxTrain]
xTest = [x[r] for r in idxTest]
yTrain = [y[r] for r in idxTrain]
yTest = [y[r] for r in idxTest]
#train a series of models on random subsets of the training data
#collect the models in a list and check error of composite as list grows
#maximum number of models to generate
numTreesMax = 20
#tree depth - typically at the high end
treeDepth = 1
#initialize a list to hold models
modelList = []
predList = []
#number of samples to draw for stochastic bagging
nBagSamples = int(len(xTrain) * 0.5)
for iTrees in range(numTreesMax):
idxBag = []
for i in range(nBagSamples):
idxBag.append(random.choice(range(len(xTrain))))
xTrainBag = [xTrain[i] for i in idxBag]
yTrainBag = [yTrain[i] for i in idxBag]
modelList.append(DecisionTreeRegressor(max_depth=treeDepth))
modelList[-1].fit(xTrainBag, yTrainBag)
#make prediction with latest model and add to list of predictions
latestPrediction = modelList[-1].predict(xTest)
predList.append(list(latestPrediction))
#build cumulative prediction from first "n" models
mse = []
allPredictions = []
for iModels in range(len(modelList)):
#average first "iModels" of the predictions
prediction = []
for iPred in range(len(xTest)):
prediction.append(sum([predList[i][iPred] for i in range(iModels + 1)])/(iModels + 1))
allPredictions.append(prediction)
errors = [(yTest[i] - prediction[i]) for i in range(len(yTest))]
mse.append(sum([e * e for e in errors]) / len(yTest))
nModels = [i + 1 for i in range(len(modelList))]
plot.plot(nModels,mse)
plot.axis('tight')
plot.xlabel('Number of Models in Ensemble')
plot.ylabel('Mean Squared Error')
plot.ylim((0.0, max(mse)))
plot.show()
plotList = [0, 9, 19]
for iPlot in plotList:
plot.plot(xTest, allPredictions[iPlot])
plot.plot(xTest, yTest, linestyle="--")
plot.axis('tight')
plot.xlabel('x value')
plot.ylabel('Predictions')
plot.show()
print('Minimum MSE')
print(min(mse))
#With treeDepth = 1
#Minimum MSE
#0.0242960117899
#With treeDepth = 5
#Minimum MSE
#0.0118893503384
|
[
"GreenJedi@protonmail.com"
] |
GreenJedi@protonmail.com
|
b4524a2c6c4dec9afdd81e0de0712e0042927eb8
|
3950cb348a4a3ff6627d502dbdf4e576575df2fb
|
/.venv/Lib/site-packages/numba/np/ufunc/sigparse.py
|
a54df0e25537c1d62b56201d92da6306fa0fa4ba
|
[] |
no_license
|
Bdye15/Sample_Programs
|
a90d288c8f5434f46e1d266f005d01159d8f7927
|
08218b697db91e55e8e0c49664a0b0cb44b4ab93
|
refs/heads/main
| 2023-03-02T04:40:57.737097
| 2021-01-31T03:03:59
| 2021-01-31T03:03:59
| 328,053,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,909
|
py
|
import tokenize
import string
def parse_signature(sig):
'''Parse generalized ufunc signature.
NOTE: ',' (COMMA) is a delimiter; not separator.
This means trailing comma is legal.
'''
def stripws(s):
return ''.join(c for c in s if c not in string.whitespace)
def tokenizer(src):
def readline():
yield src
gen = readline()
return tokenize.generate_tokens(lambda: next(gen))
def parse(src):
tokgen = tokenizer(src)
while True:
tok = next(tokgen)
if tok[1] == '(':
symbols = []
while True:
tok = next(tokgen)
if tok[1] == ')':
break
elif tok[0] == tokenize.NAME:
symbols.append(tok[1])
elif tok[1] == ',':
continue
else:
raise ValueError('bad token in signature "%s"' % tok[1])
yield tuple(symbols)
tok = next(tokgen)
if tok[1] == ',':
continue
elif tokenize.ISEOF(tok[0]):
break
elif tokenize.ISEOF(tok[0]):
break
else:
raise ValueError('bad token in signature "%s"' % tok[1])
ins, _, outs = stripws(sig).partition('->')
inputs = list(parse(ins))
outputs = list(parse(outs))
# check that all output symbols are defined in the inputs
isym = set()
osym = set()
for grp in inputs:
isym |= set(grp)
for grp in outputs:
osym |= set(grp)
diff = osym.difference(isym)
if diff:
raise NameError('undefined output symbols: %s' % ','.join(sorted(diff)))
return inputs, outputs
|
[
"brady.dye@bison.howard.edu"
] |
brady.dye@bison.howard.edu
|
bc0ad0f7ec39d42a50304cbfb1480cfe527a4b4f
|
d4df738d2066c5222080e043a95a9b230673af81
|
/course_512/3.6_API/problem_3.6.4.py
|
fd758a474fa3c86d4e73a0aa1cafbcef08e81973
|
[] |
no_license
|
kazamari/Stepik
|
c2277f86db74b285e742854f1072897f371e87f5
|
bf0224a4c4e9322e481263f42451cd263b10724c
|
refs/heads/master
| 2021-05-04T19:06:02.110827
| 2018-03-26T09:06:09
| 2018-03-26T09:06:09
| 105,513,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,756
|
py
|
'''
В этой задаче вам необходимо воспользоваться API сайта artsy.net
API проекта Artsy предоставляет информацию о некоторых деятелях искусства, их работах, выставках.
В рамках данной задачи вам понадобятся сведения о деятелях искусства (назовем их, условно, художники).
Вам даны идентификаторы художников в базе Artsy.
Для каждого идентификатора получите информацию о имени художника и годе рождения.
Выведите имена художников в порядке неубывания года рождения. В случае если у художников одинаковый год рождения,
выведите их имена в лексикографическом порядке.
Работа с API Artsy
Полностью открытое и свободное API предоставляют совсем немногие проекты. В большинстве случаев, для получения доступа
к API необходимо зарегистрироваться в проекте, создать свое приложение, и получить уникальный ключ (или токен),
и в дальнейшем все запросы к API осуществляются при помощи этого ключа.
Чтобы начать работу с API проекта Artsy, вам необходимо пройти на стартовую страницу документации к API
https://developers.artsy.net/start и выполнить необходимые шаги, а именно зарегистрироваться, создать приложение,
и получить пару идентификаторов Client Id и Client Secret. Не публикуйте эти идентификаторы.
После этого необходимо получить токен доступа к API. На стартовой странице документации есть примеры того, как можно
выполнить запрос и как выглядит ответ сервера. Мы приведем пример запроса на Python.
import requests
import json
client_id = '...'
client_secret = '...'
# инициируем запрос на получение токена
r = requests.post("https://api.artsy.net/api/tokens/xapp_token",
data={
"client_id": client_id,
"client_secret": client_secret
})
# разбираем ответ сервера
j = json.loads(r.text)
# достаем токен
token = j["token"]
Теперь все готово для получения информации о художниках. На стартовой странице документации есть пример того, как
осуществляется запрос и как выглядит ответ сервера. Пример запроса на Python.
# создаем заголовок, содержащий наш токен
headers = {"X-Xapp-Token" : token}
# инициируем запрос с заголовком
r = requests.get("https://api.artsy.net/api/artists/4d8b92b34eb68a1b2c0003f4", headers=headers)
# разбираем ответ сервера
j = json.loads(r.text)
Примечание:
В качестве имени художника используется параметр sortable_name в кодировке UTF-8.
Пример входных данных:
4d8b92b34eb68a1b2c0003f4
537def3c139b21353f0006a6
4e2ed576477cc70001006f99
Пример выходных данных:
Abbott Mary
Warhol Andy
Abbas Hamra
Примечание для пользователей Windows
При открытии файла для записи на Windows по умолчанию используется кодировка CP1251, в то время как для записи имен на
сайте используется кодировка UTF-8, что может привести к ошибке при попытке записать в файл имя с необычными символами.
Вы можете использовать print, или аргумент encoding функции open.
'''
import requests
import json
client_id = '8e3ae03a8bf8050b30c9'
client_secret = 'd3a41eb062e10a397dbcab18b31b317f'
# инициируем запрос на получение токена
r = requests.post("https://api.artsy.net/api/tokens/xapp_token",
data={
"client_id": client_id,
"client_secret": client_secret
}, verify=False)
# разбираем ответ сервера
j = json.loads(r.text)
# достаем токен
token = j["token"]
# создаем заголовок, содержащий наш токен
headers = {"X-Xapp-Token": token}
artists = []
with open('dataset_24476_4.txt', 'r') as f:
for line in f:
# инициируем запрос с заголовком
res = requests.get("https://api.artsy.net/api/artists/{}".format(line.strip()), headers=headers, verify=False)
res.encoding = 'utf-8'
j = res.json()
artists.append((j['birthday'], j['sortable_name']))
with open('test_24476_4.txt', 'w', encoding="utf-8") as file:
for bd, name in sorted(artists):
file.write(name + '\n')
|
[
"maha_on@yahoo.com"
] |
maha_on@yahoo.com
|
c2471403aa320202deac3015c37cb0a0ac6e08a3
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/storage/v20190601/get_private_endpoint_connection.py
|
198ad11aa90ab756d8d0907c319a4050996605d9
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,983
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionResult',
'AwaitableGetPrivateEndpointConnectionResult',
'get_private_endpoint_connection',
'get_private_endpoint_connection_output',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
"""
The Private Endpoint Connection resource.
"""
def __init__(__self__, id=None, name=None, private_endpoint=None, private_link_service_connection_state=None, provisioning_state=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if private_endpoint and not isinstance(private_endpoint, dict):
raise TypeError("Expected argument 'private_endpoint' to be a dict")
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_link_service_connection_state and not isinstance(private_link_service_connection_state, dict):
raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
"""
The resource of private end point.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the private endpoint connection resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateEndpointConnectionResult(
id=self.id,
name=self.name,
private_endpoint=self.private_endpoint,
private_link_service_connection_state=self.private_link_service_connection_state,
provisioning_state=self.provisioning_state,
type=self.type)
def get_private_endpoint_connection(account_name: Optional[str] = None,
                                    private_endpoint_connection_name: Optional[str] = None,
                                    resource_group_name: Optional[str] = None,
                                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
    """
    The Private Endpoint Connection resource.

    :param str account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
    :param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
    :param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
    """
    # Map the snake_case Python arguments onto the provider's camelCase keys.
    __args__ = {
        'accountName': account_name,
        'privateEndpointConnectionName': private_endpoint_connection_name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider function and re-wrap the typed result so callers
    # can use it directly or await it.
    result = pulumi.runtime.invoke('azure-native:storage/v20190601:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
    return AwaitableGetPrivateEndpointConnectionResult(
        id=result.id,
        name=result.name,
        private_endpoint=result.private_endpoint,
        private_link_service_connection_state=result.private_link_service_connection_state,
        provisioning_state=result.provisioning_state,
        type=result.type)
@_utilities.lift_output_func(get_private_endpoint_connection)
def get_private_endpoint_connection_output(account_name: Optional[pulumi.Input[str]] = None,
                                           private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
                                           resource_group_name: Optional[pulumi.Input[str]] = None,
                                           opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionResult]:
    """
    Output-lifted variant of ``get_private_endpoint_connection``: accepts
    pulumi Inputs and returns an Output wrapping the same result type.

    :param str account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
    :param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
    :param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
    """
    # Body intentionally empty: the lift_output_func decorator supplies it.
    ...
|
[
"noreply@github.com"
] |
bpkgoud.noreply@github.com
|
b3cbcb1d5bbbf22e60bf51058c034822d2297c4c
|
f8da830331428a8e1bbeadf23345f79f1750bd98
|
/msgraph-cli-extensions/beta/search_beta/azext_search_beta/vendored_sdks/search/_configuration.py
|
76b296b982dacd86747b02dc4fa3d3ca51ea1334
|
[
"MIT"
] |
permissive
|
ezkemboi/msgraph-cli
|
e023e1b7589461a738e42cbad691d9a0216b0779
|
2ceeb27acabf7cfa219c8a20238d8c7411b9e782
|
refs/heads/main
| 2023-02-12T13:45:03.402672
| 2021-01-07T11:33:54
| 2021-01-07T11:33:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,605
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
VERSION = "unknown"
class SearchConfiguration(Configuration):
    """Configuration for Search.

    Every parameter used to build this instance is kept as an instance
    attribute so the client pipeline can read it later.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param top: Show only the first n items.
    :type top: int
    :param skip: Skip the first n items.
    :type skip: int
    :param search: Search items by search phrases.
    :type search: str
    :param filter: Filter items by property values.
    :type filter: str
    :param count: Include count of items.
    :type count: bool
    """

    def __init__(
        self,
        credential,  # type: "TokenCredential"
        top=None,  # type: Optional[int]
        skip=None,  # type: Optional[int]
        search=None,  # type: Optional[str]
        filter=None,  # type: Optional[str]
        count=None,  # type: Optional[bool]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        super(SearchConfiguration, self).__init__(**kwargs)

        # OData-style query options, stored for the operations to pick up.
        self.credential = credential
        self.top = top
        self.skip = skip
        self.search = search
        self.filter = filter
        self.count = count

        # Default ARM scope, extended with any caller-supplied scopes.
        self.credential_scopes = ['https://management.azure.com/.default']
        self.credential_scopes.extend(kwargs.pop('credential_scopes', []))
        kwargs.setdefault('sdk_moniker', 'search/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Each pipeline policy can be overridden via kwargs; otherwise the
        # stock implementation is built from those same kwargs.
        policy_defaults = (
            ('user_agent_policy', policies.UserAgentPolicy),
            ('headers_policy', policies.HeadersPolicy),
            ('proxy_policy', policies.ProxyPolicy),
            ('logging_policy', policies.NetworkTraceLoggingPolicy),
            ('http_logging_policy', ARMHttpLoggingPolicy),
            ('retry_policy', policies.RetryPolicy),
            ('custom_hook_policy', policies.CustomHookPolicy),
            ('redirect_policy', policies.RedirectPolicy),
        )
        for attr_name, policy_cls in policy_defaults:
            setattr(self, attr_name, kwargs.get(attr_name) or policy_cls(**kwargs))
        # Authentication is special-cased: only defaulted when a credential exists.
        self.authentication_policy = kwargs.get('authentication_policy')
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
|
[
"japhethobalak@gmail.com"
] |
japhethobalak@gmail.com
|
4b2fa6673d63d5e719510a8281c35d5055a55f66
|
b3d552675b36cb88a1388fcfc531e497ad7cbee9
|
/qfpython/apps/news/templatetags/news_filters.py
|
3a666825994e57a123163079c2f8ecd8013170d7
|
[
"LicenseRef-scancode-mulanpsl-1.0-en",
"MulanPSL-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
gaohj/1902_django
|
3cea1f0935fd983f25c6fd832b103ac5165a2e30
|
822af7b42120c6edc699bf97c800887ff84f5621
|
refs/heads/master
| 2022-12-11T10:02:50.233398
| 2019-11-26T08:33:38
| 2019-11-26T08:33:38
| 209,241,390
| 2
| 0
| null | 2022-12-08T07:28:24
| 2019-09-18T07:05:48
|
Python
|
UTF-8
|
Python
| false
| false
| 957
|
py
|
from datetime import datetime
from django import template
from django.utils.timezone import now as now_func,localtime
# Filter registry for this template-tag module; the @register.filter
# decorators below attach the filters defined in this file to it.
register = template.Library()
@register.filter
def time_since(value):
    """Humanise an aware datetime relative to now.

    Returns '刚刚' under a minute, 'N分钟前' under an hour, 'N小时前' under a
    day, 'N天前' under ~30 days, otherwise an absolute 'Y/m/d H:M' string.
    Non-datetime input is returned unchanged.
    """
    if not isinstance(value, datetime):
        return value
    now = now_func()
    seconds = (now - value).total_seconds()
    if seconds < 60:
        return '刚刚'
    elif 60 <= seconds < 60 * 60:
        minutes = int(seconds / 60)
        return '%s分钟前' % minutes
    elif 60 * 60 <= seconds < 60 * 60 * 24:
        hours = int(seconds / 3600)
        return '%s小时前' % hours
    elif 60 * 60 * 24 <= seconds < 60 * 60 * 24 * 30:
        # BUG FIX: the original computed int(timestamp/3600*24), i.e.
        # hours * 24 (2 days would print as "1152天前"); a day count needs
        # division by the full 86400-second day.
        days = int(seconds / (3600 * 24))
        return '%s天前' % days
    else:
        return value.strftime('%Y/%m/%d %H:%M')
@register.filter
def time_format(value):
    """Format an aware datetime in local time as 'Y/m/d H:M:S'; pass non-datetimes through."""
    if isinstance(value, datetime):
        return localtime(value).strftime('%Y/%m/%d %H:%M:%S')
    return value
|
[
"gaohj@163.com"
] |
gaohj@163.com
|
156130cd7d52ce78d3ffe0cfb0f1316f7548cdbf
|
d125c002a6447c3f14022b786b07712a7f5b4974
|
/tests/functional/intfunc/math/test_ceil_01.py
|
97611af9f2cd0d2d47f57bfbf85d1844845159dc
|
[
"MIT"
] |
permissive
|
FirebirdSQL/firebird-qa
|
89d5b0035071f9f69d1c869997afff60c005fca9
|
cae18186f8c31511a7f68248b20f03be2f0b97c6
|
refs/heads/master
| 2023-08-03T02:14:36.302876
| 2023-07-31T23:02:56
| 2023-07-31T23:02:56
| 295,681,819
| 3
| 2
|
MIT
| 2023-06-16T10:05:55
| 2020-09-15T09:41:22
|
Python
|
UTF-8
|
Python
| false
| false
| 707
|
py
|
#coding:utf-8
"""
ID: intfunc.math.ceil
TITLE: CEIL( <number>)
DESCRIPTION:
Returns a value representing the smallest integer that is greater than or equal to the input argument.
FBTEST: functional.intfunc.math.ceil_01
"""
import pytest
from firebird.qa import *
# Fresh empty test database, created per-test by the firebird-qa harness.
db = db_factory()

# Exercise CEIL() with a positive and a negative fractional argument.
test_script = """select CEIL( 2.1) from rdb$database;
select CEIL( -2.1) from rdb$database;
"""

# Run the script through isql against the 'db' fixture above.
act = isql_act('db', test_script)

# Golden isql output: CEIL(2.1) -> 3, CEIL(-2.1) -> -2.
# Whitespace is normalised via act.clean_* before comparison in test_1.
expected_stdout = """
CEIL
=====================
3
CEIL
=====================
-2
"""
@pytest.mark.version('>=3')
def test_1(act: Action):
    """Execute the CEIL script and compare normalised output against the golden text."""
    act.expected_stdout = expected_stdout
    act.execute()
    assert act.clean_expected_stdout == act.clean_stdout
|
[
"pcisar@ibphoenix.cz"
] |
pcisar@ibphoenix.cz
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.